Removed unwanted line in M4READER_Amr.h

Upload VSS core files for Honeycomb

Change-Id: I61206ae2398ce8ac544c6fb01a76fe8917bce75b
diff --git a/libvideoeditor/vss/src/Android.mk b/libvideoeditor/vss/src/Android.mk
new file mode 100755
index 0000000..ae0778d
--- /dev/null
+++ b/libvideoeditor/vss/src/Android.mk
@@ -0,0 +1,92 @@
+#
+# Copyright (C) 2011 NXP Software
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+#
+# libvss
+#
+include $(CLEAR_VARS)
+
+LOCAL_MODULE:= libvideoeditor_core
+
+LOCAL_SRC_FILES:=          \
+      M4PTO3GPP_API.c \
+      M4PTO3GPP_VideoPreProcessing.c \
+      M4VIFI_xVSS_RGB565toYUV420.c \
+      M4xVSS_API.c \
+      M4xVSS_internal.c \
+      M4VSS3GPP_AudioMixing.c \
+      M4VSS3GPP_Clip.c \
+      M4VSS3GPP_ClipAnalysis.c \
+      M4VSS3GPP_Codecs.c \
+      M4VSS3GPP_Edit.c \
+      M4VSS3GPP_EditAudio.c \
+      M4VSS3GPP_EditVideo.c \
+      M4VSS3GPP_MediaAndCodecSubscription.c \
+      glvaudioresampler.c \
+      M4ChannelCoverter.c \
+      M4VD_EXTERNAL_BitstreamParser.c \
+      M4VD_EXTERNAL_Interface.c \
+      M4AIR_API.c \
+      M4READER_Pcm.c \
+      M4PCMR_CoreReader.c \
+      M4AD_Null.c \
+      M4AMRR_CoreReader.c \
+      M4READER_Amr.c \
+      M4VD_Tools.c
+
+
+LOCAL_MODULE_TAGS := development
+
+LOCAL_SHARED_LIBRARIES := libcutils libutils
+
+LOCAL_STATIC_LIBRARIES := \
+    libvideoeditor_osal \
+    libvideoeditor_3gpwriter \
+    libvideoeditor_mcs \
+    libvideoeditor_videofilters \
+    libvideoeditor_stagefrightshells
+
+LOCAL_C_INCLUDES += \
+    $(TOP)/frameworks/media/libvideoeditor/osal/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/mcs/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/common/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/stagefrightshells/inc
+
+ifeq ($(TARGET_SIMULATOR),true)
+else
+    LOCAL_SHARED_LIBRARIES += libdl
+endif
+
+# All of the shared libraries we link against.
+LOCAL_LDLIBS := \
+    -lpthread -ldl
+
+LOCAL_CFLAGS += -Wno-multichar \
+    -DM4xVSS_RESERVED_MOOV_DISK_SPACEno \
+    -DDECODE_GIF_ON_SAVING
+
+# Don't prelink this library.  For more efficient code, you may want
+# to add this library to the prelink map and set this to true.
+LOCAL_PRELINK_MODULE := false
+
+
+
+include $(BUILD_STATIC_LIBRARY)
+
diff --git a/libvideoeditor/vss/src/M4AD_Null.c b/libvideoeditor/vss/src/M4AD_Null.c
new file mode 100755
index 0000000..faac43b
--- /dev/null
+++ b/libvideoeditor/vss/src/M4AD_Null.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file    M4AD_Null.c
+ * @brief   Implementation of the audio decoder public interface
+ * @note    This file implements a "null" audio decoder, i.e. a decoder
+ *          that does nothing except pass the AUs from the reader straight through
+*************************************************************************
+*/
+#include "M4OSA_Debug.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Debug.h"
+#include "M4TOOL_VersionInfo.h"
+#include "M4AD_Common.h"
+#include "M4AD_Null.h"
+
+#define M4AD_FORCE_16BITS
+
+/**
+ ************************************************************************
+ * NULL Audio Decoder version information
+ ************************************************************************
+*/
+/* CHANGE_VERSION_HERE */
+#define M4AD_NULL_MAJOR    1
+#define M4AD_NULL_MINOR    1
+#define M4AD_NULL_REVISION 4
+
+/**
+ ************************************************************************
+ * structure    M4AD_NullContext
+ * @brief        Internal null decoder context
+ ************************************************************************
+*/
+typedef struct
+{
+    /**< Pointer to the stream handler provided by the user */
+    M4_AudioStreamHandler*    m_pAudioStreamhandler;
+} M4AD_NullContext;
+
+
+/**
+ ************************************************************************
+ * Null audio decoder function definitions
+ ************************************************************************
+*/
+
+/**
+ ************************************************************************
+ * @brief   Creates an instance of the null decoder
+ * @note    Allocates the context
+ *
+ * @param    pContext:        (OUT)    Context of the decoder
+ * @param    pStreamHandler: (IN)    Pointer to an audio stream description
+ * @param    pUserData:        (IN)    Pointer to User data
+ *
+ * @return    M4NO_ERROR              there is no error
+ * @return    M4ERR_STATE             State automaton is not applied
+ * @return    M4ERR_ALLOC             a memory allocation has failed
+ * @return    M4ERR_PARAMETER         at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR    M4AD_NULL_create(  M4AD_Context* pContext,
+                                M4_AudioStreamHandler *pStreamHandler,
+                                void* pUserData)
+{
+    M4AD_NullContext* pC;
+
+    M4OSA_DEBUG_IF1((pContext == 0), M4ERR_PARAMETER,
+                "M4AD_NULL_create: invalid context pointer");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+                "M4AD_NULL_create: invalid pointer pStreamHandler");
+
+    pC = (M4AD_NullContext*)M4OSA_malloc(sizeof(M4AD_NullContext),
+                 M4DECODER_AUDIO, (M4OSA_Char *)"M4AD_NullContext");
+    if (pC == (M4AD_NullContext*)0)
+    {
+        M4OSA_TRACE1_0("Can not allocate null decoder context");
+        return M4ERR_ALLOC;
+    }
+
+    *pContext = pC;
+
+    pC->m_pAudioStreamhandler = pStreamHandler;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief    Destroys the instance of the null decoder
+ * @note     After this call the context is invalid
+ *
+ * @param    context:    (IN)    Context of the decoder
+ *
+ * @return   M4NO_ERROR            There is no error
+ * @return   M4ERR_PARAMETER     The context is invalid (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR    M4AD_NULL_destroy(M4AD_Context context)
+{
+    M4AD_NullContext* pC = (M4AD_NullContext*)context;
+
+    M4OSA_DEBUG_IF1((context == M4OSA_NULL), M4ERR_PARAMETER, "M4AD_NULL_destroy: invalid context");
+
+    M4OSA_free((M4OSA_MemAddr32)pC);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Simply output the given audio data
+ * @note
+ *
+ * @param   context:          (IN)    Context of the decoder
+ * @param   pInputBuffer:     (IN/OUT)Input Data buffer. It contains at least one audio frame.
+ *                                    The size of the buffer must be updated inside the function
+ *                                    to reflect the size of the actually decoded data.
+ *                                    (e.g. the first frame in pInputBuffer)
+ * @param   pDecodedPCMBuffer: (OUT)  Output PCM buffer (decoded data).
+ * @param   jumping:           (IN)   M4OSA_TRUE if a jump was just done, M4OSA_FALSE otherwise.
+ * @return    M4NO_ERROR              there is no error
+ * @return    M4ERR_PARAMETER         at least one parameter is not properly set
+ ************************************************************************
+*/
+M4OSA_ERR    M4AD_NULL_step(M4AD_Context context, M4AD_Buffer *pInputBuffer,
+                            M4AD_Buffer *pDecodedPCMBuffer, M4OSA_Bool jumping)
+{
+    M4AD_NullContext* pC = (M4AD_NullContext*)context;
+
+    /*The VPS sends a zero buffer at the end*/
+    if (0 == pInputBuffer->m_bufferSize)
+    {
+        return M4WAR_NO_MORE_AU;
+    }
+
+    if (pInputBuffer->m_bufferSize > pDecodedPCMBuffer->m_bufferSize)
+    {
+        return M4ERR_PARAMETER;
+    }
+#ifdef M4AD_FORCE_16BITS
+    /*if read samples are 8 bits, complete them to 16 bits*/
+    if (pC->m_pAudioStreamhandler->m_byteSampleSize == 1)
+    {
+        M4OSA_UInt32 i;
+        M4OSA_Int16  val;
+
+        for (i = 0; i < pInputBuffer->m_bufferSize; i++)
+        {
+            val = (M4OSA_Int16)((M4OSA_UInt8)(pInputBuffer->m_dataAddress[i]) - 128);
+
+            pDecodedPCMBuffer->m_dataAddress[i*2]   = (M4OSA_Int8)(val>>8);
+            pDecodedPCMBuffer->m_dataAddress[i*2+1] = (M4OSA_Int8)(val&0x00ff);
+        }
+    }
+    else
+    {
+        M4OSA_memcpy(pDecodedPCMBuffer->m_dataAddress, pInputBuffer->m_dataAddress,
+                    pInputBuffer->m_bufferSize );
+    }
+#else /*M4AD_FORCE_16BITS*/
+    M4OSA_memcpy(pDecodedPCMBuffer->m_dataAddress, pInputBuffer->m_dataAddress,
+                    pInputBuffer->m_bufferSize );
+#endif /*M4AD_FORCE_16BITS*/
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Gets the decoder version
+ * @note    The version is given in a M4_VersionInfo structure
+ *
+ * @param   pValue:     (OUT)       Pointer to the version structure
+ *
+ * @return  M4NO_ERROR              there is no error
+ * @return  M4ERR_PARAMETER         pVersionInfo pointer is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR    M4AD_NULL_getVersion(M4_VersionInfo* pVersionInfo)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_DEBUG_IF1((pVersionInfo == 0), M4ERR_PARAMETER,
+        "M4AD_NULL_getVersion: invalid pointer pVersionInfo");
+
+    /* Up until now, the null decoder version is not available */
+
+    /* CHANGE_VERSION_HERE */
+    pVersionInfo->m_major        = M4AD_NULL_MAJOR;      /*major version of the component*/
+    pVersionInfo->m_minor        = M4AD_NULL_MINOR;      /*minor version of the component*/
+    pVersionInfo->m_revision    = M4AD_NULL_REVISION;    /*revision version of the component*/
+    pVersionInfo->m_structSize=sizeof(M4_VersionInfo);
+
+    return err;
+}
+
+
+/**
+ ************************************************************************
+ * getInterface function definition of the null audio decoder
+ ************************************************************************
+*/
+
+/**
+ ************************************************************************
+ * @brief Retrieves the interface implemented by the decoder
+ * @param pDecoderType        : pointer on an M4AD_Type (allocated by the caller)
+ *                              that will be filled with the decoder type supported by
+ *                              this decoder
+ * @param pDecoderInterface   : address of a pointer that will be set to the interface
+ *                              implemented by this decoder. The interface is a structure
+ *                              allocated by the function and must be freed by the
+ *                              caller.
+ *
+ * @return    M4NO_ERROR  if OK
+ * @return    M4ERR_ALLOC if allocation failed
+ ************************************************************************
+*/
+M4OSA_ERR M4AD_NULL_getInterface( M4AD_Type *pDecoderType, M4AD_Interface **pDecoderInterface)
+{
+    *pDecoderInterface = (  M4AD_Interface*)M4OSA_malloc( sizeof(M4AD_Interface),
+                            M4DECODER_AUDIO, (M4OSA_Char *)"M4AD_Interface" );
+    if (M4OSA_NULL == *pDecoderInterface)
+    {
+        return M4ERR_ALLOC;
+    }
+
+    *pDecoderType = M4AD_kTypePCM;
+
+    (*pDecoderInterface)->m_pFctCreateAudioDec       = M4AD_NULL_create;
+    (*pDecoderInterface)->m_pFctDestroyAudioDec      = M4AD_NULL_destroy;
+    (*pDecoderInterface)->m_pFctStepAudioDec         = M4AD_NULL_step;
+    (*pDecoderInterface)->m_pFctGetVersionAudioDec   = M4AD_NULL_getVersion;
+    (*pDecoderInterface)->m_pFctStartAudioDec        = M4OSA_NULL;
+    (*pDecoderInterface)->m_pFctResetAudioDec        = M4OSA_NULL;
+    (*pDecoderInterface)->m_pFctSetOptionAudioDec    = M4OSA_NULL;
+    (*pDecoderInterface)->m_pFctGetOptionAudioDec    = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
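A minimal caller sketch of the interface filled in above, assuming only the declarations
visible in this file (the helper name, the buffer setup and the surrounding reader logic
are illustrative, not part of the patch):

    /* Hypothetical caller: fetch the null decoder interface and "decode" one AU. */
    static M4OSA_ERR decodeOneAu(M4_AudioStreamHandler *pStreamHandler,
                                 M4AD_Buffer *pAccessUnit, M4AD_Buffer *pPcmOut)
    {
        M4AD_Type       decoderType;
        M4AD_Interface *pDecoderInterface = M4OSA_NULL;
        M4AD_Context    decoderContext    = M4OSA_NULL;
        M4OSA_ERR       err;

        /* The interface structure is allocated by getInterface and freed by the caller. */
        err = M4AD_NULL_getInterface(&decoderType, &pDecoderInterface);
        if (M4NO_ERROR != err)
        {
            return err;
        }

        err = pDecoderInterface->m_pFctCreateAudioDec(&decoderContext, pStreamHandler,
                                                      M4OSA_NULL);
        if (M4NO_ERROR == err)
        {
            /* The null decoder simply copies the AU (widening 8-bit samples to 16 bits). */
            err = pDecoderInterface->m_pFctStepAudioDec(decoderContext, pAccessUnit,
                                                        pPcmOut, M4OSA_FALSE);
            pDecoderInterface->m_pFctDestroyAudioDec(decoderContext);
        }

        M4OSA_free((M4OSA_MemAddr32)pDecoderInterface);
        return err;
    }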
diff --git a/libvideoeditor/vss/src/M4AIR_API.c b/libvideoeditor/vss/src/M4AIR_API.c
new file mode 100755
index 0000000..6a3546d
--- /dev/null
+++ b/libvideoeditor/vss/src/M4AIR_API.c
@@ -0,0 +1,969 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file   M4AIR_API.c
+ * @brief  Area of Interest Resizer  API
+ *************************************************************************
+ */
+
+#define M4AIR_YUV420_FORMAT_SUPPORTED
+#define M4AIR_YUV420A_FORMAT_SUPPORTED
+
+/************************* COMPILATION CHECKS ***************************/
+#ifndef M4AIR_YUV420_FORMAT_SUPPORTED
+#ifndef M4AIR_BGR565_FORMAT_SUPPORTED
+#ifndef M4AIR_RGB565_FORMAT_SUPPORTED
+#ifndef M4AIR_BGR888_FORMAT_SUPPORTED
+#ifndef M4AIR_RGB888_FORMAT_SUPPORTED
+#ifndef M4AIR_JPG_FORMAT_SUPPORTED
+
+#error "Please define at least one input format for the AIR component"
+
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+
+/******************************* INCLUDES *******************************/
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Mutex.h"
+#include "M4OSA_Memory.h"
+#include "M4VIFI_FiltersAPI.h"
+#include "M4AIR_API.h"
+
+/************************ M4AIR INTERNAL TYPES DEFINITIONS ***********************/
+
+/**
+ ******************************************************************************
+ * enum         M4AIR_States
+ * @brief       The following enumeration defines the internal states of the AIR.
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4AIR_kCreated,        /**< State after M4AIR_create has been called */
+    M4AIR_kConfigured      /**< State after M4AIR_configure has been called */
+}M4AIR_States;
+
+
+/**
+ ******************************************************************************
+ * struct         M4AIR_InternalContext
+ * @brief         The following structure is the internal context of the AIR.
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4AIR_States            m_state;        /**< Internal state */
+    M4AIR_InputFormatType   m_inputFormat;  /**< Input format like YUV420Planar,
+                                                 RGB565, JPG, etc ... */
+    M4AIR_Params            m_params;       /**< Current input parameters of the processing */
+    M4OSA_UInt32            u32_x_inc[4];   /**< Ratio between input and output width for YUV */
+    M4OSA_UInt32            u32_y_inc[4];   /**< Ratio between input and output height for YUV */
+    M4OSA_UInt32            u32_x_accum_start[4];    /**< horizontal initial accumulator value */
+    M4OSA_UInt32            u32_y_accum_start[4];    /**< Vertical initial accumulator value */
+    M4OSA_UInt32            u32_x_accum[4]; /**< save of horizontal accumulator value */
+    M4OSA_UInt32            u32_y_accum[4]; /**< save of vertical accumulator value */
+    M4OSA_UInt8*            pu8_data_in[4]; /**< Save of input plane pointers
+                                                             in case of stripe mode */
+    M4OSA_UInt32            m_procRows;     /**< Number of processed rows,
+                                                     used in stripe mode only */
+    M4OSA_Bool                m_bOnlyCopy;  /**< Flag to know if we just perform a copy
+                                                        or a bilinear interpolation */
+    M4OSA_Bool                m_bFlipX;     /**< Depends on output orientation; used during
+                                                processing to revert processing order in X
+                                                coordinates */
+    M4OSA_Bool                m_bFlipY;     /**< Depends on output orientation; used during
+                                                processing to revert processing order in Y
+                                                coordinates */
+    M4OSA_Bool                m_bRevertXY;  /**< Depends on output orientation; used during
+                                                processing to revert X and Y processing order
+                                                 (+-90° rotation) */
+}M4AIR_InternalContext;
+
+/********************************* MACROS *******************************/
+#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer)\
+     if ((pointer) == M4OSA_NULL) return ((M4OSA_ERR)(retval));
+
+
+/********************** M4AIR PUBLIC API IMPLEMENTATION ********************/
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat)
+ * @brief    This function initializes an instance of the AIR.
+ * @param    pContext:      (IN/OUT) Address of the context to create
+ * @param    inputFormat:   (IN) input format type.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only). Invalid formatType
+ * @return    M4ERR_ALLOC: No more memory is available
+ ******************************************************************************
+ */
+M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat)
+{
+    M4OSA_ERR err = M4NO_ERROR ;
+    M4AIR_InternalContext* pC = M4OSA_NULL ;
+
+    /* Check that the address on the context is not NULL */
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+    *pContext = M4OSA_NULL ;
+
+    /* Internal Context creation */
+    pC = (M4AIR_InternalContext*)M4OSA_malloc(sizeof(M4AIR_InternalContext),
+         M4AIR,(M4OSA_Char *)"AIR internal context") ;
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_ALLOC, pC) ;
+
+
+    /* Check if the input format is supported */
+    switch(inputFormat)
+    {
+#ifdef M4AIR_YUV420_FORMAT_SUPPORTED
+        case M4AIR_kYUV420P:
+        break ;
+#endif
+#ifdef M4AIR_YUV420A_FORMAT_SUPPORTED
+        case M4AIR_kYUV420AP:
+        break ;
+#endif
+        default:
+            err = M4ERR_AIR_FORMAT_NOT_SUPPORTED;
+            goto M4AIR_create_cleanup ;
+    }
+
+    /**< Save input format and update state */
+    pC->m_inputFormat = inputFormat;
+    pC->m_state = M4AIR_kCreated;
+
+    /* Return the context to the caller */
+    *pContext = pC ;
+
+    return M4NO_ERROR ;
+
+M4AIR_create_cleanup:
+    /* Error management : we destroy the context if needed */
+    if(M4OSA_NULL != pC)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC) ;
+    }
+
+    *pContext = M4OSA_NULL ;
+
+    return err ;
+}
+
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
+ * @brief    This function destroys an instance of the AIR component
+ * @param    pContext:    (IN) Context identifying the instance to destroy
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ * @return    M4ERR_STATE: Internal state is incompatible with this function call.
+ ******************************************************************************
+ */
+M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
+{
+    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
+
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+    /**< Check state */
+    if((M4AIR_kCreated != pC->m_state)&&(M4AIR_kConfigured != pC->m_state))
+    {
+        return M4ERR_STATE;
+    }
+    M4OSA_free((M4OSA_MemAddr32)pC) ;
+
+    return M4NO_ERROR ;
+
+}
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
+ * @brief   This function will configure the AIR.
+ * @note    It will set the input and output coordinates and sizes,
+ *          and indicate whether we will proceed in stripe mode or not.
+ *          In case an M4AIR_get in stripe mode was ongoing, it will cancel this previous
+ *          processing and reset the get process.
+ * @param    pContext:                (IN) Context identifying the instance
+ * @param    pParams->m_bOutputStripe:(IN) Stripe mode.
+ * @param    pParams->m_inputCoord:    (IN) X,Y coordinates of the first valid pixel in input.
+ * @param    pParams->m_inputSize:    (IN) input ROI size.
+ * @param    pParams->m_outputSize:    (IN) output size.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_ALLOC: No more memory space to add a new effect.
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ * @return    M4ERR_AIR_FORMAT_NOT_SUPPORTED: the requested input format is not supported.
+ ******************************************************************************
+ */
+M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
+{
+    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
+    M4OSA_UInt32    i,u32_width_in, u32_width_out, u32_height_in, u32_height_out;
+    M4OSA_UInt32    nb_planes;
+
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+    if(M4AIR_kYUV420AP == pC->m_inputFormat)
+    {
+        nb_planes = 4;
+    }
+    else
+    {
+        nb_planes = 3;
+    }
+
+    /**< Check state */
+    if((M4AIR_kCreated != pC->m_state)&&(M4AIR_kConfigured != pC->m_state))
+    {
+        return M4ERR_STATE;
+    }
+
+    /** Save parameters */
+    pC->m_params = *pParams;
+
+    /* Check that the input and output width and height are even */
+    if( ((pC->m_params.m_inputSize.m_height)&0x1) ||
+        ((pC->m_params.m_outputSize.m_height)&0x1))
+    {
+        return M4ERR_AIR_ILLEGAL_FRAME_SIZE;
+    }
+
+    if( ((pC->m_params.m_inputSize.m_width)&0x1) ||
+        ((pC->m_params.m_outputSize.m_width)&0x1))
+    {
+        return M4ERR_AIR_ILLEGAL_FRAME_SIZE;
+    }
+    if(((pC->m_params.m_inputSize.m_width) == (pC->m_params.m_outputSize.m_width))
+        &&((pC->m_params.m_inputSize.m_height) == (pC->m_params.m_outputSize.m_height)))
+    {
+        /**< No resize in this case, we will just copy input in output */
+        pC->m_bOnlyCopy = M4OSA_TRUE;
+    }
+    else
+    {
+        pC->m_bOnlyCopy = M4OSA_FALSE;
+
+        /**< Initialize internal variables used for resize filter */
+        for(i=0;i<nb_planes;i++)
+        {
+
+            u32_width_in = ((i==0)||(i==3))?pC->m_params.m_inputSize.m_width:\
+                (pC->m_params.m_inputSize.m_width+1)>>1;
+            u32_height_in = ((i==0)||(i==3))?pC->m_params.m_inputSize.m_height:\
+                (pC->m_params.m_inputSize.m_height+1)>>1;
+            u32_width_out = ((i==0)||(i==3))?pC->m_params.m_outputSize.m_width:\
+                (pC->m_params.m_outputSize.m_width+1)>>1;
+            u32_height_out = ((i==0)||(i==3))?pC->m_params.m_outputSize.m_height:\
+                (pC->m_params.m_outputSize.m_height+1)>>1;
+
+                /* Compute horizontal ratio between src and destination width.*/
+                if (u32_width_out >= u32_width_in)
+                {
+                    pC->u32_x_inc[i]   = ((u32_width_in-1) * 0x10000) / (u32_width_out-1);
+                }
+                else
+                {
+                    pC->u32_x_inc[i]   = (u32_width_in * 0x10000) / (u32_width_out);
+                }
+
+                /* Compute vertical ratio between src and destination height.*/
+                if (u32_height_out >= u32_height_in)
+                {
+                    pC->u32_y_inc[i]   = ((u32_height_in - 1) * 0x10000) / (u32_height_out-1);
+                }
+                else
+                {
+                    pC->u32_y_inc[i] = (u32_height_in * 0x10000) / (u32_height_out);
+                }
+
+                /*
+                Calculate initial accumulator value : u32_y_accum_start.
+                u32_y_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+                */
+                if (pC->u32_y_inc[i] >= 0x10000)
+                {
+                    /*
+                        Keep the fractional part, assuming that the integer part is coded
+                        on the 16 high bits and the fractional part on the 15 low bits
+                    */
+                    pC->u32_y_accum_start[i] = pC->u32_y_inc[i] & 0xffff;
+
+                    if (!pC->u32_y_accum_start[i])
+                    {
+                        pC->u32_y_accum_start[i] = 0x10000;
+                    }
+
+                    pC->u32_y_accum_start[i] >>= 1;
+                }
+                else
+                {
+                    pC->u32_y_accum_start[i] = 0;
+                }
+                /**< Take into account that the Y coordinate can be odd;
+                    in this case we have to add a 0.5 offset
+                    for the U and V planes as they are sub-sampled by 2 vs Y */
+                if((pC->m_params.m_inputCoord.m_y&0x1)&&((i==1)||(i==2)))
+                {
+                    pC->u32_y_accum_start[i] += 0x8000;
+                }
+
+                /*
+                    Calculate initial accumulator value : u32_x_accum_start.
+                    u32_x_accum_start is coded on 15 bits, and represents a value between
+                    0 and 0.5
+                */
+
+                if (pC->u32_x_inc[i] >= 0x10000)
+                {
+                    pC->u32_x_accum_start[i] = pC->u32_x_inc[i] & 0xffff;
+
+                    if (!pC->u32_x_accum_start[i])
+                    {
+                        pC->u32_x_accum_start[i] = 0x10000;
+                    }
+
+                    pC->u32_x_accum_start[i] >>= 1;
+                }
+                else
+                {
+                    pC->u32_x_accum_start[i] = 0;
+                }
+                /**< Take into account that the X coordinate can be odd;
+                    in this case we have to add a 0.5 offset
+                    for the U and V planes as they are sub-sampled by 2 vs Y */
+                if((pC->m_params.m_inputCoord.m_x&0x1)&&((i==1)||(i==2)))
+                {
+                    pC->u32_x_accum_start[i] += 0x8000;
+                }
+        }
+    }
+
+    /**< Reset variable used for stripe mode */
+    pC->m_procRows = 0;
+
+    /**< Initialize var for X/Y processing order according to orientation */
+    pC->m_bFlipX = M4OSA_FALSE;
+    pC->m_bFlipY = M4OSA_FALSE;
+    pC->m_bRevertXY = M4OSA_FALSE;
+    switch(pParams->m_outputOrientation)
+    {
+        case M4COMMON_kOrientationTopLeft:
+            break;
+        case M4COMMON_kOrientationTopRight:
+            pC->m_bFlipX = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationBottomRight:
+            pC->m_bFlipX = M4OSA_TRUE;
+            pC->m_bFlipY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationBottomLeft:
+            pC->m_bFlipY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationLeftTop:
+            pC->m_bRevertXY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationRightTop:
+            pC->m_bRevertXY = M4OSA_TRUE;
+            pC->m_bFlipY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationRightBottom:
+            pC->m_bRevertXY = M4OSA_TRUE;
+            pC->m_bFlipX = M4OSA_TRUE;
+            pC->m_bFlipY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationLeftBottom:
+            pC->m_bRevertXY = M4OSA_TRUE;
+            pC->m_bFlipX = M4OSA_TRUE;
+            break;
+        default:
+        return M4ERR_PARAMETER;
+    }
+    /**< Update state */
+    pC->m_state = M4AIR_kConfigured;
+
+    return M4NO_ERROR ;
+}
+
+
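A stand-alone worked example of the 16.16 fixed-point increments set up in M4AIR_configure
above (the 640-to-320 downscale is an arbitrary illustration, not taken from the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned int u32_width_in  = 640;   /* assumed input ROI width  */
        unsigned int u32_width_out = 320;   /* assumed output width     */
        unsigned int u32_x_inc, u32_x_accum_start;

        /* Same computation as in M4AIR_configure. */
        if (u32_width_out >= u32_width_in)
            u32_x_inc = ((u32_width_in - 1) * 0x10000) / (u32_width_out - 1);
        else
            u32_x_inc = (u32_width_in * 0x10000) / u32_width_out;

        /* Initial accumulator: keep the fractional part, then halve it (0..0.5 range). */
        if (u32_x_inc >= 0x10000)
        {
            u32_x_accum_start = u32_x_inc & 0xffff;
            if (!u32_x_accum_start)
                u32_x_accum_start = 0x10000;
            u32_x_accum_start >>= 1;
        }
        else
        {
            u32_x_accum_start = 0;
        }

        /* Prints x_inc=0x20000 (a step of 2.0 input pixels per output pixel) and
           x_accum_start=0x8000 (an initial offset of half an input pixel). */
        printf("x_inc=0x%x x_accum_start=0x%x\n", u32_x_inc, u32_x_accum_start);
        return 0;
    }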
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
+ * @brief   This function will provide the requested resized area of interest according to
+ *          the settings provided in M4AIR_configure.
+ * @note    In case the input format type is JPEG, the input plane(s)
+ *          in pIn are not used. In normal mode, the dimensions specified in the output plane(s)
+ *          structure must be the same as those specified in M4AIR_configure. In stripe mode,
+ *          only the width will be the same; the height will be taken as the stripe height
+ *          (typically 16). In normal mode, this function is called once to get the full output
+ *          picture. In stripe mode, it is called for each stripe until the whole picture has
+ *          been retrieved, and the position of the output stripe in the output picture
+ *          is internally incremented at each step.
+ *          Any call to M4AIR_configure during the stripe process will reset it to the
+ *          beginning of the output picture.
+ * @param    pContext:    (IN) Context identifying the instance
+ * @param    pIn:            (IN) Plane structure containing input Plane(s).
+ * @param    pOut:        (IN/OUT)  Plane structure containing output Plane(s).
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_ALLOC: No more memory space to add a new effect.
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ ******************************************************************************
+ */
+M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
+{
+    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
+    M4OSA_UInt32 i,j,k,u32_x_frac,u32_y_frac,u32_x_accum,u32_y_accum,u32_shift;
+        M4OSA_UInt8    *pu8_data_in, *pu8_data_in_org, *pu8_data_in_tmp, *pu8_data_out;
+        M4OSA_UInt8    *pu8_src_top;
+        M4OSA_UInt8    *pu8_src_bottom;
+    M4OSA_UInt32    u32_temp_value;
+    M4OSA_Int32    i32_tmp_offset;
+    M4OSA_UInt32    nb_planes;
+
+
+
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+    /**< Check state */
+    if(M4AIR_kConfigured != pC->m_state)
+    {
+        return M4ERR_STATE;
+    }
+
+    if(M4AIR_kYUV420AP == pC->m_inputFormat)
+    {
+        nb_planes = 4;
+    }
+    else
+    {
+        nb_planes = 3;
+    }
+
+    /**< Loop on each Plane */
+    for(i=0;i<nb_planes;i++)
+    {
+
+         /* Set the working pointers at the beginning of the input/output data field */
+
+        u32_shift = ((i==0)||(i==3))?0:1; /**< Depend on Luma or Chroma */
+
+        if((M4OSA_FALSE == pC->m_params.m_bOutputStripe)\
+            ||((M4OSA_TRUE == pC->m_params.m_bOutputStripe)&&(0 == pC->m_procRows)))
+        {
+            /**< For input, take care about ROI */
+            pu8_data_in     = pIn[i].pac_data + pIn[i].u_topleft \
+                + (pC->m_params.m_inputCoord.m_x>>u32_shift)
+                        + (pC->m_params.m_inputCoord.m_y >> u32_shift) * pIn[i].u_stride;
+
+            /** Go at end of line/column in case X/Y scanning is flipped */
+            if(M4OSA_TRUE == pC->m_bFlipX)
+            {
+                pu8_data_in += ((pC->m_params.m_inputSize.m_width)>>u32_shift) -1 ;
+            }
+            if(M4OSA_TRUE == pC->m_bFlipY)
+            {
+                pu8_data_in += ((pC->m_params.m_inputSize.m_height>>u32_shift) -1)\
+                     * pIn[i].u_stride;
+            }
+
+            /**< Initialize accumulators in case we are using it (bilinear interpolation) */
+            if( M4OSA_FALSE == pC->m_bOnlyCopy)
+            {
+                pC->u32_x_accum[i] = pC->u32_x_accum_start[i];
+                pC->u32_y_accum[i] = pC->u32_y_accum_start[i];
+            }
+
+        }
+        else
+        {
+            /**< In case of stripe mode for other than first stripe, we need to recover input
+                 pointer from internal context */
+            pu8_data_in = pC->pu8_data_in[i];
+        }
+
+        /**< In every mode, output data are at the beginning of the output plane */
+        pu8_data_out    = pOut[i].pac_data + pOut[i].u_topleft;
+
+        /**< Initialize input offset applied after each pixel */
+        if(M4OSA_FALSE == pC->m_bFlipY)
+        {
+            i32_tmp_offset = pIn[i].u_stride;
+        }
+        else
+        {
+            i32_tmp_offset = -pIn[i].u_stride;
+        }
+
+        /**< In this case, no bilinear interpolation is needed as input and output dimensions
+            are the same */
+        if( M4OSA_TRUE == pC->m_bOnlyCopy)
+        {
+            /**< No +-90° rotation */
+            if(M4OSA_FALSE == pC->m_bRevertXY)
+            {
+                /**< No flip on X abscissa */
+                if(M4OSA_FALSE == pC->m_bFlipX)
+                {
+                    /**< Loop on each row */
+                    for(j=0;j<pOut[i].u_height;j++)
+                    {
+                        /**< Copy one whole line */
+                        M4OSA_memcpy((M4OSA_MemAddr8)pu8_data_out, (M4OSA_MemAddr8)pu8_data_in,
+                             pOut[i].u_width);
+
+                        /**< Update pointers */
+                        pu8_data_out += pOut[i].u_stride;
+                        if(M4OSA_FALSE == pC->m_bFlipY)
+                        {
+                            pu8_data_in += pIn[i].u_stride;
+                        }
+                        else
+                        {
+                            pu8_data_in -= pIn[i].u_stride;
+                        }
+                    }
+                }
+                else
+                {
+                    /**< Loop on each row */
+                    for(j=0;j<pOut[i].u_height;j++)
+                    {
+                        /**< Loop on each pixel of 1 row */
+                        for(k=0;k<pOut[i].u_width;k++)
+                        {
+                            *pu8_data_out++ = *pu8_data_in--;
+                        }
+
+                        /**< Update pointers */
+                        pu8_data_out += (pOut[i].u_stride - pOut[i].u_width);
+
+                        pu8_data_in += pOut[i].u_width + i32_tmp_offset;
+
+                    }
+                }
+            }
+            /**< Here we have a +-90° rotation */
+            else
+            {
+
+                /**< Loop on each row */
+                for(j=0;j<pOut[i].u_height;j++)
+                {
+                    pu8_data_in_tmp = pu8_data_in;
+
+                    /**< Loop on each pixel of 1 row */
+                    for(k=0;k<pOut[i].u_width;k++)
+                    {
+                        *pu8_data_out++ = *pu8_data_in_tmp;
+
+                        /**< Update input pointer in order to go to next/past line */
+                        pu8_data_in_tmp += i32_tmp_offset;
+                    }
+
+                    /**< Update pointers */
+                    pu8_data_out += (pOut[i].u_stride - pOut[i].u_width);
+                    if(M4OSA_FALSE == pC->m_bFlipX)
+                    {
+                        pu8_data_in ++;
+                    }
+                    else
+                    {
+                        pu8_data_in --;
+                    }
+                }
+            }
+        }
+        /**< Bilinear interpolation */
+        else
+        {
+
+        if(3 != i)    /**< other than alpha plane */
+        {
+            /**No +-90° rotation */
+            if(M4OSA_FALSE == pC->m_bRevertXY)
+            {
+
+                /**< Loop on each row */
+                for(j=0;j<pOut[i].u_height;j++)
+                {
+                    /* Vertical weight factor */
+                    u32_y_frac = (pC->u32_y_accum[i]>>12)&15;
+
+                    /* Reinit horizontal weight factor */
+                    u32_x_accum = pC->u32_x_accum_start[i];
+
+
+
+                        if(M4OSA_TRUE ==  pC->m_bFlipX)
+                        {
+
+                            /**< Loop on each output pixel in a row */
+                            for(k=0;k<pOut[i].u_width;k++)
+                            {
+
+                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
+                                                                        weight factor */
+
+                                pu8_src_top = (pu8_data_in - (u32_x_accum >> 16)) -1 ;
+
+                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                                /* Weighted combination */
+                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
+                                                   pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
+                                                   (pu8_src_bottom[1]*(16-u32_x_frac) +
+                                                   pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
+
+                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                                /* Update horizontal accumulator */
+                                u32_x_accum += pC->u32_x_inc[i];
+                            }
+                        }
+
+                        else
+                        {
+                            /**< Loop on each output pixel in a row */
+                            for(k=0;k<pOut[i].u_width;k++)
+                            {
+                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
+                                                                        weight factor */
+
+                                pu8_src_top = pu8_data_in + (u32_x_accum >> 16);
+
+                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                                /* Weighted combination */
+                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
+                                                   pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
+                                                   (pu8_src_bottom[0]*(16-u32_x_frac) +
+                                                   pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
+
+                                    *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                                /* Update horizontal accumulator */
+                                u32_x_accum += pC->u32_x_inc[i];
+                            }
+
+                        }
+
+                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
+
+                    /* Update vertical accumulator */
+                    pC->u32_y_accum[i] += pC->u32_y_inc[i];
+                      if (pC->u32_y_accum[i]>>16)
+                    {
+                        pu8_data_in = pu8_data_in + (pC->u32_y_accum[i] >> 16) * i32_tmp_offset;
+                          pC->u32_y_accum[i] &= 0xffff;
+                       }
+                }
+        }
+            /** +-90° rotation */
+            else
+            {
+                pu8_data_in_org = pu8_data_in;
+
+                /**< Loop on each output row */
+                for(j=0;j<pOut[i].u_height;j++)
+                {
+                    /* horizontal weight factor */
+                    u32_x_frac = (pC->u32_x_accum[i]>>12)&15;
+
+                    /* Reinit accumulator */
+                    u32_y_accum = pC->u32_y_accum_start[i];
+
+                    if(M4OSA_TRUE ==  pC->m_bFlipX)
+                    {
+
+                        /**< Loop on each output pixel in a row */
+                        for(k=0;k<pOut[i].u_width;k++)
+                        {
+
+                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
+
+
+                            pu8_src_top = (pu8_data_in - (pC->u32_x_accum[i] >> 16)) - 1;
+
+                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                            /* Weighted combination */
+                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
+                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
+                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
+                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
+
+                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                            /* Update vertical accumulator */
+                            u32_y_accum += pC->u32_y_inc[i];
+                              if (u32_y_accum>>16)
+                            {
+                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
+                                  u32_y_accum &= 0xffff;
+                               }
+
+                        }
+                    }
+                    else
+                    {
+                        /**< Loop on each output pixel in a row */
+                        for(k=0;k<pOut[i].u_width;k++)
+                        {
+
+                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
+
+                            pu8_src_top = pu8_data_in + (pC->u32_x_accum[i] >> 16);
+
+                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                            /* Weighted combination */
+                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
+                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
+                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
+                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
+
+                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                            /* Update vertical accumulator */
+                            u32_y_accum += pC->u32_y_inc[i];
+                              if (u32_y_accum>>16)
+                            {
+                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
+                                  u32_y_accum &= 0xffff;
+                               }
+                        }
+                    }
+                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
+
+                    /* Update horizontal accumulator */
+                    pC->u32_x_accum[i] += pC->u32_x_inc[i];
+
+                    pu8_data_in = pu8_data_in_org;
+                }
+
+            }
+            }/** 3 != i */
+            else
+            {
+            /**No +-90° rotation */
+            if(M4OSA_FALSE == pC->m_bRevertXY)
+            {
+
+                /**< Loop on each row */
+                for(j=0;j<pOut[i].u_height;j++)
+                {
+                    /* Vertical weight factor */
+                    u32_y_frac = (pC->u32_y_accum[i]>>12)&15;
+
+                    /* Reinit horizontal weight factor */
+                    u32_x_accum = pC->u32_x_accum_start[i];
+
+
+
+                        if(M4OSA_TRUE ==  pC->m_bFlipX)
+                        {
+
+                            /**< Loop on each output pixel in a row */
+                            for(k=0;k<pOut[i].u_width;k++)
+                            {
+
+                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
+                                                                         weight factor */
+
+                                pu8_src_top = (pu8_data_in - (u32_x_accum >> 16)) -1 ;
+
+                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                                /* Weighted combination */
+                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
+                                                   pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
+                                                  (pu8_src_bottom[1]*(16-u32_x_frac) +
+                                                   pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
+
+                                u32_temp_value= (u32_temp_value >> 7)*0xff;
+
+                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                                /* Update horizontal accumulator */
+                                u32_x_accum += pC->u32_x_inc[i];
+                            }
+                        }
+
+                        else
+                        {
+                            /**< Loop on each output pixel in a row */
+                            for(k=0;k<pOut[i].u_width;k++)
+                            {
+                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
+                                                                        weight factor */
+
+                                pu8_src_top = pu8_data_in + (u32_x_accum >> 16);
+
+                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                                /* Weighted combination */
+                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
+                                                   pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
+                                                   (pu8_src_bottom[0]*(16-u32_x_frac) +
+                                                   pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
+
+                                u32_temp_value= (u32_temp_value >> 7)*0xff;
+
+                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                                /* Update horizontal accumulator */
+                                u32_x_accum += pC->u32_x_inc[i];
+                            }
+
+                        }
+
+                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
+
+                    /* Update vertical accumulator */
+                    pC->u32_y_accum[i] += pC->u32_y_inc[i];
+                      if (pC->u32_y_accum[i]>>16)
+                    {
+                        pu8_data_in = pu8_data_in + (pC->u32_y_accum[i] >> 16) * i32_tmp_offset;
+                          pC->u32_y_accum[i] &= 0xffff;
+                       }
+                }
+
+            } /**< M4OSA_FALSE == pC->m_bRevertXY */
+            /** +-90° rotation */
+            else
+            {
+                pu8_data_in_org = pu8_data_in;
+
+                /**< Loop on each output row */
+                for(j=0;j<pOut[i].u_height;j++)
+                {
+                    /* horizontal weight factor */
+                    u32_x_frac = (pC->u32_x_accum[i]>>12)&15;
+
+                    /* Reinit accumulator */
+                    u32_y_accum = pC->u32_y_accum_start[i];
+
+                    if(M4OSA_TRUE ==  pC->m_bFlipX)
+                    {
+
+                        /**< Loop on each output pixel in a row */
+                        for(k=0;k<pOut[i].u_width;k++)
+                        {
+
+                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
+
+
+                            pu8_src_top = (pu8_data_in - (pC->u32_x_accum[i] >> 16)) - 1;
+
+                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                            /* Weighted combination */
+                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
+                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
+                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
+                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
+
+                            u32_temp_value= (u32_temp_value >> 7)*0xff;
+
+                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                            /* Update vertical accumulator */
+                            u32_y_accum += pC->u32_y_inc[i];
+                              if (u32_y_accum>>16)
+                            {
+                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
+                                  u32_y_accum &= 0xffff;
+                               }
+
+                        }
+                    }
+                    else
+                    {
+                        /**< Loop on each output pixel in a row */
+                        for(k=0;k<pOut[i].u_width;k++)
+                        {
+
+                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
+
+                            pu8_src_top = pu8_data_in + (pC->u32_x_accum[i] >> 16);
+
+                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                            /* Weighted combination */
+                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
+                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
+                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
+                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
+
+                            u32_temp_value= (u32_temp_value >> 7)*0xff;
+
+                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                            /* Update vertical accumulator */
+                            u32_y_accum += pC->u32_y_inc[i];
+                              if (u32_y_accum>>16)
+                            {
+                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
+                                  u32_y_accum &= 0xffff;
+                               }
+                        }
+                    }
+                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
+
+                    /* Update horizontal accumulator */
+                    pC->u32_x_accum[i] += pC->u32_x_inc[i];
+
+                    pu8_data_in = pu8_data_in_org;
+
+                }
+                } /**< M4OSA_TRUE == pC->m_bRevertXY */
+        }/** 3 == i */
+            }
+        /**< In case of stripe mode, save current input pointer */
+        if(M4OSA_TRUE == pC->m_params.m_bOutputStripe)
+        {
+            pC->pu8_data_in[i] = pu8_data_in;
+        }
+    }
+
+    /**< Update number of processed rows, reset it if we have finished
+         with the whole processing */
+    pC->m_procRows += pOut[0].u_height;
+    if(M4OSA_FALSE == pC->m_bRevertXY)
+    {
+        if(pC->m_params.m_outputSize.m_height <= pC->m_procRows)    pC->m_procRows = 0;
+    }
+    else
+    {
+        if(pC->m_params.m_outputSize.m_width <= pC->m_procRows)    pC->m_procRows = 0;
+    }
+
+    return M4NO_ERROR ;
+
+}
+
+
+
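For context, a minimal sketch of the expected call sequence for the AIR component in normal
(non-stripe) mode, using only the functions defined above (the helper name and the concrete
coordinates and sizes are illustrative assumptions, not part of the patch):

    /* Hypothetical usage: resize a 320x240 area of interest of a YUV420 picture to 640x480. */
    static M4OSA_ERR resizeAreaOfInterest(M4VIFI_ImagePlane pIn[3], M4VIFI_ImagePlane pOut[3])
    {
        M4OSA_Context air = M4OSA_NULL;
        M4AIR_Params  params;
        M4OSA_ERR     err;

        err = M4AIR_create(&air, M4AIR_kYUV420P);
        if (M4NO_ERROR != err)
        {
            return err;
        }

        params.m_inputCoord.m_x      = 16;                            /* top-left of the ROI   */
        params.m_inputCoord.m_y      = 8;
        params.m_inputSize.m_width   = 320;                           /* ROI size (even)       */
        params.m_inputSize.m_height  = 240;
        params.m_outputSize.m_width  = 640;                           /* output size (even)    */
        params.m_outputSize.m_height = 480;
        params.m_bOutputStripe       = M4OSA_FALSE;                   /* whole picture at once */
        params.m_outputOrientation   = M4COMMON_kOrientationTopLeft;  /* no flip, no rotation  */

        err = M4AIR_configure(air, &params);
        if (M4NO_ERROR == err)
        {
            /* In normal mode a single call fills the whole output plane set. */
            err = M4AIR_get(air, pIn, pOut);
        }

        M4AIR_cleanUp(air);
        return err;
    }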
diff --git a/libvideoeditor/vss/src/M4AMRR_CoreReader.c b/libvideoeditor/vss/src/M4AMRR_CoreReader.c
new file mode 100755
index 0000000..14f5271
--- /dev/null
+++ b/libvideoeditor/vss/src/M4AMRR_CoreReader.c
@@ -0,0 +1,910 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file        M4AMRR_CoreReader.c
+ * @brief       Implementation of AMR parser
+ * @note        This file contains the API Implementation for
+ *              AMR Parser.
+ ******************************************************************************
+*/
+#include "M4AMRR_CoreReader.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_CoreID.h"
+
+/**
+ ******************************************************************************
+ * Maximum bitrate per amr type
+ ******************************************************************************
+*/
+#define M4AMRR_NB_MAX_BIT_RATE    12200
+#define M4AMRR_WB_MAX_BIT_RATE    23850
+
+/**
+ ******************************************************************************
+ * AMR reader context ID
+ ******************************************************************************
+*/
+#define M4AMRR_CONTEXTID    0x414d5252
+
+/**
+ ******************************************************************************
+ * An AMR frame is 20ms
+ ******************************************************************************
+*/
+#define M4AMRR_FRAME_LENGTH     20
+
+/**
+ ******************************************************************************
+ * For the seek, the file is split into 40 segments for faster search
+ ******************************************************************************
+*/
+#define    M4AMRR_NUM_SEEK_ENTRIES 40
+
+#define M4AMRR_NB_SAMPLE_FREQUENCY 8000        /**< Narrow band sampling rate */
+#define M4AMRR_WB_SAMPLE_FREQUENCY 16000    /**< Wide band sampling rate */
+
+/**
+ ******************************************************************************
+ * AMR reader version numbers
+ ******************************************************************************
+*/
+/* CHANGE_VERSION_HERE */
+#define M4AMRR_VERSION_MAJOR 1
+#define M4AMRR_VERSION_MINOR 11
+#define M4AMRR_VERSION_REVISION 3
+
+/**
+ ******************************************************************************
+ * structure    M4_AMRR_Context
+ * @brief        Internal AMR reader context structure
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_UInt32             m_contextId ;      /* Fixed Id. to check for valid Context*/
+    M4OSA_FileReadPointer*   m_pOsaFilePtrFct;  /* File function pointer */
+    M4SYS_StreamDescription* m_pStreamHandler;  /* Stream Description */
+    M4OSA_UInt32*            m_pSeekIndex;      /* Seek Index Table */
+    M4OSA_UInt32             m_seekInterval;    /* Stores the seek Interval stored in the Index */
+    M4OSA_UInt32             m_maxAuSize;       /* Stores the max Au Size */
+    M4OSA_MemAddr32          m_pdataAddress;    /* Pointer to store AU data */
+    M4SYS_StreamType         m_streamType;      /* Stores the stream type AMR NB or WB */
+    M4OSA_Context            m_pAMRFile;        /* Data storage */
+    M4AMRR_State             m_status;          /* AMR Reader Status */
+    M4OSA_Int32              m_structSize;      /* size of structure*/
+} M4_AMRR_Context;
+
+/**
+ ******************************************************************************
+ * Parser internal functions, not usable from outside the reader context
+ ******************************************************************************
+*/
+M4OSA_UInt32    M4AMRR_getAuSize(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType);
+M4OSA_UInt32    M4AMRR_getBitrate(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType);
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32    M4AMRR_getAuSize(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType)
+ * @brief    Internal function to the AMR Parser, returns the AU size of the Frame
+ * @note     This function takes the stream type and the frame type and returns the
+ *           frame length
+ * @param    frameType(IN)    : AMR frame type
+ * @param    streamType(IN)    : AMR stream type NB or WB
+ * @returns  The frame size based on the frame type.
+ ******************************************************************************
+ */
+M4OSA_UInt32    M4AMRR_getAuSize(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType)
+{
+    const M4OSA_UInt32    M4AMRR_NB_AUSIZE[]={13,14,16,18,20,21,27,32,6,6,6};
+    const M4OSA_UInt32    M4AMRR_WB_AUSIZE[]={18,24,33,37,41,47,51,59,61,6};
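+    /* Sizes are in bytes, indexed by frame type, and include the 1-byte frame header (ToC);
+       the trailing 6-byte entries correspond to SID (comfort noise) frames */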
+
+    if ( streamType == M4SYS_kAMR )
+    {
+            return M4AMRR_NB_AUSIZE[frameType];
+    }
+    else /* M4SYS_kAMR_WB */
+    {
+            return M4AMRR_WB_AUSIZE[frameType];
+    }
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32    M4AMRR_getBitrate(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType)
+ * @brief    Internal function to the AMR Parser, returns the Bit rate of the Frame
+ * @note     This function takes the stream type and the frame type and returns the
+ *           bit rate for the given frame.
+ * @param    frameType(IN)    : AMR frame type
+ * @param    streamType(IN)    : AMR stream type NB or WB
+ * @returns  The frame's bit rate based on the frame type.
+ ******************************************************************************
+ */
+M4OSA_UInt32    M4AMRR_getBitrate(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType)
+{
+    const M4OSA_UInt32    M4AMRR_NB_BITRATE[]=
+        {4750,5150,5900,6700,7400,7950,10200,12200,12200,12200,12200};
+    const M4OSA_UInt32    M4AMRR_WB_BITRATE[]=
+        {6600,8850,12650,14250,15850,18250,19850,23050,23850,12200};
+
+    if ( streamType == M4SYS_kAMR )
+    {
+            return M4AMRR_NB_BITRATE[frameType];
+    }
+    else /* M4SYS_kAMR_WB */
+    {
+            return M4AMRR_WB_BITRATE[frameType];
+    }
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_openRead(M4OSA_Context* pContext, M4OSA_Void* pFileDescriptor,
+                        M4OSA_FileReadPointer* pFileFunction)
+/*********************************************************/
+{
+    M4_AMRR_Context*    pStreamContext;
+    M4OSA_FilePosition  filePos;
+
+    M4OSA_ERR err = M4ERR_FILE_NOT_FOUND ;
+    M4OSA_UInt32 size ;
+    M4OSA_UInt32 data ;
+    M4OSA_Char *M4_Token;
+    M4OSA_UInt32 *tokenPtr;
+
+    /* Header for AMR NB */
+    M4OSA_UInt32 M4_AMR_1       = 0x4d412123;
+    M4OSA_UInt32 M4_AMR_NB_2    = 0x00000a52;
+
+    /* Header for AMR WB */
+    M4OSA_UInt32 M4_AMR_WB_2    = 0x42572d52;
+    M4OSA_UInt32 M4_AMR_WB_3    = 0x0000000a;
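+    /* The four constants above are the little-endian words of the "#!AMR\n" (NB)
+       and "#!AMR-WB\n" (WB) file signatures */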
+    *pContext = M4OSA_NULL ;
+
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileDescriptor),M4ERR_PARAMETER,"File Desc. M4OSA_NULL");
+
+    M4_Token = (M4OSA_Char*)M4OSA_malloc(sizeof(M4OSA_MemAddr32)*3, M4AMR_READER,
+                 (M4OSA_Char *)("M4_Token"));
+    if(M4OSA_NULL == M4_Token)
+    {
+        M4OSA_DEBUG_IF3((M4OSA_NULL == M4_Token),M4ERR_ALLOC,"Mem Alloc failed - M4_Token");
+        return M4ERR_ALLOC ;
+    }
+
+    pStreamContext= (M4_AMRR_Context*)M4OSA_malloc(sizeof(M4_AMRR_Context), M4AMR_READER,
+                     (M4OSA_Char *)("pStreamContext"));
+    if(M4OSA_NULL == pStreamContext)
+    {
+        M4OSA_free((M4OSA_MemAddr32)M4_Token);
+        *pContext = M4OSA_NULL ;
+        return M4ERR_ALLOC ;
+    }
+
+    /* Initialize the context */
+    pStreamContext->m_contextId = M4AMRR_CONTEXTID;
+    pStreamContext->m_structSize=sizeof(M4_AMRR_Context);
+    pStreamContext->m_pOsaFilePtrFct=pFileFunction ;
+    pStreamContext->m_pStreamHandler = M4OSA_NULL ;
+    pStreamContext->m_pAMRFile = M4OSA_NULL ;
+    pStreamContext->m_status = M4AMRR_kOpening ;
+    pStreamContext->m_pSeekIndex = M4OSA_NULL ;
+    pStreamContext->m_seekInterval = 0;
+    pStreamContext->m_maxAuSize = 0 ;
+    pStreamContext->m_pdataAddress = M4OSA_NULL;
+    err=pStreamContext->m_pOsaFilePtrFct->openRead(&pStreamContext->m_pAMRFile,
+        (M4OSA_Char*)pFileDescriptor,M4OSA_kFileRead );
+    if ( err != M4NO_ERROR )
+    {
+        /* M4OSA_DEBUG_IF3((err != M4NO_ERROR),err,"File open failed"); */
+        M4OSA_free((M4OSA_MemAddr32)pStreamContext);
+        M4OSA_free((M4OSA_MemAddr32)M4_Token);
+
+        *pContext = M4OSA_NULL ;
+        return err ;
+    }
+
+    pStreamContext->m_status = M4AMRR_kOpening ;
+
+    size = 6;
+    pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+                (M4OSA_MemAddr8)M4_Token, &size);
+    if(size != 6)
+    {
+        goto cleanup;
+    }
+
+    tokenPtr = (M4OSA_UInt32*)M4_Token ;
+    /* Check for the first 4 bytes of the header common to WB and NB*/
+    if (*tokenPtr != M4_AMR_1)
+    {
+        goto cleanup;
+    }
+
+    tokenPtr++;
+    data = *tokenPtr & 0x0000FFFF ;
+    /* Check if the next part is Narrow band header */
+    if (data!= M4_AMR_NB_2)
+    {
+        /* Stream is AMR Wide Band */
+        filePos = 4;
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekBeginning, &filePos);
+        size = 5;
+        pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+             (M4OSA_MemAddr8)M4_Token, &size);
+        if(size != 5)
+            goto cleanup;
+        tokenPtr=(M4OSA_UInt32*)M4_Token;
+        /* Check for the Wide band header */
+        if(*tokenPtr!= M4_AMR_WB_2)
+            goto cleanup;
+        tokenPtr++;
+        data = *tokenPtr & 0x000000FF ;
+        if(data!= M4_AMR_WB_3)
+            goto cleanup;
+        pStreamContext->m_streamType = M4SYS_kAMR_WB ;
+    }
+    else
+    {
+        /* Stream is a Narrow band stream */
+        pStreamContext->m_streamType = M4SYS_kAMR ;
+    }
+    /*  No Profile level defined */
+    pStreamContext->m_status = M4AMRR_kOpened;
+
+    M4OSA_free((M4OSA_MemAddr32)M4_Token);
+    *pContext = pStreamContext ;
+    return M4NO_ERROR;
+
+cleanup:
+
+    if(M4OSA_NULL != pStreamContext->m_pAMRFile)
+    {
+        pStreamContext->m_pOsaFilePtrFct->closeRead(pStreamContext->m_pAMRFile);
+    }
+
+    M4OSA_free((M4OSA_MemAddr32)M4_Token);
+    M4OSA_free((M4OSA_MemAddr32)pStreamContext);
+
+    *pContext = M4OSA_NULL ;
+
+    return (M4OSA_ERR)M4ERR_AMR_NOT_COMPLIANT;
+}
+
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_getNextStream(M4OSA_Context Context, M4SYS_StreamDescription* pStreamDesc )
+/*********************************************************/
+{
+    M4_AMRR_Context*    pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_Char            frameHeader, frameType ;
+    M4OSA_UInt32        size, auCount=0;
+    M4OSA_FilePosition  filePos;
+
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pStreamDesc),M4ERR_PARAMETER,"Stream Desc. M4OSA_NULL");
+    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+         "Bad Context");
+    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kOpened), M4ERR_STATE, "Invalid State");
+
+    if (M4OSA_NULL != pStreamContext->m_pStreamHandler)
+    {
+        return M4WAR_NO_MORE_STREAM ;
+    }
+
+    size = 1;
+    pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+         (M4OSA_MemAddr8)&frameHeader, &size);
+
+    /* XFFF FXXX -> F is the Frame type */
+    frameType = ( frameHeader & 0x78 ) >> 3 ;
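+    /* e.g. a header byte of 0x3C gives (0x3C & 0x78) >> 3 = 7, i.e. a 12.2 kbps AMR-NB frame */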
+
+    if ( frameType == 15 )
+    {
+        return M4WAR_NO_DATA_YET ;
+    }
+
+    if (( pStreamContext->m_streamType == M4SYS_kAMR ) && ( frameType > 11 ))
+    {
+        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
+    }
+
+    if (( pStreamContext->m_streamType == M4SYS_kAMR_WB ) && ( frameType > 9 ))
+    {
+        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
+    }
+
+    /* Average bit rate is assigned the bitrate of the first frame */
+    pStreamDesc->averageBitrate = M4AMRR_getBitrate(frameType,pStreamContext->m_streamType);
+
+    filePos = -1;
+    pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile, M4OSA_kFileSeekCurrent,
+         &filePos);
+
+    /* Initialize pStreamDesc */
+    pStreamDesc->profileLevel = 0xFF ;
+    pStreamDesc->decoderSpecificInfoSize = 0 ;
+    pStreamDesc->decoderSpecificInfo = M4OSA_NULL ;
+    pStreamDesc->maxBitrate = (pStreamContext->m_streamType ==
+        M4SYS_kAMR )?M4AMRR_NB_MAX_BIT_RATE:M4AMRR_WB_MAX_BIT_RATE;
+    pStreamDesc->profileLevel = 0xFF ;
+    pStreamDesc->streamID = 1;
+    pStreamDesc->streamType = pStreamContext->m_streamType;
+
+    /* Timescale equals Sampling Frequency: NB-8000 Hz, WB-16000 Hz */
+    pStreamDesc->timeScale = (pStreamContext->m_streamType == M4SYS_kAMR )?8000:16000;
+    M4OSA_TIME_SET_UNKNOWN(pStreamDesc->duration);
+
+    pStreamContext->m_pStreamHandler =
+         (M4SYS_StreamDescription*)M4OSA_malloc(sizeof(M4SYS_StreamDescription),
+             M4AMR_READER, (M4OSA_Char *)("pStreamContext->m_pStreamHandler"));
+    if(M4OSA_NULL == pStreamContext->m_pStreamHandler)
+    {
+        return M4ERR_ALLOC;
+    }
+
+    /* Copy the Stream Desc. into the Context */
+    pStreamContext->m_pStreamHandler->averageBitrate = pStreamDesc->averageBitrate;
+    pStreamContext->m_pStreamHandler->decoderSpecificInfo = M4OSA_NULL ;
+    pStreamContext->m_pStreamHandler->decoderSpecificInfoSize = 0 ;
+    M4OSA_TIME_SET_UNKNOWN(pStreamContext->m_pStreamHandler->duration);
+    pStreamContext->m_pStreamHandler->profileLevel = 0xFF ;
+    pStreamContext->m_pStreamHandler->streamID = 1;
+    pStreamContext->m_pStreamHandler->streamType = pStreamDesc->streamType ;
+    pStreamContext->m_pStreamHandler->timeScale = pStreamDesc->timeScale ;
+
+    /* Count the number of Access Unit in the File to get the */
+    /* duration of the stream = 20 ms * number of access unit */
+    while(1)
+    {
+        size = 1;
+        pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+             (M4OSA_MemAddr8)&frameHeader, &size);
+        if ( size == 0)
+            break ;
+        frameType = (frameHeader & 0x78) >> 3 ;
+        /* Get the frame size and skip so many bytes */
+        if(frameType != 15){
+            /* GLA 20050628: frame types > 10 would index past the AU size table, so skip them */
+            if(frameType > 10)
+                continue ;
+
+            size = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
+            if(size > pStreamContext->m_maxAuSize )
+            {
+                pStreamContext->m_maxAuSize = size ;
+            }
+            filePos = size-1;
+            pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+                 M4OSA_kFileSeekCurrent, &filePos);
+            auCount++;
+        }
+    }
+
+    /* Each frame is 20 ms */
+    pStreamContext->m_pStreamHandler->duration = auCount * M4AMRR_FRAME_LENGTH ;
+    pStreamDesc->duration = pStreamContext->m_pStreamHandler->duration ;
+
+    /* Put the file pointer back at the first Access unit */
+    if( pStreamContext->m_streamType == M4SYS_kAMR )
+    {
+        filePos = 6;
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekBeginning, &filePos);
+    }
+    if ( pStreamContext->m_streamType == M4SYS_kAMR_WB )
+    {
+        filePos = 9;
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekBeginning, &filePos);
+    }
+    return M4NO_ERROR ;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_startReading(M4OSA_Context Context, M4SYS_StreamID* pStreamIDs )
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_Int32 size = 0 ;
+
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pStreamIDs),M4ERR_PARAMETER,"Stream Ids. M4OSA_NULL");
+    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+         "Bad Context");
+    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kOpened), M4ERR_STATE, "Invalid State");
+
+    while( pStreamIDs[size] != 0 )
+    {
+        if( pStreamIDs[size++] != 1 )
+        {
+            return M4ERR_BAD_STREAM_ID ;
+        }
+    }
+
+    /* Allocate memory for data Address for use in NextAU() */
+    if(M4OSA_NULL == pStreamContext->m_pdataAddress)
+    {
+        size = pStreamContext->m_maxAuSize ;
+        /* dataAddress is owned by Parser, application should not delete or free it */
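+        /* The allocation is rounded up to a multiple of 4 bytes since the buffer is addressed as 32-bit words */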
+        pStreamContext->m_pdataAddress =(M4OSA_MemAddr32)M4OSA_malloc(size + (4 - size % 4),
+            M4AMR_READER, (M4OSA_Char *)("pStreamContext->m_pdataAddress"));
+        if(M4OSA_NULL == pStreamContext->m_pdataAddress)
+        {
+                M4OSA_DEBUG_IF3((M4OSA_NULL == pStreamContext->m_pdataAddress),M4ERR_ALLOC,
+                    "Mem Alloc failed - dataAddress");
+                return M4ERR_ALLOC;
+        }
+    }
+
+    /* Set the state of context to Reading */
+    pStreamContext->m_status = M4AMRR_kReading ;
+
+    return M4NO_ERROR ;
+}
+
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_nextAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu)
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_Char        frameHeader ;
+    M4OSA_Char        frameType ;
+    M4OSA_Int32        auSize;
+    M4OSA_UInt32    size ;
+    M4OSA_FilePosition  filePos;
+
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pAu),M4ERR_PARAMETER,"Access Unit . M4OSA_NULL");
+    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+         "Bad Context");
+    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kReading), M4ERR_STATE, "Invalid State");
+
+    if ( StreamID != 1 )
+    {
+            return M4ERR_BAD_STREAM_ID;
+    }
+
+    /* Read the frame header byte */
+    size = pStreamContext->m_maxAuSize;
+    pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+         (M4OSA_MemAddr8)pStreamContext->m_pdataAddress, &size);
+    if(size != pStreamContext->m_maxAuSize)
+    {
+        return M4WAR_NO_MORE_AU;
+    }
+
+    frameHeader = ((M4OSA_MemAddr8)pStreamContext->m_pdataAddress)[0];
+
+    frameType = ( frameHeader & 0x78 ) >> 3 ;
+
+    if (( pStreamContext->m_streamType == M4SYS_kAMR ) &&
+        ( frameType > 11 ) && ( frameType != 15 ))
+    {
+        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
+    }
+
+    if (( pStreamContext->m_streamType == M4SYS_kAMR_WB ) &&
+        ( frameType > 9 ) && ( frameType != 15 ))
+    {
+        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
+    }
+
+    /* Get the frame size */
+    if(frameType == 15)
+    {
+        auSize = 1;
+    }
+    else
+    {
+        auSize = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
+    }
+
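+    /* maxAuSize bytes were read above; rewind the file pointer by the surplus so it sits at the start of the next AU */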
+    size -= auSize ;
+    if(size != 0)
+    {
+        filePos = -((M4OSA_FilePosition)size);
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekCurrent, &filePos);
+    }
+
+    pAu->size = auSize ;
+
+    /* even when frameType == 15 (no data frame), the AMR core decoder outputs a full PCM buffer */
+    /*if(frameType == 15 )
+    {
+        pAu->CTS += 0;
+    }*/
+    /*else*/
+    {
+        pAu->CTS += M4AMRR_FRAME_LENGTH ;
+    }
+
+
+    pAu->DTS = pAu->CTS ;
+    pAu->attribute = M4SYS_kFragAttrOk;
+
+    pAu->stream = pStreamContext->m_pStreamHandler;
+    pAu->dataAddress = pStreamContext->m_pdataAddress ;
+
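+    /* In the AMR storage format the top bit of the frame header is a padding bit that should be 0;
+       a set bit is treated here as the end of the data */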
+    if(frameHeader & 0x80)
+    {
+        return M4WAR_NO_MORE_AU;
+    }
+
+    /* Change the state to implement NextAu->freeAu->NextAu FSM */
+    pStreamContext->m_status = M4AMRR_kReading_nextAU ;
+
+    return M4NO_ERROR ;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_freeAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu)
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pAu),M4ERR_PARAMETER,"Access Unit . M4OSA_NULL");
+    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+         "Bad Context");
+    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kReading_nextAU), M4ERR_STATE,
+         "Invalid State");
+
+    if (( StreamID != 1 ) && ( StreamID != 0))
+    {
+            return M4ERR_BAD_STREAM_ID;
+    }
+
+    /* Change the state to Reading so as to allow access to next AU */
+    pStreamContext->m_status = M4AMRR_kReading ;
+
+    return M4NO_ERROR ;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_seek(M4OSA_Context Context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
+                         M4SYS_SeekAccessMode seekMode, M4OSA_Time* pObtainCTS)
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_UInt32 count, prevAU, nextAU ;
+    M4OSA_UInt32 size ;
+    M4OSA_UInt32 auSize ;
+    M4OSA_UInt32 position, partSeekTime;
+    M4OSA_UInt32 auCount = 0, skipAuCount = 0 ;
+    M4OSA_Char    frameHeader ;
+    M4OSA_Char    frameType ;
+    M4OSA_FilePosition  filePos;
+    M4OSA_Double time_double;
+
+    /*Make explicit time cast, but take care that timescale is not used !!!*/
+    M4OSA_TIME_TO_MS(time_double, time, 1000);
+
+    M4OSA_INT64_FROM_INT32(*pObtainCTS, 0);
+
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+         "Bad Context");
+    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kReading) && \
+        ( pStreamContext->m_status != M4AMRR_kOpened), M4ERR_STATE, "Invalid State");
+    M4OSA_DEBUG_IF1((time_double < 0),M4ERR_PARAMETER,"negative time");
+
+    /* Coming to seek for the first time, need to build the seekIndex Table */
+    if(M4OSA_NULL == pStreamContext->m_pSeekIndex)
+    {
+        M4OSA_Double duration_double;
+
+        count = 0 ;
+        pStreamContext->m_pSeekIndex =
+             (M4OSA_UInt32*)M4OSA_malloc(M4AMRR_NUM_SEEK_ENTRIES * sizeof(M4OSA_UInt32),
+                 M4AMR_READER, (M4OSA_Char *)("pStreamContext->m_pSeekIndex"));
+
+        if(M4OSA_NULL == pStreamContext->m_pSeekIndex)
+        {
+            M4OSA_DEBUG_IF3((M4OSA_NULL == pStreamContext->m_pSeekIndex),M4ERR_ALLOC,
+                "Mem Alloc Failed - SeekIndex");
+            return M4ERR_ALLOC ;
+        }
+
+        /* point to the first AU */
+        if( pStreamContext->m_streamType == M4SYS_kAMR )
+        {
+            filePos = 6;
+        }
+        else /*if ( pStreamContext->m_streamType == M4SYS_kAMR_WB )*/
+        {
+            filePos = 9;
+        }
+
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekBeginning, &filePos);
+
+        /* Set the position to the beginning of the first AU */
+        position = (pStreamContext->m_streamType != M4SYS_kAMR)?9:6;
+
+        /*Make explicit time cast, but take care that timescale is not used !!!*/
+        M4OSA_TIME_TO_MS(duration_double, pStreamContext->m_pStreamHandler->duration, 1000);
+
+        /* Calculate the seek interval duration based on the total duration */
+        /* Interval = (duration / ENTRIES) in multiples of AU frame length */
+        pStreamContext->m_seekInterval =
+             (M4OSA_UInt32)(duration_double / M4AMRR_NUM_SEEK_ENTRIES) ;
+        pStreamContext->m_seekInterval /= M4AMRR_FRAME_LENGTH ;
+        pStreamContext->m_seekInterval *= M4AMRR_FRAME_LENGTH ;
+        skipAuCount = pStreamContext->m_seekInterval / M4AMRR_FRAME_LENGTH ;
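+        /* e.g. for a 60 s stream: 60000 / 40 = 1500 ms, rounded down to a multiple of 20 ms
+           -> one index entry every 1500 ms, i.e. every 75 AUs */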
+
+        pStreamContext->m_pSeekIndex[count++]=position;
+        while(count < M4AMRR_NUM_SEEK_ENTRIES )
+        {
+            size = 1;
+            pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+                 (M4OSA_MemAddr8)&frameHeader, &size);
+            if ( size == 0)
+            {
+                break ;
+            }
+            frameType = (frameHeader & 0x78) >> 3 ;
+            if(frameType != 15)
+            {
+                /**< bugfix Ronan Cousyn 05/04/2006: In the core reader AMR, the
+                 * function M4AMRR_seek doesn't check the frameType */
+                if (( pStreamContext->m_streamType == M4SYS_kAMR ) && ( frameType > 10 ))
+                {
+                    return M4ERR_AMR_INVALID_FRAME_TYPE;
+                }
+                if (( pStreamContext->m_streamType == M4SYS_kAMR_WB ) && ( frameType > 9 ))
+                {
+                    return M4ERR_AMR_INVALID_FRAME_TYPE;
+                }
+                auSize = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
+                position += auSize ;
+                filePos = auSize-1;
+                pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+                     M4OSA_kFileSeekCurrent, &filePos);
+                auCount++;
+            }
+            else
+            {
+                position ++;
+            }
+            /* Skip the number of AU's as per interval and store in the Index table */
+            if ( (skipAuCount != 0) && !(auCount % skipAuCount))
+            {
+                pStreamContext->m_pSeekIndex[count++] = position;
+            }
+        }
+    }/* End of Building the seek table */
+
+    /* Use the seek table to seek the required time in the stream */
+
+    /* If we are seeking to the beginning of the file, point to the first AU */
+    if ( seekMode == M4SYS_kBeginning )
+    {
+        if( pStreamContext->m_streamType == M4SYS_kAMR )
+        {
+            filePos = 6;
+        }
+        else /*if ( pStreamContext->m_streamType == M4SYS_kAMR_WB )*/
+        {
+            filePos = 9;
+        }
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekBeginning, &filePos );
+        return M4NO_ERROR ;
+    }
+
+    /* Find the seek index entry nearest to the target time */
+    if (0 != pStreamContext->m_seekInterval)
+    {
+        position = (M4OSA_UInt32)(time_double / pStreamContext->m_seekInterval);
+    }
+    else
+    {
+        /*avoid division by 0*/
+        position = 0;
+    }
+
+    /* We have only 40 seek Index. */
+    position=(position >= M4AMRR_NUM_SEEK_ENTRIES)?M4AMRR_NUM_SEEK_ENTRIES-1:position;
+
+    /* The seek index points to the nearest AU; we then search for the
+       required time from that position */
+    partSeekTime = (M4OSA_UInt32)time_double - position * pStreamContext->m_seekInterval;
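+    /* e.g. with a 1500 ms interval, a 3210 ms target falls in entry 2 (3000 ms) and partSeekTime = 210 ms */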
+
+    position = pStreamContext->m_pSeekIndex[position];
+
+    if(!position)
+    {
+        return M4WAR_INVALID_TIME ;
+    }
+
+    /* point the file pointer to nearest AU */
+    filePos = position;
+    pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile, M4OSA_kFileSeekBeginning,
+         &filePos );
+
+    if ( partSeekTime == 0)
+    {
+        M4OSA_TIME_SET(*pObtainCTS, time);
+        return M4NO_ERROR;
+    }
+
+    M4OSA_INT64_FROM_DOUBLE(*pObtainCTS, (time_double - (M4OSA_Double)partSeekTime)) ;
+
+    switch(seekMode)
+    {
+        /* Get the AU before the target time */
+        case M4SYS_kPreviousRAP:
+        case M4SYS_kNoRAPprevious:
+            position = partSeekTime / M4AMRR_FRAME_LENGTH ;
+            if ( !(partSeekTime % M4AMRR_FRAME_LENGTH) )
+            {
+                position -- ;
+            }
+        break;
+        /* Get the Closest AU following the target time */
+        case M4SYS_kNextRAP:
+        case M4SYS_kNoRAPnext:
+            position = (partSeekTime + M4AMRR_FRAME_LENGTH )/ M4AMRR_FRAME_LENGTH ;
+        break;
+        /*  Get the closest AU to target time */
+        case M4SYS_kClosestRAP:
+        case M4SYS_kNoRAPclosest:
+            prevAU = partSeekTime-(partSeekTime/M4AMRR_FRAME_LENGTH)*M4AMRR_FRAME_LENGTH;
+            nextAU =
+                 ((partSeekTime+M4AMRR_FRAME_LENGTH)/M4AMRR_FRAME_LENGTH)*M4AMRR_FRAME_LENGTH -\
+                     partSeekTime ;
+            if(prevAU < nextAU)
+            {
+                position = partSeekTime / M4AMRR_FRAME_LENGTH ;
+            }
+            else
+            {
+                position = (partSeekTime + M4AMRR_FRAME_LENGTH )/ M4AMRR_FRAME_LENGTH ;
+            }
+        break;
+        case M4SYS_kBeginning:
+        break;
+    }
+
+    count = 0 ;
+    /* Skip the Access unit in the stream to skip the part seek time,
+       to reach the required target time */
+    while(count < position )
+    {
+        size = 1;
+        pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+             (M4OSA_MemAddr8)&frameHeader, &size);
+        if ( size == 0)
+        {
+            /* If the target time is invalid, point to the beginning and return */
+            M4OSA_INT64_FROM_INT32(*pObtainCTS, 0);
+            filePos = pStreamContext->m_pSeekIndex[0];
+            pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+                 M4OSA_kFileSeekBeginning, &filePos);
+            return M4WAR_INVALID_TIME ;
+        }
+        *pObtainCTS += M4AMRR_FRAME_LENGTH; /*Should use M4OSA_INT64_ADD !!*/
+        count++;
+        frameType = (frameHeader & 0x78) >> 3 ;
+        if(frameType == 15)
+        {
+            auSize = 1 ;
+        }
+        else
+        {
+            auSize = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
+        }
+
+        filePos = auSize-1;
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekCurrent, &filePos);
+    }
+
+    return M4NO_ERROR;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_closeRead(M4OSA_Context Context)
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+
+    /* Close the AMR stream */
+    pStreamContext->m_pOsaFilePtrFct->closeRead(pStreamContext->m_pAMRFile);
+
+    pStreamContext->m_status=M4AMRR_kClosed ;
+
+    /* Check if AU data Address is allocated memory and free it */
+    if(M4OSA_NULL != pStreamContext->m_pdataAddress)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pStreamContext->m_pdataAddress);
+    }
+
+    /* Check if the stream handler is allocated memory */
+    if(M4OSA_NULL != pStreamContext->m_pStreamHandler)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pStreamContext->m_pStreamHandler);
+    }
+
+    /* Seek table is created only when seek is used, so check if memory is allocated */
+    if(M4OSA_NULL != pStreamContext->m_pSeekIndex)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pStreamContext->m_pSeekIndex);
+    }
+
+    /* Free the context */
+    M4OSA_free((M4OSA_MemAddr32)pStreamContext);
+
+    return M4NO_ERROR ;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_getState(M4OSA_Context Context, M4AMRR_State* pState, M4SYS_StreamID streamId)
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+         "Bad Context");
+
+    if (( streamId != 1 ) && ( streamId != 0))
+    {
+            return M4ERR_BAD_STREAM_ID;
+    }
+
+    *pState = pStreamContext->m_status ;
+
+    return M4NO_ERROR ;
+}
+
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_getVersion    (M4_VersionInfo *pVersion)
+/*********************************************************/
+{
+    M4OSA_TRACE1_1("M4AMRR_getVersion called with pVersion: 0x%x\n", pVersion);
+    M4OSA_DEBUG_IF1(((M4OSA_UInt32) pVersion == 0),M4ERR_PARAMETER,
+         "pVersion is NULL in M4AMRR_getVersion");
+
+    pVersion->m_major = M4AMRR_VERSION_MAJOR;
+    pVersion->m_minor = M4AMRR_VERSION_MINOR;
+    pVersion->m_revision = M4AMRR_VERSION_REVISION;
+
+    return M4NO_ERROR;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_getmaxAUsize(M4OSA_Context Context, M4OSA_UInt32 *pMaxAuSize)
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+
+    /**
+     * Check input parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == Context),  M4ERR_PARAMETER,
+                "M4AMRR_getmaxAUsize: Context is M4OSA_NULL");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pMaxAuSize),M4ERR_PARAMETER,
+                "M4AMRR_getmaxAUsize: pMaxAuSize is M4OSA_NULL");
+
+    *pMaxAuSize = pStreamContext->m_maxAuSize;
+
+    return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/src/M4ChannelCoverter.c b/libvideoeditor/vss/src/M4ChannelCoverter.c
new file mode 100755
index 0000000..5d89820
--- /dev/null
+++ b/libvideoeditor/vss/src/M4ChannelCoverter.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4ChannelCoverter.c
+ * @brief   16-bit PCM channel conversion utilities (mono to interleaved stereo and back)
+ * @note
+ ******************************************************************************
+ */
+
+void MonoTo2I_16( const short *src,
+                        short *dst,
+                        short n)
+{
+    short ii;
+    src += n-1;
+    dst += (n*2)-1;
+
+    for (ii = n; ii != 0; ii--){
+        *dst-- = *src;
+        *dst-- = *src--;
+    }
+
+    return;
+}
+
+void From2iToMono_16( const short *src,
+                            short *dst,
+                            short n)
+{
+    short ii;
+    long Temp;
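+    /* Each output sample is the average of one interleaved L/R pair; the 32-bit accumulator avoids overflow before the shift */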
+    for (ii = n; ii != 0; ii--){
+        Temp = (long)*(src++);
+        Temp += (long)*(src++);
+        *(dst++) = (short)(Temp >>1);
+    }
+
+    return;
+}
+
diff --git a/libvideoeditor/vss/src/M4PCMR_CoreReader.c b/libvideoeditor/vss/src/M4PCMR_CoreReader.c
new file mode 100755
index 0000000..15fd9c8
--- /dev/null
+++ b/libvideoeditor/vss/src/M4PCMR_CoreReader.c
@@ -0,0 +1,717 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file    M4PCMR_CoreReader.c
+ * @brief   PCM reader implementation
+ * @note    This file implements functions of the PCM reader
+ ************************************************************************
+ */
+#include "M4OSA_CharStar.h"
+#include "M4PCMR_CoreReader.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_CharStar.h"
+/**
+ ******************************************************************************
+ * PCM reader version numbers
+ ******************************************************************************
+ */
+/* CHANGE_VERSION_HERE */
+#define M4PCMR_VERSION_MAJOR 1
+#define M4PCMR_VERSION_MINOR 0
+#define M4PCMR_VERSION_REVISION 0
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_openRead(M4OSA_Context* pContext, M4OSA_Void* pUrl,
+ *                             M4OSA_FileReadPointer* pFileFunction)
+ * @brief   This function opens a PCM file
+ * @note    This function :
+ *          - opens a PCM file
+ *          - initializes PCM context,
+ *          - verifies PCM file format
+ *          - Fill decoder config structure
+ *          - Changes state of the reader in 'Opening'
+ * @param   pContext: (OUT) Pointer on the PCM Reader context
+ * @param   pUrl: (IN) Name of the PCM file
+ * @param   pFileFunctions: (IN) Pointer on the file access functions
+ * @return  M4NO_ERROR                      there is no error during the opening
+ * @return  M4ERR_PARAMETER                 pContext and/or pUrl and/or pFileFunction is NULL
+ * @return  M4ERR_ALLOC                     there is no more memory available
+ * @return  M4ERR_FILE_NOT_FOUND            the file cannot be found
+ * @return  M4PCMC_ERR_PCM_NOT_COMPLIANT    the file does not seem to be compliant, no RIFF,
+ *                                             or lack of any mandatory chunk.
+ * @return  M4PCMC_ERR_PCM_NOT_SUPPORTED    the PCM format of this file is not supported by the
+ *                                           reader
+ * @return  Any M4OSA_FILE errors           see OSAL File specification for detailed errors
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_openRead(M4OSA_Context* pContext, M4OSA_Void* pUrl,
+                             M4OSA_FileReadPointer* pFileFunction)
+{
+    M4OSA_ERR       err;
+    M4PCMR_Context *context;
+    M4OSA_Char*        pTempURL;
+    M4OSA_Char        value[6];
+
+    /* Check parameters */
+    if((M4OSA_NULL == pContext)|| (M4OSA_NULL == pUrl) ||(M4OSA_NULL == pFileFunction))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Allocates the context */
+    context = M4OSA_NULL;
+    context = (M4PCMR_Context *)M4OSA_malloc(sizeof(M4PCMR_Context), M4WAV_READER,
+         (M4OSA_Char *)"M4PCMR_openRead");
+    if (M4OSA_NULL == context)
+    {
+        return M4ERR_ALLOC;
+    }
+    *pContext = (M4OSA_Context)context;
+
+    /* Initialize the context */
+    context->m_offset = 0;
+
+    context->m_state            = M4PCMR_kInit;
+    context->m_microState       = M4PCMR_kInit;
+    context->m_pFileReadFunc    = M4OSA_NULL;
+    context->m_fileContext      = M4OSA_NULL;
+    context->m_pAuBuffer        = M4OSA_NULL;
+    context->m_pDecoderSpecInfo = M4OSA_NULL;
+
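+    /* The stream parameters are parsed from a fixed-width suffix of the URL: the 5 characters
+       starting 11 from the end hold the sample rate, the character 5 from the end holds the
+       channel count, and the last 12 characters are stripped before the file is opened
+       (e.g. a "_16000_2.pcm"-style suffix). */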
+    /* Set sample frequency */
+    pTempURL = (M4OSA_Char*)pUrl + (M4OSA_chrLength((M4OSA_Char*)pUrl)-11);
+    M4OSA_chrNCopy(value, pTempURL, 5);
+    M4OSA_chrGetUInt32(pTempURL, &(context->m_decoderConfig.SampleFrequency),
+         M4OSA_NULL, M4OSA_kchrDec);
+
+    /* Set number of channels */
+    pTempURL += 6;
+    M4OSA_chrNCopy(value, pTempURL, 1);
+    M4OSA_chrGetUInt16(pTempURL, &(context->m_decoderConfig.nbChannels),
+         M4OSA_NULL, M4OSA_kchrDec);
+
+    M4OSA_chrNCopy(pUrl,pUrl, (M4OSA_chrLength((M4OSA_Char*)pUrl)-12));
+    /* Open the file */
+    context->m_fileContext = M4OSA_NULL;
+    err = pFileFunction->openRead(&(context->m_fileContext), pUrl, M4OSA_kFileRead);
+    if(M4NO_ERROR != err)
+    {
+        return err;
+    }
+    context->m_decoderConfig.BitsPerSample = 16;
+    context->m_decoderConfig.AvgBytesPerSec = context->m_decoderConfig.SampleFrequency * 2 \
+        * context->m_decoderConfig.nbChannels;
+    err = pFileFunction->getOption(context->m_fileContext, M4OSA_kFileReadGetFileSize,
+         (M4OSA_DataOption*)&(context->m_decoderConfig.DataLength));
+    if(M4NO_ERROR != err)
+    {
+        return err;
+    }
+    context->m_blockSize = 2048 * context->m_decoderConfig.nbChannels;  // Raw PCM.  Hence, get a
+                                                                        // chunk of data
+
+    if(context->m_decoderConfig.SampleFrequency == 8000)
+    {
+        /* AMR case, no pb */
+        context->m_blockSize = context->m_decoderConfig.nbChannels *\
+             (context->m_decoderConfig.SampleFrequency / 50) * \
+                (context->m_decoderConfig.BitsPerSample / 8);
+    }
+    if(context->m_decoderConfig.SampleFrequency == 16000)
+    {
+        /* AAC case, we can't read only 20 ms blocks */
+        context->m_blockSize = 2048 * context->m_decoderConfig.nbChannels;
+    }
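+    /* With these settings a mono 8 kHz stream is read in 320-byte (20 ms) blocks,
+       while other cases fall back to chunks of 2048 * nbChannels bytes */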
+    context->m_dataStartOffset = 0;
+    context->m_pFileReadFunc = pFileFunction;
+
+    context->m_pAuBuffer = (M4OSA_MemAddr32)M4OSA_malloc(context->m_blockSize, M4WAV_READER,
+         (M4OSA_Char *)"Core PCM reader Access Unit");
+    if (M4OSA_NULL == context->m_pAuBuffer)
+    {
+        err = M4ERR_ALLOC;
+        goto cleanup;
+    }
+
+    /* Change state */
+    context->m_state = M4PCMR_kOpening;
+
+    return M4NO_ERROR;
+
+cleanup:
+
+    /* Close the file */
+    if(context->m_pFileReadFunc != M4OSA_NULL)
+        context->m_pFileReadFunc->closeRead(context->m_fileContext);
+
+    /* Free internal context */
+    M4OSA_free((M4OSA_MemAddr32)context);
+    *pContext = M4OSA_NULL;
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_getNextStream(M4OSA_Context context, M4SYS_StreamDescription* pStreamDesc)
+ * @brief   This function gets the (unique) stream of a PCM file
+ * @note    This function :
+ *          - Allocates and fills the decoder specific info structure
+ *          - Fills pStreamDesc structure allocated by the caller
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   pStreamDesc: (IN) Stream Description context
+ * @return  M4NO_ERROR          there is no error
+ * @return  M4ERR_PARAMETER     at least one parameter is NULL
+ * @return  M4ERR_ALLOC         there is no more memory available
+ * @return  M4ERR_STATE         this function cannot be called now
+ * @return  Any M4OSA_FILE      errors see OSAL File specification for detailed errors
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_getNextStream(M4OSA_Context context, M4SYS_StreamDescription* pStreamDesc)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+
+    /* Check parameters */
+    if((M4OSA_NULL == context)|| (M4OSA_NULL == pStreamDesc))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    if (c->m_state == M4PCMR_kOpening_streamRetrieved)
+    {
+        return M4WAR_NO_MORE_STREAM;
+    }
+    /* Check Reader's m_state */
+    if(c->m_state != M4PCMR_kOpening)
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Only one stream is contained in PCM file */
+    pStreamDesc->streamID = 1;
+    /* Not used */
+    pStreamDesc->profileLevel = 0;
+    pStreamDesc->decoderSpecificInfoSize = sizeof(M4PCMC_DecoderSpecificInfo);
+
+    /* Allocates decoder specific info structure */
+    pStreamDesc->decoderSpecificInfo = M4OSA_NULL;
+    pStreamDesc->decoderSpecificInfo =
+        (M4OSA_MemAddr32)M4OSA_malloc( sizeof(M4PCMC_DecoderSpecificInfo), M4WAV_READER,
+             (M4OSA_Char *)"M4PCMR_getNextStream");
+    if(pStreamDesc->decoderSpecificInfo == M4OSA_NULL)
+    {
+        return M4ERR_ALLOC;
+    }
+    /* Fill decoderSpecificInfo structure, with decoder config structure filled in 'openread'
+         function */
+    M4OSA_memcpy((M4OSA_MemAddr8)pStreamDesc->decoderSpecificInfo,
+         (M4OSA_MemAddr8)&c->m_decoderConfig, sizeof(M4PCMC_DecoderSpecificInfo));
+
+    /* Fill other fields of pStreamDesc structure */
+    pStreamDesc->timeScale = 1000;
+    pStreamDesc->duration = (M4OSA_Time)(((M4OSA_Double)(c->m_decoderConfig.DataLength)\
+         / (M4OSA_Double)(c->m_decoderConfig.AvgBytesPerSec))*pStreamDesc->timeScale);
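+    /* e.g. 1 600 000 bytes of 16 kHz mono 16-bit PCM (32 000 bytes/s) gives a duration of 50 000 ms */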
+    pStreamDesc->averageBitrate = c->m_decoderConfig.AvgBytesPerSec * 8;/* in bits, multiply by 8*/
+    pStreamDesc->maxBitrate = pStreamDesc->averageBitrate; /* PCM stream has constant bitrate */
+
+    /* Determines Stream type */
+    switch(c->m_decoderConfig.BitsPerSample)
+    {
+        case 8:
+            switch(c->m_decoderConfig.nbChannels)
+            {
+                case 1:
+                    pStreamDesc->streamType = M4SYS_kPCM_8bitsU;
+                    break;
+//                case 2:
+//                    pStreamDesc->streamType = M4SYS_kPCM_8bitsS; /* ??? 8bits stereo not
+                                                                  //   defined ? */
+//                    break;
+                default:
+                    pStreamDesc->streamType = M4SYS_kAudioUnknown;
+            }
+            break;
+
+        case 16:
+            switch(c->m_decoderConfig.nbChannels)
+            {
+                case 1:
+                    pStreamDesc->streamType = M4SYS_kPCM_16bitsU;
+                    break;
+                case 2:
+                    pStreamDesc->streamType = M4SYS_kPCM_16bitsS;
+                    break;
+                default:
+                    pStreamDesc->streamType = M4SYS_kAudioUnknown;
+            }
+            break;
+
+        default:
+            pStreamDesc->streamType = M4SYS_kAudioUnknown;
+    }
+
+    c->m_pDecoderSpecInfo = pStreamDesc->decoderSpecificInfo;
+
+    c->m_state = M4PCMR_kOpening_streamRetrieved;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_startReading(M4OSA_Context context, M4SYS_StreamID* pStreamIDs)
+ * @brief   This function starts reading the unique stream of a PCM file
+ * @note    This function :
+ *          - Verifies that the current reader's state allows to start reading a stream
+ *          - Check that provided StreamId is correct (always true, only one stream...)
+ *            In the player application, a StreamId table is initialized as follows:
+ *              M4SYS_StreamID pStreamID[2]={1,0};
+ *          - Change state of the reader in 'Reading'
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   streamID: (IN) Stream selection
+ * @return  M4NO_ERROR          there is no error
+ * @return  M4ERR_PARAMETER     at least one parameter is NULL
+ * @return  M4ERR_STATE         this function cannot be called now
+ * @return  M4ERR_BAD_STREAM_ID at least one of the streamID does not exist
+ *          (should never happen if table pStreamID is correctly initialized as above)
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_startReading(M4OSA_Context context, M4SYS_StreamID* pStreamIDs)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+
+    /* Check parameters */
+    if((M4OSA_NULL == context) || (M4OSA_NULL == pStreamIDs))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check Reader's state */
+    if(c->m_state != M4PCMR_kOpening_streamRetrieved)
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Check pStreamID and if they're OK, change reader's state */
+    if(pStreamIDs[0] == 1 || pStreamIDs[0] == 0)
+    /* First and unique stream contained in PCM file */
+    {
+        c->m_state = M4PCMR_kReading;
+        c->m_microState = M4PCMR_kReading;
+    }
+    else
+    {
+        return M4ERR_BAD_STREAM_ID;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_nextAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
+ * @brief   This function reads the next AU contained in the PCM file
+ * @note    This function :
+ *          - Verifies that the current reader's state allows to read an AU
+ *          - Allocates memory to store read AU
+ *          - Read data from file and store them into previously allocated memory
+ *          - Fill AU structure fields (CTS...)
+ *          - Change state of the reader in 'Reading' (not useful...)
+ *          - Change Micro state 'Reading' in M4PCMR_kReading_nextAU
+ *            (AU is read and can be deleted)
+ *          - Check if the last AU has been read or if we're about to read it
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   streamID: (IN) Stream selection
+ * @param   pAU: (IN/OUT) Access Unit Structure
+ * @return  M4NO_ERROR          there is no error
+ * @return  M4ERR_PARAMETER     at least one parameter is NULL
+ * @return  M4ERR_ALLOC         there is no more memory available
+ * @return  M4ERR_STATE         this function cannot be called now
+ * @return  M4WAR_NO_DATA_YET   there is not enough data in the file to provide a new access unit.
+ * @return  M4WAR_END_OF_STREAM There is no more access unit in the stream,
+ *                              or the sample number is bigger than the maximum one.
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_nextAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 size_read;
+
+    /* Check parameters */
+    if((M4OSA_NULL == context) || (M4OSA_NULL == pAU))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check Reader's state */
+    if(c->m_state != M4PCMR_kReading && c->m_microState != M4PCMR_kReading)
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Allocates AU dataAdress */
+    pAU->dataAddress = c->m_pAuBuffer;
+    size_read        = c->m_blockSize;
+
+    if((c->m_offset + size_read) >= c->m_decoderConfig.DataLength)
+    {
+        size_read = c->m_decoderConfig.DataLength - c->m_offset;
+    }
+
+    /* Read data in file, and copy it to AU Structure */
+    err = c->m_pFileReadFunc->readData(c->m_fileContext, (M4OSA_MemAddr8)pAU->dataAddress,
+         (M4OSA_UInt32 *)&size_read);
+    if(M4NO_ERROR != err)
+    {
+        return err;
+    }
+
+    /* Calculates the new m_offset, used to determine whether we're at end of reading or not */
+    c->m_offset = c->m_offset + size_read;
+
+    /* Fill the other parameters of the AU structure */
+    pAU->CTS =
+         (M4OSA_Time)(((M4OSA_Double)c->m_offset/(M4OSA_Double)c->m_decoderConfig.AvgBytesPerSec)\
+            *1000);
+    pAU->DTS = pAU->CTS;
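+    /* Note that the CTS is derived from the already-advanced offset, so it marks the end of this AU, in milliseconds */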
+
+    pAU->attribute  = 0;
+    pAU->frag       = M4OSA_NULL;
+    pAU->nbFrag     = 0;
+    pAU->stream     = M4OSA_NULL;
+    pAU->size       = size_read;
+
+    /* Change states */
+    c->m_state = M4PCMR_kReading; /* Not changed ... */
+    c->m_microState = M4PCMR_kReading_nextAU; /* AU is read and can be deleted */
+
+    /* Check if there is another AU to read */
+    /* i.e. if the number of bytes decoded equals the number of bytes to decode,
+         there is no more AU to decode */
+    if(c->m_offset >= c->m_decoderConfig.DataLength)
+    {
+        return M4WAR_NO_MORE_AU;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_freeAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
+ * @brief   This function frees the AU provided in parameter
+ * @note    This function :
+ *          - Verifies that the current reader's state allows to free an AU
+ *          - Free dataAddress field of AU structure
+ *          - Change state of the reader in 'Reading' (not useful...)
+ *          - Change Micro state 'Reading' in M4PCMR_kReading (another AU can be read)
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   streamID: (IN) Stream selection
+ * @param   pAU: (IN) Access Unit Structure
+ * @return  M4NO_ERROR  there is no error
+ * @return  M4ERR_PARAMETER at least one parameter is NULL
+ * @return  M4ERR_STATE this function cannot be called now
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_freeAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+
+    /* Check parameters */
+    if((M4OSA_NULL == context ) || (M4OSA_NULL == pAU))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check Reader's state */
+    if(c->m_state != M4PCMR_kReading && c->m_microState != M4PCMR_kReading_nextAU)
+    {
+        return M4ERR_STATE;
+    }
+
+    pAU->dataAddress = M4OSA_NULL;
+
+    /* Change states */
+    c->m_state = M4PCMR_kReading; /* Not changed ... */
+    c->m_microState = M4PCMR_kReading; /* AU is deleted, another AU can be read */
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_seek(M4OSA_Context context, M4SYS_StreamID* pStreamID,
+                         M4OSA_Time time, M4SYS_seekAccessMode seekAccessMode,
+                         M4OSA_Time* pObtainCTS[])
+ * @brief   This function seeks into the PCM file at the provided time
+ * @note    This function :
+ *          - Verifies that the current reader's state allows to seek
+ *          - Determines, from the provided time, the m_offset to seek to in the file
+ *          - If m_offset is correct, seek in file
+ *          - Update new m_offset in PCM reader context
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   pStreamID: (IN) Stream selection (not used, only 1 stream)
+ * @param   time: (IN) Targeted time
+ * @param   seekMode: (IN) Selects the seek access mode
+ * @param   pObtainCTS[]: (OUT) Returned Time (not used)
+ * @return  M4NO_ERROR              there is no error
+ * @return  M4ERR_PARAMETER         at least one parameter is NULL
+ * @return  M4ERR_ALLOC             there is no more memory available
+ * @return  M4ERR_STATE             this function cannot be called now
+ * @return  M4WAR_INVALID_TIME      Specified time is not reachable
+ * @return  M4ERR_NOT_IMPLEMENTED   this seek mode is not implemented yet
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_seek(M4OSA_Context context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
+                      M4SYS_SeekAccessMode seekAccessMode, M4OSA_Time* pObtainCTS)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 offset;
+    M4OSA_UInt32 alignment;
+    M4OSA_UInt32 size_read;
+
+    /* Check parameters */
+    if((M4OSA_NULL == context) || (M4OSA_NULL == pStreamID))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check Reader's state */
+    if(c->m_state != M4PCMR_kOpening_streamRetrieved && c->m_state != M4PCMR_kReading)
+    {
+        return M4ERR_STATE;
+    }
+
+    switch(seekAccessMode)
+    {
+        case M4SYS_kBeginning:
+            /* Determine m_offset from time*/
+            offset =
+                (M4OSA_UInt32)(time * ((M4OSA_Double)(c->m_decoderConfig.AvgBytesPerSec) / 1000));
+            /** check the alignment on sample boundary */
+            alignment = c->m_decoderConfig.nbChannels*c->m_decoderConfig.BitsPerSample/8;
+            if (offset%alignment != 0)
+            {
+                offset -= offset%alignment;
+            }
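+            /* i.e. the byte offset is snapped down to a whole sample frame so the seek cannot land in the middle of a sample */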
+            /*add the header offset*/
+            offset += c->m_dataStartOffset;
+            /* If m_offset is over file size -> Invalid time */
+            if (offset > (c->m_dataStartOffset + c->m_decoderConfig.DataLength))
+            {
+                return M4WAR_INVALID_TIME;
+            }
+            else
+            {
+                /* Seek file */
+                size_read = offset;
+                err = c->m_pFileReadFunc->seek(c->m_fileContext, M4OSA_kFileSeekBeginning,
+                    (M4OSA_FilePosition *) &size_read);
+                if(M4NO_ERROR != err)
+                {
+                    return err;
+                }
+                /* Update m_offset in M4PCMR_context */
+                c->m_offset = offset - c->m_dataStartOffset;
+            }
+            break;
+
+        default:
+            return M4ERR_NOT_IMPLEMENTED;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_closeRead(M4OSA_Context context)
+ * @brief   This function closes PCM file, and frees context
+ * @note    This function :
+ *          - Verifies that the current reader's state allows closing the PCM file
+ *          - Closes the file
+ *          - Free structures
+ * @param   context: (IN/OUT) PCM Reader context
+ * @return  M4NO_ERROR              there is no error
+ * @return  M4ERR_PARAMETER         at least one parameter is NULL
+ * @return  M4ERR_STATE             this function cannot be called now
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_closeRead(M4OSA_Context context)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check parameters */
+    if(M4OSA_NULL == context)
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    if(c->m_pDecoderSpecInfo != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)c->m_pDecoderSpecInfo);
+    }
+
+    /* Check Reader's state */
+    if(c->m_state != M4PCMR_kReading)
+    {
+        return M4ERR_STATE;
+    }
+    else if(c->m_microState == M4PCMR_kReading_nextAU)
+    {
+        return M4ERR_STATE;
+    }
+
+    if (M4OSA_NULL != c->m_pAuBuffer)
+    {
+        M4OSA_free((M4OSA_MemAddr32)c->m_pAuBuffer);
+    }
+
+    /* Close the file */
+    if (M4OSA_NULL != c->m_pFileReadFunc)
+    {
+        err = c->m_pFileReadFunc->closeRead(c->m_fileContext);
+    }
+
+    /* Free internal context */
+    if (M4OSA_NULL != c)
+    {
+        M4OSA_free((M4OSA_MemAddr32)c);
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_getOption(M4OSA_Context context, M4PCMR_OptionID optionID,
+ *                                M4OSA_DataOption* pValue)
+ * @brief   This function gets an option of the PCM Reader
+ * @note    This function :
+ *          - Verifies that the current reader's state allows to get an option
+ *          - Return corresponding option value
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   optionID: (IN) ID of the option to get
+ * @param   pValue: (OUT) Variable where the option value is returned
+ * @return  M4NO_ERROR              there is no error.
+ * @return  M4ERR_PARAMETER         at least one parameter is NULL.
+ * @return  M4ERR_BAD_OPTION_ID     the optionID is not a valid one.
+ * @return  M4ERR_STATE             this option is not available now.
+ * @return  M4ERR_NOT_IMPLEMENTED   this option is not implemented
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_getOption(M4OSA_Context context, M4PCMR_OptionID optionID,
+                             M4OSA_DataOption* pValue)
+{
+    M4PCMR_Context *c =(M4PCMR_Context *)context;
+
+    /* Check parameters */
+    if(M4OSA_NULL == context)
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check reader's state */
+    if((c->m_state != M4PCMR_kOpening) && (c->m_state != M4PCMR_kOpening_streamRetrieved)\
+         && (c->m_state != M4PCMR_kReading))
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Depending on the OptionID, the value to return differs */
+    switch(optionID)
+    {
+        case M4PCMR_kPCMblockSize:
+            *pValue = &c->m_blockSize;
+            break;
+
+        default:
+            return M4ERR_BAD_OPTION_ID;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_setOption(M4OSA_Context context, M4PCMR_OptionID optionID,
+ *                                 M4OSA_DataOption Value)
+ * @brief   This function sets an option of the PCM Reader
+ * @note    This function :
+ *          - Verifies that the current reader's state allows to set an option
+ *          - Set corresponding option value
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   optionID: (IN) ID of the option to set
+ * @param   Value: (IN) Variable where the option value is stored
+ * @return  M4NO_ERROR              there is no error.
+ * @return  M4ERR_PARAMETER         at least one parameter is NULL.
+ * @return  M4ERR_BAD_OPTION_ID     the optionID is not a valid one.
+ * @return  M4ERR_STATE             this option is not available now.
+ * @return  M4ERR_NOT_IMPLEMENTED   this option is not implemented
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_setOption(M4OSA_Context context, M4PCMR_OptionID optionID, M4OSA_DataOption Value)
+{
+    M4PCMR_Context *c =(M4PCMR_Context *)context;
+
+    /* Check parameters */
+    if(context == M4OSA_NULL)
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check reader's state */
+    if((c->m_state != M4PCMR_kOpening) && (c->m_state != M4PCMR_kOpening_streamRetrieved)\
+         && (c->m_state != M4PCMR_kReading))
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Depending on the OptionID, the value to set is different */
+    switch(optionID)
+    {
+        case M4PCMR_kPCMblockSize:
+            c->m_blockSize = (M4OSA_UInt32)Value;
+            break;
+
+        default:
+            return M4ERR_BAD_OPTION_ID;
+    }
+
+    return M4NO_ERROR;
+}
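+
+/**
+ * Illustrative usage sketch for the option API above (hypothetical caller, not
+ * part of this file; it assumes a PCM reader context that has already been
+ * opened by the reader, and an arbitrary block size of 2048 bytes):
+ *
+ *     M4OSA_ERR        err;
+ *     M4OSA_DataOption optVal = M4OSA_NULL;
+ *     M4OSA_UInt32     blockSize;
+ *
+ *     err = M4PCMR_setOption(pcmReaderCtx, M4PCMR_kPCMblockSize,
+ *                            (M4OSA_DataOption)2048);
+ *
+ *     err = M4PCMR_getOption(pcmReaderCtx, M4PCMR_kPCMblockSize, &optVal);
+ *     if (M4NO_ERROR == err)
+ *     {
+ *         blockSize = *(M4OSA_UInt32 *)optVal;
+ *     }
+ */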
+
+/*********************************************************/
+M4OSA_ERR M4PCMR_getVersion (M4_VersionInfo *pVersion)
+/*********************************************************/
+{
+    M4OSA_TRACE1_1("M4PCMR_getVersion called with pVersion: 0x%x", pVersion);
+    M4OSA_DEBUG_IF1(((M4OSA_UInt32) pVersion == 0),M4ERR_PARAMETER,
+         "pVersion is NULL in M4PCMR_getVersion");
+
+    pVersion->m_major = M4PCMR_VERSION_MAJOR;
+    pVersion->m_minor = M4PCMR_VERSION_MINOR;
+    pVersion->m_revision = M4PCMR_VERSION_REVISION;
+
+    return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/M4PTO3GPP_API.c b/libvideoeditor/vss/src/M4PTO3GPP_API.c
new file mode 100755
index 0000000..5581cbd
--- /dev/null
+++ b/libvideoeditor/vss/src/M4PTO3GPP_API.c
@@ -0,0 +1,2060 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4PTO3GPP_API.c
+ * @brief   Picture to 3gpp Service implementation.
+ * @note
+ ******************************************************************************
+*/
+
+/*16 bytes signature to be written in the generated 3gp files */
+#define M4PTO3GPP_SIGNATURE     "NXP-SW : PTO3GPP"
+
+/****************/
+/*** Includes ***/
+/****************/
+
+/**
+ *  Our header */
+#include "M4PTO3GPP_InternalTypes.h"
+#include "M4PTO3GPP_API.h"
+
+/**
+ *  Our errors */
+#include "M4PTO3GPP_ErrorCodes.h"
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+#include "VideoEditorVideoEncoder.h"
+#endif
+
+
+/**
+ *  OSAL headers */
+#include "M4OSA_Memory.h"       /* OSAL memory management */
+#include "M4OSA_Debug.h"        /* OSAL debug management */
+
+
+/************************/
+/*** Various Magicals ***/
+/************************/
+
+#define M4PTO3GPP_WRITER_AUDIO_STREAM_ID                1
+#define M4PTO3GPP_WRITER_VIDEO_STREAM_ID                2
+#define M4PTO3GPP_QUANTIZER_STEP                        4       /**< Quantizer step */
+#define M4PTO3GPP_WRITER_AUDIO_PROFILE_LEVEL            0xFF    /**< No specific profile and
+                                                                     level */
+#define M4PTO3GPP_WRITER_AUDIO_AMR_TIME_SCALE           8000    /**< AMR */
+#define M4PTO3GPP_BITRATE_REGULATION_CTS_PERIOD_IN_MS   500     /**< MAGICAL */
+#define M4PTO3GPP_MARGE_OF_FILE_SIZE                    25000   /**< MAGICAL */
+/**
+ ******************************************************************************
+ * define   AMR 12.2 kbps silence frame
+ ******************************************************************************
+*/
+#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE     32
+#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_DURATION 20
+const M4OSA_UInt8 M4PTO3GPP_AMR_AU_SILENCE_122_FRAME[M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE]=
+{ 0x3C, 0x91, 0x17, 0x16, 0xBE, 0x66, 0x78, 0x00, 0x00, 0x01, 0xE7, 0xAF,
+  0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE     13
+#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_DURATION 20
+const M4OSA_UInt8 M4PTO3GPP_AMR_AU_SILENCE_048_FRAME[M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE] =
+{ 0x04, 0xFF, 0x18, 0xC7, 0xF0, 0x0D, 0x04, 0x33, 0xFF, 0xE0, 0x00, 0x00, 0x00 };
+
+/***************************/
+/*** "Private" functions ***/
+/***************************/
+static M4OSA_ERR M4PTO3GPP_Ready4Processing(M4PTO3GPP_InternalContext* pC);
+
+/****************************/
+/*** "External" functions ***/
+/****************************/
+extern M4OSA_ERR M4WRITER_3GP_getInterfaces(M4WRITER_OutputFileType* pType,
+                                            M4WRITER_GlobalInterface** SrcGlobalInterface,
+                                            M4WRITER_DataInterface** SrcDataInterface);
+extern M4OSA_ERR M4READER_AMR_getInterfaces(M4READER_MediaType *pMediaType,
+                                            M4READER_GlobalInterface **pRdrGlobalInterface,
+                                            M4READER_DataInterface **pRdrDataInterface);
+extern M4OSA_ERR M4READER_3GP_getInterfaces(M4READER_MediaType *pMediaType,
+                                            M4READER_GlobalInterface **pRdrGlobalInterface,
+                                            M4READER_DataInterface **pRdrDataInterface);
+
+/****************************/
+/*** "Static" functions ***/
+/****************************/
+static M4OSA_ERR M4PTO3GPP_writeAmrSilence122Frame(
+                                    M4WRITER_DataInterface* pWriterDataIntInterface,
+                                    M4WRITER_Context* pWriterContext,
+                                    M4SYS_AccessUnit* pWriterAudioAU,
+                                    M4OSA_Time mtIncCts);
+static M4OSA_ERR M4PTO3GPP_writeAmrSilence048Frame(
+                                   M4WRITER_DataInterface* pWriterDataIntInterface,
+                                   M4WRITER_Context* pWriterContext,
+                                   M4SYS_AccessUnit* pWriterAudioAU,
+                                   M4OSA_Time mtIncCts);
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo);
+ * @brief   Get the M4PTO3GPP version.
+ * @note    Can be called anytime. Do not need any context.
+ * @param   pVersionInfo        (OUT) Pointer to a version info structure
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+*/
+
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo)
+/*********************************************************/
+{
+    M4OSA_TRACE3_1("M4PTO3GPP_GetVersion called with pVersionInfo=0x%x", pVersionInfo);
+
+    /**
+     *  Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pVersionInfo),M4ERR_PARAMETER,
+            "M4PTO3GPP_GetVersion: pVersionInfo is M4OSA_NULL");
+
+    pVersionInfo->m_major       = M4PTO3GPP_VERSION_MAJOR;
+    pVersionInfo->m_minor       = M4PTO3GPP_VERSION_MINOR;
+    pVersionInfo->m_revision    = M4PTO3GPP_VERSION_REVISION;
+
+    return M4NO_ERROR;
+}
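+
+/**
+ * Illustrative version-check sketch (hypothetical caller, not part of this file):
+ *
+ *     M4_VersionInfo version;
+ *     M4OSA_ERR      err = M4PTO3GPP_GetVersion(&version);
+ *
+ *     The caller can then inspect version.m_major, version.m_minor and
+ *     version.m_revision.
+ */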
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Init(M4PTO3GPP_Context* pContext);
+ * @brief   Initializes the M4PTO3GPP (allocates an execution context).
+ * @note
+ * @param   pContext            (OUT) Pointer to the M4PTO3GPP context to allocate
+ * @param   pFileReadPtrFct     (IN) Pointer to OSAL file reader functions
+ * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (If Debug Level >= 2)
+ * @return  M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+*/
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_Init(   M4PTO3GPP_Context* pContext,
+                            M4OSA_FileReadPointer* pFileReadPtrFct,
+                            M4OSA_FileWriterPointer* pFileWritePtrFct)
+/*********************************************************/
+{
+    M4PTO3GPP_InternalContext *pC;
+    M4OSA_UInt32 i;
+
+    M4OSA_TRACE3_1("M4PTO3GPP_Init called with pContext=0x%x", pContext);
+
+    /**
+     *  Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+            "M4PTO3GPP_Init: pContext is M4OSA_NULL");
+
+    /**
+     *  Allocate the M4PTO3GPP context and return it to the user */
+    pC = (M4PTO3GPP_InternalContext*)M4OSA_malloc(sizeof(M4PTO3GPP_InternalContext), M4PTO3GPP,
+        (M4OSA_Char *)"M4PTO3GPP_InternalContext");
+    *pContext = pC;
+    if (M4OSA_NULL == pC)
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Step(): unable to allocate M4PTO3GPP_InternalContext,\
+                       returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /**
+     *  Init the context. All pointers must be initialized to M4OSA_NULL because CleanUp()
+        can be called just after Init(). */
+    pC->m_State = M4PTO3GPP_kState_CREATED;
+    pC->m_VideoState = M4PTO3GPP_kStreamState_NOSTREAM;
+    pC->m_AudioState = M4PTO3GPP_kStreamState_NOSTREAM;
+
+    /**
+     *  Reader stuff */
+    pC->m_pReaderAudioAU        = M4OSA_NULL;
+    pC->m_pReaderAudioStream    = M4OSA_NULL;
+
+    /**
+     *  Writer stuff */
+    pC->m_pEncoderHeader        = M4OSA_NULL;
+    pC->m_pWriterVideoStream    = M4OSA_NULL;
+    pC->m_pWriterAudioStream    = M4OSA_NULL;
+    pC->m_pWriterVideoStreamInfo= M4OSA_NULL;
+    pC->m_pWriterAudioStreamInfo= M4OSA_NULL;
+
+    /**
+     *  Contexts of the used modules  */
+    pC->m_pAudioReaderContext    = M4OSA_NULL;
+    pC->m_p3gpWriterContext  = M4OSA_NULL;
+    pC->m_pMp4EncoderContext = M4OSA_NULL;
+    pC->m_eEncoderState = M4PTO3GPP_kNoEncoder;
+
+    /**
+     *  Interfaces of the used modules */
+    pC->m_pReaderGlobInt    = M4OSA_NULL;
+    pC->m_pReaderDataInt    = M4OSA_NULL;
+    pC->m_pWriterGlobInt    = M4OSA_NULL;
+    pC->m_pWriterDataInt    = M4OSA_NULL;
+    pC->m_pEncoderInt       = M4OSA_NULL;
+    pC->m_pEncoderExternalAPI = M4OSA_NULL;
+    pC->m_pEncoderUserData = M4OSA_NULL;
+
+    /**
+     * Fill the OSAL file function set */
+    pC->pOsalFileRead = pFileReadPtrFct;
+    pC->pOsalFileWrite = pFileWritePtrFct;
+
+    /**
+     *  Video rate control stuff */
+    pC->m_mtCts             = 0.0F;
+    pC->m_mtNextCts         = 0.0F;
+    pC->m_mtAudioCts        = 0.0F;
+    pC->m_AudioOffSet       = 0.0F;
+    pC->m_dLastVideoRegulCts= 0.0F;
+    pC->m_PrevAudioCts      = 0.0F;
+    pC->m_DeltaAudioCts     = 0.0F;
+
+    pC->m_MaxFileSize       = 0;
+    pC->m_CurrentFileSize   = 0;
+
+    pC->m_IsLastPicture         = M4OSA_FALSE;
+    pC->m_bAudioPaddingSilence  = M4OSA_FALSE;
+    pC->m_bLastInternalCallBack = M4OSA_FALSE;
+    pC->m_NbCurrentFrame        = 0;
+
+    pC->pSavedPlane = M4OSA_NULL;
+    pC->uiSavedDuration = 0;
+
+    for (i=0; i<M4VE_kEncoderType_NB; i++)
+    {
+        pC->registeredExternalEncs[i].pEncoderInterface = M4OSA_NULL;
+        pC->registeredExternalEncs[i].pUserData = M4OSA_NULL;
+        pC->registeredExternalEncs[i].registered = M4OSA_FALSE;
+    }
+
+    M4OSA_TRACE3_0("M4PTO3GPP_Init(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
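+
+/**
+ * Illustrative initialisation sketch (hypothetical caller, not part of this
+ * file; pOsalFileReadFcts and pOsalFileWriteFcts stand for the OSAL file
+ * function sets provided by the application):
+ *
+ *     M4PTO3GPP_Context pto3gppCtx = M4OSA_NULL;
+ *     M4OSA_ERR         err;
+ *
+ *     err = M4PTO3GPP_Init(&pto3gppCtx, pOsalFileReadFcts, pOsalFileWriteFcts);
+ *     if (M4NO_ERROR != err)
+ *     {
+ *         return err;
+ *     }
+ */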
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams);
+ * @brief   Set the M4PTO3GPP input and output files.
+ * @note    It opens the input file, but the output file may not be created yet.
+ * @param   pContext            (IN) M4PTO3GPP context
+ * @param   pParams             (IN) Pointer to the parameters for the PTO3GPP.
+ * @note    The pointed structure can be de-allocated after this function returns because
+ *          it is internally copied by the PTO3GPP
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return  M4ERR_STATE:        M4PTO3GPP is not in an appropriate state for this function to be
+                                 called
+ * @return  M4ERR_ALLOC:        There is no more available memory
+ * @return  ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263 The output video frame
+ *                              size parameter is incompatible with H263 encoding
+ * @return  ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT       The output video format
+                                                            parameter is undefined
+ * @return  ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE      The output video bit-rate parameter
+                                                            is undefined
+ * @return  ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE   The output video frame size parameter
+                                                            is undefined
+ * @return  ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE          The output file size parameter
+                                                            is undefined
+ * @return  ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING             The output audio padding parameter
+                                                            is undefined
+ * @return  ERR_PTO3GPP_UNHANDLED_AUDIO_TRACK_INPUT_FILE    The input audio file contains
+                                                            a track format not handled by PTO3GPP
+ ******************************************************************************
+*/
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams)
+/*********************************************************/
+{
+    M4PTO3GPP_InternalContext   *pC = (M4PTO3GPP_InternalContext*)(pContext);
+    M4OSA_ERR                   err = M4NO_ERROR;
+
+    M4READER_MediaFamily    mediaFamily;
+    M4_StreamHandler*       pStreamHandler;
+    M4READER_MediaType      readerMediaType;
+
+    M4OSA_TRACE2_2("M4PTO3GPP_Open called with pContext=0x%x, pParams=0x%x", pContext, pParams);
+
+    /**
+     *  Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER, \
+                    "M4PTO3GPP_Open: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams),  M4ERR_PARAMETER, \
+                    "M4PTO3GPP_Open: pParams is M4OSA_NULL");
+
+    /**
+     *  Check parameters correctness */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pPictureCallbackFct),
+               M4ERR_PARAMETER, "M4PTO3GPP_Open: pC->m_Params.pPictureCallbackFct is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pPictureCallbackCtxt),
+                M4ERR_PARAMETER,
+                 "M4PTO3GPP_Open: pC->m_Params.pPictureCallbackCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pOutput3gppFile),
+                M4ERR_PARAMETER, "M4PTO3GPP_Open: pC->m_Params.pOutput3gppFile is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pTemporaryFile),
+                M4ERR_PARAMETER, "M4PTO3GPP_Open: pC->m_Params.pTemporaryFile is M4OSA_NULL");
+
+    /**
+     * Video Format */
+    if( (M4VIDEOEDITING_kH263 != pParams->OutputVideoFormat) &&
+        (M4VIDEOEDITING_kMPEG4 != pParams->OutputVideoFormat) &&
+        (M4VIDEOEDITING_kMPEG4_EMP != pParams->OutputVideoFormat) &&
+        (M4VIDEOEDITING_kH264 != pParams->OutputVideoFormat))
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output video format");
+        return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+     }
+
+     /**
+     * Video Bitrate */
+    if(!((M4VIDEOEDITING_k16_KBPS       == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k24_KBPS       == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k32_KBPS       == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k48_KBPS       == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k64_KBPS       == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k96_KBPS       == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k128_KBPS      == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k192_KBPS      == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k256_KBPS      == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k288_KBPS      == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k384_KBPS      == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k512_KBPS      == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k800_KBPS      == pParams->OutputVideoBitrate) ||
+         /*+ New Encoder bitrates */
+         (M4VIDEOEDITING_k2_MBPS        == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k5_MBPS        == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k8_MBPS        == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_kVARIABLE_KBPS == pParams->OutputVideoBitrate)))
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output video bitrate");
+        return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE;
+    }
+
+    /**
+     * Video frame size */
+    if (!((M4VIDEOEDITING_kSQCIF == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kQQVGA == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kQCIF == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kQVGA == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kCIF  == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kVGA  == pParams->OutputVideoFrameSize) ||
+
+          (M4VIDEOEDITING_kNTSC == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kWVGA == pParams->OutputVideoFrameSize) ||
+
+          (M4VIDEOEDITING_k640_360 == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_k854_480 == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kHD1280  == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kHD1080  == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kHD960   == pParams->OutputVideoFrameSize)))
+
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output video frame size");
+        return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
+    }
+
+    /**
+     * Maximum size of the output 3GPP file */
+    if (!((M4PTO3GPP_k50_KB     == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k75_KB     == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k100_KB    == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k150_KB    == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k200_KB    == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k300_KB    == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k400_KB    == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k500_KB    == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_kUNLIMITED == pParams->OutputFileMaxSize)))
+
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output 3GPP file size");
+        return ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE;
+    }
+
+    /* Audio padding */
+    if (M4OSA_NULL != pParams->pInputAudioTrackFile)
+    {
+        if ((!( (M4PTO3GPP_kAudioPaddingMode_None   == pParams->AudioPaddingMode) ||
+                (M4PTO3GPP_kAudioPaddingMode_Silence== pParams->AudioPaddingMode) ||
+                (M4PTO3GPP_kAudioPaddingMode_Loop   == pParams->AudioPaddingMode))))
+        {
+            M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined audio padding");
+            return ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING;
+        }
+    }
+
+    /**< Size check for H263 (only valid sizes are CIF, QCIF and SQCIF) */
+    if ((M4VIDEOEDITING_kH263 == pParams->OutputVideoFormat) &&
+        (M4VIDEOEDITING_kSQCIF != pParams->OutputVideoFrameSize) &&
+        (M4VIDEOEDITING_kQCIF != pParams->OutputVideoFrameSize) &&
+        (M4VIDEOEDITING_kCIF != pParams->OutputVideoFrameSize))
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open():\
+             returning ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263");
+        return ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263;
+    }
+
+    /**
+     *  Check state automaton */
+    if (M4PTO3GPP_kState_CREATED != pC->m_State)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Open(): Wrong State (%d), returning M4ERR_STATE", pC->m_State);
+        return M4ERR_STATE;
+    }
+
+    /**
+     * Copy the M4PTO3GPP_Params structure */
+    M4OSA_memcpy((M4OSA_MemAddr8)(&pC->m_Params),
+                (M4OSA_MemAddr8)pParams, sizeof(M4PTO3GPP_Params));
+    M4OSA_TRACE1_1("M4PTO3GPP_Open: outputVideoBitrate = %d", pC->m_Params.OutputVideoBitrate);
+
+    /***********************************/
+    /* Open input file with the reader */
+    /***********************************/
+    if (M4OSA_NULL != pC->m_Params.pInputAudioTrackFile)
+    {
+        /**
+         * Get the reader interface according to the input audio file type */
+        switch(pC->m_Params.AudioFileFormat)
+        {
+#ifdef M4VSS_SUPPORT_READER_AMR
+        case M4VIDEOEDITING_kFileType_AMR:
+            err = M4READER_AMR_getInterfaces( &readerMediaType, &pC->m_pReaderGlobInt,
+                    &pC->m_pReaderDataInt);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4PTO3GPP_Open(): M4READER_AMR_getInterfaces returns 0x%x", err);
+                return err;
+            }
+            break;
+#endif
+
+#ifdef AAC_SUPPORTED
+        case M4VIDEOEDITING_kFileType_3GPP:
+            err = M4READER_3GP_getInterfaces( &readerMediaType, &pC->m_pReaderGlobInt,
+                    &pC->m_pReaderDataInt);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4PTO3GPP_Open(): M4READER_3GP_getInterfaces returns 0x%x", err);
+                return err;
+            }
+            break;
+#endif
+
+        default:
+            return ERR_PTO3GPP_UNHANDLED_AUDIO_TRACK_INPUT_FILE;
+        }
+
+        /**
+         *  Initializes the reader shell */
+        err = pC->m_pReaderGlobInt->m_pFctCreate(&pC->m_pAudioReaderContext);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderGlobInt->m_pFctCreate returns 0x%x", err);
+            return err;
+        }
+
+        pC->m_pReaderDataInt->m_readerContext = pC->m_pAudioReaderContext;
+        /**< Link the reader interface to the reader context */
+
+        /**
+         *  Set the reader shell file access functions */
+        err = pC->m_pReaderGlobInt->m_pFctSetOption(pC->m_pAudioReaderContext,
+            M4READER_kOptionID_SetOsaFileReaderFctsPtr,  (M4OSA_DataOption)pC->pOsalFileRead);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderGlobInt->m_pFctSetOption returns 0x%x", err);
+            return err;
+        }
+
+        /**
+         *  Open the input audio file */
+        err = pC->m_pReaderGlobInt->m_pFctOpen(pC->m_pAudioReaderContext,
+            pC->m_Params.pInputAudioTrackFile);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderGlobInt->m_pFctOpen returns 0x%x", err);
+            pC->m_pReaderGlobInt->m_pFctDestroy(pC->m_pAudioReaderContext);
+            pC->m_pAudioReaderContext = M4OSA_NULL;
+            return err;
+        }
+
+        /**
+         *  Get the audio streams from the input file */
+        err = M4NO_ERROR;
+        while (M4NO_ERROR == err)
+        {
+            err = pC->m_pReaderGlobInt->m_pFctGetNextStream(pC->m_pAudioReaderContext,
+                &mediaFamily, &pStreamHandler);
+
+            if((err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE)) ||
+                   (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)))
+            {
+                err = M4NO_ERROR;
+                continue;
+            }
+
+            if (M4NO_ERROR == err) /**< One stream found */
+            {
+                /**< Found an audio stream */
+                if ((M4READER_kMediaFamilyAudio == mediaFamily)
+                    && (M4OSA_NULL == pC->m_pReaderAudioStream))
+                {
+                    pC->m_pReaderAudioStream = (M4_AudioStreamHandler*)pStreamHandler;
+                    /**< Keep pointer to the audio stream */
+                    M4OSA_TRACE3_0("M4PTO3GPP_Open(): Found an audio stream in input");
+                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+
+                    /**
+                     *  Allocate audio AU used for read operations */
+                    pC->m_pReaderAudioAU = (M4_AccessUnit*)M4OSA_malloc(sizeof(M4_AccessUnit),
+                        M4PTO3GPP,(M4OSA_Char *)"pReaderAudioAU");
+                    if (M4OSA_NULL == pC->m_pReaderAudioAU)
+                    {
+                        M4OSA_TRACE1_0("M4PTO3GPP_Open(): unable to allocate pReaderAudioAU, \
+                                       returning M4ERR_ALLOC");
+                        return M4ERR_ALLOC;
+                    }
+
+                    /**
+                     *  Initializes an access Unit */
+                    err = pC->m_pReaderGlobInt->m_pFctFillAuStruct(pC->m_pAudioReaderContext,
+                            pStreamHandler, pC->m_pReaderAudioAU);
+                    if (M4NO_ERROR != err)
+                    {
+                        M4OSA_TRACE1_1("M4PTO3GPP_Open():\
+                         pReaderGlobInt->m_pFctFillAuStruct(audio)returns 0x%x", err);
+                        return err;
+                    }
+                }
+                else
+                {
+                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+                }
+            }
+            else if (M4WAR_NO_MORE_STREAM != err) /**< Unexpected error code */
+            {
+                M4OSA_TRACE1_1("M4PTO3GPP_Open():\
+                     pReaderGlobInt->m_pFctGetNextStream returns 0x%x",
+                    err);
+                return err;
+            }
+        } /* while*/
+    } /*if (M4OSA_NULL != pC->m_Params.pInputAudioTrackFile)*/
+
+    pC->m_VideoState = M4PTO3GPP_kStreamState_STARTED;
+
+    /**
+     * Init the audio stream */
+    if (M4OSA_NULL != pC->m_pReaderAudioStream)
+    {
+        pC->m_AudioState = M4PTO3GPP_kStreamState_STARTED;
+        err = pC->m_pReaderGlobInt->m_pFctReset(pC->m_pAudioReaderContext,
+            (M4_StreamHandler*)pC->m_pReaderAudioStream);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderDataInt->m_pFctReset(audio returns 0x%x",
+                 err);
+            return err;
+        }
+    }
+
+    /**
+     *  Update state automaton */
+    pC->m_State = M4PTO3GPP_kState_OPENED;
+
+    /**
+     * Get the max File size */
+    switch(pC->m_Params.OutputFileMaxSize)
+    {
+    case M4PTO3GPP_k50_KB:  pC->m_MaxFileSize = 50000;  break;
+    case M4PTO3GPP_k75_KB:  pC->m_MaxFileSize = 75000;  break;
+    case M4PTO3GPP_k100_KB: pC->m_MaxFileSize = 100000; break;
+    case M4PTO3GPP_k150_KB: pC->m_MaxFileSize = 150000; break;
+    case M4PTO3GPP_k200_KB: pC->m_MaxFileSize = 200000; break;
+    case M4PTO3GPP_k300_KB: pC->m_MaxFileSize = 300000; break;
+    case M4PTO3GPP_k400_KB: pC->m_MaxFileSize = 400000; break;
+    case M4PTO3GPP_k500_KB: pC->m_MaxFileSize = 500000; break;
+    case M4PTO3GPP_kUNLIMITED:
+    default:                                            break;
+    }
+
+    M4OSA_TRACE3_0("M4PTO3GPP_Open(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
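+
+/**
+ * Illustrative parameter set-up sketch for M4PTO3GPP_Open (hypothetical caller,
+ * not part of this file; only the fields checked above are shown, the actual
+ * M4PTO3GPP_Params structure contains more members, and the callback names and
+ * file paths below are placeholders):
+ *
+ *     M4PTO3GPP_Params params;
+ *
+ *     params.pPictureCallbackFct  = myPictureCallback;
+ *     params.pPictureCallbackCtxt = myCallbackContext;
+ *     params.pOutput3gppFile      = outputFilePath;
+ *     params.pTemporaryFile       = temporaryFilePath;
+ *     params.OutputVideoFormat    = M4VIDEOEDITING_kMPEG4;
+ *     params.OutputVideoBitrate   = M4VIDEOEDITING_k128_KBPS;
+ *     params.OutputVideoFrameSize = M4VIDEOEDITING_kQCIF;
+ *     params.OutputFileMaxSize    = M4PTO3GPP_kUNLIMITED;
+ *     params.pInputAudioTrackFile = M4OSA_NULL;
+ *
+ *     err = M4PTO3GPP_Open(pto3gppCtx, &params);
+ */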
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext);
+ * @brief   Performs one step of transcoding.
+ * @note
+ * @param   pContext            (IN) M4PTO3GPP context
+ * @return  M4NO_ERROR          No error
+ * @return  M4ERR_PARAMETER     pContext is M4OSA_NULL
+ * @return  M4ERR_STATE:    M4PTO3GPP is not in an appropriate state for this function
+ *                           to be called
+ * @return  M4PTO3GPP_WAR_END_OF_PROCESSING Encoding completed
+ ******************************************************************************
+*/
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext)
+/*********************************************************/
+{
+    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 l_uiAudioStepCount = 0;
+    M4OSA_Int32  JumpToTime = 0;
+    M4OSA_Time  mtIncCts;
+
+    /**
+     *  Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pContext), M4ERR_PARAMETER,
+                "M4PTO3GPP_Step: pContext is M4OSA_NULL");
+
+    /**
+     *  Check state automaton */
+    if ( !((M4PTO3GPP_kState_OPENED == pC->m_State) || (M4PTO3GPP_kState_READY == pC->m_State)) )
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Step(): Wrong State (%d), returning M4ERR_STATE", pC->m_State);
+        return M4ERR_STATE;
+    }
+
+    /******************************************************************/
+    /**
+     *  In case this is the first step, we prepare the decoder, the encoder and the writer */
+    if (M4PTO3GPP_kState_OPENED == pC->m_State)
+    {
+        M4OSA_TRACE2_0("M4PTO3GPP_Step(): This is the first step, \
+                       calling M4PTO3GPP_Ready4Processing");
+
+        /**
+         *  Prepare the reader, the decoder, the encoder, the writer... */
+        err = M4PTO3GPP_Ready4Processing(pC);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Step(): M4PTO3GPP_Ready4Processing() returns 0x%x", err);
+            return err;
+        }
+
+        /**
+         *  Update state automaton */
+        pC->m_State = M4PTO3GPP_kState_READY;
+
+        M4OSA_TRACE3_0("M4PTO3GPP_Step(): returning M4NO_ERROR (a)");
+        return M4NO_ERROR; /**< we only do that in the first step, \
+                           first REAL step will be the next one */
+    }
+
+
+    /*
+     * Check if we reached the targeted file size.
+     * We do that before the encoding, because the core encoder has to know if this is
+     * the last frame to encode */
+    err = pC->m_pWriterGlobInt->pFctGetOption(pC->m_p3gpWriterContext,
+        M4WRITER_kFileSizeAudioEstimated, (M4OSA_DataOption) &pC->m_CurrentFileSize);
+    if ((0 != pC->m_MaxFileSize) &&
+        /**< Add a margin to the file size so that the max file size is never exceeded */
+       ((pC->m_CurrentFileSize + M4PTO3GPP_MARGE_OF_FILE_SIZE) >= pC->m_MaxFileSize))
+    {
+        pC->m_IsLastPicture = M4OSA_TRUE;
+    }
+
+    /******************************************************************
+    *  At that point we are in M4PTO3GPP_kState_READY state
+    *  We perform one step of video encoding
+    ******************************************************************/
+
+    /************* VIDEO ENCODING ***************/
+    if (M4PTO3GPP_kStreamState_STARTED == pC->m_VideoState) /**<If the video encoding is going on*/
+    {   /**
+         * Call the encoder  */
+        pC->m_NbCurrentFrame++;
+
+        /* Check if it is the last frame to encode */
+        if((pC->m_Params.NbVideoFrames > 0) \
+            && (pC->m_NbCurrentFrame >= pC->m_Params.NbVideoFrames))
+        {
+            pC->m_IsLastPicture = M4OSA_TRUE;
+        }
+
+        M4OSA_TRACE2_2("M4PTO3GPP_Step(): Calling pEncoderInt->pFctEncode with videoCts = %.2f\
+                       nb = %lu", pC->m_mtCts, pC->m_NbCurrentFrame);
+
+        err = pC->m_pEncoderInt->pFctEncode(pC->m_pMp4EncoderContext, M4OSA_NULL,
+            /**< The input plane is null because the input Picture will be obtained by the\
+            VPP filter from the context */
+                                        pC->m_mtCts,
+                                        (pC->m_IsLastPicture ?
+                                        M4ENCODER_kLastFrame : M4ENCODER_kNormalFrame) );
+        /**< The last param set to M4ENCODER_kLastFrame signals that this is the last frame to be\
+        encoded, M4ENCODER_kNormalFrame otherwise */
+
+        M4OSA_TRACE3_2("M4PTO3GPP_Step(): pEncoderInt->pFctEncode returns 0x%x, vidFormat =0x%x",
+            err, pC->m_Params.OutputVideoFormat);
+        if((M4NO_ERROR == err) && (M4VIDEOEDITING_kH264 == pC->m_Params.OutputVideoFormat))
+        {
+            /* Check if this is the last frame */
+            if(M4OSA_TRUE == pC->m_IsLastPicture)
+            {
+                M4OSA_TRACE3_0("M4PTO3GPP_Step(): Last picture");
+                pC->m_VideoState = M4PTO3GPP_kStreamState_FINISHED;
+            }
+
+        }
+
+        if (M4WAR_NO_MORE_AU == err) /**< The video encoding is finished */
+        {
+            M4OSA_TRACE3_0("M4PTO3GPP_Step(): pEncoderInt->pFctEncode returns M4WAR_NO_MORE_AU");
+            pC->m_VideoState = M4PTO3GPP_kStreamState_FINISHED;
+        }
+        else if (M4NO_ERROR != err)     /**< Unexpected error code */
+        {
+            if( (((M4OSA_UInt32)M4WAR_WRITER_STOP_REQ) == err) ||
+                    (((M4OSA_UInt32)M4ERR_ALLOC) == err) )
+            {
+                M4OSA_TRACE1_0("M4PTO3GPP_Step: returning ERR_PTO3GPP_ENCODER_ACCES_UNIT_ERROR");
+                return ERR_PTO3GPP_ENCODER_ACCES_UNIT_ERROR;
+            }
+            else
+            {
+                M4OSA_TRACE1_1("M4PTO3GPP_Step(): pEncoderInt->pFctEncode(last) (a) returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+    } /**< End of video encoding */
+
+
+    /****** AUDIO TRANSCODING (read + null encoding + write) ******/
+    if (M4PTO3GPP_kStreamState_STARTED == pC->m_AudioState)
+    {
+        while ( (M4PTO3GPP_kStreamState_STARTED == pC->m_AudioState) &&
+                (pC->m_mtAudioCts < pC->m_mtNextCts))
+
+        {
+            l_uiAudioStepCount++;
+            if (M4OSA_FALSE == pC->m_bAudioPaddingSilence)
+            {
+                /**< Read the next audio AU in the input Audio file */
+                err = pC->m_pReaderDataInt->m_pFctGetNextAu(pC->m_pAudioReaderContext,
+                    (M4_StreamHandler*)pC->m_pReaderAudioStream, pC->m_pReaderAudioAU);
+                pC->m_mtAudioCts = pC->m_pReaderAudioAU->m_CTS + pC->m_AudioOffSet;
+
+                if (M4WAR_NO_MORE_AU == err)    /* The audio transcoding is finished */
+                {
+                    M4OSA_TRACE2_0("M4PTO3GPP_Step():\
+                                  pReaderDataInt->m_pFctGetNextAu(audio) returns \
+                                    M4WAR_NO_MORE_AU");
+                    switch(pC->m_Params.AudioPaddingMode)
+                    {
+                        case M4PTO3GPP_kAudioPaddingMode_None:
+
+                            pC->m_AudioState = M4PTO3GPP_kStreamState_FINISHED;
+                            break;
+
+                        case M4PTO3GPP_kAudioPaddingMode_Silence:
+
+                            if (M4DA_StreamTypeAudioAmrNarrowBand
+                                != pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+                                /**< Do nothing if the input audio file format is not AMR */
+                            {
+                                pC->m_AudioState = M4PTO3GPP_kStreamState_FINISHED;
+                            }
+                            else
+                            {
+                                pC->m_bAudioPaddingSilence = M4OSA_TRUE;
+                            }
+                            break;
+
+                        case M4PTO3GPP_kAudioPaddingMode_Loop:
+
+                            /**< Jump to the beginning of the audio file */
+                            err = pC->m_pReaderGlobInt->m_pFctJump(pC->m_pAudioReaderContext,
+                                (M4_StreamHandler*)pC->m_pReaderAudioStream, &JumpToTime);
+
+                            if (M4NO_ERROR != err)
+                            {
+                                M4OSA_TRACE1_1("M4PTO3GPP_Step(): \
+                                              pReaderDataInt->m_pFctReset(audio returns 0x%x",
+                                               err);
+                                return err;
+                            }
+
+                            if (M4DA_StreamTypeAudioAmrNarrowBand
+                                == pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+                            {
+                                pC->m_mtAudioCts += 20; /*< SEMC bug fixed at Lund */
+                                pC->m_AudioOffSet = pC->m_mtAudioCts;
+
+                                /**
+                                 * 'BZZZ' bug fix:
+                                 * add a silence frame */
+                                mtIncCts = (M4OSA_Time)((pC->m_mtAudioCts) *
+                                    (pC->m_pWriterAudioStream->timeScale / 1000.0));
+                                err = M4PTO3GPP_writeAmrSilence122Frame(pC->m_pWriterDataInt,
+                                    pC->m_p3gpWriterContext, &pC->m_WriterAudioAU, mtIncCts);
+
+                                if (M4NO_ERROR != err)
+                                {
+                                    M4OSA_TRACE1_1("M4PTO3GPP_Step(): \
+                                                   M4PTO3GPP_AddAmrSilenceSid returns 0x%x", err);
+                                    return err;
+                                }/**< Add => no audio cts increment...*/
+                            }
+                            else
+                            {
+                                pC->m_AudioOffSet = pC->m_mtAudioCts + pC->m_DeltaAudioCts;
+                            }
+                            break;
+                    } /* end of: switch */
+                }
+                else if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4PTO3GPP_Step(): pReaderDataInt->m_pFctGetNextAu(Audio)\
+                                   returns 0x%x", err);
+                    return err;
+                }
+                else
+                {
+                    /**
+                     * Save the delta Cts (AAC only) */
+                    pC->m_DeltaAudioCts = pC->m_pReaderAudioAU->m_CTS - pC->m_PrevAudioCts;
+                    pC->m_PrevAudioCts  = pC->m_pReaderAudioAU->m_CTS;
+
+                    /**
+                     *  Prepare the writer AU */
+                    err = pC->m_pWriterDataInt->pStartAU(pC->m_p3gpWriterContext, 1,
+                        &pC->m_WriterAudioAU);
+                    if (M4NO_ERROR != err)
+                    {
+                        M4OSA_TRACE1_1("M4PTO3GPP_Step(): pWriterDataInt->pStartAU(Audio)\
+                                       returns 0x%x", err);
+                        return err;
+                    }
+
+                    /**
+                     *  Copy audio data from reader AU to writer AU */
+                    M4OSA_TRACE2_1("M4PTO3GPP_Step(): Copying audio AU: size=%d",
+                        pC->m_pReaderAudioAU->m_size);
+                    M4OSA_memcpy((M4OSA_MemAddr8)pC->m_WriterAudioAU.dataAddress,
+                        (M4OSA_MemAddr8)pC->m_pReaderAudioAU->m_dataAddress,
+                        pC->m_pReaderAudioAU->m_size);
+                    pC->m_WriterAudioAU.size = pC->m_pReaderAudioAU->m_size;
+
+                    /**
+                     *  Convert CTS unit from milliseconds to timescale */
+                    if (M4DA_StreamTypeAudioAmrNarrowBand
+                        != pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+                    {
+                        pC->m_WriterAudioAU.CTS  = (M4OSA_Time)
+                            ((pC->m_AudioOffSet + pC->m_pReaderAudioAU->m_CTS)
+                            * pC->m_pWriterAudioStream->timeScale / 1000.0);
+                    }
+                    else
+                    {
+                        pC->m_WriterAudioAU.CTS = (M4OSA_Time)(pC->m_mtAudioCts *
+                            (pC->m_pWriterAudioStream->timeScale / 1000.0));
+                    }
+                    pC->m_WriterAudioAU.nbFrag = 0;
+                    M4OSA_TRACE2_1("M4PTO3GPP_Step(): audio AU: CTS=%d ms", pC->m_mtAudioCts
+                        /*pC->m_pReaderAudioAU->m_CTS*/);
+
+                    /**
+                     *  Write it to the output file */
+                    err = pC->m_pWriterDataInt->pProcessAU(pC->m_p3gpWriterContext, 1,
+                        &pC->m_WriterAudioAU);
+
+                    if (M4NO_ERROR != err)
+                    {
+                        M4OSA_TRACE1_1("M4PTO3GPP_Step(): pWriterDataInt->pProcessAU(Audio)\
+                                       returns 0x%x", err);
+                        return err;
+                    }
+                }
+            }
+            else /**< M4OSA_TRUE == pC->m_bAudioPaddingSilence */
+            {
+                if (M4DA_StreamTypeAudioAmrNarrowBand ==
+                    pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+                {
+                    /**
+                     * Fill in audio au with silence */
+                    pC->m_mtAudioCts += 20;
+
+                    /**
+                     * Pad with silence */
+                    mtIncCts = (M4OSA_Time)(pC->m_mtAudioCts
+                        * (pC->m_pWriterAudioStream->timeScale / 1000.0));
+                    err = M4PTO3GPP_writeAmrSilence048Frame(pC->m_pWriterDataInt,
+                        pC->m_p3gpWriterContext, &pC->m_WriterAudioAU, mtIncCts);
+
+                    if (M4NO_ERROR != err)
+                    {
+                        M4OSA_TRACE1_1("M4PTO3GPP_Step(): M4PTO3GPP_AddAmrSilenceSid returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+                else /**< Do nothing if the input audio file format is not AMR */
+                {
+                    pC->m_AudioState = M4PTO3GPP_kStreamState_FINISHED;
+                }
+
+            }
+        } /**< while */
+    } /**< End of audio encoding */
+
+    pC->m_mtCts = pC->m_mtNextCts;
+
+    /**
+     *  The transcoding is finished when no stream is being encoded anymore */
+    if (M4PTO3GPP_kStreamState_FINISHED == pC->m_VideoState)
+    {
+        pC->m_State = M4PTO3GPP_kState_FINISHED;
+        M4OSA_TRACE2_0("M4PTO3GPP_Step(): transcoding finished, returning M4WAR_NO_MORE_AU");
+        return M4PTO3GPP_WAR_END_OF_PROCESSING;
+    }
+
+    M4OSA_TRACE3_0("M4PTO3GPP_Step(): returning M4NO_ERROR (b)");
+    return M4NO_ERROR;
+}
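+
+/**
+ * Illustrative processing-loop sketch (hypothetical caller, not part of this
+ * file): Step() is called repeatedly until it reports the end of processing,
+ * then the output 3GPP file is finalised with Close().
+ *
+ *     do
+ *     {
+ *         err = M4PTO3GPP_Step(pto3gppCtx);
+ *     } while (M4NO_ERROR == err);
+ *
+ *     if (M4PTO3GPP_WAR_END_OF_PROCESSING == err)
+ *     {
+ *         err = M4PTO3GPP_Close(pto3gppCtx);
+ *     }
+ */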
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext);
+ * @brief   Finish the M4PTO3GPP transcoding.
+ * @note    The output 3GPP file is ready to be played after this call
+ * @param   pContext            (IN) M4PTO3GPP context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
+ * @return  M4ERR_STATE:    M4PTO3GPP is not in an appropriate state for this function to be called
+ ******************************************************************************
+*/
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext)
+/*********************************************************/
+{
+    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+    M4OSA_ERR    osaErr = M4NO_ERROR;
+    M4OSA_UInt32 lastCTS;
+    M4ENCODER_Header* encHeader;
+    M4SYS_StreamIDmemAddr streamHeader;
+
+    M4OSA_TRACE3_1("M4PTO3GPP_Close called with pContext=0x%x", pContext);
+
+    /**
+     *  Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pContext), M4ERR_PARAMETER, "M4PTO3GPP_Close:\
+                                                             pContext is M4OSA_NULL");
+
+    /* Check state automaton */
+    if ((pC->m_State != M4PTO3GPP_kState_OPENED) &&
+        (pC->m_State != M4PTO3GPP_kState_READY) &&
+        (pC->m_State != M4PTO3GPP_kState_FINISHED))
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Close(): Wrong State (%d), returning M4ERR_STATE", pC->m_State);
+        return M4ERR_STATE;
+    }
+
+    /*************************************/
+    /******** Finish the encoding ********/
+    /*************************************/
+    if (M4PTO3GPP_kState_READY == pC->m_State)
+    {
+        pC->m_State = M4PTO3GPP_kState_FINISHED;
+    }
+
+    if (M4PTO3GPP_kEncoderRunning == pC->m_eEncoderState)
+    {
+        if (pC->m_pEncoderInt->pFctStop != M4OSA_NULL)
+        {
+            osaErr = pC->m_pEncoderInt->pFctStop(pC->m_pMp4EncoderContext);
+            if (M4NO_ERROR != osaErr)
+            {
+                M4OSA_TRACE1_1("M4PTO3GPP_close: m_pEncoderInt->pFctStop returns 0x%x", osaErr);
+                /* Well... how the heck do you handle a failed cleanup? */
+            }
+        }
+
+        pC->m_eEncoderState = M4PTO3GPP_kEncoderStopped;
+    }
+
+    /* Has the encoder actually been opened? Don't close it if that's not the case. */
+    if (M4PTO3GPP_kEncoderStopped == pC->m_eEncoderState)
+    {
+        osaErr = pC->m_pEncoderInt->pFctClose(pC->m_pMp4EncoderContext);
+        if (M4NO_ERROR != osaErr)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_close: m_pEncoderInt->pFctClose returns 0x%x", osaErr);
+            /* Well... how the heck do you handle a failed cleanup? */
+        }
+
+        pC->m_eEncoderState = M4PTO3GPP_kEncoderClosed;
+    }
+
+    /*******************************/
+    /******** Close 3GP out ********/
+    /*******************************/
+
+    if (M4OSA_NULL != pC->m_p3gpWriterContext)  /* happens in state _SET */
+    {
+        /* HW encoder: fetch the DSI from the shell video encoder, and feed it to the writer before
+        closing it. */
+        if ( (M4VIDEOEDITING_kMPEG4_EMP == pC->m_Params.OutputVideoFormat)
+            || (M4VIDEOEDITING_kMPEG4 == pC->m_Params.OutputVideoFormat)
+            || (M4VIDEOEDITING_kH264 == pC->m_Params.OutputVideoFormat))
+        {
+            osaErr = pC->m_pEncoderInt->pFctGetOption(pC->m_pMp4EncoderContext,
+                M4ENCODER_kOptionID_EncoderHeader,
+                                                            (M4OSA_DataOption)&encHeader);
+            if ( (M4NO_ERROR != osaErr) || (M4OSA_NULL == encHeader->pBuf) )
+            {
+                M4OSA_TRACE1_1("M4PTO3GPP_close: failed to get the encoder header (err 0x%x)",
+                    osaErr);
+                /**< no return here, we still have stuff to deallocate after close, even if \
+                it fails. */
+            }
+            else
+            {
+                /* set this header in the writer */
+                streamHeader.streamID = M4PTO3GPP_WRITER_VIDEO_STREAM_ID;
+                streamHeader.size = encHeader->Size;
+                streamHeader.addr = (M4OSA_MemAddr32)encHeader->pBuf;
+                osaErr = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+                    M4WRITER_kDSI, &streamHeader);
+                if (M4NO_ERROR != osaErr)
+                {
+                    M4OSA_TRACE1_1("M4PTO3GPP_close: failed to set the DSI in the writer \
+                                (err 0x%x)   ", osaErr);
+                }
+            }
+        }
+
+        /* Update last Video CTS */
+        lastCTS = (M4OSA_UInt32)pC->m_mtCts;
+
+        osaErr = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxFileDuration, &lastCTS);
+        if (M4NO_ERROR != osaErr)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Close: SetOption(M4WRITER_kMaxFileDuration) returns 0x%x",
+                osaErr);
+        }
+
+        /* Write and close the 3GP output file */
+        osaErr = pC->m_pWriterGlobInt->pFctCloseWrite(pC->m_p3gpWriterContext);
+        if (M4NO_ERROR != osaErr)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Close: pWriterGlobInt->pFctCloseWrite returns 0x%x", osaErr);
+            /**< don't return yet, we have to close other things */
+        }
+        pC->m_p3gpWriterContext = M4OSA_NULL;
+    }
+
+    /**
+     * State transition */
+    pC->m_State = M4PTO3GPP_kState_CLOSED;
+
+    M4OSA_TRACE3_1("M4PTO3GPP_Close(): returning 0x%x", osaErr);
+    return osaErr;
+}
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext);
+ * @brief   Free all resources used by the M4PTO3GPP.
+ * @note    The context is no more valid after this call
+ * @param   pContext            (IN) M4PTO3GPP context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+*/
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext)
+/*********************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+
+    M4OSA_TRACE3_1("M4PTO3GPP_CleanUp called with pContext=0x%x", pContext);
+
+    /**
+     *  Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pContext),M4ERR_PARAMETER, "M4PTO3GPP_CleanUp: pContext \
+                                                            is M4OSA_NULL");
+
+    /**
+     *  First call Close, if needed, to clean the video encoder */
+
+    if ((M4PTO3GPP_kState_OPENED == pC->m_State) || (M4PTO3GPP_kState_READY == pC->m_State)
+        || (M4PTO3GPP_kState_FINISHED == pC->m_State))
+    {
+        err = M4PTO3GPP_Close(pContext);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: M4PTO3GPP_Close returns 0x%x", err);
+            /**< don't return, we have to free other components */
+        }
+    }
+
+    /**
+     *  Free Audio reader stuff, if needed */
+
+    if (M4OSA_NULL != pC->m_pAudioReaderContext) /**< may be M4OSA_NULL if M4PTO3GPP_Open was not\
+                                                 called */
+    {
+
+        err = pC->m_pReaderGlobInt->m_pFctClose(pC->m_pAudioReaderContext);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: pReaderGlobInt->m_pFctClose returns 0x%x", err);
+            /**< don't return, we have to free other components */
+        }
+        err = pC->m_pReaderGlobInt->m_pFctDestroy(pC->m_pAudioReaderContext);
+        pC->m_pAudioReaderContext = M4OSA_NULL;
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: pReaderGlobInt->m_pFctDestroy returns 0x%x", err);
+            /**< don't return, we have to free other components */
+        }
+    }
+
+    if (M4OSA_NULL != pC->m_pReaderAudioAU)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pReaderAudioAU);
+        pC->m_pReaderAudioAU = M4OSA_NULL;
+    }
+
+    /**
+     *  Free video encoder stuff, if needed */
+    if (M4OSA_NULL != pC->m_pMp4EncoderContext)
+    {
+        err = pC->m_pEncoderInt->pFctCleanup(pC->m_pMp4EncoderContext);
+        pC->m_pMp4EncoderContext = M4OSA_NULL;
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: pEncoderInt->pFctDestroy returns 0x%x", err);
+            /**< don't return, we have to free other components */
+        }
+    }
+
+    if (M4OSA_NULL != pC->m_pWriterVideoStream)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pWriterVideoStream);
+        pC->m_pWriterVideoStream = M4OSA_NULL;
+    }
+    if (M4OSA_NULL != pC->m_pWriterAudioStream)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pWriterAudioStream);
+        pC->m_pWriterAudioStream = M4OSA_NULL;
+    }
+    if (M4OSA_NULL != pC->m_pWriterVideoStreamInfo)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pWriterVideoStreamInfo);
+        pC->m_pWriterVideoStreamInfo = M4OSA_NULL;
+    }
+    if (M4OSA_NULL != pC->m_pWriterAudioStreamInfo)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pWriterAudioStreamInfo);
+        pC->m_pWriterAudioStreamInfo = M4OSA_NULL;
+    }
+
+
+    /**
+     *  Free the shells interfaces */
+    if (M4OSA_NULL != pC->m_pReaderGlobInt)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pReaderGlobInt);
+        pC->m_pReaderGlobInt = M4OSA_NULL;
+    }
+    if (M4OSA_NULL != pC->m_pReaderDataInt)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pReaderDataInt);
+        pC->m_pReaderDataInt = M4OSA_NULL;
+    }
+
+    if(M4OSA_NULL != pC->m_pEncoderInt)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pEncoderInt);
+        pC->m_pEncoderInt = M4OSA_NULL;
+    }
+    if(M4OSA_NULL != pC->m_pWriterGlobInt)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pWriterGlobInt);
+        pC->m_pWriterGlobInt = M4OSA_NULL;
+    }
+    if(M4OSA_NULL != pC->m_pWriterDataInt)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pWriterDataInt);
+        pC->m_pWriterDataInt = M4OSA_NULL;
+    }
+    /**< Do not free pC->pOsalFileRead and pC->pOsalFileWrite, because they are owned by the \
+    application */
+
+    /**
+     *  Free the context itself */
+    M4OSA_free((M4OSA_MemAddr32)pC);
+    pC = M4OSA_NULL;
+
+    M4OSA_TRACE3_0("M4PTO3GPP_CleanUp(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
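+
+/**
+ * Illustrative tear-down sketch (hypothetical caller, not part of this file):
+ * because Init() pre-sets every pointer to M4OSA_NULL, CleanUp() is safe to
+ * call even when Open() or Step() has failed.
+ *
+ *     err = M4PTO3GPP_Open(pto3gppCtx, &params);
+ *     if (M4NO_ERROR != err)
+ *     {
+ *         M4PTO3GPP_CleanUp(pto3gppCtx);
+ *         pto3gppCtx = M4OSA_NULL;
+ *         return err;
+ *     }
+ */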
+
+/********************* INTERNAL FUNCTIONS *********************/
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Ready4Processing(M4PTO3GPP_InternalContext* pC);
+ * @brief   Prepare all resources and interfaces for the transcoding.
+ * @note    It is called by the first M4PTO3GPP_Step() call
+ * @param   pC          (IN) M4PTO3GPP private context
+ * @return  M4NO_ERROR: No error
+ * @return  Any error returned by an underlying module
+ ******************************************************************************
+*/
+/******************************************************/
+M4OSA_ERR M4PTO3GPP_Ready4Processing(M4PTO3GPP_InternalContext* pC)
+/******************************************************/
+{
+    M4OSA_ERR               err = M4NO_ERROR;
+    M4WRITER_OutputFileType outputFileType;
+    M4OSA_UInt32            uiVersion;
+    M4ENCODER_Format        encFormat;
+    M4ENCODER_AdvancedParams   EncParams;    /**< Encoder advanced parameters */
+    M4SYS_StreamIDValue     optionValue;
+    M4OSA_Bool              bActivateEmp = M4OSA_FALSE;
+
+    M4OSA_TRACE3_1("M4PTO3GPP_Ready4Processing called with pC=0x%x", pC);
+
+    /******************************/
+    /******************************/
+
+    /********************************************/
+    /********                            ********/
+    /******** Video Encoder Params init  ********/
+    /********                            ********/
+    /********************************************/
+
+    /**
+     *  Get the correct encoder interface */
+    switch(pC->m_Params.OutputVideoFormat)
+    {
+        case M4VIDEOEDITING_kMPEG4_EMP: bActivateEmp = M4OSA_TRUE; /* no break */
+        case M4VIDEOEDITING_kMPEG4:
+            if (pC->registeredExternalEncs[M4VE_kMpeg4VideoEnc].registered)
+            {
+#ifdef M4VSS_ENABLE_EXTERNAL_ENCODERS
+                pC->m_pEncoderExternalAPI = pC->registeredExternalEncs[M4VE_kMpeg4VideoEnc]
+                .pEncoderInterface;
+                pC->m_pEncoderUserData = pC->registeredExternalEncs[M4VE_kMpeg4VideoEnc].pUserData;
+
+                err = M4EGE_MPEG4_getInterfaces(&encFormat, &pC->m_pEncoderInt,
+                    M4ENCODER_OPEN_ADVANCED);
+#else
+                M4OSA_TRACE1_0("No external MPEG4 encoder available!\
+                               Did you forget to register one?");
+                err = M4ERR_STATE;
+#endif
+            }
+            else
+            {
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+                err = VideoEditorVideoEncoder_getInterface_MPEG4(&encFormat, &pC->m_pEncoderInt,
+                    M4ENCODER_OPEN_ADVANCED);
+#else /* software MPEG4 encoder not available! */
+                M4OSA_TRACE1_0("No MPEG4 encoder available! Did you forget to register one?");
+                err = M4ERR_STATE;
+#endif /* software MPEG4 encoder available? */
+            }
+            break;
+        case M4VIDEOEDITING_kH263:
+            if (pC->registeredExternalEncs[M4VE_kH263VideoEnc].registered)
+            {
+#ifdef M4VSS_ENABLE_EXTERNAL_ENCODERS
+                pC->m_pEncoderExternalAPI = pC->registeredExternalEncs[M4VE_kH263VideoEnc]
+                .pEncoderInterface;
+                pC->m_pEncoderUserData = pC->registeredExternalEncs[M4VE_kH263VideoEnc].pUserData;
+
+                err = M4EGE_H263_getInterfaces(&encFormat, &pC->m_pEncoderInt,
+                    M4ENCODER_OPEN_ADVANCED);
+#else
+                M4OSA_TRACE1_0("No external H263 encoder available! Did you forget to register\
+                               one?");
+                err = M4ERR_STATE;
+#endif
+            }
+            else
+            {
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+                err = VideoEditorVideoEncoder_getInterface_H263(&encFormat, &pC->m_pEncoderInt,
+                    M4ENCODER_OPEN_ADVANCED);
+#else /* software H263 encoder not available! */
+                M4OSA_TRACE1_0("No H263 encoder available! Did you forget to register one?");
+                err = M4ERR_STATE;
+#endif /* software H263 encoder available? */
+            }
+            break;
+        case M4VIDEOEDITING_kH264:
+            if (pC->registeredExternalEncs[M4VE_kH264VideoEnc].registered)
+            {
+                M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing: No external H264 encoder available! \
+                               Did you forget to register one?");
+                err = M4ERR_STATE;
+            }
+            else
+            {
+#ifdef M4VSS_SUPPORT_ENCODER_AVC
+                err = VideoEditorVideoEncoder_getInterface_H264(&encFormat, &pC->m_pEncoderInt,
+                    M4ENCODER_OPEN_ADVANCED);
+#else /* software H264 encoder not available! */
+                M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing: No H264 encoder available!\
+                               Did you forget to register one?");
+                err = M4ERR_STATE;
+#endif /* software H264 encoder available? */
+            }
+            break;
+        default:
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning \
+                           ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+                           pC->m_Params.OutputVideoFormat);
+            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+    }
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("switch(pC->m_Params.OutputVideoFormat): getInterfaces returns 0x%x", err);
+        return err;
+    }
+
+    /**
+     *  Fill encoder parameters according to M4PTO3GPP settings */
+
+    /**
+     * Video frame size */
+    switch(pC->m_Params.OutputVideoFrameSize)
+    {
+        case M4VIDEOEDITING_kSQCIF :
+            EncParams.FrameHeight = M4ENCODER_SQCIF_Height;
+            EncParams.FrameWidth  = M4ENCODER_SQCIF_Width;
+            break;
+        case M4VIDEOEDITING_kQQVGA :
+            EncParams.FrameHeight = M4ENCODER_QQVGA_Height;
+            EncParams.FrameWidth  = M4ENCODER_QQVGA_Width;
+            break;
+        case M4VIDEOEDITING_kQCIF :
+            EncParams.FrameHeight = M4ENCODER_QCIF_Height;
+            EncParams.FrameWidth  = M4ENCODER_QCIF_Width;
+            break;
+        case M4VIDEOEDITING_kQVGA :
+            EncParams.FrameHeight = M4ENCODER_QVGA_Height;
+            EncParams.FrameWidth  = M4ENCODER_QVGA_Width;
+            break;
+        case M4VIDEOEDITING_kCIF :
+            EncParams.FrameHeight = M4ENCODER_CIF_Height;
+            EncParams.FrameWidth  = M4ENCODER_CIF_Width;
+            break;
+        case M4VIDEOEDITING_kVGA :
+            EncParams.FrameHeight = M4ENCODER_VGA_Height;
+            EncParams.FrameWidth  = M4ENCODER_VGA_Width;
+            break;
+/* +PR LV5807 */
+        case M4VIDEOEDITING_kWVGA :
+            EncParams.FrameHeight = M4ENCODER_WVGA_Height;
+            EncParams.FrameWidth  = M4ENCODER_WVGA_Width;
+            break;
+        case M4VIDEOEDITING_kNTSC:
+            EncParams.FrameHeight = M4ENCODER_NTSC_Height;
+            EncParams.FrameWidth  = M4ENCODER_NTSC_Width;
+            break;
+/* -PR LV5807 */
+/* +CR Google */
+        case M4VIDEOEDITING_k640_360:
+            EncParams.FrameHeight = M4ENCODER_640_360_Height;
+            EncParams.FrameWidth  = M4ENCODER_640_360_Width;
+            break;
+
+        case M4VIDEOEDITING_k854_480:
+            EncParams.FrameHeight = M4ENCODER_854_480_Height;
+            EncParams.FrameWidth  = M4ENCODER_854_480_Width;
+            break;
+
+        case M4VIDEOEDITING_kHD1280:
+            EncParams.FrameHeight = M4ENCODER_HD1280_Height;
+            EncParams.FrameWidth  = M4ENCODER_HD1280_Width;
+            break;
+
+        case M4VIDEOEDITING_kHD1080:
+            EncParams.FrameHeight = M4ENCODER_HD1080_Height;
+            EncParams.FrameWidth  = M4ENCODER_HD1080_Width;
+            break;
+
+        case M4VIDEOEDITING_kHD960:
+            EncParams.FrameHeight = M4ENCODER_HD960_Height;
+            EncParams.FrameWidth  = M4ENCODER_HD960_Width;
+            break;
+/* -CR Google */
+        default :
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning \
+                           ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE",
+                           pC->m_Params.OutputVideoFrameSize);
+            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
+    }
+
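+    /* The input pictures delivered through the picture callback are planar YUV 4:2:0 frames */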
+    EncParams.InputFormat = M4ENCODER_kIYUV420;
+
+    /**
+     * Video bitrate */
+    switch(pC->m_Params.OutputVideoBitrate)
+    {
+        case M4VIDEOEDITING_k16_KBPS:
+        case M4VIDEOEDITING_k24_KBPS:
+        case M4VIDEOEDITING_k32_KBPS:
+        case M4VIDEOEDITING_k48_KBPS:
+        case M4VIDEOEDITING_k64_KBPS:
+        case M4VIDEOEDITING_k96_KBPS:
+        case M4VIDEOEDITING_k128_KBPS:
+        case M4VIDEOEDITING_k192_KBPS:
+        case M4VIDEOEDITING_k256_KBPS:
+        case M4VIDEOEDITING_k288_KBPS:
+        case M4VIDEOEDITING_k384_KBPS:
+        case M4VIDEOEDITING_k512_KBPS:
+        case M4VIDEOEDITING_k800_KBPS:
+/*+ New Encoder bitrates */
+        case M4VIDEOEDITING_k2_MBPS:
+        case M4VIDEOEDITING_k5_MBPS:
+        case M4VIDEOEDITING_k8_MBPS:
+/*- New Encoder bitrates */
+            EncParams.Bitrate = pC->m_Params.OutputVideoBitrate;
+            break;
+
+        case M4VIDEOEDITING_kVARIABLE_KBPS:
+/*+ New Encoder bitrates */
+            EncParams.Bitrate = M4VIDEOEDITING_k8_MBPS;
+/*- New Encoder bitrates */
+            break;
+
+        default :
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning\
+                           ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE",
+                           pC->m_Params.OutputVideoBitrate);
+            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE;
+    }
+
+    /**
+     * Video format */
+    switch(pC->m_Params.OutputVideoFormat)
+    {
+        case M4VIDEOEDITING_kMPEG4_EMP :
+        case M4VIDEOEDITING_kMPEG4 :
+            EncParams.Format    = M4ENCODER_kMPEG4;
+            break;
+        case M4VIDEOEDITING_kH263 :
+            EncParams.Format    = M4ENCODER_kH263;
+            break;
+        case M4VIDEOEDITING_kH264:
+            EncParams.Format    = M4ENCODER_kH264;
+            break;
+        default :
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning\
+                           ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+                           pC->m_Params.OutputVideoFormat);
+            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+    }
+
+    /**
+     * Video frame rate (set it to max = 30 fps) */
+    EncParams.uiTimeScale = 30;
+    EncParams.uiRateFactor = 1;
+
+    EncParams.FrameRate = M4ENCODER_k30_FPS;
+
+
+    /******************************/
+    /******** 3GP out init ********/
+    /******************************/
+
+    /* Get the 3GPP writer interface */
+    err = M4WRITER_3GP_getInterfaces(&outputFileType, &pC->m_pWriterGlobInt, &pC->m_pWriterDataInt);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4WRITER_3GP_getInterfaces: M4WRITER_3GP_getInterfaces returns 0x%x", err);
+        return err;
+    }
+
+    /* Init the 3GPP writer */
+    err = pC->m_pWriterGlobInt->pFctOpen(&pC->m_p3gpWriterContext, pC->m_Params.pOutput3gppFile,
+        pC->pOsalFileWrite, pC->m_Params.pTemporaryFile, pC->pOsalFileRead);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctOpen returns 0x%x", err);
+        return err;
+    }
+
+    /**
+     *  Link to the writer context in the writer interface */
+    pC->m_pWriterDataInt->pWriterContext = pC->m_p3gpWriterContext;
+
+    /**
+     *  Set the product description string in the written file */
+    err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext, M4WRITER_kEmbeddedString,
+        (M4OSA_DataOption)M4PTO3GPP_SIGNATURE);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: \
+                       pWriterGlobInt->pFctSetOption(M4WRITER_kEmbeddedString) returns 0x%x", err);
+        return err;
+    }
+
+    /**
+     *  Set the product version in the written file */
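+    /* The version is packed into a single decimal value, e.g. version 1.2.3 gives 123 */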
+    uiVersion = M4VIDEOEDITING_VERSION_MAJOR*100 + M4VIDEOEDITING_VERSION_MINOR*10
+        + M4VIDEOEDITING_VERSION_REVISION;
+    err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext, M4WRITER_kEmbeddedVersion,
+        (M4OSA_DataOption)&uiVersion);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: \
+                       pWriterGlobInt->pFctSetOption(M4WRITER_kEmbeddedVersion) returns 0x%x", err);
+        return err;
+    }
+
+    /**
+     * In case of EMP, we have to explicitly give an EMP ftyp to the writer */
+    if(M4OSA_TRUE == bActivateEmp)
+    {
+        M4VIDEOEDITING_FtypBox ftyp;
+
+        ftyp.major_brand          = M4VIDEOEDITING_BRAND_3GP4;
+        ftyp.minor_version        = M4VIDEOEDITING_BRAND_0000;
+        ftyp.nbCompatibleBrands   = 2;
+        ftyp.compatible_brands[0] = M4VIDEOEDITING_BRAND_3GP4;
+        ftyp.compatible_brands[1] = M4VIDEOEDITING_BRAND_EMP;
+
+        err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+            (M4OSA_UInt32)M4WRITER_kSetFtypBox, (M4OSA_DataOption) &ftyp);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing:\
+                         m_pWriterGlobInt->pFctSetOption(M4WRITER_kSetFtypBox) returns 0x%x!", err);
+            return err;
+        }
+    }
+
+    /**
+     *  Allocate and fill the video stream structures for the writer */
+    pC->m_pWriterVideoStream =
+        (M4SYS_StreamDescription*)M4OSA_malloc(sizeof(M4SYS_StreamDescription), M4PTO3GPP,
+        (M4OSA_Char *)"pWriterVideoStream");
+    if (M4OSA_NULL == pC->m_pWriterVideoStream)
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate pWriterVideoStream, \
+                       returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+    pC->m_pWriterVideoStreamInfo =
+        (M4WRITER_StreamVideoInfos*)M4OSA_malloc(sizeof(M4WRITER_StreamVideoInfos), M4PTO3GPP,
+        (M4OSA_Char *)"pWriterVideoStreamInfo");
+    if (M4OSA_NULL == pC->m_pWriterVideoStreamInfo)
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate pWriterVideoStreamInfo,\
+                       returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /**
+     * Fill Video properties structure for the AddStream method */
+    pC->m_pWriterVideoStreamInfo->height        = EncParams.FrameHeight;
+    pC->m_pWriterVideoStreamInfo->width         = EncParams.FrameWidth;
+    pC->m_pWriterVideoStreamInfo->fps           = 0;        /**< Not used by the core writer */
+    pC->m_pWriterVideoStreamInfo->Header.pBuf   = M4OSA_NULL;
+    /** No header, will be set by setOption */
+    pC->m_pWriterVideoStreamInfo->Header.Size   = 0;
+
+    /**
+     *  Fill Video stream description structure for the AddStream method */
+    pC->m_pWriterVideoStream->streamID = M4PTO3GPP_WRITER_VIDEO_STREAM_ID;
+
+    /**
+     * Video format */
+    switch(pC->m_Params.OutputVideoFormat)
+    {
+        case M4VIDEOEDITING_kMPEG4_EMP:
+        case M4VIDEOEDITING_kMPEG4:
+            pC->m_pWriterVideoStream->streamType = M4SYS_kMPEG_4;   break;
+        case M4VIDEOEDITING_kH263:
+            pC->m_pWriterVideoStream->streamType = M4SYS_kH263;     break;
+        case M4VIDEOEDITING_kH264:
+            pC->m_pWriterVideoStream->streamType = M4SYS_kH264;     break;
+        default :
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning \
+                           ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+                           pC->m_Params.OutputVideoFormat);
+            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+    }
+
+    /**
+     * Video bitrate */
+    switch(pC->m_Params.OutputVideoBitrate)
+    {
+        case M4VIDEOEDITING_k16_KBPS:
+        case M4VIDEOEDITING_k24_KBPS:
+        case M4VIDEOEDITING_k32_KBPS:
+        case M4VIDEOEDITING_k48_KBPS:
+        case M4VIDEOEDITING_k64_KBPS:
+        case M4VIDEOEDITING_k96_KBPS:
+        case M4VIDEOEDITING_k128_KBPS:
+        case M4VIDEOEDITING_k192_KBPS:
+        case M4VIDEOEDITING_k256_KBPS:
+        case M4VIDEOEDITING_k288_KBPS:
+        case M4VIDEOEDITING_k384_KBPS:
+        case M4VIDEOEDITING_k512_KBPS:
+        case M4VIDEOEDITING_k800_KBPS:
+/*+ New Encoder bitrates */
+        case M4VIDEOEDITING_k2_MBPS:
+        case M4VIDEOEDITING_k5_MBPS:
+        case M4VIDEOEDITING_k8_MBPS:
+/*- New Encoder bitrates */
+            pC->m_pWriterVideoStream->averageBitrate = pC->m_Params.OutputVideoBitrate;
+            break;
+
+        case M4VIDEOEDITING_kVARIABLE_KBPS :
+            pC->m_pWriterVideoStream->averageBitrate = 0;
+            break;
+
+        default :
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning\
+                           ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE",
+                           pC->m_Params.OutputVideoBitrate);
+            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE;
+    }
+
+    pC->m_pWriterVideoStream->duration                  = 0;        /**< Duration is not known */
+    pC->m_pWriterVideoStream->timeScale                 = 0;    /**< Not used by the core writer */
+    pC->m_pWriterVideoStream->maxBitrate                = pC->m_pWriterVideoStream->averageBitrate;
+    pC->m_pWriterVideoStream->profileLevel              = 0;    /**< Not used by the core writer */
+    pC->m_pWriterVideoStream->decoderSpecificInfo       = (M4OSA_MemAddr32)
+                                                            (pC->m_pWriterVideoStreamInfo);
+    pC->m_pWriterVideoStream->decoderSpecificInfoSize   = sizeof(M4WRITER_StreamVideoInfos);
+
+    /**
+     * Update AU properties for video stream */
+    pC->m_WriterVideoAU.CTS         = pC->m_WriterVideoAU.DTS = 0;  /** Reset time */
+    pC->m_WriterVideoAU.size        = 0;
+    pC->m_WriterVideoAU.frag        = M4OSA_NULL;
+    pC->m_WriterVideoAU.nbFrag      = 0;                            /** No fragment */
+    pC->m_WriterVideoAU.stream      = pC->m_pWriterVideoStream;
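+    /* AU_RAP marks the access unit as a random access point */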
+    pC->m_WriterVideoAU.attribute   = AU_RAP;
+    pC->m_WriterVideoAU.dataAddress = M4OSA_NULL;
+
+    /**
+     *  If there is an audio input, allocate and fill the audio stream structures for the writer */
+    if(M4OSA_NULL != pC->m_pReaderAudioStream)
+    {
+        pC->m_pWriterAudioStream =
+            (M4SYS_StreamDescription*)M4OSA_malloc(sizeof(M4SYS_StreamDescription), M4PTO3GPP,
+            (M4OSA_Char *)"pWriterAudioStream");
+        if (M4OSA_NULL == pC->m_pWriterAudioStream)
+        {
+            M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate pWriterAudioStream, \
+                           returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->m_pWriterAudioStreamInfo =
+            (M4WRITER_StreamAudioInfos*)M4OSA_malloc(sizeof(M4WRITER_StreamAudioInfos), M4PTO3GPP,
+            (M4OSA_Char *)"pWriterAudioStreamInfo");
+        if (M4OSA_NULL == pC->m_pWriterAudioStreamInfo)
+        {
+            M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate \
+                           pWriterAudioStreamInfo, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+        pC->m_pWriterAudioStreamInfo->nbSamplesPerSec = 0; /**< unused by our shell writer */
+        pC->m_pWriterAudioStreamInfo->nbBitsPerSample = 0; /**< unused by our shell writer */
+        pC->m_pWriterAudioStreamInfo->nbChannels = 1;      /**< unused by our shell writer */
+
+        if( (M4OSA_NULL != pC->m_pReaderAudioStream) && /* audio could have been discarded */
+            (M4OSA_NULL != pC->m_pReaderAudioStream->m_basicProperties.m_pDecoderSpecificInfo) )
+        {
+            /* If we copy the stream from the input, we copy its DSI */
+            pC->m_pWriterAudioStreamInfo->Header.Size =
+                pC->m_pReaderAudioStream->m_basicProperties.m_decoderSpecificInfoSize;
+            pC->m_pWriterAudioStreamInfo->Header.pBuf =
+                (M4OSA_MemAddr8)pC->m_pReaderAudioStream->m_basicProperties.m_pDecoderSpecificInfo;
+        }
+        else
+        {
+            /* Writer will put a default DSI */
+            pC->m_pWriterAudioStreamInfo->Header.Size = 0;
+            pC->m_pWriterAudioStreamInfo->Header.pBuf = M4OSA_NULL;
+        }
+
+        /**
+         * Add the audio stream */
+        switch (pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+        {
+            case M4DA_StreamTypeAudioAmrNarrowBand:
+                pC->m_pWriterAudioStream->streamType = M4SYS_kAMR;
+                break;
+            case M4DA_StreamTypeAudioAac:
+                pC->m_pWriterAudioStream->streamType = M4SYS_kAAC;
+                break;
+            case M4DA_StreamTypeAudioEvrc:
+                pC->m_pWriterAudioStream->streamType = M4SYS_kEVRC;
+                break;
+            default:
+                M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unhandled audio format (0x%x),\
+                               returning ERR_PTO3GPP_UNDEFINED_OUTPUT_AUDIO_FORMAT",
+                               pC->m_pReaderAudioStream->m_basicProperties.m_streamType);
+                return ERR_PTO3GPP_UNDEFINED_OUTPUT_AUDIO_FORMAT;
+        }
+
+        /*
+         * Fill Audio stream description structure for the AddStream method */
+        pC->m_pWriterAudioStream->streamID                  = M4PTO3GPP_WRITER_AUDIO_STREAM_ID;
+        pC->m_pWriterAudioStream->duration                  = 0;/**< Duration is not known yet */
+        pC->m_pWriterAudioStream->timeScale                 = M4PTO3GPP_WRITER_AUDIO_AMR_TIME_SCALE;
+        pC->m_pWriterAudioStream->profileLevel              = M4PTO3GPP_WRITER_AUDIO_PROFILE_LEVEL;
+        pC->m_pWriterAudioStream->averageBitrate            =
+                                pC->m_pReaderAudioStream->m_basicProperties.m_averageBitRate;
+        pC->m_pWriterAudioStream->maxBitrate                =
+                                pC->m_pWriterAudioStream->averageBitrate;
+
+        /**
+         * Our writer shell interface is a little tricky: we put M4WRITER_StreamAudioInfos \
+            in the DSI pointer... */
+        pC->m_pWriterAudioStream->decoderSpecificInfo =
+                    (M4OSA_MemAddr32)pC->m_pWriterAudioStreamInfo;
+
+        /**
+         * Update AU properties for audio stream */
+        pC->m_WriterAudioAU.CTS         = pC->m_WriterAudioAU.DTS = 0;  /** Reset time */
+        pC->m_WriterAudioAU.size        = 0;
+        pC->m_WriterAudioAU.frag        = M4OSA_NULL;
+        pC->m_WriterAudioAU.nbFrag      = 0;                            /** No fragment */
+        pC->m_WriterAudioAU.stream      = pC->m_pWriterAudioStream;
+        pC->m_WriterAudioAU.attribute   = AU_RAP;
+        pC->m_WriterAudioAU.dataAddress = M4OSA_NULL;
+    }
+
+    /************************************/
+    /******** Video Encoder Init ********/
+    /************************************/
+
+    /**
+     * PTO uses its own bitrate regulation, not the "true" core regulation */
+    EncParams.bInternalRegulation = M4OSA_TRUE; //M4OSA_FALSE;
+    EncParams.uiStartingQuantizerValue = M4PTO3GPP_QUANTIZER_STEP;
+
+    /**
+     * Other encoder settings */
+    if(M4OSA_TRUE == bActivateEmp)
+    {
+        EncParams.uiHorizontalSearchRange  = 15;            /* set value */
+        EncParams.uiVerticalSearchRange    = 15;            /* set value */
+        EncParams.bErrorResilience         = M4OSA_FALSE;   /* no error resilience */
+        EncParams.uiIVopPeriod             = 15;            /* one I frame every 15 frames */
+        EncParams.uiMotionEstimationTools  = 1;             /* M4V_MOTION_EST_TOOLS_NO_4MV */
+        EncParams.bAcPrediction            = M4OSA_FALSE;   /* no AC prediction */
+        EncParams.bDataPartitioning        = M4OSA_FALSE;   /* no data partitioning */
+    }
+    else
+    {
+        EncParams.uiHorizontalSearchRange  = 0;             /* use default */
+        EncParams.uiVerticalSearchRange    = 0;             /* use default */
+        EncParams.bErrorResilience         = M4OSA_FALSE;   /* no error resilience */
+        EncParams.uiIVopPeriod             = 15;             /* one I frame every 15 frames */
+        EncParams.uiMotionEstimationTools  = 0;             /* M4V_MOTION_EST_TOOLS_ALL */
+        EncParams.bAcPrediction            = M4OSA_TRUE;    /* use AC prediction */
+        EncParams.bDataPartitioning        = M4OSA_FALSE;   /* no data partitioning */
+    }
+
+    /**
+     * Create video encoder */
+    err = pC->m_pEncoderInt->pFctInit(&pC->m_pMp4EncoderContext, pC->m_pWriterDataInt,
+                                    M4PTO3GPP_applyVPP, pC, pC->m_pEncoderExternalAPI,
+                                    pC->m_pEncoderUserData);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: EncoderInt->pFctInit returns 0x%x", err);
+        return err;
+    }
+
+    pC->m_eEncoderState = M4PTO3GPP_kEncoderClosed;
+
+    err = pC->m_pEncoderInt->pFctOpen(pC->m_pMp4EncoderContext, &pC->m_WriterVideoAU, &EncParams);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: EncoderInt->pFctOpen returns 0x%x", err);
+        return err;
+    }
+
+    pC->m_eEncoderState = M4PTO3GPP_kEncoderStopped;
+
+    if (M4OSA_NULL != pC->m_pEncoderInt->pFctStart)
+    {
+        err = pC->m_pEncoderInt->pFctStart(pC->m_pMp4EncoderContext);
+
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: EncoderInt->pFctStart returns 0x%x", err);
+            return err;
+        }
+    }
+
+    pC->m_eEncoderState = M4PTO3GPP_kEncoderRunning;
+
+    /**
+     * No more setOption on "M4ENCODER_kVideoFragmentSize" here.
+     * It is now automatically and "smartly" set in the encoder shell. */
+
+    /**************************************/
+    /******** 3GP out add streams  ********/
+    /**************************************/
+
+    err = pC->m_pWriterGlobInt->pFctAddStream(pC->m_p3gpWriterContext, pC->m_pWriterVideoStream);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctAddStream(video) returns\
+                       0x%x", err);
+        return err;
+    }
+
+    /**
+     * Set video max au size */
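+    /* Worst-case AU size: raw YUV 4:2:0 frame size (1.5 bytes per pixel) scaled by the
+       minimum compression ratio expected from the encoder */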
+    optionValue.streamID    = M4PTO3GPP_WRITER_VIDEO_STREAM_ID;
+    optionValue.value = (M4OSA_UInt32)(1.5F * (M4OSA_Float)(pC->m_pWriterVideoStreamInfo->width
+                                                * pC->m_pWriterVideoStreamInfo->height)
+                                                * M4PTO3GPP_VIDEO_MIN_COMPRESSION_RATIO);
+    M4OSA_TRACE3_1("M4PTO3GPP_Ready4Processing,M4WRITER_kMaxAUSize: %u",optionValue.value);
+    err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+                                (M4OSA_UInt32)M4WRITER_kMaxAUSize,(M4OSA_DataOption) &optionValue);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(video,\
+                       M4WRITER_kMaxAUSize) returns 0x%x", err);
+        return err;
+    }
+
+    /**
+     * Set video max chunk size */
+    optionValue.value = (M4OSA_UInt32)((M4OSA_Float)optionValue.value
+                        * M4PTO3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO);
+    M4OSA_TRACE3_1("M4PTO3GPP_Ready4Processing,M4WRITER_kMaxChunckSize: %u",optionValue.value);
+    err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+                        (M4OSA_UInt32)M4WRITER_kMaxChunckSize,(M4OSA_DataOption) &optionValue);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(video,\
+                       M4WRITER_kMaxChunckSize) returns 0x%x", err);
+        return err;
+    }
+
+    if (M4OSA_NULL != pC->m_pReaderAudioStream)
+    {
+        err = pC->m_pWriterGlobInt->pFctAddStream(pC->m_p3gpWriterContext, pC->m_pWriterAudioStream);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctAddStream(audio) \
+                           returns 0x%x", err);
+            return err;
+        }
+
+        /**
+         * Set audio max au size */
+        optionValue.value       = M4PTO3GPP_AUDIO_MAX_AU_SIZE;
+        optionValue.streamID    = M4PTO3GPP_WRITER_AUDIO_STREAM_ID;
+        err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxAUSize,(M4OSA_DataOption) &optionValue);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(audio,\
+                           M4WRITER_kMaxAUSize) returns 0x%x", err);
+            return err;
+        }
+
+        /**
+         * Set audio max chunk size */
+        optionValue.value = M4PTO3GPP_AUDIO_MAX_CHUNK_SIZE; /**< Magical */
+        err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+                        (M4OSA_UInt32)M4WRITER_kMaxChunckSize,(M4OSA_DataOption) &optionValue);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(audio,\
+                           M4WRITER_kMaxChunckSize) returns 0x%x", err);
+            return err;
+        }
+    }
+
+    /*
+     * Close the stream registration step so the writer is ready to receive data */
+    err = pC->m_pWriterGlobInt->pFctStartWriting(pC->m_p3gpWriterContext);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctStartWriting returns 0x%x",
+                        err);
+        return err;
+    }
+
+
+    M4OSA_TRACE3_0("M4PTO3GPP_Ready4Processing: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ M4OSA_ERR M4PTO3GPP_writeAmrSilence122Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
+                            M4WRITER_Context* pWriterContext,
+                                      M4SYS_AccessUnit* pWriterAudioAU, M4OSA_Time mtIncCts)
+ * @brief   Write an AMR 12.2kbps silence FRAME into the writer
+ * @note    Mainly used to fix the 'bzz' bug...
+ * @param   pWriterDataIntInterface (IN)    writer data interfaces
+ *          pWriterContext          (IN/OUT)writer context
+ *          pWriterAudioAU          (OUT)   writer audio access unit
+ *          mtIncCts                (IN)    writer CTS
+ * @return  M4NO_ERROR: No error
+ ******************************************************************************
+*/
+static M4OSA_ERR M4PTO3GPP_writeAmrSilence122Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
+                                                   M4WRITER_Context* pWriterContext,
+                                                    M4SYS_AccessUnit* pWriterAudioAU,
+                                                    M4OSA_Time mtIncCts)
+{
+    M4OSA_ERR err;
+
+    err = pWriterDataIntInterface->pStartAU(pWriterContext, M4PTO3GPP_WRITER_AUDIO_STREAM_ID,
+                                        pWriterAudioAU);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence122Frame: pWriterDataInt->pStartAU(audio) returns \
+                                                    0x%x!", err);
+        return err;
+    }
+
+    M4OSA_memcpy((M4OSA_MemAddr8)pWriterAudioAU->dataAddress,
+     (M4OSA_MemAddr8)M4PTO3GPP_AMR_AU_SILENCE_122_FRAME, M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE);
+    pWriterAudioAU->size    = M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE;
+    pWriterAudioAU->CTS     = mtIncCts;
+    pWriterAudioAU->nbFrag  = 0;
+
+    err = pWriterDataIntInterface->pProcessAU(pWriterContext, M4PTO3GPP_WRITER_AUDIO_STREAM_ID,
+                                                pWriterAudioAU);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence122Frame: pWriterDataInt->pProcessAU(silence) \
+                       returns 0x%x!", err);
+        return err;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ M4OSA_ERR M4PTO3GPP_writeAmrSilence048Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
+                                        M4WRITER_Context* pWriterContext,
+                                      M4SYS_AccessUnit* pWriterAudioAU, M4OSA_Time mtIncCts)
+ * @brief   Write an AMR silence FRAME (048 variant) into the writer
+ * @note    Mainly used to fix the 'bzz' bug...
+ * @param   pWriterDataIntInterface (IN)    writer data interfaces
+ *          pWriterContext          (IN/OUT)writer context
+ *          pWriterAudioAU          (OUT)   writer audio access unit
+ *          mtIncCts                (IN)    writer CTS
+ * @return  M4NO_ERROR: No error
+ ******************************************************************************
+*/
+static M4OSA_ERR M4PTO3GPP_writeAmrSilence048Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
+                                                   M4WRITER_Context* pWriterContext,
+                                                M4SYS_AccessUnit* pWriterAudioAU,
+                                                M4OSA_Time mtIncCts)
+{
+    M4OSA_ERR err;
+
+    err = pWriterDataIntInterface->pStartAU(pWriterContext, M4PTO3GPP_WRITER_AUDIO_STREAM_ID,
+                                                        pWriterAudioAU);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence048Frame: pWriterDataInt->pStartAU(audio)\
+                       returns 0x%x!", err);
+        return err;
+    }
+
+    M4OSA_memcpy((M4OSA_MemAddr8)pWriterAudioAU->dataAddress,
+                (M4OSA_MemAddr8)M4PTO3GPP_AMR_AU_SILENCE_048_FRAME,
+                M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE);
+    pWriterAudioAU->size    = M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+    pWriterAudioAU->CTS     = mtIncCts;
+    pWriterAudioAU->nbFrag  = 0;
+
+    err = pWriterDataIntInterface->pProcessAU(pWriterContext,
+                    M4PTO3GPP_WRITER_AUDIO_STREAM_ID, pWriterAudioAU);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence048Frame: \
+                       pWriterDataInt->pProcessAU(silence) returns 0x%x!", err);
+        return err;
+    }
+
+    return M4NO_ERROR;
+}
+
+
+M4OSA_ERR M4PTO3GPP_RegisterExternalVideoEncoder(M4PTO3GPP_Context pContext,
+                                     M4VE_EncoderType encoderType,
+                                     M4VE_Interface*    pEncoderInterface,
+                                     M4OSA_Void* pUserData)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+
+    switch (encoderType)
+    {
+        case M4VE_kMpeg4VideoEnc:
+        case M4VE_kH263VideoEnc:
+            /* OK */
+        break;
+
+        case M4VE_kH264VideoEnc:
+            M4OSA_TRACE1_0("M4PTO3GPP_RegisterExternalVideoEncoder: \
+                           H264 encoder type not implemented yet");
+            return M4ERR_NOT_IMPLEMENTED;
+        break;
+
+        default:
+            M4OSA_TRACE1_1("M4PTO3GPP_RegisterExternalVideoEncoder:\
+                           unknown encoderType %d", encoderType);
+            return M4ERR_PARAMETER;
+        break;
+    }
+
+    pC->registeredExternalEncs[encoderType].pEncoderInterface = pEncoderInterface;
+    pC->registeredExternalEncs[encoderType].pUserData = pUserData;
+    pC->registeredExternalEncs[encoderType].registered = M4OSA_TRUE;
+
+    /* Notice it overwrites any HW encoder that may already have been registered for this type;
+    this is normal. */
+
+    return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/src/M4PTO3GPP_VideoPreProcessing.c b/libvideoeditor/vss/src/M4PTO3GPP_VideoPreProcessing.c
new file mode 100755
index 0000000..bcbfaf0
--- /dev/null
+++ b/libvideoeditor/vss/src/M4PTO3GPP_VideoPreProcessing.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4PTO3GPP_VideoPreProcessing.c
+ * @brief   Picture to 3gpp Service video preprocessing management.
+ ******************************************************************************
+ */
+
+/**
+ *    OSAL Debug utilities */
+#include "M4OSA_Debug.h"
+
+/**
+ *    OSAL Memory management */
+#include "M4OSA_Memory.h"
+
+/**
+ *    Definition of the M4PTO3GPP internal context */
+#include "M4PTO3GPP_InternalTypes.h"
+
+/**
+ *    Definition of the M4PTO3GPP errors */
+#include "M4PTO3GPP_ErrorCodes.h"
+
+/* If time increment is too low then we have an infinite alloc loop into M4ViEncCaptureFrame() */
+/* Time increment should match 30 fps maximum (1000 ms / 30 frames, about 33.33 ms) */
+#define M4PTO3GPP_MIN_TIME_INCREMENT 33.3333334
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ *                                 M4VIFI_ImagePlane* pPlaneOut)
+ * @brief    Call an external callback to get the picture to encode
+ * @note    It is called by the video encoder
+ * @param    pContext    (IN) VPP context, which actually is the M4PTO3GPP internal context
+ *                            in our case
+ * @param    pPlaneIn    (IN) Contains the image
+ * @param    pPlaneOut    (IN/OUT) Pointer to an array of 3 planes that will contain the
+ *                        output YUV420 image read with the m_pPictureCallbackFct
+ * @return    M4NO_ERROR:    No error
+ * @return    Any error returned by an underlying module
+ ******************************************************************************
+ */
+/******************************************************/
+M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+                             M4VIFI_ImagePlane* pPlaneOut)
+/******************************************************/
+{
+    M4OSA_ERR    err;
+    M4OSA_Double mtDuration;
+    M4OSA_UInt32 i;
+
+    /*** NOTE ***/
+    /* It's OK to get pPlaneIn == M4OSA_NULL here                        */
+    /* since it has been given NULL in the pFctEncode() call.            */
+    /* It's because we use the M4PTO3GPP internal context to            */
+    /* transmit the encoder input data.                                    */
+    /* The input data is the image read from the m_pPictureCallbackFct    */
+
+    /**
+     *    The VPP context is actually the M4PTO3GPP context! */
+    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+
+    /**
+    *  Get the picture to encode */
+    if (M4OSA_FALSE == pC->m_bLastInternalCallBack)
+    {
+        err = pC->m_Params.pPictureCallbackFct(pC->m_Params.pPictureCallbackCtxt, pPlaneOut,
+             &mtDuration);
+
+        /* In case of error when getting YUV to encode (ex: error when decoding a JPEG) */
+        if((M4NO_ERROR != err) && (((M4OSA_UInt32)M4PTO3GPP_WAR_LAST_PICTURE) != err))
+        {
+            return err;
+        }
+
+        /**
+         * If end of encoding is asked by the size limitation system,
+         * we must end the encoding the same way that when it is asked by the
+         * picture callback (a.k.a. the integrator).
+         * Thus we simulate the LastPicture code return: */
+        if (M4OSA_TRUE == pC->m_IsLastPicture)
+        {
+            err = M4PTO3GPP_WAR_LAST_PICTURE;
+        }
+
+        if(((M4OSA_UInt32)M4PTO3GPP_WAR_LAST_PICTURE) == err)
+        {
+            pC->m_bLastInternalCallBack = M4OSA_TRUE; /* Toggle flag for the final call of the CB*/
+            pC->m_IsLastPicture         = M4OSA_TRUE; /* To stop the encoder */
+            pC->pSavedPlane             = pPlaneOut;  /* Save the last YUV plane ptr */
+            pC->uiSavedDuration         = (M4OSA_UInt32)mtDuration; /* Save the last duration */
+        }
+    }
+    else
+    {
+        /**< Not strictly necessary here: the light writer reuses the last-but-one frame
+                duration for the last frame */
+        /**< Only needed for the pC->m_mtNextCts update below... */
+        mtDuration = pC->uiSavedDuration;
+
+
+        /** Copy the last saved YUV planes into the current ones
+         * (the last picture is split across the extra callback call) */
+        for (i=0; i<3; i++)
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8)pPlaneOut[i].pac_data,
+                 (M4OSA_MemAddr8)pC->pSavedPlane[i].pac_data,
+                     pPlaneOut[i].u_stride * pPlaneOut[i].u_height);
+        }
+    }
+
+    /* TimeIncrement should be 30 fps maximum */
+    if(mtDuration < M4PTO3GPP_MIN_TIME_INCREMENT)
+    {
+        mtDuration = M4PTO3GPP_MIN_TIME_INCREMENT;
+    }
+
+    pC->m_mtNextCts += mtDuration;
+
+    M4OSA_TRACE3_0("M4PTO3GPP_applyVPP: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/src/M4READER_Amr.c b/libvideoeditor/vss/src/M4READER_Amr.c
new file mode 100755
index 0000000..32bf9cf
--- /dev/null
+++ b/libvideoeditor/vss/src/M4READER_Amr.c
@@ -0,0 +1,799 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ************************************************************************
+ * @file   M4READER_Amr.c
+ * @brief  Generic encapsulation of the core amr reader
+ * @note   This file implements the generic M4READER interface
+ *         on top of the AMR reader
+ ************************************************************************
+*/
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_CoreID.h"
+
+#include "M4_Utils.h"
+
+#include "M4AMRR_CoreReader.h"
+#include "M4READER_Amr.h"
+
+/**
+ ************************************************************************
+ * structure    M4READER_AMR_Context
+ * @brief       This structure defines the internal context of an AMR reader instance
+ * @note        The context is allocated and de-allocated by the reader
+ ************************************************************************
+*/
+typedef struct _M4READER_AMR_Context
+{
+    M4OSA_Context           m_pCoreContext;     /**< core AMR reader context */
+    M4_AudioStreamHandler*  m_pAudioStream;     /**< pointer to the audio stream
+                                                 description returned by the core */
+    M4SYS_AccessUnit        m_audioAu;          /**< audio access unit to be filled by the core */
+    M4OSA_Time              m_maxDuration;      /**< duration of the audio stream */
+    M4OSA_FileReadPointer*    m_pOsaFileReaderFcts;    /**< OSAL file read functions */
+
+} M4READER_AMR_Context;
+
+
+/**
+ ************************************************************************
+ * @brief    create an instance of the reader
+ * @note     allocates the context
+ * @param    pContext:        (OUT)    pointer to a reader context
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_ALLOC                a memory allocation has failed
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_create(M4OSA_Context *pContext)
+{
+    M4READER_AMR_Context* pReaderContext;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((pContext == 0), M4ERR_PARAMETER,
+         "M4READER_AMR_create: invalid context pointer");
+
+    pReaderContext = (M4READER_AMR_Context*)M4OSA_malloc(sizeof(M4READER_AMR_Context),
+         M4READER_AMR, (M4OSA_Char *)"M4READER_AMR_Context");
+    if (pReaderContext == M4OSA_NULL)
+    {
+        return M4ERR_ALLOC;
+    }
+
+    pReaderContext->m_pAudioStream  = M4OSA_NULL;
+    pReaderContext->m_audioAu.dataAddress = M4OSA_NULL;
+    M4OSA_INT64_FROM_INT32(pReaderContext->m_maxDuration, 0);
+    pReaderContext->m_pCoreContext = M4OSA_NULL;
+    pReaderContext->m_pOsaFileReaderFcts = M4OSA_NULL;
+
+    *pContext = pReaderContext;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief    destroy the instance of the reader
+ * @note     after this call the context is invalid
+ *
+ * @param    context:        (IN)    Context of the reader
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_destroy(M4OSA_Context context)
+{
+    M4READER_AMR_Context*   pC=(M4READER_AMR_Context*)context;
+
+    /* Check function parameters*/
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+         "M4READER_AMR_destroy: invalid context pointer");
+
+    /**
+     *    Check input parameter */
+    if (M4OSA_NULL == pC)
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_destroy(): M4READER_AMR_destroy: context is M4OSA_NULL,\
+             returning M4ERR_PARAMETER");
+        return M4ERR_PARAMETER;
+    }
+
+    M4OSA_free((M4OSA_MemAddr32)pC);
+
+    return M4NO_ERROR;
+}
+
+
+/**
+ ************************************************************************
+ * @brief    open the reader and initializes its created instance
+ * @note     this function opens the AMR file
+ * @param    context:            (IN)    Context of the reader
+ * @param    pFileDescriptor:    (IN)    Pointer to proprietary data identifying the media to open
+ * @return    M4NO_ERROR                     there is no error
+ * @return    M4ERR_PARAMETER                the context is NULL
+ * @return    M4ERR_BAD_CONTEXT            provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_open(M4OSA_Context context, M4OSA_Void* pFileDescriptor)
+{
+    M4READER_AMR_Context*    pC = (M4READER_AMR_Context*)context;
+    M4OSA_ERR                err;
+
+    /* Check function parameters*/
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),              M4ERR_PARAMETER,
+         "M4READER_AMR_open: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor), M4ERR_PARAMETER,
+         "M4READER_AMR_open: invalid pointer pFileDescriptor");
+
+    err = M4AMRR_openRead( &pC->m_pCoreContext, pFileDescriptor, pC->m_pOsaFileReaderFcts);
+
+    return err;
+}
+
+
+
+/**
+ ************************************************************************
+ * @brief    close the reader
+ * @note
+ * @param    context:        (IN)    Context of the reader
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            the context is NULL
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR   M4READER_AMR_close(M4OSA_Context context)
+{
+    M4READER_AMR_Context*    pC = (M4READER_AMR_Context*)context;
+    M4OSA_ERR                err = M4NO_ERROR;
+    M4AMRR_State State;
+
+    /* Check function parameters*/
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+         "M4READER_AMR_close: invalid context pointer");
+
+    /**
+     *    Check input parameter */
+    if (M4OSA_NULL == pC)
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_close(): M4READER_AMR_close: context is M4OSA_NULL,\
+             returning M4ERR_PARAMETER");
+        return M4ERR_PARAMETER;
+    }
+
+    if (M4OSA_NULL != pC->m_pAudioStream)
+    {
+        err = M4AMRR_getState(pC->m_pCoreContext, &State,
+                ((M4_StreamHandler*)pC->m_pAudioStream)->m_streamId);
+        if(M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_0("M4READER_AMR_close: error when calling M4AMRR_getState\n");
+            return err;
+        }
+
+        if (M4AMRR_kReading_nextAU == State)
+        {
+            err = M4AMRR_freeAU(pC->m_pCoreContext,
+                ((M4_StreamHandler*)pC->m_pAudioStream)->m_streamId,  &pC->m_audioAu);
+            if (err != M4NO_ERROR)
+            {
+                M4OSA_TRACE1_0("M4READER_AMR_close: error when freeing access unit\n");
+                return err;
+            }
+        }
+
+        /* Delete the DSI if needed */
+        if(M4OSA_NULL != pC->m_pAudioStream->m_basicProperties.m_pDecoderSpecificInfo)
+        {
+            M4OSA_free((M4OSA_MemAddr32)\
+                pC->m_pAudioStream->m_basicProperties.m_pDecoderSpecificInfo);
+
+            pC->m_pAudioStream->m_basicProperties.m_decoderSpecificInfoSize = 0;
+            pC->m_pAudioStream->m_basicProperties.m_pDecoderSpecificInfo = M4OSA_NULL;
+        }
+
+        /* Finally destroy the stream handler */
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pAudioStream);
+        pC->m_pAudioStream = M4OSA_NULL;
+    }
+
+    if (M4OSA_NULL != pC->m_pCoreContext)
+    {
+        err = M4AMRR_closeRead(pC->m_pCoreContext);
+        pC->m_pCoreContext = M4OSA_NULL;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief    Get the next stream found in the media
+ * @note    current version needs to translate M4SYS_Stream to M4_StreamHandler
+ *
+ * @param    context:        (IN)   Context of the reader
+ * @param    pMediaFamily:   (OUT)  pointer to a user allocated M4READER_MediaFamily
+ *                                  that will be filled with the media family of the found stream
+ * @param    pStreamHandler: (OUT)  pointer to a stream handler that will be
+ *                                  allocated and filled with the found stream description
+ *
+ * @return    M4NO_ERROR            there is no error
+ * @return    M4WAR_NO_MORE_STREAM  no more available stream in the media (all streams found)
+ * @return    M4ERR_PARAMETER       at least one parameter is not properly set (in DEBUG mode only)
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_getNextStream(M4OSA_Context context, M4READER_MediaFamily *pMediaFamily,
+                                     M4_StreamHandler **pStreamHandlerParam)
+{
+    M4READER_AMR_Context*   pC=(M4READER_AMR_Context*)context;
+    M4OSA_ERR               err;
+    M4SYS_StreamID          streamIdArray[2];
+    M4SYS_StreamDescription streamDesc;
+    M4_AudioStreamHandler*  pAudioStreamHandler;
+    M4_StreamHandler*       pStreamHandler;
+
+    M4OSA_DEBUG_IF1((pC == 0),                  M4ERR_PARAMETER,
+                "M4READER_AMR_getNextStream: invalid context");
+    M4OSA_DEBUG_IF1((pMediaFamily == 0),        M4ERR_PARAMETER,
+                "M4READER_AMR_getNextStream: invalid pointer to MediaFamily");
+    M4OSA_DEBUG_IF1((pStreamHandlerParam == 0), M4ERR_PARAMETER,
+                "M4READER_AMR_getNextStream: invalid pointer to StreamHandler");
+
+    err = M4AMRR_getNextStream( pC->m_pCoreContext, &streamDesc);
+    if (err == M4WAR_NO_MORE_STREAM)
+    {
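+        /* All streams have been enumerated: switch the core reader to reading state */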
+        streamIdArray[0] = 0;
+        streamIdArray[1] = 0;
+        err = M4AMRR_startReading(pC->m_pCoreContext, streamIdArray);
+        if ((M4OSA_UInt32)M4ERR_ALLOC == err)
+        {
+            M4OSA_TRACE2_0("M4READER_AMR_getNextStream: M4AMRR_startReading returns M4ERR_ALLOC!");
+            return err;
+        }
+        return M4WAR_NO_MORE_STREAM;
+    }
+    else if (err != M4NO_ERROR)
+    {
+        return err;
+    }
+
+    *pMediaFamily = M4READER_kMediaFamilyAudio;
+
+    pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_malloc(sizeof(M4_AudioStreamHandler),
+                        M4READER_AMR, (M4OSA_Char *)"M4_AudioStreamHandler");
+    if (pAudioStreamHandler == M4OSA_NULL)
+    {
+        return M4ERR_ALLOC;
+    }
+    pStreamHandler =(M4_StreamHandler*)(pAudioStreamHandler);
+    *pStreamHandlerParam = pStreamHandler;
+    pC->m_pAudioStream = pAudioStreamHandler;
+
+    pAudioStreamHandler->m_structSize = sizeof(M4_AudioStreamHandler);
+
+    /*
+     * Audio stream handler fields are initialised with 0 value.
+     * They will be properly set by the AMR decoder
+     */
+    pAudioStreamHandler->m_samplingFrequency = 0;
+    pAudioStreamHandler->m_byteFrameLength   = 0;
+    pAudioStreamHandler->m_byteSampleSize    = 0;
+    pAudioStreamHandler->m_nbChannels        = 0;
+
+    pStreamHandler->m_pDecoderSpecificInfo    = (M4OSA_UInt8*)(streamDesc.decoderSpecificInfo);
+    pStreamHandler->m_decoderSpecificInfoSize = streamDesc.decoderSpecificInfoSize;
+    pStreamHandler->m_streamId                = streamDesc.streamID;
+ // M4OSA_INT64_FROM_DOUBLE(pStreamHandler->m_duration,
+ // (M4OSA_Double)(((M4OSA_Float)streamDesc.duration*1000/(M4OSA_Float)(streamDesc.timeScale))));
+    pStreamHandler->m_duration                = streamDesc.duration;
+    pStreamHandler->m_pUserData               = (void*)streamDesc.timeScale; /* trick: the time scale is passed through pUserData */
+
+    if (M4OSA_TIME_COMPARE(streamDesc.duration, pC->m_maxDuration) > 0)
+    {
+        M4OSA_TIME_SET(pC->m_maxDuration, streamDesc.duration);
+    }
+    pStreamHandler->m_averageBitRate          = streamDesc.averageBitrate;
+
+    M4AMRR_getmaxAUsize(pC->m_pCoreContext, &pStreamHandler->m_maxAUSize);
+
+    switch (streamDesc.streamType)
+    {
+    case M4SYS_kAMR:
+        pStreamHandler->m_streamType = M4DA_StreamTypeAudioAmrNarrowBand;
+        break;
+    case M4SYS_kAMR_WB:
+        pStreamHandler->m_streamType = M4DA_StreamTypeAudioAmrWideBand;
+        break;
+    default:
+        break;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief    fill the access unit structure with initialization values
+ * @note
+ * @param    context:        (IN)     Context of the reader
+ * @param    pStreamHandler: (IN)     pointer to the stream handler to
+ *                                    which the access unit will be associated
+ * @param    pAccessUnit:    (IN/OUT) pointer to the access unit (allocated by the caller)
+ *                                      to initialize
+ *
+ * @return    M4NO_ERROR              there is no error
+ * @return    M4ERR_PARAMETER         at least one parameter is not properly set
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_fillAuStruct(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                                     M4_AccessUnit *pAccessUnit)
+{
+    M4READER_AMR_Context*   pC = (M4READER_AMR_Context*)context;
+    M4SYS_AccessUnit*       pAu;
+
+    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
+         "M4READER_AMR_fillAuStruct: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_AMR_fillAuStruct: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
+         "M4READER_AMR_fillAuStruct: invalid pointer to M4_AccessUnit");
+
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_fillAuStruct: passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    pAu->dataAddress = M4OSA_NULL;
+    pAu->size        = 0;
+    /* JC: bug fix 1197 (set CTS to -20, one AMR frame duration, so that the first AU CTS is 0) */
+    pAu->CTS         = -20;
+    pAu->DTS         = -20;
+    pAu->attribute   = 0;
+    pAu->nbFrag      = 0;
+
+    pAccessUnit->m_size         = 0;
+    /* JC: bug fix 1197 (set CTS to -20, one AMR frame duration, so that the first AU CTS is 0) */
+    pAccessUnit->m_CTS          = -20;
+    pAccessUnit->m_DTS          = -20;
+    pAccessUnit->m_attribute    = 0;
+    pAccessUnit->m_dataAddress  = M4OSA_NULL;/*pBuffer;*/
+    pAccessUnit->m_maxsize      = pStreamHandler->m_maxAUSize;
+    pAccessUnit->m_streamID     = pStreamHandler->m_streamId;
+    pAccessUnit->m_structSize   = sizeof(M4_AccessUnit);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief    get an option value from the reader
+ * @note    this function follows the set/get option mechanism described in OSAL 3.0
+ *          it allows the caller to retrieve a property value:
+ *          - the duration of the longest stream of the media
+ *          - the average bitrate of the audio stream
+ *          - the version number of the reader
+ *
+ * @param    context:        (IN)    Context of the reader
+ * @param    optionId:        (IN)    indicates the option to get
+ * @param    pValue:            (OUT)    pointer to structure or value (allocated by user)
+ *                                       where option is stored
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4ERR_BAD_OPTION_ID        when the option ID is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_getOption(M4OSA_Context context, M4OSA_OptionID optionId,
+                                 M4OSA_DataOption pValue)
+
+{
+    M4READER_AMR_Context* pC = (M4READER_AMR_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),     M4ERR_PARAMETER, "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER, "invalid value pointer");
+
+    switch(optionId)
+    {
+    case M4READER_kOptionID_Duration :
+        {
+            M4OSA_TIME_SET(*(M4OSA_Time*)pValue, pC->m_maxDuration);
+        }
+        break;
+
+    case M4READER_kOptionID_Bitrate:
+        {
+            M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
+            if (M4OSA_NULL != pC->m_pAudioStream)
+            {
+                *pBitrate = pC->m_pAudioStream->m_basicProperties.m_averageBitRate;
+            }
+            else
+            {
+                *pBitrate = 0;
+                err = M4ERR_PARAMETER;
+            }
+
+        }
+        break;
+    case M4READER_kOptionID_Version:
+        {
+            err = M4AMRR_getVersion((M4_VersionInfo*)pValue);
+        }
+        break;
+
+    default :
+        {
+            err = M4ERR_PARAMETER;
+        }
+    }
+
+    return err;
+}
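+
+/* Illustrative sketch (not part of the original code, kept under #if 0 so it
+   is never compiled): how a caller might query the clip duration and the
+   audio bitrate through the get-option mechanism once the reader instance
+   has been created and opened. The function name is hypothetical. */
+#if 0
+static M4OSA_ERR sample_queryAmrProperties(M4OSA_Context readerCtxt)
+{
+    M4OSA_Time   duration;     /* filled with the duration of the longest stream */
+    M4OSA_UInt32 bitrate = 0;  /* filled with the audio average bitrate (bps) */
+    M4OSA_ERR    err;
+
+    err = M4READER_AMR_getOption(readerCtxt, M4READER_kOptionID_Duration,
+                                 (M4OSA_DataOption)&duration);
+    if (M4NO_ERROR != err)
+        return err;
+
+    return M4READER_AMR_getOption(readerCtxt, M4READER_kOptionID_Bitrate,
+                                  (M4OSA_DataOption)&bitrate);
+}
+#endif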
+
+/**
+ ************************************************************************
+ * @brief   set an option value of the reader
+ * @note    this function follows the set/get option mechanism described in OSAL 3.0
+ *          it allows the caller to set a property value:
+ *          - the OSAL file read functions
+ *
+ * @param   context:    (IN)        Context of the reader
+ * @param   optionId:   (IN)        Identifier indicating the option to set
+ * @param   pValue:     (IN)        Pointer to structure or value (allocated by user)
+ *                                  where option is stored
+ *
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_BAD_OPTION_ID     The option ID is not a valid one
+ * @return  M4ERR_STATE             State automaton is not applied
+ * @return  M4ERR_PARAMETER         The option parameter is invalid
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_setOption(M4OSA_Context context, M4OSA_OptionID optionId,
+                                 M4OSA_DataOption pValue)
+{
+    M4READER_AMR_Context* pC = (M4READER_AMR_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),     M4ERR_PARAMETER, "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER, "invalid value pointer");
+
+    switch(optionId)
+    {
+    case M4READER_kOptionID_SetOsaFileReaderFctsPtr :
+        {
+            pC->m_pOsaFileReaderFcts = (M4OSA_FileReadPointer*)pValue;
+        }
+        break;
+    default :
+        {
+            err = M4ERR_PARAMETER;
+        }
+    }
+
+    return err;
+}
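+
+/* Illustrative sketch (not part of the original code, kept under #if 0 so it
+   is never compiled): the OSAL file read function pointers are expected to be
+   provided through this set-option before the media is opened, since the core
+   reader performs all file accesses through them. Names are hypothetical. */
+#if 0
+static M4OSA_ERR sample_setAmrFileReader(M4OSA_Context readerCtxt,
+                                         M4OSA_FileReadPointer* pFileFcts)
+{
+    return M4READER_AMR_setOption(readerCtxt,
+                                  M4READER_kOptionID_SetOsaFileReaderFctsPtr,
+                                  (M4OSA_DataOption)pFileFcts);
+}
+#endif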
+
+/**
+ ************************************************************************
+ * @brief    reset the stream, that is, seek it to the beginning and make it ready to be read
+ * @note    this function is to be deprecated in future versions
+ *
+ * @param    context:        (IN)    Context of the reader
+ * @param    pStreamHandler    (IN)    The stream handler of the stream to reset
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4ERR_ALLOC                there is no more memory available
+ * @return    M4ERR_BAD_STREAM_ID        the streamID does not exist
+ * @return    M4ERR_STATE    this function cannot be called now
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ * @return    M4WAR_INVALID_TIME        beginning of the stream can not be reached
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_reset(M4OSA_Context context, M4_StreamHandler *pStreamHandler)
+{
+    M4READER_AMR_Context*   pC = (M4READER_AMR_Context*)context;
+    M4SYS_StreamID          streamIdArray[2];
+    M4OSA_ERR               err;
+    M4SYS_AccessUnit*       pAu;
+    M4OSA_Time                time64;
+    M4AMRR_State            State;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_AMR_reset: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_AMR_reset: invalid pointer to M4_StreamHandler");
+
+    M4OSA_INT64_FROM_INT32(time64, 0);
+
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_reset: passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    err = M4AMRR_getState(pC->m_pCoreContext, &State, pStreamHandler->m_streamId);
+    if (M4AMRR_kReading_nextAU == State)
+    {
+        err = M4AMRR_freeAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_AMR_reset: error when freeing access unit\n");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    streamIdArray[0] = pStreamHandler->m_streamId;
+    streamIdArray[1] = 0;
+
+    err = M4NO_ERROR;
+
+    /* for reset during playback */
+    /* (set CTS to -20 so that the first AU CTS is 0) */
+    pAu->CTS = -20;
+    pAu->DTS = -20;
+
+    err = M4AMRR_seek(pC->m_pCoreContext, streamIdArray, time64, M4SYS_kBeginning, &time64);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_reset: error when calling M4AMRR_seek()\n");
+        return err;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief    jump into the stream at the specified time
+ * @note
+ * @param    context:        (IN)     Context of the reader
+ * @param    pStreamHandler    (IN)     the stream handler of the stream in which to jump
+ * @param    pTime            (IN/OUT) IN:  the time to jump to (in ms)
+ *                                     OUT: the time to which the stream really jumped
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4ERR_ALLOC                there is no more memory available
+ * @return    M4WAR_INVALID_TIME        the time can not be reached
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_jump(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                             M4OSA_Int32* pTime)
+{
+    M4READER_AMR_Context*   pC = (M4READER_AMR_Context*)context;
+    M4SYS_StreamID          streamIdArray[2];
+    M4OSA_ERR               err;
+    M4SYS_AccessUnit*       pAu;
+    M4OSA_Time                time64;
+    M4OSA_Double            timeDouble; /*used for type conversion only*/
+    M4AMRR_State            State;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_AMR_jump: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_AMR_jump: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER, "M4READER_AMR_jump: invalid time pointer");
+
+    M4OSA_INT64_FROM_INT32(time64, *pTime);
+
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_jump: passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    err = M4AMRR_getState(pC->m_pCoreContext, &State, pStreamHandler->m_streamId);
+    if (M4AMRR_kReading_nextAU == State)
+    {
+        err = M4AMRR_freeAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_AMR_jump: error when freeing access unit\n");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    streamIdArray[0] = pStreamHandler->m_streamId;
+    streamIdArray[1] = 0;
+
+    pAu->CTS = time64;
+    pAu->DTS = time64;
+    err = M4AMRR_seek(pC->m_pCoreContext, streamIdArray, time64, M4SYS_kNoRAPprevious, &time64);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_jump: error when calling M4AMRR_seek()\n");
+        return err;
+    }
+
+    M4OSA_INT64_TO_DOUBLE(timeDouble, time64);
+    *pTime = (M4OSA_Int32)timeDouble;
+
+    return err;
+}
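+
+/* Illustrative sketch (not part of the original code, kept under #if 0 so it
+   is never compiled): jumping to a position given in milliseconds; on return
+   the variable holds the time actually reached by the core reader. The
+   function name and the 5000 ms target are hypothetical. */
+#if 0
+static M4OSA_ERR sample_jumpTo5s(M4OSA_Context readerCtxt,
+                                 M4_StreamHandler* pAudioStream)
+{
+    M4OSA_Int32 timeMs = 5000;  /* requested position, in ms */
+
+    return M4READER_AMR_jump(readerCtxt, pAudioStream, &timeMs);
+}
+#endif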
+
+/**
+ ************************************************************************
+ * @brief   Gets an access unit (AU) from the stream handler source.
+ * @note    An AU is the smallest possible amount of data to be decoded by a decoder (audio/video).
+ *          In the current version, we need to translate M4SYS_AccessUnit to M4_AccessUnit
+ *
+ * @param    context:        (IN)        Context of the reader
+ * @param    pStreamHandler  (IN)        The stream handler of the stream to be read
+ * @param    pAccessUnit     (IN/OUT)    Pointer to an access unit to fill with read data (the au
+ *                                       structure is allocated by the user, and must be
+ *                                       initialized by calling M4READER_fillAuStruct_fct after
+ *                                       creation)
+ * @return    M4NO_ERROR              there is no error
+ * @return    M4ERR_BAD_CONTEXT       provided context is not a valid one
+ * @return    M4ERR_PARAMETER         at least one parameter is not properly set
+ * @return    M4ERR_ALLOC             memory allocation failed
+ * @return    M4ERR_BAD_STREAM_ID     at least one of the stream Id. does not exist.
+ * @return    M4WAR_NO_MORE_AU        there are no more access units in the stream (end of stream)
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_getNextAu(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                                M4_AccessUnit *pAccessUnit)
+{
+    M4READER_AMR_Context*   pC = (M4READER_AMR_Context*)context;
+    M4OSA_ERR               err = M4NO_ERROR;
+    M4SYS_AccessUnit*       pAu;
+    M4_MediaTime            timeScale;
+    M4AMRR_State            State;
+
+    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
+         "M4READER_AMR_getNextAu: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_AMR_getNextAu: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
+         "M4READER_AMR_getNextAu: invalid pointer to M4_AccessUnit");
+
+    /* keep track of the buffers allocated in the AU so they can be freed at destroy();
+       be aware that this scheme is fragile and would need rework if more than
+       one video AU and one audio AU were needed */
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_getNextAu: passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    err = M4AMRR_getState(pC->m_pCoreContext, &State, pStreamHandler->m_streamId);
+    if (M4AMRR_kReading_nextAU == State)
+    {
+        err = M4AMRR_freeAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_AVI_getNextAu: error when freeing access unit\n");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    pAu->nbFrag = 0;
+    err = M4AMRR_nextAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
+
+    if (err == M4NO_ERROR)
+    {
+        timeScale = (M4OSA_Float)(M4OSA_Int32)(pStreamHandler->m_pUserData)/1000;
+        pAccessUnit->m_dataAddress = (M4OSA_MemAddr8)pAu->dataAddress;
+        pAccessUnit->m_size = pAu->size;
+        pAccessUnit->m_CTS  = (M4_MediaTime)pAu->CTS/*/timeScale*/;
+        pAccessUnit->m_DTS  = (M4_MediaTime)pAu->DTS/*/timeScale*/;
+        pAccessUnit->m_attribute = pAu->attribute;
+    }
+    else
+    {
+        pAccessUnit->m_size=0;
+    }
+
+    return err;
+}
+
+/**
+*************************************************************************
+* @brief Retrieves the generic interfaces implemented by the reader
+*
+* @param pMediaType          : Pointer on a M4READER_MediaType (allocated by the caller)
+*                              that will be filled with the media type supported by this reader
+* @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface implemented
+*                              by this reader. The interface is a structure allocated by the function and must
+*                              be un-allocated by the caller.
+* @param pRdrDataInterface   : Address of a pointer that will be set to the data interface implemented
+*                              by this reader. The interface is a structure allocated by the function and must
+*                              be un-allocated by the caller.
+*
+* @returns : M4NO_ERROR     if OK
+*            M4ERR_ALLOC      if an allocation failed
+*            M4ERR_PARAMETER  at least one parameter is not properly set (in DEBUG only)
+*************************************************************************
+*/
+M4OSA_ERR   M4READER_AMR_getInterfaces(M4READER_MediaType *pMediaType,
+                                         M4READER_GlobalInterface **pRdrGlobalInterface,
+                                         M4READER_DataInterface **pRdrDataInterface)
+{
+    M4OSA_DEBUG_IF1((pMediaType == 0),          M4ERR_PARAMETER,
+         "M4READER_AMR_getInterfaces: invalid pointer to MediaType");
+    M4OSA_DEBUG_IF1((pRdrGlobalInterface == 0), M4ERR_PARAMETER,
+         "M4READER_AMR_getInterfaces: invalid pointer to M4READER_GlobalInterface");
+    M4OSA_DEBUG_IF1((pRdrDataInterface == 0),   M4ERR_PARAMETER,
+         "M4READER_AMR_getInterfaces: invalid pointer to M4READER_DataInterface");
+
+    *pRdrGlobalInterface =
+         (M4READER_GlobalInterface*)M4OSA_malloc( sizeof(M4READER_GlobalInterface),
+             M4READER_AMR, (M4OSA_Char *)"M4READER_GlobalInterface" );
+    if (M4OSA_NULL == *pRdrGlobalInterface)
+    {
+        *pRdrDataInterface = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+    *pRdrDataInterface = (M4READER_DataInterface*)M4OSA_malloc( sizeof(M4READER_DataInterface),
+         M4READER_AMR, (M4OSA_Char *)"M4READER_DataInterface");
+    if (M4OSA_NULL == *pRdrDataInterface)
+    {
+        M4OSA_free((M4OSA_MemAddr32)*pRdrGlobalInterface);
+        *pRdrGlobalInterface = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+
+    *pMediaType = M4READER_kMediaTypeAMR;
+
+    (*pRdrGlobalInterface)->m_pFctCreate           = M4READER_AMR_create;
+    (*pRdrGlobalInterface)->m_pFctDestroy          = M4READER_AMR_destroy;
+    (*pRdrGlobalInterface)->m_pFctOpen             = M4READER_AMR_open;
+    (*pRdrGlobalInterface)->m_pFctClose            = M4READER_AMR_close;
+    (*pRdrGlobalInterface)->m_pFctGetOption        = M4READER_AMR_getOption;
+    (*pRdrGlobalInterface)->m_pFctSetOption        = M4READER_AMR_setOption;
+    (*pRdrGlobalInterface)->m_pFctGetNextStream    = M4READER_AMR_getNextStream;
+    (*pRdrGlobalInterface)->m_pFctFillAuStruct     = M4READER_AMR_fillAuStruct;
+    (*pRdrGlobalInterface)->m_pFctStart            = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctStop             = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctJump             = M4READER_AMR_jump;
+    (*pRdrGlobalInterface)->m_pFctReset            = M4READER_AMR_reset;
+    (*pRdrGlobalInterface)->m_pFctGetPrevRapTime   = M4OSA_NULL; /*all AUs are RAP*/
+
+    (*pRdrDataInterface)->m_pFctGetNextAu          = M4READER_AMR_getNextAu;
+
+    (*pRdrDataInterface)->m_readerContext = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
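+
+/* Illustrative sketch (not part of the original code, kept under #if 0 so it
+   is never compiled): the typical call sequence a client would follow once it
+   has retrieved the interfaces above. Error handling is simplified and the
+   function and variable names are hypothetical. */
+#if 0
+static M4OSA_ERR sample_readAmrFile(M4OSA_Char* pFilePath,
+                                    M4OSA_FileReadPointer* pFileFcts)
+{
+    M4READER_MediaType        mediaType;
+    M4READER_GlobalInterface* pGlobal = M4OSA_NULL;
+    M4READER_DataInterface*   pData   = M4OSA_NULL;
+    M4OSA_Context             ctxt    = M4OSA_NULL;
+    M4READER_MediaFamily      family;
+    M4_StreamHandler*         pStream = M4OSA_NULL;
+    M4_StreamHandler*         pAudio  = M4OSA_NULL;
+    M4_AccessUnit             au;
+    M4OSA_ERR                 err;
+
+    err = M4READER_AMR_getInterfaces(&mediaType, &pGlobal, &pData);
+    if (M4NO_ERROR != err) return err;
+
+    err = pGlobal->m_pFctCreate(&ctxt);
+    if (M4NO_ERROR != err) goto cleanup;
+
+    /* file read functions first, then open */
+    err = pGlobal->m_pFctSetOption(ctxt, M4READER_kOptionID_SetOsaFileReaderFctsPtr,
+                                   (M4OSA_DataOption)pFileFcts);
+    if (M4NO_ERROR != err) goto cleanup;
+    err = pGlobal->m_pFctOpen(ctxt, (M4OSA_Void*)pFilePath);
+    if (M4NO_ERROR != err) goto cleanup;
+
+    /* enumerate the streams; AMR exposes a single audio stream */
+    while (M4NO_ERROR == (err = pGlobal->m_pFctGetNextStream(ctxt, &family, &pStream)))
+    {
+        if (M4READER_kMediaFamilyAudio == family) pAudio = pStream;
+    }
+    if (M4WAR_NO_MORE_STREAM == err) err = M4NO_ERROR;
+    if ((M4NO_ERROR != err) || (M4OSA_NULL == pAudio)) goto cleanup;
+
+    err = pGlobal->m_pFctFillAuStruct(ctxt, pAudio, &au);
+    if (M4NO_ERROR != err) goto cleanup;
+
+    /* read access units until the end of the stream */
+    while (M4NO_ERROR == (err = pData->m_pFctGetNextAu(ctxt, pAudio, &au)))
+    {
+        /* au.m_dataAddress / au.m_size hold one encoded AMR frame here */
+    }
+    if (M4WAR_NO_MORE_AU == err) err = M4NO_ERROR;
+
+cleanup:
+    if (M4OSA_NULL != ctxt)
+    {
+        pGlobal->m_pFctClose(ctxt);
+        pGlobal->m_pFctDestroy(ctxt);
+    }
+    M4OSA_free((M4OSA_MemAddr32)pGlobal);
+    M4OSA_free((M4OSA_MemAddr32)pData);
+    return err;
+}
+#endif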
+
diff --git a/libvideoeditor/vss/src/M4READER_Pcm.c b/libvideoeditor/vss/src/M4READER_Pcm.c
new file mode 100755
index 0000000..5604983
--- /dev/null
+++ b/libvideoeditor/vss/src/M4READER_Pcm.c
@@ -0,0 +1,721 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file   M4READER_Pcm.c
+ * @brief  Generic encapsulation of the core pcm reader
+ * @note   This file implements the generic M4READER interface
+ *         on top of the PCM reader
+ ************************************************************************
+*/
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_CoreID.h"
+#include "M4TOOL_VersionInfo.h"
+#include "M4PCMR_CoreReader.h"
+#include "M4READER_Pcm.h"
+/**
+ ************************************************************************
+ * structure    M4READER_PCM_Context
+ * @brief       This structure defines the internal context of a pcm reader instance
+ * @note        The context is allocated and de-allocated by the reader
+ ************************************************************************
+ */
+typedef struct _M4READER_PCM_Context
+{
+    M4OSA_Context           m_coreContext;        /**< core wav reader context */
+    M4_StreamHandler*       m_pAudioStream;       /**< pointer on the audio stream description
+                                                        returned by the core */
+    M4SYS_AccessUnit        m_audioAu;            /**< audio access unit to be filled by the core */
+    M4OSA_FileReadPointer*  m_pOsaFileReaderFcts; /**< OSAL file read functions */
+
+} M4READER_PCM_Context;
+
+
+/**
+ ************************************************************************
+ * @brief   Creates a pcm reader instance
+ * @note    allocates the context
+ * @param   pContext:            (OUT)  Pointer to a pcm reader context
+ * @return  M4NO_ERROR:                 there is no error
+ * @return  M4ERR_ALLOC:                a memory allocation has failed
+ * @return  M4ERR_PARAMETER:            at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_create(M4OSA_Context* pContext)
+{
+    M4READER_PCM_Context*   pReaderContext;
+
+    M4OSA_DEBUG_IF1((pContext == 0),       M4ERR_PARAMETER,
+         "M4READER_PCM_create: invalid context pointer");
+
+    pReaderContext = (M4READER_PCM_Context*)M4OSA_malloc(sizeof(M4READER_PCM_Context),
+         M4READER_WAV, (M4OSA_Char *)"M4READER_PCM_Context");
+    if (pReaderContext == M4OSA_NULL)
+    {
+        return M4ERR_ALLOC;
+    }
+
+    pReaderContext->m_coreContext         = M4OSA_NULL;
+    pReaderContext->m_pAudioStream        = M4OSA_NULL;
+    pReaderContext->m_audioAu.dataAddress = M4OSA_NULL;
+    pReaderContext->m_pOsaFileReaderFcts  = M4OSA_NULL;
+
+    *pContext = pReaderContext;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Destroy the instance of the reader
+ * @note    the context is un-allocated
+ * @param   context:         (IN) context of the reader
+ * @return  M4NO_ERROR:           there is no error
+ * @return  M4ERR_PARAMETER:      at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_destroy(M4OSA_Context context)
+{
+    M4READER_PCM_Context* pC = (M4READER_PCM_Context*)context;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+         "M4READER_PCM_destroy: invalid context pointer");
+
+    M4OSA_free((M4OSA_MemAddr32)pC);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Initializes the reader instance
+ * @param   context:           (IN)    context of the reader
+ * @param   pFileDescriptor:   (IN)    Pointer to proprietary data identifying the media to open
+ * @return  M4NO_ERROR:                there is no error
+ * @return  M4ERR_PARAMETER:           at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_open(M4OSA_Context context, M4OSA_Void* pFileDescriptor)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4OSA_ERR               err;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+         "M4READER_PCM_open: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor),   M4ERR_PARAMETER,
+         "M4READER_PCM_open: invalid pointer pFileDescriptor");
+
+    err = M4PCMR_openRead(&(pC->m_coreContext), (M4OSA_Char*)pFileDescriptor,
+         pC->m_pOsaFileReaderFcts);
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief     close the reader
+ * @note
+ * @param     context:        (IN)    Context of the reader
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            the context is NULL
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_close(M4OSA_Context context)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4OSA_ERR               err = M4NO_ERROR;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+         "M4READER_PCM_close: invalid context pointer");
+
+    /* Free audio AU and audio stream */
+    if (M4OSA_NULL != pC->m_pAudioStream)
+    {
+        if (M4OSA_NULL != pC->m_audioAu.dataAddress)
+        {
+            err = M4PCMR_freeAU(pC->m_coreContext, pC->m_pAudioStream->m_streamId,
+                 &pC->m_audioAu);
+            if (err != M4NO_ERROR)
+            {
+                M4OSA_TRACE1_0("M4READER_PCM_close: Error when freeing audio access unit");
+                return err;
+            }
+        }
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pAudioStream);
+        pC->m_pAudioStream = M4OSA_NULL;
+    }
+
+
+    if (M4OSA_NULL != pC->m_coreContext)
+    {
+        /* Close the PCM file */
+       err = M4PCMR_closeRead(pC->m_coreContext);
+       pC->m_coreContext = M4OSA_NULL;
+    }
+
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   set an option value of the reader
+ * @note    this function follows the set/get option mechanism described in OSAL 3.0
+ *          it allows the caller to set a property value:
+ * @param    context:        (IN)    Context of the reader
+ * @param    optionId:       (IN)    indicates the option to set
+ * @param    pValue:         (IN)    pointer to structure or value (allocated by user)
+ *                                    where option is stored
+ *
+ * @return    M4NO_ERROR             there is no error
+ * @return    M4ERR_BAD_CONTEXT      provided context is not a valid one
+ * @return    M4ERR_PARAMETER        at least one parameter is not properly set
+ * @return    M4ERR_BAD_OPTION_ID    when the option ID is not a valid one
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_setOption(M4OSA_Context context, M4OSA_OptionID optionId, void* pValue)
+{
+    M4READER_PCM_Context* pC = (M4READER_PCM_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),     M4ERR_PARAMETER,
+         "M4READER_PCM_setOption: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
+         "M4READER_PCM_setOption: invalid value pointer");
+
+    switch(optionId)
+    {
+    case M4READER_kOptionID_SetOsaFileReaderFctsPtr :
+        {
+            pC->m_pOsaFileReaderFcts = (M4OSA_FileReadPointer*)pValue;
+        }
+        break;
+    default :
+        {
+            err = M4ERR_PARAMETER;
+        }
+    }
+
+    return err;
+}
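+
+/* Illustrative sketch (not part of the original code, kept under #if 0 so it
+   is never compiled): the OSAL file read function pointers must be supplied
+   through M4READER_PCM_setOption() before M4READER_PCM_open() is called,
+   since open forwards them to the core reader. Names are hypothetical. */
+#if 0
+static M4OSA_ERR sample_openPcmFile(M4OSA_Context readerCtxt,
+                                    M4OSA_Char* pFilePath,
+                                    M4OSA_FileReadPointer* pFileFcts)
+{
+    M4OSA_ERR err;
+
+    err = M4READER_PCM_setOption(readerCtxt,
+                                 M4READER_kOptionID_SetOsaFileReaderFctsPtr,
+                                 (void*)pFileFcts);
+    if (M4NO_ERROR != err)
+        return err;
+
+    return M4READER_PCM_open(readerCtxt, (M4OSA_Void*)pFilePath);
+}
+#endif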
+
+/**
+ ************************************************************************
+ * @brief   Retrieves an option value from the reader, given an option ID.
+ * @note    this function follows the set/get option mechanism described in OSAL 3.0
+ *          it allows the caller to retrieve a property value:
+ *
+ * @param   context:  (IN) context of the reader
+ * @param   optionId: (IN) option identifier whose value is to be retrieved.
+ * @param   pValue:  (OUT) option value retrieved.
+ *
+ * @return  M4NO_ERROR:          there is no error
+ * @return  M4ERR_PARAMETER:     at least one parameter is not properly set (in DEBUG only)
+ * @return  M4ERR_BAD_OPTION_ID: the required option identifier is unknown
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_getOption(M4OSA_Context context, M4OSA_OptionID optionId, void* pValue)
+{
+    M4READER_PCM_Context*   pContext = (M4READER_PCM_Context*)context;
+    M4OSA_ERR               err      = M4NO_ERROR;
+
+    /* no check of context at this level because some options do not need it */
+    M4OSA_DEBUG_IF1((pValue == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_getOption: invalid pointer on value");
+
+    switch (optionId)
+    {
+    case M4READER_kOptionID_Duration:
+        *((M4OSA_UInt64*)pValue) = pContext->m_pAudioStream->m_duration;
+        break;
+
+    case M4READER_kOptionID_Version:
+        err = M4PCMR_getVersion((M4_VersionInfo*)pValue);
+        break;
+
+    case M4READER_kOptionID_Copyright:
+        return M4ERR_NOT_IMPLEMENTED;
+        break;
+
+    case M4READER_kOptionID_Bitrate:
+        {
+            M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
+            if (M4OSA_NULL != pContext->m_pAudioStream)
+            {
+                *pBitrate = pContext->m_pAudioStream->m_averageBitRate;
+            }
+            else
+            {
+                *pBitrate = 0;
+                err = M4ERR_PARAMETER;
+            }
+        }
+        break;
+
+    default:
+        err = M4ERR_BAD_OPTION_ID;
+        M4OSA_TRACE1_0("M4READER_PCM_getOption: unsupported optionId");
+        break;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   Get the next stream found in the media
+ * @note
+ *
+ * @param   context:        (IN)  context of the reader
+ * @param   pMediaFamily:   (OUT) pointer to a user allocated M4READER_MediaFamily that will
+ *                                be filled
+ * @param   pStreamHandler: (OUT) pointer to a stream handler that will be allocated and filled
+ *                                with the found stream description
+ *
+ * @return  M4NO_ERROR:       there is no error.
+ * @return  M4ERR_PARAMETER:  at least one parameter is not properly set (in DEBUG only)
+ * @return  M4WAR_NO_MORE_STREAM    no more available stream in the media (all streams found)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_getNextStream(M4OSA_Context context, M4READER_MediaFamily *pMediaFamily,
+                                     M4_StreamHandler **pStreamHandler)
+{
+    M4READER_PCM_Context*   pC=(M4READER_PCM_Context*)context;
+    M4OSA_ERR               err;
+/*    M4_StreamHandler*       pStreamHandler = M4OSA_NULL;*/
+    M4SYS_StreamDescription streamDesc;
+    M4_AudioStreamHandler*  pAudioStreamHandler;
+    M4OSA_Double            fDuration;
+    M4SYS_StreamID          streamIdArray[2];
+    M4PCMC_DecoderSpecificInfo* pDsi;
+
+    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
+         "M4READER_PCM_getNextStream: invalid context");
+    M4OSA_DEBUG_IF1((pMediaFamily == 0),   M4ERR_PARAMETER,
+         "M4READER_PCM_getNextStream: invalid pointer to MediaFamily");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_getNextStream: invalid pointer to StreamHandler");
+
+    err = M4PCMR_getNextStream( pC->m_coreContext, &streamDesc);
+    if (err == M4WAR_NO_MORE_STREAM)
+    {
+        streamIdArray[0] = 0;
+        streamIdArray[1] = 0;
+        err = M4PCMR_startReading(pC->m_coreContext, streamIdArray); /*to put in open function*/
+
+        return M4WAR_NO_MORE_STREAM;
+    }
+    else if (M4NO_ERROR != err)
+    {
+        return err; /*also return M4WAR_NO_MORE_STREAM*/
+    }
+
+    switch (streamDesc.streamType)
+    {
+        case M4SYS_kAudioUnknown:
+        case M4SYS_kPCM_16bitsS:
+        case M4SYS_kPCM_16bitsU:
+        case M4SYS_kPCM_8bitsU:
+            *pMediaFamily = M4READER_kMediaFamilyAudio;
+            M4OSA_TRACE2_0("M4READER_PCM_getNextStream: found audio stream");
+            break;
+        default:
+            *pMediaFamily = M4READER_kMediaFamilyUnknown;
+            M4OSA_TRACE2_0("M4READER_PCM_getNextStream: found UNKNOWN stream");
+            return M4NO_ERROR;
+    }
+
+    pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_malloc(sizeof(M4_AudioStreamHandler),
+         M4READER_WAV, (M4OSA_Char *)"M4_AudioStreamHandler");
+    if (pAudioStreamHandler == M4OSA_NULL)
+    {
+        return M4ERR_ALLOC;
+    }
+    pAudioStreamHandler->m_structSize = sizeof(M4_AudioStreamHandler);
+    pC->m_pAudioStream = (M4_StreamHandler*)(pAudioStreamHandler);
+
+    pDsi = (M4PCMC_DecoderSpecificInfo*)(streamDesc.decoderSpecificInfo);
+    M4OSA_DEBUG_IF1((pDsi == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_getNextStream: invalid decoder specific info in stream");
+
+    pAudioStreamHandler->m_samplingFrequency = pDsi->SampleFrequency;
+    pAudioStreamHandler->m_byteSampleSize    = (M4OSA_UInt32)(pDsi->BitsPerSample/8);
+    /* m_byteFrameLength is badly named: it is not in bytes but in number of samples */
+    if(pAudioStreamHandler->m_samplingFrequency == 8000)
+    {
+        /* AMR case */
+        pAudioStreamHandler->m_byteFrameLength   =
+             (((streamDesc.averageBitrate/8)/50)/pDsi->nbChannels)\
+                /pAudioStreamHandler->m_byteSampleSize;/*/50 to get around 20 ms of audio*/
+    }
+    else
+    {
+        /* AAC Case */
+        pAudioStreamHandler->m_byteFrameLength =
+             (M4OSA_UInt32)(((streamDesc.averageBitrate/8)/15.625)/pDsi->nbChannels)\
+                /pAudioStreamHandler->m_byteSampleSize;
+    }
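+    /* Worked example (illustrative): 8 kHz, 16-bit mono PCM has an average
+       bitrate of 128000 bps, i.e. 16000 bytes/s; dividing by 50 gives 320
+       bytes per ~20 ms chunk, i.e. 160 samples once divided by the 2-byte
+       sample size. */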
+
+    pAudioStreamHandler->m_nbChannels        = pDsi->nbChannels;
+
+    M4OSA_TIME_TO_MS( fDuration, streamDesc.duration, streamDesc.timeScale);
+    pC->m_pAudioStream->m_duration                = (M4OSA_Int64)fDuration;
+    pC->m_pAudioStream->m_pDecoderSpecificInfo    = (M4OSA_UInt8*)(streamDesc.decoderSpecificInfo);
+    pC->m_pAudioStream->m_decoderSpecificInfoSize = streamDesc.decoderSpecificInfoSize;
+    pC->m_pAudioStream->m_streamId                = streamDesc.streamID;
+    pC->m_pAudioStream->m_pUserData               =
+        (void*)streamDesc.timeScale; /* trick (time scale stored in the user data), to be changed */
+    pC->m_pAudioStream->m_averageBitRate          = streamDesc.averageBitrate;
+    pC->m_pAudioStream->m_maxAUSize               =
+         pAudioStreamHandler->m_byteFrameLength*pAudioStreamHandler->m_byteSampleSize\
+            *pAudioStreamHandler->m_nbChannels;
+    pC->m_pAudioStream->m_streamType              = M4DA_StreamTypeAudioPcm;
+
+    *pStreamHandler = pC->m_pAudioStream;
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   fill the access unit structure with initialization values
+ * @note
+ *
+ * @param   context:        (IN) context of the reader
+ * @param   pStreamHandler: (IN) pointer to the stream handler to which the access unit will
+ *                                 be associated
+ * @param   pAccessUnit:    (IN) pointer to the access unit(allocated by the caller) to initialize
+ * @return  M4NO_ERROR:       there is no error.
+ * @return  M4ERR_PARAMETER:  at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_fillAuStruct(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                                     M4_AccessUnit *pAccessUnit)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4SYS_AccessUnit*       pAu;
+
+    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
+         "M4READER_PCM_fillAuStruct: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_fillAuStruct: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
+         "M4READER_PCM_fillAuStruct: invalid pointer to M4_AccessUnit");
+
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_PCM_fillAuStruct: passed StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+
+    pAu->dataAddress = M4OSA_NULL;
+    pAu->size        = 0;
+    pAu->CTS         = 0;
+    pAu->DTS         = 0;
+    pAu->attribute   = 0;
+    pAu->nbFrag      = 0;
+
+    pAccessUnit->m_size         = 0;
+    pAccessUnit->m_CTS          = 0;
+    pAccessUnit->m_DTS          = 0;
+    pAccessUnit->m_attribute    = 0;
+    pAccessUnit->m_dataAddress  = M4OSA_NULL;/*pBuffer;*/
+    pAccessUnit->m_maxsize      = pStreamHandler->m_maxAUSize;
+    pAccessUnit->m_streamID     = pStreamHandler->m_streamId;
+    pAccessUnit->m_structSize   = sizeof(M4_AccessUnit);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   reset the stream, that is: seek it to the beginning and make it ready to be read
+ * @note
+ * @param   context:        (IN) context of the reader
+ * @param   pStreamHandler: (IN) The stream handler of the stream to reset
+ * @return  M4NO_ERROR: there is no error.
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_reset(M4OSA_Context context, M4_StreamHandler *pStreamHandler)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4SYS_StreamID          streamIdArray[2];
+    M4OSA_ERR               err;
+    M4SYS_AccessUnit*       pAu;
+    M4OSA_Time                time64 = 0;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_PCM_reset: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_reset: invalid pointer to M4_StreamHandler");
+
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_PCM_reset: passed StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+
+    if (pAu->dataAddress != M4OSA_NULL)
+    {
+        err = M4PCMR_freeAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_PCM_reset: error when freeing access unit");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    streamIdArray[0] = pStreamHandler->m_streamId;
+    streamIdArray[1] = 0;
+
+    pAu->CTS = 0;
+    pAu->DTS = 0;
+
+    /* This call is needed only when replay during playback */
+    err = M4PCMR_seek(pC->m_coreContext, streamIdArray, time64, M4SYS_kBeginning, &time64);
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   Get the next access unit of the specified stream
+ * @note
+ * @param   context:        (IN)        Context of the reader
+ * @param   pStreamHandler  (IN)        The stream handler of the stream to be read
+ * @param   pAccessUnit     (IN/OUT)    Pointer to an access unit to fill with read data
+ *                                      (the au structure is allocated by the user, and must
+ *                                      be initialized by calling M4READER_fillAuStruct_fct
+ *                                      after creation)
+ * @return  M4NO_ERROR                  there is no error
+ * @return  M4ERR_BAD_CONTEXT           provided context is not a valid one
+ * @return  M4ERR_PARAMETER             at least one parameter is not properly set
+ * @returns M4ERR_ALLOC                 memory allocation failed
+ * @returns M4ERR_BAD_STREAM_ID         at least one of the stream Id. does not exist.
+ * @returns M4WAR_NO_DATA_YET           there is not enough data in the stream for a new access unit
+ * @returns M4WAR_NO_MORE_AU            there are no more access units in the stream (end of stream)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_getNextAu(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                                 M4_AccessUnit *pAccessUnit)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4OSA_ERR               err = M4NO_ERROR;
+    M4SYS_AccessUnit*       pAu;
+
+    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
+         "M4READER_PCM_getNextAu: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_getNextAu: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
+         "M4READER_PCM_getNextAu: invalid pointer to M4_AccessUnit");
+
+    /* keep track of the buffers allocated in the AU so they can be freed at destroy();
+       be aware that this scheme is fragile and would need rework if more than
+       one video AU and one audio AU were needed */
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_PCM_getNextAu: passed StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+
+    if (pAu->dataAddress != M4OSA_NULL)
+    {
+        err = M4PCMR_freeAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_PCM_getNextAu: error when freeing access unit");
+            return err;
+        }
+    }
+
+    pAu->nbFrag = 0;
+    err = M4PCMR_nextAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
+
+    if (err == M4NO_ERROR)
+    {
+        pAccessUnit->m_dataAddress = (M4OSA_MemAddr8)pAu->dataAddress;
+        pAccessUnit->m_size = pAu->size;
+        pAccessUnit->m_CTS  = (M4OSA_Double)pAu->CTS;
+        pAccessUnit->m_DTS  = (M4OSA_Double)pAu->DTS;
+        pAccessUnit->m_attribute = pAu->attribute;
+    }
+    else
+    {
+        pAccessUnit->m_size=0;
+    }
+
+    return err;
+}
+
+
+/**
+ ************************************************************************
+ * @brief   jump into the stream at the specified time
+ * @note
+ * @param   context:        (IN)     Context of the reader
+ * @param   pStreamHandler  (IN)     the stream handler of the stream in which to jump
+ * @param   pTime           (IN/OUT) IN:  the time to jump to (in ms)
+ *                                   OUT: the time to which the stream really jumped
+ *                                        But in this reader, we do not modify the time
+ * @return  M4NO_ERROR              there is no error
+ * @return  M4ERR_BAD_CONTEXT       provided context is not a valid one
+ * @return  M4ERR_PARAMETER         at least one parameter is not properly set
+ * @return  M4ERR_ALLOC             there is no more memory available
+ * @return  M4ERR_BAD_STREAM_ID     the streamID does not exist
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_jump(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+     M4OSA_Int32* pTime)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4SYS_StreamID          streamIdArray[2];
+    M4OSA_ERR               err;
+    M4SYS_AccessUnit*       pAu;
+    M4OSA_Time                time64;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_PCM_jump: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_jump: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER, "M4READER_PCM_jump: invalid time pointer");
+
+    time64 = (M4OSA_Time)*pTime;
+
+    if (pStreamHandler == pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_PCM_jump: passed StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+
+    if (pAu->dataAddress != M4OSA_NULL)
+    {
+        err = M4PCMR_freeAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_PCM_jump: Error when freeing access unit");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    streamIdArray[0] = pStreamHandler->m_streamId;
+    streamIdArray[1] = 0;
+
+    pAu->CTS = time64;
+    pAu->DTS = time64;
+
+    err = M4PCMR_seek(pC->m_coreContext, streamIdArray, time64, M4SYS_kBeginning, &time64);
+
+    *pTime = (M4OSA_Int32)time64;
+
+    return err;
+}
+
+/**
+ *************************************************************************
+ * @brief Retrieves the generic interfaces implemented by the reader
+ *
+ * @param pMediaType          : Pointer on a M4READER_MediaType (allocated by the caller)
+ *                              that will be filled with the media type supported by this reader
+ * @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface
+ *                              implemented by this reader. The interface is a structure allocated
+ *                              by the function and must be un-allocated by the caller.
+ * @param pRdrDataInterface   : Address of a pointer that will be set to the data interface
+ *                              implemented by this reader. The interface is a structure allocated
+ *                              by the function and must be un-allocated by the caller.
+ *
+ * @returns : M4NO_ERROR     if OK
+ *            M4ERR_ALLOC      if an allocation failed
+ *            M4ERR_PARAMETER  at least one parameter is not properly set (in DEBUG only)
+ *************************************************************************
+ */
+M4OSA_ERR   M4READER_PCM_getInterfaces(M4READER_MediaType *pMediaType,
+                                       M4READER_GlobalInterface **pRdrGlobalInterface,
+                                       M4READER_DataInterface **pRdrDataInterface)
+/************************************************************************/
+{
+    M4OSA_DEBUG_IF1((pMediaType == 0),          M4ERR_PARAMETER,
+         "M4READER_PCM_getInterfaces: invalid pointer to MediaType passed");
+    M4OSA_DEBUG_IF1((pRdrGlobalInterface == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_getInterfaces: invalid pointer to M4READER_GlobalInterface");
+    M4OSA_DEBUG_IF1((pRdrDataInterface == 0),   M4ERR_PARAMETER,
+         "M4READER_PCM_getInterfaces: invalid pointer to M4READER_DataInterface");
+
+    *pRdrGlobalInterface =
+         (M4READER_GlobalInterface*)M4OSA_malloc( sizeof(M4READER_GlobalInterface), M4READER_WAV,
+             (M4OSA_Char *)"M4READER_PCM GlobalInterface");
+    if (M4OSA_NULL == *pRdrGlobalInterface)
+    {
+        return M4ERR_ALLOC;
+    }
+    *pRdrDataInterface =
+         (M4READER_DataInterface*)M4OSA_malloc( sizeof(M4READER_DataInterface), M4READER_WAV,
+            (M4OSA_Char *) "M4READER_PCM DataInterface");
+    if (M4OSA_NULL == *pRdrDataInterface)
+    {
+        M4OSA_free((M4OSA_MemAddr32)*pRdrGlobalInterface);
+        return M4ERR_ALLOC;
+    }
+
+    *pMediaType = M4READER_kMediaTypePCM;
+
+    (*pRdrGlobalInterface)->m_pFctCreate           = M4READER_PCM_create;
+    (*pRdrGlobalInterface)->m_pFctDestroy          = M4READER_PCM_destroy;
+    (*pRdrGlobalInterface)->m_pFctOpen             = M4READER_PCM_open;
+    (*pRdrGlobalInterface)->m_pFctClose            = M4READER_PCM_close;
+    (*pRdrGlobalInterface)->m_pFctStart            = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctStop             = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctGetOption        = M4READER_PCM_getOption;
+    (*pRdrGlobalInterface)->m_pFctSetOption        = M4READER_PCM_setOption;
+    (*pRdrGlobalInterface)->m_pFctGetNextStream    = M4READER_PCM_getNextStream;
+    (*pRdrGlobalInterface)->m_pFctFillAuStruct     = M4READER_PCM_fillAuStruct;
+    (*pRdrGlobalInterface)->m_pFctJump             = M4READER_PCM_jump;
+    (*pRdrGlobalInterface)->m_pFctReset            = M4READER_PCM_reset;
+    (*pRdrGlobalInterface)->m_pFctGetPrevRapTime   = M4OSA_NULL; /*all AUs are RAP*/
+
+    (*pRdrDataInterface)->m_pFctGetNextAu          = M4READER_PCM_getNextAu;
+
+    (*pRdrDataInterface)->m_readerContext = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+
diff --git a/libvideoeditor/vss/src/M4VD_EXTERNAL_BitstreamParser.c b/libvideoeditor/vss/src/M4VD_EXTERNAL_BitstreamParser.c
new file mode 100755
index 0000000..bc75488
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VD_EXTERNAL_BitstreamParser.c
@@ -0,0 +1,584 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+
+#include "M4VD_EXTERNAL_Interface.h"
+#include "M4VD_EXTERNAL_Internal.h"
+#include "M4VD_Tools.h"
+
+/**
+ ************************************************************************
+ * @file   M4VD_EXTERNAL_BitstreamParser.c
+ * @brief
+ * @note   This file implements external Bitstream parser
+ ************************************************************************
+ */
+
+M4OSA_UInt32 M4VD_EXTERNAL_GetBitsFromMemory(M4VS_Bitstream_ctxt* parsingCtxt,
+     M4OSA_UInt32 nb_bits)
+{
+#if 0
+    M4OSA_UInt32    code;
+    M4OSA_UInt32    i;
+
+    code = 0;
+    for (i = 0; i < nb_bits; i++)
+    {
+        if (parsingCtxt->stream_index == 8)
+        {
+            M4OSA_memcpy( (M4OSA_MemAddr8)&(parsingCtxt->stream_byte), parsingCtxt->in,
+                 sizeof(unsigned char));
+            parsingCtxt->in++;
+            //fread(&stream_byte, sizeof(unsigned char),1,in);
+            parsingCtxt->stream_index = 0;
+        }
+        code = (code << 1);
+        code |= ((parsingCtxt->stream_byte & 0x80) >> 7);
+
+        parsingCtxt->stream_byte = (parsingCtxt->stream_byte << 1);
+        parsingCtxt->stream_index++;
+    }
+
+    return code;
+#endif
+    return M4VD_Tools_GetBitsFromMemory(parsingCtxt, nb_bits);
+}
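+
+/* Illustrative sketch (not part of the original code, kept under #if 0 so it
+   is never compiled): how the bit reader is driven. The context is reset the
+   same way the DSI parser below does it, then fields are consumed MSB first.
+   The function name is hypothetical. */
+#if 0
+static M4OSA_UInt32 sample_readStartCodeValue(M4OSA_UInt8* pStream)
+{
+    M4VS_Bitstream_ctxt ctxt;
+
+    ctxt.stream_byte  = 0;
+    ctxt.stream_index = 8;   /* forces the first byte to be fetched on the first read */
+    ctxt.in           = (M4OSA_Int8 *)pStream;
+
+    /* skip the 24-bit start code prefix (0x000001) ... */
+    M4VD_EXTERNAL_GetBitsFromMemory(&ctxt, 24);
+
+    /* ... and return the 8-bit start code value
+       (0x20..0x2F identifies a video_object_layer_start_code) */
+    return M4VD_EXTERNAL_GetBitsFromMemory(&ctxt, 8);
+}
+#endif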
+
+M4OSA_ERR M4VD_EXTERNAL_WriteBitsToMemory(M4OSA_UInt32 bitsToWrite,
+                                                 M4OSA_MemAddr32 dest_bits,
+                                                 M4OSA_UInt8 offset, M4OSA_UInt8 nb_bits)
+{
+#if 0
+    M4OSA_UInt8 i,j;
+    M4OSA_UInt32 temp_dest = 0, mask = 0, temp = 1;
+    M4OSA_UInt32 input = bitsToWrite;
+
+    input = (input << (32 - nb_bits - offset));
+
+    /* Put destination buffer to 0 */
+    for(j=0;j<3;j++)
+    {
+        for(i=0;i<8;i++)
+        {
+            if((j*8)+i >= offset && (j*8)+i < nb_bits + offset)
+            {
+                mask |= (temp << ((7*(j+1))-i+j));
+            }
+        }
+    }
+    mask = ~mask;
+    *dest_bits &= mask;
+
+    /* Parse input bits, and fill output buffer */
+    for(j=0;j<3;j++)
+    {
+        for(i=0;i<8;i++)
+        {
+            if((j*8)+i >= offset && (j*8)+i < nb_bits + offset)
+            {
+                temp = ((input & (0x80000000 >> offset)) >> (31-offset));
+                //*dest_bits |= (temp << (31 - i));
+                *dest_bits |= (temp << ((7*(j+1))-i+j));
+                input = (input << 1);
+            }
+        }
+    }
+
+    return M4NO_ERROR;
+#endif
+    return M4VD_Tools_WriteBitsToMemory(bitsToWrite, dest_bits, offset, nb_bits);
+}
+
+M4OSA_ERR M4DECODER_EXTERNAL_ParseVideoDSI(M4OSA_UInt8* pVol, M4OSA_Int32 aVolSize,
+                                             M4DECODER_MPEG4_DecoderConfigInfo* pDci,
+                                             M4DECODER_VideoSize* pVideoSize)
+{
+    M4VS_Bitstream_ctxt parsingCtxt;
+    M4OSA_UInt32 code, j;
+    M4OSA_MemAddr8 start;
+    M4OSA_UInt8 i;
+    M4OSA_UInt32 time_incr_length;
+    M4OSA_UInt8 vol_verid=0, b_hierarchy_type;
+
+    /* Parsing variables */
+    M4OSA_UInt8 video_object_layer_shape = 0;
+    M4OSA_UInt8 sprite_enable = 0;
+    M4OSA_UInt8 reduced_resolution_vop_enable = 0;
+    M4OSA_UInt8 scalability = 0;
+    M4OSA_UInt8 enhancement_type = 0;
+    M4OSA_UInt8 complexity_estimation_disable = 0;
+    M4OSA_UInt8 interlaced = 0;
+    M4OSA_UInt8 sprite_warping_points = 0;
+    M4OSA_UInt8 sprite_brightness_change = 0;
+    M4OSA_UInt8 quant_precision = 0;
+
+    /* Fill the structure with default parameters */
+    pVideoSize->m_uiWidth              = 0;
+    pVideoSize->m_uiHeight             = 0;
+
+    pDci->uiTimeScale          = 0;
+    pDci->uiProfile            = 0;
+    pDci->uiUseOfResynchMarker = 0;
+    pDci->bDataPartition       = M4OSA_FALSE;
+    pDci->bUseOfRVLC           = M4OSA_FALSE;
+
+    /* Reset the bitstream context */
+    parsingCtxt.stream_byte = 0;
+    parsingCtxt.stream_index = 8;
+    parsingCtxt.in = (M4OSA_Int8 *)pVol;
+
+    start = (M4OSA_Int8 *)pVol;
+
+    /* Start parsing */
+    while (parsingCtxt.in - start < aVolSize)
+    {
+        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+        if (code == 0)
+        {
+            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+            if (code == 0)
+            {
+                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                if (code == 1)
+                {
+                    /* start code found */
+                    code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+
+                    /* ----- 0x20..0x2F : video_object_layer_start_code ----- */
+
+                    if ((code > 0x1F) && (code < 0x30))
+                    {
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* random accessible vol */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 8);/* video object type indication */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* is object layer identifier */
+                        if (code == 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     4); /* video object layer verid */
+                            vol_verid = (M4OSA_UInt8)code;
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     3); /* video object layer priority */
+                        }
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 4);/* aspect ratio */
+                        if (code == 15)
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     16); /* par_width and par_height (8+8) */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* vol control parameters */
+                        if (code == 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     3);/* chroma format + low delay (3+1) */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* vbv parameters */
+                            if (code == 1)
+                            {
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         32);/* first and latter half bitrate + 2 marker bits
+                                            (15 + 1 + 15 + 1) */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         31);/* first and latter half vbv buffer size + first
+                                          half vbv occupancy + marker bits (15+1+3+11+1)*/
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         16);/* first half vbv occupancy + marker bits (15+1)*/
+                            }
+                        }
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 2); /* video object layer shape */
+                        /* Need to save it for vop parsing */
+                        video_object_layer_shape = (M4OSA_UInt8)code;
+
+                        if (code != 0) return 0; /* only rectangular case supported */
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1); /* Marker bit */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 16); /* VOP time increment resolution */
+                        pDci->uiTimeScale = code;
+
+                        /* Computes time increment length */
+                        j    = code - 1;
+                        for (i = 0; (i < 32) && (j != 0); j >>= 1)
+                        {
+                            i++;
+                        }
+                        time_incr_length = (i == 0) ? 1 : i;
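+                        /* e.g. a VOP time increment resolution of 30 gives
+                           j = 29 (11101b), hence time_incr_length = 5 bits */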
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* Marker bit */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* Fixed VOP rate */
+                        if (code == 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     time_incr_length);/* Fixed VOP time increment */
+                        }
+
+                        if(video_object_layer_shape != 1) /* 1 = Binary */
+                        {
+                            if(video_object_layer_shape == 0) /* 0 = rectangular */
+                            {
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         13);/* Width */
+                                pVideoSize->m_uiWidth = code;
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         13);/* Height */
+                                pVideoSize->m_uiHeight = code;
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                            }
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* interlaced */
+                        interlaced = (M4OSA_UInt8)code;
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* OBMC disable */
+
+                        if(vol_verid == 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* sprite enable */
+                            sprite_enable = (M4OSA_UInt8)code;
+                        }
+                        else
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     2);/* sprite enable */
+                            sprite_enable = (M4OSA_UInt8)code;
+                        }
+                        if ((sprite_enable == 1) || (sprite_enable == 2))
+                        /* Sprite static = 1 and Sprite GMC = 2 */
+                        {
+                            if (sprite_enable != 2)
+                            {
+
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         13);/* sprite width */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         13);/* sprite height */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         13);/* sprite l coordinate */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         13);/* sprite top coordinate */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                            }
+
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     6);/* sprite warping points */
+                            sprite_warping_points = (M4OSA_UInt8)code;
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     2);/* sprite warping accuracy */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* sprite brightness change */
+                            sprite_brightness_change = (M4OSA_UInt8)code;
+                            if (sprite_enable != 2)
+                            {
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                             1);/* low latency sprite enable */
+                            }
+                        }
+                        if ((vol_verid != 1) && (video_object_layer_shape != 0))
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* sadct disable */
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1); /* not 8 bits */
+                        if (code)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     4);/* quant precision */
+                            quant_precision = (M4OSA_UInt8)code;
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         4);/* bits per pixel */
+                        }
+
+                        /* greyscale not supported */
+                        if(video_object_layer_shape == 3)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     3); /* nogray quant update + composition method +
+                                            linear composition */
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* quant type */
+                        if (code)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* load intra quant mat */
+                            if (code)
+                            {
+                                /* first intra quant matrix coefficient */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                                i    = 1;
+                                while (i < 64)
+                                {
+                                    code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                                    if (code == 0)
+                                        break;
+                                    i++;
+                                }
+                            }
+
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* load non intra quant mat */
+                            if (code)
+                            {
+                                /* first non intra quant matrix coefficient */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                                i    = 1;
+                                while (i < 64)
+                                {
+                                    code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                                    if (code == 0)
+                                        break;
+                                    i++;
+                                }
+                            }
+                        }
+
+                        if (vol_verid != 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* quarter sample */
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* complexity estimation disable */
+                        complexity_estimation_disable = (M4OSA_UInt8)code;
+                        if (!code)
+                        {
+                            //return M4ERR_NOT_IMPLEMENTED;
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* resync marker disable */
+                        pDci->uiUseOfResynchMarker = (code) ? 0 : 1;
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* data partitioned */
+                        pDci->bDataPartition = (code) ? M4OSA_TRUE : M4OSA_FALSE;
+                        if (code)
+                        {
+                            /* reversible VLC */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            pDci->bUseOfRVLC = (code) ? M4OSA_TRUE : M4OSA_FALSE;
+                        }
+
+                        if (vol_verid != 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);/* newpred */
+                            if (code)
+                            {
+                                //return M4ERR_PARAMETER;
+                            }
+                            /* reduced resolution vop enable */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            reduced_resolution_vop_enable = (M4OSA_UInt8)code;
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);/* scalability */
+                        scalability = (M4OSA_UInt8)code;
+                        if (code)
+                        {
+                            /* hierarchy type */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            b_hierarchy_type = (M4OSA_UInt8)code;
+                            /* ref layer id */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 4);
+                            /* ref sampling direct */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            /* hor sampling factor N */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            /* hor sampling factor M */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            /* vert sampling factor N */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            /* vert sampling factor M */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            /* enhancement type */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            enhancement_type = (M4OSA_UInt8)code;
+                            if ((!b_hierarchy_type) && (video_object_layer_shape == 1))
+                            {
+                                /* use ref shape */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                                /* use ref texture */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                                /* shape hor sampling factor N */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                                /* shape hor sampling factor M */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                                /* shape vert sampling factor N */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                                /* shape vert sampling factor M */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            }
+                        }
+                        break;
+                    }
+
+                    /* ----- 0xB0 : visual_object_sequence_start_code ----- */
+
+                    else if(code == 0xB0)
+                    {
+                        /* profile_and_level_indication */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                        pDci->uiProfile = (M4OSA_UInt8)code;
+                    }
+
+                    /* ----- 0xB5 : visual_object_start_code ----- */
+
+                    else if(code == 0xB5)
+                    {
+                        /* is object layer identifier */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                        if (code == 1)
+                        {
+                             /* visual object verid */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 4);
+                            vol_verid = (M4OSA_UInt8)code;
+                             /* visual object layer priority */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 3);
+                        }
+                        else
+                        {
+                             /* Realign on byte */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 7);
+                            vol_verid = 1;
+                        }
+                    }
+
+                    /* ----- end ----- */
+                }
+                else
+                {
+                    if ((code >> 2) == 0x20)
+                    {
+                        /* H.263 start code -> wrong stream type, stop parsing */
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
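+/**
+ ************************************************************************
+ * @brief   Parses the AVC decoder specific info to retrieve the profile and level
+ * @note    Searches the DSI for an SPS NAL unit (nal_unit_type 7) with
+ *          profile_idc 66 (Baseline, 0x42), then maps level_idc, together with
+ *          constraint_set3_flag for the level 1b case, to the
+ *          M4DECODER_AVCProfileLevel enumeration
+ *
+ * @param   pDSI        (IN)    Pointer to the decoder specific info buffer
+ * @param   DSISize     (IN)    Size of the decoder specific info, in bytes
+ * @param   profile     (OUT)   Profile/level found, or
+ *                              M4DECODER_AVC_kProfile_and_Level_Out_Of_Range
+ *
+ * @return  M4NO_ERROR          There is no error
+ ************************************************************************
+ */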
+M4OSA_ERR M4DECODER_EXTERNAL_ParseAVCDSI(M4OSA_UInt8* pDSI, M4OSA_Int32 DSISize,
+                                         M4DECODER_AVCProfileLevel *profile)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_Bool NALSPS_and_Profile0Found = M4OSA_FALSE;
+    M4OSA_UInt16 index;
+    M4OSA_Bool    constraintSet3;
+
+    /* check for baseline profile */
+    for(index = 0; index < (DSISize-1); index++)
+    {
+        if(((pDSI[index] & 0x1f) == 0x07) && (pDSI[index+1] == 0x42))
+        {
+            NALSPS_and_Profile0Found = M4OSA_TRUE;
+            break;
+        }
+    }
+    if(M4OSA_FALSE == NALSPS_and_Profile0Found)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_ParseAVCDSI: index bad = %d", index);
+        *profile = M4DECODER_AVC_kProfile_and_Level_Out_Of_Range;
+    }
+    else
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_ParseAVCDSI: index = %d", index);
+        constraintSet3 = (pDSI[index+2] & 0x10);
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_ParseAVCDSI: level = %d", pDSI[index+3]);
+        switch(pDSI[index+3])
+        {
+        case 10:
+            *profile = M4DECODER_AVC_kProfile_0_Level_1;
+            break;
+        case 11:
+            if(constraintSet3)
+                *profile = M4DECODER_AVC_kProfile_0_Level_1b;
+            else
+                *profile = M4DECODER_AVC_kProfile_0_Level_1_1;
+            break;
+        case 12:
+            *profile = M4DECODER_AVC_kProfile_0_Level_1_2;
+            break;
+        case 13:
+            *profile = M4DECODER_AVC_kProfile_0_Level_1_3;
+            break;
+        case 20:
+            *profile = M4DECODER_AVC_kProfile_0_Level_2;
+            break;
+        case 21:
+            *profile = M4DECODER_AVC_kProfile_0_Level_2_1;
+            break;
+        case 22:
+            *profile = M4DECODER_AVC_kProfile_0_Level_2_2;
+            break;
+        case 30:
+            *profile = M4DECODER_AVC_kProfile_0_Level_3;
+            break;
+        case 31:
+            *profile = M4DECODER_AVC_kProfile_0_Level_3_1;
+            break;
+        case 32:
+            *profile = M4DECODER_AVC_kProfile_0_Level_3_2;
+            break;
+        case 40:
+            *profile = M4DECODER_AVC_kProfile_0_Level_4;
+            break;
+        case 41:
+            *profile = M4DECODER_AVC_kProfile_0_Level_4_1;
+            break;
+        case 42:
+            *profile = M4DECODER_AVC_kProfile_0_Level_4_2;
+            break;
+        case 50:
+            *profile = M4DECODER_AVC_kProfile_0_Level_5;
+            break;
+        case 51:
+            *profile = M4DECODER_AVC_kProfile_0_Level_5_1;
+            break;
+        default:
+            *profile = M4DECODER_AVC_kProfile_and_Level_Out_Of_Range;
+        }
+    }
+    return err;
+}
+
diff --git a/libvideoeditor/vss/src/M4VD_EXTERNAL_Interface.c b/libvideoeditor/vss/src/M4VD_EXTERNAL_Interface.c
new file mode 100755
index 0000000..009f495
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VD_EXTERNAL_Interface.c
@@ -0,0 +1,1155 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file    M4VD_EXTERNAL_Interface.c
+ * @brief   Shell interface to an external video decoder
+ * @note
+ ******************************************************************************
+ */
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+#include "M4OSA_Semaphore.h"
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+
+#include "M4VD_EXTERNAL_Interface.h"
+#include "M4VD_EXTERNAL_Internal.h"
+
+/* Warning: the decode thread has finished decoding all the frames */
+#define M4WAR_DECODE_FINISHED                                M4OSA_ERR_CREATE(M4_WAR,\
+                                                                 M4DECODER_EXTERNAL, 0x0001)
+/* Warning: the render thread has finished rendering the frame */
+#define M4WAR_RENDER_FINISHED                                M4OSA_ERR_CREATE(M4_WAR,\
+                                                                 M4DECODER_EXTERNAL, 0x0002)
+
+#define M4ERR_CHECK(x) if(M4NO_ERROR!=x) return x;
+#define M4ERR_EXIT(x) do { err = x; goto exit_with_error; } while(0)
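+/* M4ERR_CHECK returns from the current function on error; M4ERR_EXIT stores the
+   error code in 'err' and jumps to the local exit_with_error label so that the
+   calling function can release its resources before returning */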
+
+
+/* ----- shell API ----- */
+
+static M4OSA_ERR M4DECODER_EXTERNAL_create(M4OSA_Context *pVS_Context,
+                                             M4_StreamHandler *pStreamHandler,
+                                             M4READER_DataInterface *pReaderDataInterface,
+                                             M4_AccessUnit* pAccessUnit, M4OSA_Void* pUserData);
+static M4OSA_ERR M4DECODER_EXTERNAL_destroy(M4OSA_Context pVS_Context);
+static M4OSA_ERR M4DECODER_EXTERNAL_getOption(M4OSA_Context pVS_Context, M4OSA_OptionID optionId,
+                                                M4OSA_DataOption* pValue);
+static M4OSA_ERR M4DECODER_EXTERNAL_setOption(M4OSA_Context pVS_Context, M4OSA_OptionID optionId,
+                                                 M4OSA_DataOption pValue);
+static M4OSA_ERR M4DECODER_EXTERNAL_decode(M4OSA_Context pVS_Context, M4_MediaTime* pTime,
+                                             M4OSA_Bool bJump);
+static M4OSA_ERR M4DECODER_EXTERNAL_render(M4OSA_Context pVS_Context, M4_MediaTime* pTime,
+                                             M4VIFI_ImagePlane* pOutputPlane,
+                                             M4OSA_Bool bForceRender);
+
+/* ----- Signaling functions ----- */
+
+static M4OSA_ERR M4DECODER_EXTERNAL_signalDecoderOver(M4OSA_Context pVS_Context,
+                                                        M4_MediaTime aTime, M4OSA_ERR aUserError);
+static M4OSA_ERR M4DECODER_EXTERNAL_signalRenderOver(M4OSA_Context pVS_Context,
+                                                     M4_MediaTime aTime, M4OSA_ERR aUserError);
+
+/* ----- static internal functions ----- */
+
+static M4OSA_ERR M4DECODER_EXTERNAL_Init(void** pVS_Context, M4VD_Interface* p_HWInterface,
+                                         M4_StreamHandler *pStreamHandler);
+static M4OSA_ERR M4DECODER_EXTERNAL_StreamDescriptionInit(M4VD_StreamInfo** ppStreamInfo,
+                                                             M4_StreamHandler *pStreamHandler);
+static M4OSA_ERR M4DECODER_EXTERNAL_SetUpReadInput(void* pVS_Context,
+                                                     M4READER_DataInterface* pReader,
+                                                     M4_AccessUnit* pAccessUnit);
+static M4OSA_ERR M4DECODER_EXTERNAL_GetNextAu(M4VS_VideoDecoder_Context* pStreamContext,
+                                                 M4VD_VideoBuffer *nextBuffer,
+                                                 M4_MediaTime* nextFrameTime);
+static M4OSA_ERR M4DECODER_EXTERNAL_SynchronousDecode(M4OSA_Context pVS_Context);
+static M4OSA_ERR M4DECODER_EXTERNAL_AsynchronousDecode(M4OSA_Context pVS_Context);
+static M4OSA_ERR M4DECODER_EXTERNAL_AsynchronousRender(M4OSA_Context pVS_Context);
+
+
+/* ___________________________________________________________________ */
+/*|                                                                   |*/
+/*|                          getInterface                             |*/
+/*|___________________________________________________________________|*/
+
+/**
+ ************************************************************************
+ * @brief   Retrieves the interface implemented by the decoder
+ * @note
+ *
+ * @param   pDecoderInterface: (OUT) address of a pointer that will be set to the interface
+ *                                   implemented by this decoder. The interface is a structure
+ *                                   allocated by the function and must be unallocated by the
+ *                                   caller.
+ *
+ * @returns : M4NO_ERROR  if OK
+ *            M4ERR_ALLOC if allocation failed
+ ************************************************************************
+ */
+M4OSA_ERR M4DECODER_EXTERNAL_getInterface(M4DECODER_VideoInterface **pDecoderInterface)
+{
+    /* Allocates memory for the decoder shell pointer to function */
+    *pDecoderInterface =
+         (M4DECODER_VideoInterface*)M4OSA_malloc( sizeof(M4DECODER_VideoInterface),
+             M4DECODER_EXTERNAL, (M4OSA_Char *)"M4DECODER_VideoInterface" );
+    if (M4OSA_NULL == *pDecoderInterface)
+    {
+        M4OSA_TRACE1_0("M4DECODER_EXTERNAL_getInterface:\
+             unable to allocate M4DECODER_VideoInterface, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    (*pDecoderInterface)->m_pFctCreate    = M4DECODER_EXTERNAL_create;
+    (*pDecoderInterface)->m_pFctDestroy   = M4DECODER_EXTERNAL_destroy;
+    (*pDecoderInterface)->m_pFctGetOption = M4DECODER_EXTERNAL_getOption;
+    (*pDecoderInterface)->m_pFctSetOption = M4DECODER_EXTERNAL_setOption;
+    (*pDecoderInterface)->m_pFctDecode    = M4DECODER_EXTERNAL_decode;
+    (*pDecoderInterface)->m_pFctRender    = M4DECODER_EXTERNAL_render;
+
+    return M4NO_ERROR;
+}
+
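+/* Illustrative usage sketch (caller-side variable names are hypothetical):
+ *
+ *     M4DECODER_VideoInterface *pItf = M4OSA_NULL;
+ *     if (M4NO_ERROR == M4DECODER_EXTERNAL_getInterface(&pItf))
+ *     {
+ *         // use pItf->m_pFctCreate, m_pFctDecode, m_pFctRender, ...
+ *         M4OSA_free((M4OSA_MemAddr32)pItf);  // the interface must be freed by the caller
+ *     }
+ */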
+
+/* ___________________________________________________________________ */
+/*|                                                                   |*/
+/*|                           shell API                            |*/
+/*|___________________________________________________________________|*/
+
+/**
+ ************************************************************************
+ * @brief   Creates the external video decoder
+ * @note    This function creates internal video decoder context and
+ *          initializes it.
+ *
+ * @param   pVS_Context     (OUT)   Context of the video hw shell
+ * @param   pStreamHandler  (IN)    Pointer to a video stream description
+ * @param   pReaderDataInterface: (IN)  Pointer to the M4READER_DataInterface
+ *                                  structure that must be used by the
+ *                                  decoder to read data from the stream
+ * @param   pAccessUnit     (IN)    Pointer to an access unit (allocated
+ *                                  by the caller) where the data to be
+ *                                  decoded are stored
+ * @param   pExternalAPI    (IN)    Interface of the client video decoder
+ * @param   pUserData       (IN)    User data of the external video decoder
+ *
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_ALLOC             a memory allocation has failed
+ * @return  M4ERR_PARAMETER         at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_create(M4OSA_Context *pVS_Context,
+                                             M4_StreamHandler *pStreamHandler,
+                                             M4READER_DataInterface *pReaderDataInterface,
+                                             M4_AccessUnit* pAccessUnit, M4OSA_Void* pUserData)
+{
+    M4VD_VideoType videoDecoderKind;
+    M4VD_StreamInfo* pStreamInfo;
+    M4VD_OutputFormat outputFormat;
+
+    M4VS_VideoDecoder_Context* pStreamContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_create");
+
+    /* Video Shell Creation */
+    err = M4DECODER_EXTERNAL_Init(pVS_Context,
+         ((M4DECODER_EXTERNAL_UserDataType)pUserData)->externalFuncs, pStreamHandler);
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_create :\
+             M4VD_EXTERNAL_Init RETURNS THE ERROR CODE = 0x%x", err);
+        return err;
+    }
+
+    err = M4DECODER_EXTERNAL_SetUpReadInput(*pVS_Context, pReaderDataInterface, pAccessUnit);
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_create :\
+             M4VD_EXTERNAL_SetUpReadInput RETURNS THE ERROR CODE = 0x%x", err);
+        return err;
+    }
+
+    pStreamContext = (M4VS_VideoDecoder_Context*)(*pVS_Context);
+
+    /* Stream Description init */
+    err = M4DECODER_EXTERNAL_StreamDescriptionInit(&pStreamInfo, pStreamHandler);
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_create :\
+             M4VD_EXTERNAL_StreamDescriptionInit RETURNS THE ERROR CODE = 0x%x", err);
+        return err;
+    }
+
+    pStreamContext->m_pStreamInfo = pStreamInfo;
+
+    /* HW context creation */
+    err = pStreamContext->m_VD_Interface->m_pFctInitVideoDecoder(&(pStreamContext->m_VD_Context),
+         &(pStreamContext->m_VD_SignalingInterface));
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_create : m_pFctInitVideoDecoder() error 0x%x", err);
+        return err;
+    }
+
+    /* HW decoder creation */
+    switch(pStreamHandler->m_streamType)
+    {
+        case M4DA_StreamTypeVideoH263 :
+            videoDecoderKind = M4VD_kH263VideoDec;
+            break;
+
+        default :
+        case M4DA_StreamTypeVideoMpeg4 :
+            videoDecoderKind = M4VD_kMpeg4VideoDec;
+            break;
+    }
+
+    err = pStreamContext->m_VD_Interface->m_pFctOpenDecoder(pStreamContext->m_VD_Context,
+         videoDecoderKind, pStreamContext->m_pStreamInfo, &outputFormat,
+             ((M4DECODER_EXTERNAL_UserDataType)pUserData)->externalUserData);
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_create : m_pFctOpenDecoder() error 0x%x", err);
+        return err;
+    }
+
+    /* Parse the VOL header */
+    err = M4DECODER_EXTERNAL_ParseVideoDSI((M4OSA_UInt8 *)pStreamContext->m_pStreamInfo->\
+                                           decoderConfiguration.pBuffer,
+                                           pStreamContext->m_pStreamInfo->\
+                                           decoderConfiguration.aSize,
+                                           &pStreamContext->m_Dci, &pStreamContext->m_VideoSize);
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_create :\
+             M4DECODER_EXTERNAL_ParseVideoDSI() error 0x%x", err);
+        return err;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   destroy the instance of the decoder
+ * @note    after this call the context is invalid
+ *
+ * @param   pVS_Context:   (IN) Context of the decoder
+ *
+ * @return  M4NO_ERROR          There is no error
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_destroy(M4OSA_Context pVS_Context)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_destroy");
+
+    if(M4OSA_NULL != pStreamContext)
+    {
+        /* Call external API destroy function */
+        pStreamContext->m_VD_Interface->m_pFctClose(pStreamContext->m_VD_Context);
+
+        /* Destroy context */
+        pStreamContext->m_VD_Interface->m_pFctCleanUp(pStreamContext->m_VD_Context);
+
+        if(M4OSA_NULL != pStreamContext->m_pStreamInfo)
+        {
+            M4OSA_free((M4OSA_MemAddr32)pStreamContext->m_pStreamInfo);
+            pStreamContext->m_pStreamInfo = M4OSA_NULL;
+        }
+
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+        if (M4OSA_NULL != pStreamContext->m_SemSync)
+        {
+            M4OSA_semaphoreClose(pStreamContext->m_SemSync);
+        }
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+
+        M4OSA_free((M4OSA_MemAddr32)pStreamContext);
+        pStreamContext = M4OSA_NULL;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Get an option value from the decoder
+ * @note    It allows the caller to retrieve a property value:
+ *          - the size (width x height) of the image
+ *          - the DSI properties
+ *
+ * @param   pVS_Context: (IN)       Context of the decoder
+ * @param   optionId:    (IN)       indicates the option to set
+ * @param   pValue:      (IN/OUT)   pointer to structure or value (allocated by user) where option
+ *                                    is stored
+ * @return  M4NO_ERROR              there is no error
+ * @return  M4ERR_PARAMETER         The context is invalid (in DEBUG only)
+ * @return  M4ERR_BAD_OPTION_ID     when the option ID is not a valid one
+ * @return  M4ERR_STATE             State automaton is not applied
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_getOption(M4OSA_Context pVS_Context, M4OSA_OptionID optionId,
+                                             M4OSA_DataOption *pValue)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_getOption");
+
+    switch (optionId)
+    {
+        case M4DECODER_kOptionID_VideoSize:
+            *((M4DECODER_VideoSize*)pValue) = pStreamContext->m_VideoSize;
+            err = M4NO_ERROR;
+            break;
+
+        case M4DECODER_MPEG4_kOptionID_DecoderConfigInfo:
+            *((M4DECODER_MPEG4_DecoderConfigInfo*)pValue) = pStreamContext->m_Dci;
+            err = M4NO_ERROR;
+            break;
+
+        default:
+            err = pStreamContext->m_VD_Interface->m_pFctGetOption(pStreamContext->m_VD_Context,
+                     optionId, pValue);
+            break;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   Sets an option value of the decoder
+ * @note    It allows the caller to set a property value:
+ *          - Nothing implemented at this time
+ *
+ * @param   pVS_Context: (IN)       Context of the external video decoder shell
+ * @param   optionId:    (IN)       Identifier indicating the option to set
+ * @param   pValue:      (IN)       Pointer to structure or value (allocated by user) where
+ *                                    option is stored
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_BAD_OPTION_ID     The option ID is not a valid one
+ * @return  M4ERR_STATE             State automaton is not applied
+ * @return  M4ERR_PARAMETER         The option parameter is invalid
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_setOption(M4OSA_Context pVS_Context, M4OSA_OptionID optionId,
+                                              M4OSA_DataOption pValue)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+    M4OSA_ERR err;
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_setOption");
+
+    switch (optionId)
+    {
+        case M4DECODER_kOptionID_OutputFilter:
+        {
+            M4DECODER_OutputFilter* pOutputFilter = (M4DECODER_OutputFilter*) pValue;
+            err =
+                pStreamContext->m_VD_Interface->m_pFctSetOutputFilter(pStreamContext->m_VD_Context,
+                            (M4VIFI_PlanConverterFunctionType*)pOutputFilter->m_pFilterFunction,
+                            pOutputFilter->m_pFilterUserData);
+        }
+        break;
+
+        case M4DECODER_kOptionID_DeblockingFilter:
+            err = M4NO_ERROR;
+        break;
+
+        default:
+            err = pStreamContext->m_VD_Interface->m_pFctSetOption(pStreamContext->m_VD_Context,
+                 optionId, pValue);
+        break;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   Decode video Access Units up to a target time
+ * @note    Parse and decode the video until it can output a decoded image for which
+ *          the composition time is equal or greater to the passed targeted time
+ *          The data are read from the reader data interface passed to M4DECODER_EXTERNAL_create.
+ *          If threaded mode, waits until previous decoding is over,
+ *          and fill decoding parameters used by the decoding thread.
+ *
+ * @param   pVS_Context:(IN)        Context of the external video decoder shell
+ * @param   pTime:      (IN/OUT)    IN: Time to decode up to (in milliseconds)
+ *                                  OUT: Time of the last decoded frame (in ms)
+ * @param   bJump:      (IN)        0 if no jump occurred just before this call
+ *                                  1 if a jump has just been made
+ *
+ * @return  M4NO_ERROR              there is no error
+ * @return  M4ERR_PARAMETER         at least one parameter is not properly set
+ * @return  M4WAR_NO_MORE_AU        there is no more access unit to decode (end of stream)
+ * @return  M4WAR_VIDEORENDERER_NO_NEW_FRAME    No frame to render
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_decode(M4OSA_Context pVS_Context, M4_MediaTime* pTime,
+                                             M4OSA_Bool bJump)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE2_2("M4DECODER_EXTERNAL_decode : up to %lf  bjump = 0x%x", *pTime, bJump);
+
+    pStreamContext->m_DecodeUpToCts = *pTime;
+    pStreamContext->m_bJump = bJump;
+    if (bJump)
+    {
+        pStreamContext->m_CurrentDecodeCts = -1.0;
+        pStreamContext->m_CurrentRenderCts = -1.0;
+    }
+
+    if(pStreamContext->m_DecodeUpToCts < pStreamContext->m_nextAUCts &&
+        pStreamContext->m_CurrentRenderCts > pStreamContext->m_DecodeUpToCts)
+    {
+        /* No new decoding needs to be launched: the previously decoded frame will be
+             reused. A warning is returned so that the service knows about it. */
+        /* In that case, the service MUST NOT call the render function and must keep
+             the previous frame if necessary (i.e. force render case) */
+        M4OSA_TRACE2_0("No decode is needed, same frame reused");
+        return M4WAR_VIDEORENDERER_NO_NEW_FRAME;
+    }
+
+    /* If render has not been called for frame n, the decoding of frame n+1 has not
+         been launched -> do not wait for its completion */
+    if(pStreamContext->m_bIsWaitNextDecode == M4OSA_TRUE)
+    {
+        /* wait for decode n+1 to complete */
+        //M4semvalue--;
+        //printf("Semaphore wait: %d\n", M4semvalue);
+        pStreamContext->m_bIsWaitNextDecode = M4OSA_FALSE;
+        M4OSA_semaphoreWait(pStreamContext->m_SemSync, M4OSA_WAIT_FOREVER);
+    }
+    if(pStreamContext->m_CurrentDecodeCts >= *pTime)
+    {
+        /* The already "predecoded" frame is at or beyond the requested time:
+             reuse it and return immediately */
+        *pTime = pStreamContext->m_CurrentDecodeCts;
+        return M4NO_ERROR;
+    }
+
+    pStreamContext->m_NbDecodedFrames = 0;
+    pStreamContext->m_uiDecodeError = M4NO_ERROR;
+    pStreamContext->m_bDataDecodePending = M4OSA_TRUE;
+
+    /* Launch DecodeUpTo process in synchronous mode */
+    while(pStreamContext->m_uiDecodeError == M4NO_ERROR)
+    {
+        M4DECODER_EXTERNAL_SynchronousDecode(pVS_Context);
+        /* return code is ignored, it is used only in M4OSA_Thread api */
+    }
+
+    *pTime = pStreamContext->m_CurrentDecodeCts;
+
+    if ( (M4WAR_DECODE_FINISHED == pStreamContext->m_uiDecodeError)
+        || (M4WAR_VIDEORENDERER_NO_NEW_FRAME == pStreamContext->m_uiDecodeError) )
+    {
+        pStreamContext->m_uiDecodeError = M4NO_ERROR;
+    }
+
+    return pStreamContext->m_uiDecodeError;
+}
+
+/**
+ ************************************************************************
+ * @brief   Renders the video at the specified time.
+ * @note    In threaded mode, this function unlocks the decoding thread,
+ *          which also calls the external rendering function.
+ *          Otherwise, it just calls the external rendering function and
+ *          waits for its completion.
+ *
+ * @param   pVS_Context: (IN)       Context of the video decoder shell
+ * @param   pTime:       (IN/OUT)   IN: Time to render to (in milliseconds)
+ *                                  OUT:Time of the effectively rendered frame (in ms)
+ * @param   pOutputPlane:(OUT)      Output plane filled with decoded data (converted)
+ *                                  If NULL, the rendering is made by the external
+ *                                  component.
+ * @param   bForceRender:(IN)       1 if the image must be rendered even if it has already been rendered
+ *                                  0 if not (in which case the function can return
+ *                                    M4WAR_VIDEORENDERER_NO_NEW_FRAME)
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_PARAMETER         At least one parameter is not properly set
+ * @return  M4ERR_STATE             State automaton is not applied
+ * @return  M4ERR_ALLOC             There is no more available memory
+ * @return  M4WAR_VIDEORENDERER_NO_NEW_FRAME    If the frame to render has already been rendered
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_render(M4OSA_Context pVS_Context, M4_MediaTime* pTime,
+                                           M4VIFI_ImagePlane* pOutputPlane,
+                                           M4OSA_Bool bForceRender)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_TRACE2_2("M4DECODER_EXTERNAL_render : pTime = %lf, forceRender: %d ", *pTime,
+         bForceRender);
+
+    pStreamContext->m_TargetRenderCts = *pTime;
+    pStreamContext->m_pOutputPlane = pOutputPlane;
+    pStreamContext->m_bForceRender = bForceRender;
+    pStreamContext->m_uiRenderError = M4NO_ERROR;
+    pStreamContext->m_bDataRenderPending = M4OSA_TRUE;
+
+    /* Launch Render process in synchronous mode */
+    while(pStreamContext->m_uiRenderError == M4NO_ERROR)
+    {
+        M4DECODER_EXTERNAL_AsynchronousRender(pVS_Context);
+        /* return code is ignored, it is used only in M4OSA_Thread */
+    }
+
+
+    *pTime = pStreamContext->m_CurrentRenderCts;
+
+
+    if (M4WAR_RENDER_FINISHED == pStreamContext->m_uiRenderError)
+    {
+        pStreamContext->m_uiRenderError = M4NO_ERROR;
+    }
+
+    return pStreamContext->m_uiRenderError;
+}
+
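+/* Illustrative call sequence on the service side (sketch; 'pItf', 'ctx' and the
+ * other variable names are hypothetical):
+ *
+ *     M4_MediaTime time = targetCts;
+ *     err = pItf->m_pFctDecode(ctx, &time, bJump);
+ *     if (M4NO_ERROR == err)   // do not call render when NO_NEW_FRAME was returned
+ *         err = pItf->m_pFctRender(ctx, &time, pPlane, M4OSA_FALSE);
+ */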
+
+/* ___________________________________________________________________ */
+/*|                                                                   |*/
+/*|                        Signaling functions                        |*/
+/*|___________________________________________________________________|*/
+
+/**
+ ************************************************************************
+ * @brief   Called by the HW video decoder to signal that a decoding is
+ *          over
+ * @note    The function gets another AU in the internal AU buffer, and
+ *          launches the decoding.
+ *          If no more AUs are available, the M4DECODER_EXTERNAL_decode
+ *          (or M4DECODER_EXTERNAL_render if threaded) function is unlocked
+ *
+ * @param   pVS_Context:    (IN)    context of the video hw shell
+ * @param   aTime:          (IN)    time of the decoded frame
+ * @param   aUserError      (IN)    error code returned to the VPS
+ *
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_HW_DECODER_xxx    A fatal error occurred
+ * @return  M4ERR_PARAMETER         At least one parameter is NULL
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_signalDecoderOver(M4OSA_Context pVS_Context,
+                                                      M4_MediaTime aTime, M4OSA_ERR aUserError)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_TRACE2_1("M4DECODER_EXTERNAL_signalDecoderOver : aTime = %lf", aTime);
+
+    pStreamContext->m_NbDecodedFrames++;
+    pStreamContext->m_uiDecodeError = aUserError;
+    pStreamContext->m_CurrentDecodeCts = aTime;
+
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+    /* give control back to stepDecode */
+    //M4semvalue++;
+    //printf("Semaphore post: %d\n", M4semvalue);
+    M4OSA_semaphorePost(pStreamContext->m_SemSync);
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Called by the HW video renderer to signal that a rendering is
+ *          over
+ * @note    The function just posts a semaphore to unblock the
+ *          M4DECODER_EXTERNAL_render function
+ *
+ * @param   pVS_Context:    (IN)    context of the video hw shell
+ * @param   aTime:          (IN)    time of the decoded frame
+ * @param   aUserError      (IN)    error code returned to the VPS
+ *
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_HW_DECODER_xxx    A fatal error occurred
+ * @return  M4ERR_PARAMETER         At least one parameter is NULL
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_signalRenderOver(M4OSA_Context pVS_Context,
+                                                     M4_MediaTime aTime, M4OSA_ERR aUserError)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_TRACE3_1("M4DECODER_EXTERNAL_signalRenderOver : aTime = %lf", aTime);
+
+    pStreamContext->m_uiRenderError = aUserError;
+    pStreamContext->m_CurrentRenderCts = aTime;
+
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+    /* give control back to stepRender */
+    //M4semvalue++;
+    //printf("Semaphore post: %d\n", M4semvalue);
+    M4OSA_semaphorePost(pStreamContext->m_SemSync);
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+
+    return M4NO_ERROR;
+}
+
+
+/* ___________________________________________________________________ */
+/*|                                                                   |*/
+/*|                            Internals                              |*/
+/*|___________________________________________________________________|*/
+
+/**
+ ************************************************************************
+ * @brief    Initializes the video decoder shell/handler
+ * @note     allocates an execution context
+ *
+ * @param    pVS_Context:    (OUT)   Output context allocated
+ * @param    p_HWInterface:  (IN)    Pointer to the set of external HW codec functions
+ * @param    pStreamHandler: (IN)    Pointer to a video stream description
+ *
+ * @return   M4NO_ERROR     There is no error
+ * @return   M4ERR_ALLOC    There is no more available memory
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_Init(M4OSA_Context* pVS_Context,
+                                         M4VD_Interface* p_HWInterface,
+                                         M4_StreamHandler *pStreamHandler)
+{
+    M4VS_VideoDecoder_Context* pStreamContext;
+
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_Init");
+
+    /* Allocate the internal context */
+    *pVS_Context = M4OSA_NULL;
+
+    pStreamContext = (M4VS_VideoDecoder_Context*)M4OSA_malloc(sizeof(M4VS_VideoDecoder_Context),
+         M4DECODER_EXTERNAL,(M4OSA_Char *) "M4VS_VideoDecoder_Context");
+    if (M4OSA_NULL == pStreamContext)
+    {
+        M4OSA_TRACE1_0("M4DECODER_EXTERNAL_Init : error, cannot allocate context !");
+        return M4ERR_ALLOC;
+    }
+
+    /* Reset internal context structure */
+    *pVS_Context = pStreamContext;
+
+    /* --- READER --- */
+    pStreamContext->m_pReader = M4OSA_NULL;
+    pStreamContext->m_pNextAccessUnitToDecode = M4OSA_NULL;
+    pStreamContext->m_bJump = M4OSA_FALSE;
+    pStreamContext->m_nextAUCts = -1;
+
+    /* --- DECODER --- */
+    pStreamContext->m_DecodeUpToCts = -1;
+    pStreamContext->m_CurrentDecodeCts = -1;
+    pStreamContext->m_NbDecodedFrames = 0;
+    pStreamContext->m_uiDecodeError = M4NO_ERROR;
+    pStreamContext->m_bDataDecodePending = M4OSA_FALSE;
+    pStreamContext->m_PreviousDecodeCts = 0;
+    pStreamContext->m_bIsWaitNextDecode = M4OSA_FALSE;
+
+    /* --- RENDER --- */
+    pStreamContext->m_TargetRenderCts = -1;
+    pStreamContext->m_CurrentRenderCts = -1;
+    pStreamContext->m_uiRenderError = M4NO_ERROR;
+    pStreamContext->m_bForceRender = M4OSA_TRUE;
+    pStreamContext->m_bDataRenderPending = M4OSA_FALSE;
+
+    /* --- STREAM PARAMS --- */
+    pStreamContext->m_pVideoStreamhandler = (M4_VideoStreamHandler*)pStreamHandler;
+    pStreamContext->m_pStreamInfo = M4OSA_NULL;
+    pStreamContext->m_pOutputPlane = M4OSA_NULL;
+
+    /* --- VD API --- */
+    pStreamContext->m_VD_Interface = p_HWInterface;
+    pStreamContext->m_VD_Context = M4OSA_NULL;
+
+    pStreamContext->m_VD_SignalingInterface.m_pSignalTarget = pStreamContext;
+    pStreamContext->m_VD_SignalingInterface.m_pFctSignalDecoderOver =
+         M4DECODER_EXTERNAL_signalDecoderOver;
+    pStreamContext->m_VD_SignalingInterface.m_pFctSignalRenderOver =
+         M4DECODER_EXTERNAL_signalRenderOver;
+
+    /* --- THREAD STUFF --- */
+
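+    /* In the default build (M4DECODER_EXTERNAL_SYNC_EXT_DECODE not defined), m_SemSync
+       is used to block the shell until the external codec signals completion through
+       the signalDecoderOver / signalRenderOver callbacks */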
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+    pStreamContext->m_SemSync = M4OSA_NULL;
+    //M4semvalue=0;
+    err = M4OSA_semaphoreOpen(&(pStreamContext->m_SemSync), 0);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_Init: can't open sync semaphore (err 0x%08X)", err);
+        return err;
+    }
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   Fills the stream info structure
+ * @note    This function is called at the decoder's creation time; it
+ *          allocates and fills the video info structure
+ *
+ * @param   ppStreamInfo    (OUT)   Video info structure
+ * @param   pStreamHandler  (IN)    Pointer to a video stream description
+ *
+ * @return  M4ERR_ALLOC     Memory allocation error
+ * @return  M4NO_ERROR      There is no error
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_StreamDescriptionInit(M4VD_StreamInfo** ppStreamInfo,
+                                                          M4_StreamHandler *pStreamHandler)
+{
+    M4_VideoStreamHandler* pVideoStreamHandler  = M4OSA_NULL;
+
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_StreamDescriptionInit");
+
+    pVideoStreamHandler = (M4_VideoStreamHandler*)pStreamHandler;
+
+    /* M4VD_StreamInfo allocation */
+    *ppStreamInfo = (M4VD_StreamInfo*)M4OSA_malloc(sizeof(M4VD_StreamInfo),
+         M4DECODER_EXTERNAL, (M4OSA_Char *)"M4VD_StreamInfo");
+    if(M4OSA_NULL == *ppStreamInfo)
+    {
+        return M4ERR_ALLOC;
+    }
+
+    /* init values */
+    (*ppStreamInfo)->anImageSize.aWidth  = pVideoStreamHandler->m_videoWidth;
+    (*ppStreamInfo)->anImageSize.aHeight = pVideoStreamHandler->m_videoHeight;
+
+    (*ppStreamInfo)->decoderConfiguration.pBuffer =
+         (M4OSA_MemAddr8)pStreamHandler->m_pDecoderSpecificInfo;
+    (*ppStreamInfo)->decoderConfiguration.aSize   = pStreamHandler->m_decoderSpecificInfoSize;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Initializes current AU parameters
+ * @note    It is called at decoder's creation time to initialize
+ *          current decoder's AU.
+ *
+ * @param   pVS_Context (IN)    Context of the video decoder shell
+ * @param   pReader     (IN)    Reader interface
+ * @param   pAccessUnit (IN)    Access Unit structure used by the decoder
+ *
+ * @return  M4NO_ERROR          There is no error
+ * @return  M4ERR_PARAMETER     At least one parameter is NULL (in DEBUG only)
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_SetUpReadInput(M4OSA_Context pVS_Context,
+                                                    M4READER_DataInterface* pReader,
+                                                    M4_AccessUnit* pAccessUnit)
+{
+    M4VS_VideoDecoder_Context* pStreamContext=(M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_SetUpReadInput");
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pStreamContext), M4ERR_PARAMETER,
+         "M4DECODER_EXTERNAL_SetUpReadInput: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pReader),        M4ERR_PARAMETER,
+         "M4DECODER_EXTERNAL_SetUpReadInput: invalid pReader pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pAccessUnit),    M4ERR_PARAMETER,
+         "M4DECODER_EXTERNAL_SetUpReadInput: invalid pAccessUnit pointer");
+
+    pStreamContext->m_pReader = pReader;
+    pStreamContext->m_pNextAccessUnitToDecode = pAccessUnit;
+
+    pAccessUnit->m_streamID = 0;
+    pAccessUnit->m_size = 0;
+    pAccessUnit->m_CTS = 0;
+    pAccessUnit->m_DTS = 0;
+    pAccessUnit->m_attribute = 0;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Gets the next AU from internal AU buffer
+ * @note    This function is necessary to be able to have a decodeUpTo
+ *          interface with the VPS.
+ *          The AU are read from file by M4DECODER_EXTERNAL_decode function
+ *          and stored into a buffer. This function is called internally
+ *          to get these stored AU.
+ *
+ * @param   pStreamContext: (IN)        context of the video hw shell
+ * @param   pStreamContext: (IN)        context of the video hw shell
+ * @param   nextBuffer:     (OUT)       buffer filled with the AU data
+ * @param   nextFrameTime:  (IN/OUT)    time of the AU
+ * @return  M4NO_ERROR          There is no error
+ * @return  M4WAR_NO_MORE_AU    No more AU in internal buffer
+ * @return  M4ERR_PARAMETER     One invalid parameter
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_GetNextAu(M4VS_VideoDecoder_Context* pStreamContext,
+                                                 M4VD_VideoBuffer *nextBuffer,
+                                                 M4_MediaTime* nextFrameTime)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4_AccessUnit* pAccessUnit;
+
+    M4OSA_TRACE3_0("M4DECODER_EXTERNAL_GetNextAu");
+
+    /* Check context is valid */
+    if(M4OSA_NULL == pStreamContext)
+    {
+        M4OSA_TRACE1_0("M4DECODER_EXTERNAL_GetNextAu : error pStreamContext is NULL");
+        return M4ERR_PARAMETER;
+    }
+
+    /* Read the AU */
+    pAccessUnit = pStreamContext->m_pNextAccessUnitToDecode;
+
+    err = pStreamContext->m_pReader->m_pFctGetNextAu(pStreamContext->m_pReader->m_readerContext,
+         (M4_StreamHandler*)pStreamContext->m_pVideoStreamhandler, pAccessUnit);
+
+    if((err == M4WAR_NO_DATA_YET) || (err == M4WAR_NO_MORE_AU))
+    {
+        M4OSA_TRACE2_1("M4DECODER_EXTERNAL_GetNextAu : no data available 0x%x", err);
+    }
+    else if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_GetNextAu : filesystem error 0x%x", err);
+
+        *nextFrameTime         = 0;
+        nextBuffer->pBuffer    = M4OSA_NULL;
+        nextBuffer->bufferSize = 0;
+
+        return err;
+    }
+
+    /* Fill buffer */
+    *nextFrameTime         = pAccessUnit->m_CTS;
+    nextBuffer->pBuffer    = (M4OSA_MemAddr32)pAccessUnit->m_dataAddress;
+    nextBuffer->bufferSize = pAccessUnit->m_size;
+
+    M4OSA_TRACE3_1("M4DECODER_EXTERNAL_GetNextAu: AU obtained, time is %f", *nextFrameTime);
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief    Gets the next AU and decodes it, waiting for the external decoder to complete
+ * @note     One step of the decode-up-to loop run by M4DECODER_EXTERNAL_decode
+ *
+ * @param    pVS_Context:    (IN)    Context of the video hw shell
+ *
+ * @return    M4NO_ERROR        There is no error
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_SynchronousDecode(M4OSA_Context pVS_Context)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4VD_VideoBuffer nextBuffer;
+
+
+    /* ----- decode process ----- */
+
+    if(M4OSA_TRUE == pStreamContext->m_bDataDecodePending)
+    {
+        /* Targeted time is reached */
+        if( pStreamContext->m_CurrentDecodeCts >= pStreamContext->m_DecodeUpToCts )
+        {
+            M4OSA_TRACE2_0("M4DECODER_EXTERNAL_SynchronousDecode :\
+                 skip decode because synchronisation");
+
+            if(pStreamContext->m_NbDecodedFrames > 0)
+            {
+                pStreamContext->m_uiDecodeError = M4WAR_DECODE_FINISHED;
+            }
+            else
+            {
+                pStreamContext->m_uiDecodeError = M4WAR_VIDEORENDERER_NO_NEW_FRAME;
+            }
+
+            M4ERR_EXIT(M4NO_ERROR);
+        }
+
+        pStreamContext->m_PreviousDecodeCts = pStreamContext->m_CurrentDecodeCts;
+
+        /* Get the next AU */
+        pStreamContext->m_uiDecodeError = M4DECODER_EXTERNAL_GetNextAu(pStreamContext,
+             &nextBuffer, &pStreamContext->m_CurrentDecodeCts);
+
+        if( M4NO_ERROR != pStreamContext->m_uiDecodeError )
+        {
+            if ( M4WAR_NO_MORE_AU != pStreamContext->m_uiDecodeError)
+            {
+                M4OSA_TRACE1_1("M4DECODER_EXTERNAL_SynchronousDecode :\
+                     M4DECODER_EXTERNAL_GetNextAu error 0x%x", pStreamContext->m_uiDecodeError);
+            }
+            M4ERR_EXIT(pStreamContext->m_uiDecodeError);
+        }
+
+        /* Decode the AU */
+        if(nextBuffer.bufferSize > 0)
+        {
+            pStreamContext->m_uiDecodeError =
+                 pStreamContext->m_VD_Interface->m_pFctStepDecode(pStreamContext->m_VD_Context,
+                     &nextBuffer, pStreamContext->m_CurrentDecodeCts);
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+            if ( (M4NO_ERROR == pStreamContext->m_uiDecodeError)
+                /*|| (M4WAR_IO_PENDING == pStreamContext->m_uiDecodeError)*/ )
+            {
+                /* wait for decode to complete */
+                //M4semvalue--;
+                //printf("Semaphore wait 2: %d\n", M4semvalue);
+                M4OSA_semaphoreWait(pStreamContext->m_SemSync, M4OSA_WAIT_FOREVER);
+                /* by now the actual m_uiDecodeError has been set by signalDecode */
+            }
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+            if(M4NO_ERROR != pStreamContext->m_uiDecodeError)
+            {
+                M4OSA_TRACE1_1("M4DECODER_EXTERNAL_SynchronousDecode : HW decoder error 0x%x",
+                     pStreamContext->m_uiDecodeError);
+                M4ERR_EXIT(M4NO_ERROR);
+            }
+        }
+        else
+        {
+            M4ERR_EXIT(M4NO_ERROR);
+        }
+    }
+
+    return M4NO_ERROR;
+
+
+/* ----- Release resources if an error occurred */
+exit_with_error:
+
+    /* Abort decoding */
+    pStreamContext->m_bDataDecodePending = M4OSA_FALSE;
+
+    if((M4NO_ERROR == pStreamContext->m_uiDecodeError) && (M4NO_ERROR != err))
+    {
+        pStreamContext->m_uiDecodeError = err;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief    Gets the next AU and launches its decoding without waiting for completion
+ * @note
+ *
+ * @param    pVS_Context:    (IN)    Context of the video hw shell
+ *
+ * @return    M4NO_ERROR        There is no error
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_AsynchronousDecode(M4OSA_Context pVS_Context)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4VD_VideoBuffer nextBuffer;
+
+
+    /* ----- decode process ----- */
+
+    if(M4OSA_TRUE == pStreamContext->m_bDataDecodePending)
+    {
+        pStreamContext->m_PreviousDecodeCts = pStreamContext->m_CurrentDecodeCts;
+
+        /* Get the next AU */
+        pStreamContext->m_uiDecodeError = M4DECODER_EXTERNAL_GetNextAu(pStreamContext,
+             &nextBuffer, &pStreamContext->m_nextAUCts);
+
+        if( M4NO_ERROR != pStreamContext->m_uiDecodeError )
+        {
+            if ( M4WAR_NO_MORE_AU != pStreamContext->m_uiDecodeError)
+            {
+                M4OSA_TRACE1_1("M4DECODER_EXTERNAL_AsynchronousDecode :\
+                     M4DECODER_EXTERNAL_GetNextAu error 0x%x", pStreamContext->m_uiDecodeError);
+            }
+            //M4semvalue++;
+            //printf("Semaphore post: %d\n", M4semvalue);
+            //M4OSA_semaphorePost(pStreamContext->m_SemSync);
+            M4ERR_EXIT(pStreamContext->m_uiDecodeError);
+        }
+
+        /* Decode the AU if needed */
+        if(nextBuffer.bufferSize > 0)
+        {
+            pStreamContext->m_uiDecodeError =
+                 pStreamContext->m_VD_Interface->m_pFctStepDecode(pStreamContext->m_VD_Context,
+                    &nextBuffer, pStreamContext->m_nextAUCts\
+                        /*pStreamContext->m_CurrentDecodeCts*/);
+            if(M4NO_ERROR != pStreamContext->m_uiDecodeError)
+            {
+                M4OSA_TRACE1_1("M4DECODER_EXTERNAL_AsynchronousDecode : HW decoder error 0x%x",
+                     pStreamContext->m_uiDecodeError);
+                M4ERR_EXIT(M4NO_ERROR);
+            }
+            pStreamContext->m_bIsWaitNextDecode = M4OSA_TRUE;
+        }
+        else
+        {
+            M4ERR_EXIT(M4NO_ERROR);
+        }
+    }
+
+    return M4NO_ERROR;
+
+
+/* ----- Release resources if an error occurred */
+exit_with_error:
+
+    /* Abort decoding */
+    pStreamContext->m_bDataDecodePending = M4OSA_FALSE;
+
+    if((M4NO_ERROR == pStreamContext->m_uiDecodeError) && (M4NO_ERROR != err))
+    {
+        pStreamContext->m_uiDecodeError = err;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief    Renders the current decoded frame and launches the pre-decoding of the next one.
+ * @note     Unless M4DECODER_EXTERNAL_SYNC_EXT_DECODE is defined, the function waits on the
+ *           synchronisation semaphore until the render-complete signal has been received.
+ *
+ * @param    pVS_Context:    (IN)    Context of the video hw shell
+ *
+ * @return    M4NO_ERROR        There is no error
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_AsynchronousRender(M4OSA_Context pVS_Context)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_ERR err = M4NO_ERROR;
+
+
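+    /* Flow: copy the last decoded CTS, ask the external decoder to render into
+     * m_pOutputPlane, wait on m_SemSync until the render-complete signal has set
+     * m_uiRenderError (unless M4DECODER_EXTERNAL_SYNC_EXT_DECODE is defined), then
+     * launch the asynchronous pre-decoding of the next frame. */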
+    /* ----- Render one frame ----- */
+
+    if(M4OSA_TRUE == pStreamContext->m_bDataRenderPending)
+    {
+#if 0
+        if (!pStreamContext->m_bForceRender)
+        {
+            /* Targeted time is reached */
+            if(pStreamContext->m_TargetRenderCts - pStreamContext->m_CurrentRenderCts < 1.0)
+             /* some +0.5 issues */
+            {
+                M4OSA_TRACE2_0("M4DECODER_EXTERNAL_AsynchronousRender :\
+                     skip render because synchronisation");
+                pStreamContext->m_uiRenderError = M4WAR_RENDER_FINISHED;
+
+                M4ERR_EXIT(M4NO_ERROR);
+            }
+
+            if ( (M4WAR_NO_MORE_AU == pStreamContext->m_uiDecodeError)
+                && (pStreamContext->m_CurrentDecodeCts \
+                    - pStreamContext->m_CurrentRenderCts < 1.0) )
+            {
+                pStreamContext->m_uiRenderError = M4WAR_RENDER_FINISHED;
+                M4ERR_EXIT(M4NO_ERROR);
+            }
+
+            if(pStreamContext->m_NbDecodedFrames == 0)
+            {
+                pStreamContext->m_uiRenderError = M4WAR_VIDEORENDERER_NO_NEW_FRAME;
+                M4ERR_EXIT(M4NO_ERROR);
+            }
+        }
+#endif
+        /* Render the frame */
+        pStreamContext->m_CurrentRenderCts = pStreamContext->m_CurrentDecodeCts;
+
+        pStreamContext->m_uiRenderError =
+             pStreamContext->m_VD_Interface->m_pFctStepRender(pStreamContext->m_VD_Context,
+                 pStreamContext->m_pOutputPlane, pStreamContext->m_CurrentRenderCts);
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+        if ( (M4NO_ERROR == pStreamContext->m_uiRenderError)
+            /* || (M4WAR_IO_PENDING == pStreamContext->m_uiRenderError) */ )
+        {
+            /* wait for render to complete */
+            //M4semvalue--;
+            //printf("Semaphore wait: %d\n", M4semvalue);
+            M4OSA_semaphoreWait(pStreamContext->m_SemSync, M4OSA_WAIT_FOREVER);
+            /* by now the actual m_uiRenderError has been set by signalRender */
+        }
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+        if(M4NO_ERROR != pStreamContext->m_uiRenderError)
+        {
+            M4OSA_TRACE1_1("M4DECODER_EXTERNAL_AsynchronousRender : HW render error 0x%x", err);
+            pStreamContext->m_bDataRenderPending = M4OSA_FALSE;
+
+            return M4NO_ERROR;
+        }
+
+        /* Launch in asynchronous mode the predecoding of the next frame */
+        pStreamContext->m_NbDecodedFrames = 0;
+        pStreamContext->m_uiDecodeError = M4NO_ERROR;
+        pStreamContext->m_bDataDecodePending = M4OSA_TRUE;
+        M4DECODER_EXTERNAL_AsynchronousDecode(pVS_Context);
+
+        pStreamContext->m_uiRenderError = M4WAR_RENDER_FINISHED;
+    }
+
+    return M4NO_ERROR;
+
+
+/* ----- Release resources if an error occurred */
+exit_with_error:
+
+    /* Abort the rendering */
+    pStreamContext->m_bDataRenderPending = M4OSA_FALSE;
+
+    if((M4NO_ERROR == pStreamContext->m_uiRenderError) && (M4NO_ERROR != err))
+    {
+        pStreamContext->m_uiRenderError = err;
+    }
+
+
+    return err;
+}
+
diff --git a/libvideoeditor/vss/src/M4VD_Tools.c b/libvideoeditor/vss/src/M4VD_Tools.c
new file mode 100644
index 0000000..4a737b2
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VD_Tools.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+
+#include "M4VD_Tools.h"
+
+/**
+ ************************************************************************
+ * @file   M4VD_Tools.c
+ * @brief  Bit-level read and write helpers
+ * @note   This file implements helper functions for the bitstream parser
+ ************************************************************************
+ */
+
+M4OSA_UInt32 M4VD_Tools_GetBitsFromMemory(M4VS_Bitstream_ctxt* parsingCtxt,
+     M4OSA_UInt32 nb_bits)
+{
+    M4OSA_UInt32    code;
+    M4OSA_UInt32    i;
+    code = 0;
+    for (i = 0; i < nb_bits; i++)
+    {
+        if (parsingCtxt->stream_index == 8)
+        {
+            //M4OSA_memcpy( (M4OSA_MemAddr8)&(parsingCtxt->stream_byte), parsingCtxt->in,
+            //     sizeof(unsigned char));
+            parsingCtxt->stream_byte = (unsigned char)(parsingCtxt->in)[0];
+            parsingCtxt->in++;
+            //fread(&stream_byte, sizeof(unsigned char),1,in);
+            parsingCtxt->stream_index = 0;
+        }
+        code = (code << 1);
+        code |= ((parsingCtxt->stream_byte & 0x80) >> 7);
+
+        parsingCtxt->stream_byte = (parsingCtxt->stream_byte << 1);
+        parsingCtxt->stream_index++;
+    }
+
+    return code;
+}
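+
+/* Illustrative usage (not part of the original code): assuming parsingCtxt.in points to the
+ * start of the buffer and parsingCtxt.stream_index == 8, successive calls return bit fields
+ * MSB first, e.g.:
+ *
+ *     M4OSA_UInt32 startCode = M4VD_Tools_GetBitsFromMemory(&parsingCtxt, 32);
+ *     M4OSA_UInt32 profile   = M4VD_Tools_GetBitsFromMemory(&parsingCtxt, 8);
+ *
+ * stream_index counts the bits already consumed in the current byte; the value 8 triggers
+ * the fetch of the next byte from the input buffer. */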
+
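+/* Writes the nb_bits least-significant bits of bitsToWrite into the 32-bit word pointed to
+ * by dest_bits, starting at bit position 'offset'. Bits are laid out MSB-first within each
+ * byte, walking through the three low-order bytes of *dest_bits; bits outside the written
+ * field are preserved, so offset + nb_bits must not exceed 24. */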
+M4OSA_ERR M4VD_Tools_WriteBitsToMemory(M4OSA_UInt32 bitsToWrite,
+                                     M4OSA_MemAddr32 dest_bits,
+                                     M4OSA_UInt8 offset, M4OSA_UInt8 nb_bits)
+{
+    M4OSA_UInt8 i,j;
+    M4OSA_UInt32 temp_dest = 0, mask = 0, temp = 1;
+    M4OSA_UInt32 input = bitsToWrite;
+    input = (input << (32 - nb_bits - offset));
+
+    /* Put destination buffer to 0 */
+    for(j=0;j<3;j++)
+    {
+        for(i=0;i<8;i++)
+        {
+            if((j*8)+i >= offset && (j*8)+i < nb_bits + offset)
+            {
+                mask |= (temp << ((7*(j+1))-i+j));
+            }
+        }
+    }
+    mask = ~mask;
+    *dest_bits &= mask;
+
+    /* Parse input bits, and fill output buffer */
+    for(j=0;j<3;j++)
+    {
+        for(i=0;i<8;i++)
+        {
+            if((j*8)+i >= offset && (j*8)+i < nb_bits + offset)
+            {
+                temp = ((input & (0x80000000 >> offset)) >> (31-offset));
+                //*dest_bits |= (temp << (31 - i));
+                *dest_bits |= (temp << ((7*(j+1))-i+j));
+                input = (input << 1);
+            }
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+
+
diff --git a/libvideoeditor/vss/src/M4VIFI_xVSS_RGB565toYUV420.c b/libvideoeditor/vss/src/M4VIFI_xVSS_RGB565toYUV420.c
new file mode 100755
index 0000000..8f00d08
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VIFI_xVSS_RGB565toYUV420.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file     M4VIFI_xVSS_RGB565toYUV420.c
+ * @brief    Contains video library functions
+ * @note     Color Conversion Filter
+ *           -# Contains the format conversion filters from RGB565 to YUV420
+ ******************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include    "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include    "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include    "M4VIFI_Clip.h"
+
+
+/**
+ ******************************************************************************
+ * M4VIFI_UInt8 M4VIFI_xVSS_RGB565toYUV420(void *pUserData,
+ *                                         M4VIFI_ImagePlane *pPlaneIn,
+ *                                         M4VIFI_ImagePlane *pPlaneOut)
+ * @brief   Transforms an RGB565 image into a YUV420 image.
+ * @note    Convert RGB565 to YUV420,
+ *          Loop on each row ( 2 rows by 2 rows )
+ *              Loop on each column ( 2 col by 2 col )
+ *                  Get 4 RGB samples from input data and build 4 output Y samples
+ *                  and each single U & V data
+ *              end loop on col
+ *          end loop on row
+ * @param   pUserData: (IN) User specific data (unused by this function)
+ * @param   pPlaneIn: (IN) Pointer to the RGB565 plane
+ * @param   pPlaneOut: (OUT) Pointer to the YUV420 output planes (Y, U, V)
+ * @return  M4VIFI_OK: there is no error
+ * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
+ * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  YUV Plane width is ODD
+ ******************************************************************************
+*/
+M4VIFI_UInt8    M4VIFI_xVSS_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+                                                      M4VIFI_ImagePlane *pPlaneOut)
+{
+    M4VIFI_UInt32   u32_width, u32_height;
+    M4VIFI_UInt32   u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V;
+    M4VIFI_UInt32   u32_stride_rgb, u32_stride_2rgb;
+    M4VIFI_UInt32   u32_col, u32_row;
+
+    M4VIFI_Int32    i32_r00, i32_r01, i32_r10, i32_r11;
+    M4VIFI_Int32    i32_g00, i32_g01, i32_g10, i32_g11;
+    M4VIFI_Int32    i32_b00, i32_b01, i32_b10, i32_b11;
+    M4VIFI_Int32    i32_y00, i32_y01, i32_y10, i32_y11;
+    M4VIFI_Int32    i32_u00, i32_u01, i32_u10, i32_u11;
+    M4VIFI_Int32    i32_v00, i32_v01, i32_v10, i32_v11;
+    M4VIFI_UInt8    *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
+    M4VIFI_UInt8    *pu8_y_data, *pu8_u_data, *pu8_v_data;
+    M4VIFI_UInt8    *pu8_rgbn_data, *pu8_rgbn;
+    M4VIFI_UInt16   u16_pix1, u16_pix2, u16_pix3, u16_pix4;
+    M4VIFI_UInt8 count_null=0;
+
+    /* Check that the plane heights are consistent */
+    if( (pPlaneIn->u_height != pPlaneOut[0].u_height)           ||
+        (pPlaneOut[0].u_height != (pPlaneOut[1].u_height<<1))   ||
+        (pPlaneOut[0].u_height != (pPlaneOut[2].u_height<<1)))
+    {
+        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+    }
+
+    /* Check that the plane widths are consistent */
+    if( (pPlaneIn->u_width != pPlaneOut[0].u_width)         ||
+        (pPlaneOut[0].u_width != (pPlaneOut[1].u_width<<1)) ||
+        (pPlaneOut[0].u_width != (pPlaneOut[2].u_width<<1)))
+    {
+        return M4VIFI_ILLEGAL_FRAME_WIDTH;
+    }
+
+    /* Set the pointer to the beginning of the output data buffers */
+    pu8_y_data = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
+    pu8_u_data = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
+    pu8_v_data = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
+
+    /* Set the pointer to the beginning of the input data buffers */
+    pu8_rgbn_data   = pPlaneIn->pac_data + pPlaneIn->u_topleft;
+
+    /* Get the size of the output image */
+    u32_width = pPlaneOut[0].u_width;
+    u32_height = pPlaneOut[0].u_height;
+
+    /* Set the size of the memory jumps corresponding to row jump in each output plane */
+    u32_stride_Y = pPlaneOut[0].u_stride;
+    u32_stride2_Y = u32_stride_Y << 1;
+    u32_stride_U = pPlaneOut[1].u_stride;
+    u32_stride_V = pPlaneOut[2].u_stride;
+
+    /* Set the size of the memory jumps corresponding to row jump in input plane */
+    u32_stride_rgb = pPlaneIn->u_stride;
+    u32_stride_2rgb = u32_stride_rgb << 1;
+
+
+    /* Loop on each row of the output image, input coordinates are estimated from output ones */
+    /* Two YUV rows are computed at each pass */
+    for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
+    {
+        /* Current Y plane row pointers */
+        pu8_yn = pu8_y_data;
+        /* Next Y plane row pointers */
+        pu8_ys = pu8_yn + u32_stride_Y;
+        /* Current U plane row pointer */
+        pu8_u = pu8_u_data;
+        /* Current V plane row pointer */
+        pu8_v = pu8_v_data;
+
+        pu8_rgbn = pu8_rgbn_data;
+
+        /* Loop on each column of the output image */
+        for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
+        {
+            /* Get four RGB 565 samples from input data */
+            u16_pix1 = *( (M4VIFI_UInt16 *) pu8_rgbn);
+            u16_pix2 = *( (M4VIFI_UInt16 *) (pu8_rgbn + CST_RGB_16_SIZE));
+            u16_pix3 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb));
+            u16_pix4 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb + CST_RGB_16_SIZE));
+
+            /* Unpack RGB565 to 8bit R, G, B */
+#if 0
+            /* (x,y) */
+            GET_RGB565(i32_r00,i32_g00,i32_b00,u16_pix1);
+            /* (x+1,y) */
+            GET_RGB565(i32_r10,i32_g10,i32_b10,u16_pix2);
+            /* (x,y+1) */
+            GET_RGB565(i32_r01,i32_g01,i32_b01,u16_pix3);
+            /* (x+1,y+1) */
+            GET_RGB565(i32_r11,i32_g11,i32_b11,u16_pix4);
+#else
+            /* (x,y) */
+            GET_RGB565(i32_b00,i32_g00,i32_r00,u16_pix1);
+            /* (x+1,y) */
+            GET_RGB565(i32_b10,i32_g10,i32_r10,u16_pix2);
+            /* (x,y+1) */
+            GET_RGB565(i32_b01,i32_g01,i32_r01,u16_pix3);
+            /* (x+1,y+1) */
+            GET_RGB565(i32_b11,i32_g11,i32_r11,u16_pix4);
+#endif
+#if 1 /* Solution to avoid green effects due to transparency */
+            /* If RGB is transparent color (0, 63, 0), we transform it to white (31,63,31) */
+            if(i32_b00 == 0 && i32_g00 == 63 && i32_r00 == 0)
+            {
+                i32_b00 = 31;
+                i32_r00 = 31;
+            }
+            if(i32_b10 == 0 && i32_g10 == 63 && i32_r10 == 0)
+            {
+                i32_b10 = 31;
+                i32_r10 = 31;
+            }
+            if(i32_b01 == 0 && i32_g01 == 63 && i32_r01 == 0)
+            {
+                i32_b01 = 31;
+                i32_r01 = 31;
+            }
+            if(i32_b11 == 0 && i32_g11 == 63 && i32_r11 == 0)
+            {
+                i32_b11 = 31;
+                i32_r11 = 31;
+            }
+#endif
+            /* Convert RGB value to YUV */
+            i32_u00 = U16(i32_r00, i32_g00, i32_b00);
+            i32_v00 = V16(i32_r00, i32_g00, i32_b00);
+            /* luminance value */
+            i32_y00 = Y16(i32_r00, i32_g00, i32_b00);
+
+            i32_u10 = U16(i32_r10, i32_g10, i32_b10);
+            i32_v10 = V16(i32_r10, i32_g10, i32_b10);
+            /* luminance value */
+            i32_y10 = Y16(i32_r10, i32_g10, i32_b10);
+
+            i32_u01 = U16(i32_r01, i32_g01, i32_b01);
+            i32_v01 = V16(i32_r01, i32_g01, i32_b01);
+            /* luminance value */
+            i32_y01 = Y16(i32_r01, i32_g01, i32_b01);
+
+            i32_u11 = U16(i32_r11, i32_g11, i32_b11);
+            i32_v11 = V16(i32_r11, i32_g11, i32_b11);
+            /* luminance value */
+            i32_y11 = Y16(i32_r11, i32_g11, i32_b11);
+
+            /* Store luminance data */
+            pu8_yn[0] = (M4VIFI_UInt8)i32_y00;
+            pu8_yn[1] = (M4VIFI_UInt8)i32_y10;
+            pu8_ys[0] = (M4VIFI_UInt8)i32_y01;
+            pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
+#if 0 /* Temporary solution to avoid green effects due to transparency -> To be removed */
+            count_null = 4;
+            /* Store chroma data */
+            if(i32_b00 == 0 && i32_g00 == 63 && i32_r00 == 0)
+            {
+                i32_u00 = 0;
+                i32_v00 = 0;
+                count_null --;
+            }
+            if(i32_b10 == 0 && i32_g10 == 63 && i32_r10 == 0)
+            {
+                i32_u10 = 0;
+                i32_v10 = 0;
+                count_null --;
+            }
+            if(i32_b01 == 0 && i32_g01 == 63 && i32_r01 == 0)
+            {
+                i32_u01 = 0;
+                i32_v01 = 0;
+                count_null --;
+            }
+            if(i32_b11 == 0 && i32_g11 == 63 && i32_r11 == 0)
+            {
+                i32_u11 = 0;
+                i32_v11 = 0;
+                count_null --;
+            }
+
+            if(count_null == 0)
+            {
+#endif
+            *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
+            *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
+#if 0 /* Temporary solution to avoid green effects due to transparency -> To be removed */
+            }
+            else
+            {
+                *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) / count_null);
+                *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) / count_null);
+            }
+#endif
+            /* Prepare for next column */
+            pu8_rgbn += (CST_RGB_16_SIZE<<1);
+            /* Update current Y plane line pointer*/
+            pu8_yn += 2;
+            /* Update next Y plane line pointer*/
+            pu8_ys += 2;
+            /* Update U plane line pointer*/
+            pu8_u ++;
+            /* Update V plane line pointer*/
+            pu8_v ++;
+        } /* End of horizontal scanning */
+
+        /* Prepare pointers for the next row */
+        pu8_y_data += u32_stride2_Y;
+        pu8_u_data += u32_stride_U;
+        pu8_v_data += u32_stride_V;
+        pu8_rgbn_data += u32_stride_2rgb;
+
+
+    } /* End of vertical scanning */
+
+    return M4VIFI_OK;
+}
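+
+/* Illustrative call (sketch only, not part of the original code; rgbPlane and yuvPlanes are
+ * assumed to be set up by the caller): pPlaneOut must be an array of three
+ * M4VIFI_ImagePlane structures (Y, U, V) in which the Y plane matches the RGB565 input
+ * dimensions and the U and V planes are exactly half its width and height, otherwise
+ * M4VIFI_ILLEGAL_FRAME_WIDTH / M4VIFI_ILLEGAL_FRAME_HEIGHT is returned:
+ *
+ *     M4VIFI_ImagePlane yuvPlanes[3];
+ *     M4VIFI_UInt8 ret = M4VIFI_xVSS_RGB565toYUV420(M4OSA_NULL, &rgbPlane, yuvPlanes);
+ *
+ * pUserData is not used by this implementation, so M4OSA_NULL is passed here. */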
+/* End of file M4VIFI_xVSS_RGB565toYUV420.c */
+
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_AudioMixing.c b/libvideoeditor/vss/src/M4VSS3GPP_AudioMixing.c
new file mode 100755
index 0000000..6f6ba3c
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_AudioMixing.c
@@ -0,0 +1,4210 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_AudioMixing.c
+ * @brief    Video Studio Service 3GPP audio mixing implementation.
+ * @note
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our headers */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+/* Put the definition of silence frames here */
+#define M4VSS3GPP_SILENCE_FRAMES
+#include "M4VSS3GPP_InternalConfig.h"
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /**< OSAL memory management */
+#include "M4OSA_Debug.h"  /**< OSAL debug management */
+
+
+#include "gLVAudioResampler.h"
+/**
+ ******************************************************************************
+ * @brief    Static functions
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intAudioMixingOpen( M4VSS3GPP_InternalAudioMixingContext *pC,
+                             M4VSS3GPP_AudioMixingSettings *pSettings );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepVideo(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioMix(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioReplace(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyOrig(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyAdded(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingConvert(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingDoMixing(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingWriteSilence(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingTransition(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCreateVideoEncoder(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingDestroyVideoEncoder(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_Bool M4VSS3GPP_isThresholdBreached( M4OSA_Int32 *averageValue,
+                                                M4OSA_Int32 storeCount,
+                                                M4OSA_Int32 thresholdValue );
+/**
+ *    Internal warning */
+#define M4VSS3GPP_WAR_END_OF_ADDED_AUDIO    M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0030)
+
+/* A define used with SSRC 1.04 and above to avoid taking
+blocks smaller than the minimal block size */
+#define M4VSS_SSRC_MINBLOCKSIZE        600
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingInit(M4VSS3GPP_AudioMixingContext* pContext,
+ *                                     M4VSS3GPP_AudioMixingSettings* pSettings)
+ * @brief    Initializes the VSS audio mixing operation (allocates an execution context).
+ * @note
+ * @param    pContext        (OUT) Pointer to the VSS audio mixing context to allocate
+ * @param    pSettings        (IN) Pointer to valid audio mixing settings
+ * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
+ * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return    M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4VSS3GPP_audioMixingInit( M4VSS3GPP_AudioMixingContext *pContext,
+                                    M4VSS3GPP_AudioMixingSettings *pSettings,
+                                    M4OSA_FileReadPointer *pFileReadPtrFct,
+                                    M4OSA_FileWriterPointer *pFileWritePtrFct )
+{
+    M4VSS3GPP_InternalAudioMixingContext *pC;
+    M4OSA_ERR err;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_audioMixingInit called with pContext=0x%x, pSettings=0x%x",
+        pContext, pSettings);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingInit: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingInit: pSettings is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingInit: pFileReadPtrFct is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWritePtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingInit: pFileWritePtrFct is M4OSA_NULL");
+
+    if( pSettings->uiBeginLoop > pSettings->uiEndLoop )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_audioMixingInit: Begin loop time is higher than end loop time!");
+        return M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP;
+    }
+
+    /**
+    * Allocate the VSS audio mixing context and return it to the user */
+    pC = (M4VSS3GPP_InternalAudioMixingContext
+        *)M4OSA_malloc(sizeof(M4VSS3GPP_InternalAudioMixingContext),
+        M4VSS3GPP,(M4OSA_Char *)"M4VSS3GPP_InternalAudioMixingContext");
+    *pContext = pC;
+
+    if( M4OSA_NULL == pC )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_audioMixingInit(): unable to allocate \
+            M4VSS3GPP_InternalAudioMixingContext,returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /* Initialization of context Variables */
+    M4OSA_memset((M4OSA_MemAddr8)pC ,
+                 sizeof(M4VSS3GPP_InternalAudioMixingContext),0);
+    /**
+    * Copy this setting in context */
+    pC->iAddCts = pSettings->uiAddCts;
+    pC->bRemoveOriginal = pSettings->bRemoveOriginal;
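+    /* Audio ducking settings (naming assumption: BT = background/added track,
+       PT = primary/original track): InDucking_threshold is the level at which ducking
+       kicks in and InDucking_lowVolume the attenuation applied while it is active;
+       the level detection itself is done by M4VSS3GPP_isThresholdBreached(). */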
+    pC->b_DuckingNeedeed = pSettings->b_DuckingNeedeed;
+    pC->InDucking_threshold = pSettings->InDucking_threshold;
+    pC->fBTVolLevel = pSettings->fBTVolLevel;
+    pC->fPTVolLevel = pSettings->fPTVolLevel;
+    pC->InDucking_lowVolume = pSettings->InDucking_lowVolume;
+    pC->bDoDucking = M4OSA_FALSE;
+    pC->bLoop = pSettings->bLoop;
+    pC->bNoLooping = M4OSA_FALSE;
+    pC->bjumpflag = M4OSA_TRUE;
+    /**
+    * Init some context variables */
+
+    pC->pInputClipCtxt = M4OSA_NULL;
+    pC->pAddedClipCtxt = M4OSA_NULL;
+    pC->fOrigFactor = 1.0F;
+    pC->fAddedFactor = 0.0F;
+    pC->bSupportSilence = M4OSA_FALSE;
+    pC->bHasAudio = M4OSA_FALSE;
+    pC->bAudioMixingIsNeeded = M4OSA_FALSE;
+
+    /* Init pC->ewc members */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
+    pC->ewc.bVideoDataPartitioning = M4OSA_FALSE;
+    pC->ewc.pVideoOutputDsi = M4OSA_NULL;
+    pC->ewc.bActivateEmp = M4OSA_FALSE;
+    pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+    pC->ewc.uiNbChannels = 1;
+    pC->ewc.pAudioOutputDsi = M4OSA_NULL;
+    pC->ewc.pAudioEncCtxt = M4OSA_NULL;
+    pC->ewc.pAudioEncDSI.pInfo = M4OSA_NULL;
+    pC->ewc.pSilenceFrameData = M4OSA_NULL;
+    pC->ewc.pEncContext = M4OSA_NULL;
+    pC->ewc.pDummyAuBuffer = M4OSA_NULL;
+    pC->ewc.p3gpWriterContext = M4OSA_NULL;
+    /**
+    * Set the OSAL filesystem function set */
+    pC->pOsaFileReadPtr = pFileReadPtrFct;
+    pC->pOsaFileWritPtr = pFileWritePtrFct;
+
+    /**
+    * Ssrc stuff */
+    pC->b_SSRCneeded = M4OSA_FALSE;
+    pC->pSsrcBufferIn = M4OSA_NULL;
+    pC->pSsrcBufferOut = M4OSA_NULL;
+    pC->pTempBuffer = M4OSA_NULL;
+    pC->pPosInTempBuffer = M4OSA_NULL;
+    pC->pPosInSsrcBufferIn = M4OSA_NULL;
+    pC->pPosInSsrcBufferOut = M4OSA_NULL;
+    pC->SsrcScratch = M4OSA_NULL;
+    pC->uiBeginLoop = pSettings->uiBeginLoop;
+    pC->uiEndLoop = pSettings->uiEndLoop;
+
+    /*
+    * Reset pointers for media and codecs interfaces */
+    err = M4VSS3GPP_clearInterfaceTables(&pC->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /*  Call the media and codecs subscription module */
+    err = M4VSS3GPP_subscribeMediaAndCodec(&pC->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Open input clip, added clip and output clip and proceed with the settings */
+    err = M4VSS3GPP_intAudioMixingOpen(pC, pSettings);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Update main state automaton */
+    if( M4OSA_NULL != pC->pInputClipCtxt->pVideoStream )
+        pC->State = M4VSS3GPP_kAudioMixingState_VIDEO;
+    else
+        pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT;
+
+    pC->ewc.iOutputDuration = (M4OSA_Int32)pC->pInputClipCtxt->pSettings->
+        ClipProperties.uiClipDuration;
+    /*gInputParams.lvBTChannelCount*/
+    pC->pLVAudioResampler = (M4OSA_Int32)LVAudioResamplerCreate(16,
+        pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels,
+        /* gInputParams.lvOutSampleRate*/pSettings->outputASF, 1);
+    LVAudiosetSampleRate(pC->pLVAudioResampler,
+        /*gInputParams.lvInSampleRate*/
+        pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency);
+
+    LVAudiosetVolume(pC->pLVAudioResampler,
+                    (M4OSA_Int16)(0x1000 ),
+                    (M4OSA_Int16)(0x1000 ));
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_audioMixingInit(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
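+/* Illustrative call sequence (sketch only, not part of the original code; the settings
+ * structure and the OSAL file-function pointers are assumed to be filled in by the caller,
+ * and error handling is omitted):
+ *
+ *     M4VSS3GPP_AudioMixingContext ctxt;
+ *     M4OSA_UInt8 progress = 0;
+ *     M4OSA_ERR err = M4VSS3GPP_audioMixingInit(&ctxt, &settings,
+ *                                               pFileReadPtr, pFileWritePtr);
+ *     while (M4NO_ERROR == err)
+ *         err = M4VSS3GPP_audioMixingStep(ctxt, &progress);
+ *     if (M4VSS3GPP_WAR_END_OF_AUDIO_MIXING == err)
+ *         err = M4VSS3GPP_audioMixingCleanUp(ctxt);
+ */
+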
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingStep(M4VSS3GPP_AudioMixingContext pContext)
+ * @brief    Perform one step of audio mixing.
+ * @note
+ * @param     pContext          (IN) VSS audio mixing context
+ * @param     pProgress         (OUT) Progress percentage (0 to 100) of the audio mixing operation
+ * @return    M4NO_ERROR:       No error
+ * @return    M4ERR_PARAMETER:  pContext is M4OSA_NULL (debug only)
+ * @return    M4ERR_STATE:      VSS is not in an appropriate state for this function to be called
+ * @return    M4VSS3GPP_WAR_END_OF_AUDIO_MIXING: Audio mixing is over, user should now call
+ *                                               M4VSS3GPP_audioMixingCleanUp()
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_audioMixingStep( M4VSS3GPP_AudioMixingContext pContext,
+                                    M4OSA_UInt8 *pProgress )
+{
+    M4OSA_ERR err;
+    M4VSS3GPP_InternalAudioMixingContext *pC =
+        (M4VSS3GPP_InternalAudioMixingContext *)pContext;
+
+    M4OSA_TRACE3_1("M4VSS3GPP_audioMixingStep called with pContext=0x%x",
+        pContext);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingStep: pContext is M4OSA_NULL");
+
+    /**
+    * State automaton */
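+    /* Overall flow, as implemented by the cases below: VIDEO processes the video track
+     * until its AUs are exhausted, then moves to one of the AUDIO_xxx_SEGMENT states
+     * (or straight to FINISHED when there is no audio); the audio states run until the
+     * audio AUs are exhausted and then move to FINISHED, which reports 100% and returns
+     * M4VSS3GPP_WAR_END_OF_AUDIO_MIXING. */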
+    switch( pC->State )
+    {
+        case M4VSS3GPP_kAudioMixingState_VIDEO:
+            err = M4VSS3GPP_intAudioMixingStepVideo(pC);
+
+            /**
+            * Compute the progress percentage
+            * Note: audio and video CTS are not initialized before
+            * the call of M4VSS3GPP_intAudioMixingStepVideo */
+
+            /* P4ME00003276: First 0-50% segment is dedicated to state :
+               M4VSS3GPP_kAudioMixingState_VIDEO */
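+            /* e.g. a video AU at CTS 5000 ms in a 10000 ms clip yields 50*5000/10000 = 25% */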
+            *pProgress = (M4OSA_UInt8)(50 * (pC->ewc.WriterVideoAU.CTS)
+                / pC->pInputClipCtxt->pVideoStream->
+                m_basicProperties.m_duration);
+
+            /**
+            * There may be no audio track (Remove audio track feature).
+            * In that case we double the current percentage */
+            if( M4SYS_kAudioUnknown == pC->ewc.WriterAudioStream.streamType )
+            {
+                ( *pProgress) <<= 1; /**< x2 */
+            }
+            else if( *pProgress >= 50 )
+            {
+                *pProgress =
+                    49; /**< Video processing is not greater than 50% */
+            }
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                if( pC->bHasAudio )
+                {
+                    /**
+                    * Video is over, state transition to audio and return OK */
+                    if( pC->iAddCts > 0 )
+                        pC->State =
+                        M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT;
+                    else
+                        pC->State =
+                        M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT;
+                }
+                else
+                {
+                    /**
+                    * No audio, state transition to FINISHED */
+                    pC->State = M4VSS3GPP_kAudioMixingState_FINISHED;
+                }
+
+                return M4NO_ERROR;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_audioMixingStep: M4VSS3GPP_intAudioMixingStepVideo returns 0x%x!",
+                    err);
+                return err;
+            }
+            else
+            {
+                return M4NO_ERROR;
+            }
+            break;
+
+        case M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT:
+        case M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT:
+        case M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT:
+            if( pC->pAddedClipCtxt->iAudioFrameCts
+                != -pC->pAddedClipCtxt->iSilenceFrameDuration
+                && (pC->pAddedClipCtxt->iAudioFrameCts - 0.5)
+                / pC->pAddedClipCtxt->scale_audio > pC->uiEndLoop
+                && pC->uiEndLoop > 0 )
+            {
+            if(pC->bLoop == M4OSA_FALSE)
+            {
+                pC->bNoLooping = M4OSA_TRUE;
+            }
+            else
+            {
+                M4OSA_Int32 jumpCTS = (M4OSA_Int32)(pC->uiBeginLoop);
+
+                err = pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+                    pC->pAddedClipCtxt->pReaderContext,
+                    (M4_StreamHandler *)pC->pAddedClipCtxt->
+                    pAudioStream, &jumpCTS);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_audioMixingStep: error when jumping in added audio clip: 0x%x",
+                        err);
+                    return err;
+                }
+                /**
+                * Use offset to give a correct CTS ... */
+                pC->pAddedClipCtxt->iAoffset =
+                    (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+            }
+
+            }
+
+            if( M4OSA_FALSE == pC->bRemoveOriginal )
+            {
+                err = M4VSS3GPP_intAudioMixingStepAudioMix(pC);
+            }
+            else
+            {
+                err = M4VSS3GPP_intAudioMixingStepAudioReplace(pC);
+            }
+
+            /**
+            * Compute the progress percentage
+            * Note: audio and video CTS are not initialized before
+            * the call of M4VSS3GPP_intAudioMixingStepAudio */
+            if( 0 != pC->ewc.iOutputDuration )
+            {
+                /* P4ME00003276: Second 50-100% segment is dedicated to states :
+                M4VSS3GPP_kAudioMixingState_AUDIO... */
+                /* For Audio the progress computation is based on dAto and offset,
+                   it is more accurate */
+                *pProgress = (M4OSA_UInt8)(50
+                    + (50 * pC->ewc.dATo - pC->pInputClipCtxt->iVoffset)
+                    / (pC->ewc.iOutputDuration)); /**< 50 for 100/2 **/
+
+                if( *pProgress >= 100 )
+                {
+                    *pProgress =
+                        99; /**< It's not really finished, I prefer to return less than 100% */
+                }
+            }
+            else
+            {
+                *pProgress = 99;
+            }
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                /**
+                * Audio is over, state transition to FINISHED */
+                pC->State = M4VSS3GPP_kAudioMixingState_FINISHED;
+                return M4NO_ERROR;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_audioMixingStep: M4VSS3GPP_intAudioMixingStepAudio returns 0x%x!",
+                    err);
+                return err;
+            }
+            else
+            {
+                return M4NO_ERROR;
+            }
+            break;
+
+        case M4VSS3GPP_kAudioMixingState_FINISHED:
+
+            /**
+            * Progress percentage: finalize finished -> 100% */
+            *pProgress = 100;
+
+            /**
+            * Audio mixing is finished, return correct warning */
+            return M4VSS3GPP_WAR_END_OF_AUDIO_MIXING;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingStep: State error (0x%x)! Returning M4ERR_STATE",
+                pC->State);
+            return M4ERR_STATE;
+    }
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingCleanUp(M4VSS3GPP_AudioMixingContext pContext)
+ * @brief    Free all resources used by the VSS audio mixing operation.
+ * @note    The context is no more valid after this call
+ * @param    pContext            (IN) VSS audio mixing context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_audioMixingCleanUp( M4VSS3GPP_AudioMixingContext pContext )
+{
+    M4VSS3GPP_InternalAudioMixingContext *pC =
+        (M4VSS3GPP_InternalAudioMixingContext *)pContext;
+    M4OSA_ERR err;
+    M4OSA_UInt32 lastCTS;
+
+    M4OSA_TRACE3_1("M4VSS3GPP_audioMixingCleanUp called with pContext=0x%x",
+        pContext);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingCleanUp: pContext is M4OSA_NULL");
+
+    /**
+    * Check input parameter */
+    if( M4OSA_NULL == pContext )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_audioMixingCleanUp(): M4VSS3GPP_audioMixingCleanUp: pContext is\
+             M4OSA_NULL, returning M4ERR_PARAMETER");
+        return M4ERR_PARAMETER;
+    }
+
+    /**
+    * Close Input 3GPP file */
+    if( M4OSA_NULL != pC->pInputClipCtxt )
+    {
+        M4VSS3GPP_intClipCleanUp(pC->pInputClipCtxt);
+        pC->pInputClipCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Close Added 3GPP file */
+    if( M4OSA_NULL != pC->pAddedClipCtxt )
+    {
+        M4VSS3GPP_intClipCleanUp(pC->pAddedClipCtxt);
+        pC->pAddedClipCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Close the 3GP writer. In normal use case it has already been closed,
+      but not in abort use case */
+    if( M4OSA_NULL != pC->ewc.p3gpWriterContext )
+    {
+        /* Update last Video CTS */
+        lastCTS = pC->ewc.iOutputDuration;
+
+        err = pC->ShellAPI.pWriterGlobalFcts->pFctSetOption(
+            pC->ewc.p3gpWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxFileDuration, &lastCTS);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingCleanUp: SetOption(M4WRITER_kMaxFileDuration) returns 0x%x",
+                err);
+        }
+
+        err = pC->ShellAPI.pWriterGlobalFcts->pFctCloseWrite(
+            pC->ewc.p3gpWriterContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingCleanUp: pWriterGlobalFcts->pFctCloseWrite returns 0x%x!",
+                err);
+            /**< don't return the error because we have other things to free! */
+        }
+        pC->ewc.p3gpWriterContext = M4OSA_NULL;
+    }
+
+    /**
+    * Free the Audio encoder context */
+    if( M4OSA_NULL != pC->ewc.pAudioEncCtxt )
+    {
+        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctClose(
+            pC->ewc.pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingCleanUp: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctCleanUp(
+            pC->ewc.pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingCleanUp: pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pC->ewc.pAudioEncCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the ssrc stuff */
+
+    if( M4OSA_NULL != pC->SsrcScratch )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->SsrcScratch);
+        pC->SsrcScratch = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pSsrcBufferIn )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pSsrcBufferIn);
+        pC->pSsrcBufferIn = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pSsrcBufferOut
+        && (M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0) )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pSsrcBufferOut);
+        pC->pSsrcBufferOut = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pTempBuffer )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pTempBuffer);
+        pC->pTempBuffer = M4OSA_NULL;
+    }
+
+    /**
+    * Free the shells interfaces */
+    M4VSS3GPP_unRegisterAllWriters(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllEncoders(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllReaders(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllDecoders(&pC->ShellAPI);
+
+    /**
+    * Free the context */
+    M4OSA_free((M4OSA_MemAddr32)pContext);
+    pContext = M4OSA_NULL;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_audioMixingCleanUp(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/******************************************************************************/
+/******************************************************************************/
+/*********                  STATIC FUNCTIONS                         **********/
+/******************************************************************************/
+/******************************************************************************/
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingOpen()
+ * @brief    Opens the original clip, the added audio clip and the output file,
+ *           and proceeds with the audio mixing settings.
+ * @note
+ * @param    pC              (IN) Internal audio mixing context
+ * @param    pSettings        (IN) Pointer to valid audio mixing settings
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return    M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intAudioMixingOpen( M4VSS3GPP_InternalAudioMixingContext *pC,
+                             M4VSS3GPP_AudioMixingSettings *pSettings )
+{
+    M4OSA_ERR err;
+    M4OSA_UInt32 outputASF = 0;
+    M4ENCODER_Header *encHeader;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_intAudioMixingOpen called with pContext=0x%x, pSettings=0x%x",
+        pC, pSettings);
+
+    /**
+    * The AddVolume setting must be strictly greater than zero */
+    if( pSettings->uiAddVolume == 0 )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAudioMixingOpen(): AddVolume is zero,\
+            returning M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO");
+        return M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO;
+    }
+    /*
+    else if(pSettings->uiAddVolume >= 100) // If volume is set to 100, no more original audio ...
+    {
+    pC->bRemoveOriginal = M4OSA_TRUE;
+    }
+    */
+    /**
+    * Build the input clip settings */
+    pC->InputClipSettings.pFile =
+        pSettings->pOriginalClipFile; /**< Input 3GPP file descriptor */
+    pC->InputClipSettings.FileType = M4VIDEOEDITING_kFileType_3GPP;
+    pC->InputClipSettings.uiBeginCutTime =
+        0; /**< No notion of cut for the audio mixing feature */
+    pC->InputClipSettings.uiEndCutTime =
+        0; /**< No notion of cut for the audio mixing feature */
+
+    /**
+    * Open the original Audio/Video 3GPP clip */
+    err = M4VSS3GPP_intClipInit(&pC->pInputClipCtxt, pC->pOsaFileReadPtr);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipInit(orig) returns 0x%x",
+            err);
+        return err;
+    }
+
+    err = M4VSS3GPP_intClipOpen(pC->pInputClipCtxt, &pC->InputClipSettings,
+        M4OSA_FALSE, M4OSA_FALSE, M4OSA_TRUE);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipOpen(orig) returns 0x%x",
+            err);
+        return err;
+    }
+
+    if( M4OSA_NULL == pC->pInputClipCtxt->pAudioStream )
+    {
+        pC->bRemoveOriginal = M4OSA_TRUE;
+    }
+    /**
+    * If there is no video, it's an error */
+    if( M4OSA_NULL == pC->pInputClipCtxt->pVideoStream )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAudioMixingOpen(): no video stream in clip,\
+            returning M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE");
+        return M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE;
+    }
+
+    /**
+    * Compute clip properties */
+    err = M4VSS3GPP_intBuildAnalysis(pC->pInputClipCtxt,
+        &pC->pInputClipCtxt->pSettings->ClipProperties);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intBuildAnalysis(orig) returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Build the added clip settings */
+    pC->AddedClipSettings.pFile =
+        pSettings->pAddedAudioTrackFile; /**< Added file descriptor */
+    pC->AddedClipSettings.FileType = pSettings->AddedAudioFileType;
+    pC->AddedClipSettings.uiBeginCutTime =
+        0; /**< No notion of cut for the audio mixing feature */
+    pC->AddedClipSettings.uiEndCutTime   = 0;/**< No notion of cut for the audio mixing feature */
+    pC->AddedClipSettings.ClipProperties.uiNbChannels=
+        pSettings->uiNumChannels;
+    pC->AddedClipSettings.ClipProperties.uiSamplingFrequency=    pSettings->uiSamplingFrequency;
+
+    if( M4OSA_NULL != pC->AddedClipSettings.pFile )
+    {
+        /**
+        * Open the added Audio clip */
+        err = M4VSS3GPP_intClipInit(&pC->pAddedClipCtxt, pC->pOsaFileReadPtr);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipInit(added) returns 0x%x",
+                err);
+            return err;
+        }
+
+        err = M4VSS3GPP_intClipOpen(pC->pAddedClipCtxt, &pC->AddedClipSettings,
+            M4OSA_FALSE, M4OSA_FALSE, M4OSA_TRUE);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipOpen(added) returns 0x%x",
+                err);
+            return err;
+        }
+
+        /**
+        * If there is no audio, it's an error */
+        if( M4OSA_NULL == pC->pAddedClipCtxt->pAudioStream )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen(): no audio nor video stream in clip,\
+                returning M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE");
+            return M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE;
+        }
+
+        /**
+        * Compute clip properties */
+        err = M4VSS3GPP_intBuildAnalysis(pC->pAddedClipCtxt,
+            &pC->pAddedClipCtxt->pSettings->ClipProperties);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intBuildAnalysis(added) returns 0x%x",
+                err);
+            return err;
+        }
+
+        switch( pSettings->outputASF )
+        {
+            case M4VIDEOEDITING_k8000_ASF:
+                outputASF = 8000;
+                break;
+
+            case M4VIDEOEDITING_k16000_ASF:
+                outputASF = 16000;
+                break;
+
+            case M4VIDEOEDITING_k22050_ASF:
+                outputASF = 22050;
+                break;
+
+            case M4VIDEOEDITING_k24000_ASF:
+                outputASF = 24000;
+                break;
+
+            case M4VIDEOEDITING_k32000_ASF:
+                outputASF = 32000;
+                break;
+
+            case M4VIDEOEDITING_k44100_ASF:
+                outputASF = 44100;
+                break;
+
+            case M4VIDEOEDITING_k48000_ASF:
+                outputASF = 48000;
+                break;
+
+            default:
+                M4OSA_TRACE1_0("Bad parameter in output ASF ");
+                return M4ERR_PARAMETER;
+                break;
+        }
+
+        if( pC->bRemoveOriginal == M4OSA_TRUE
+            && (pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+            == M4VIDEOEDITING_kMP3 || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.AudioStreamType == M4VIDEOEDITING_kPCM
+            || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.AudioStreamType
+            != pSettings->outputAudioFormat
+            || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiSamplingFrequency != outputASF
+            || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiNbChannels
+            != pSettings->outputNBChannels) )
+        {
+
+            if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAMR_NB )
+            {
+                pSettings->outputASF = M4VIDEOEDITING_k8000_ASF;
+                pSettings->outputNBChannels = 1;
+                pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize = 320;
+            }
+            else if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAAC )
+            {
+                pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize =
+                    2048 * pSettings->outputNBChannels;
+            }
+
+            pC->pInputClipCtxt->pSettings->ClipProperties.uiSamplingFrequency =
+                outputASF;
+
+            if( outputASF != pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiSamplingFrequency )
+            {
+                /* We need to call SSRC in order to align ASF and/or nb of channels */
+                /* Moreover, audio encoder may be needed in case of audio replacing... */
+                pC->b_SSRCneeded = M4OSA_TRUE;
+            }
+
+            if( pSettings->outputNBChannels
+                < pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels )
+            {
+                /* Stereo to Mono */
+                pC->ChannelConversion = 1;
+            }
+            else if( pSettings->outputNBChannels
+                > pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels )
+            {
+                /* Mono to Stereo */
+                pC->ChannelConversion = 2;
+            }
+
+            pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels =
+                pSettings->outputNBChannels;
+        }
+
+        /**
+        * Check compatibility chart */
+        err = M4VSS3GPP_intAudioMixingCompatibility(pC,
+            &pC->pInputClipCtxt->pSettings->ClipProperties,
+            &pC->pAddedClipCtxt->pSettings->ClipProperties);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingOpen():\
+                M4VSS3GPP_intAudioMixingCompatibility returns 0x%x",
+                err);
+            return err;
+        }
+
+        /**
+        * Check loop parameters */
+        if( pC->uiBeginLoop > pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiClipAudioDuration )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen():\
+                begin loop time is higher than added clip audio duration");
+            return M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP;
+        }
+
+        /**
+        * Ok, let's go with this audio track */
+        pC->bHasAudio = M4OSA_TRUE;
+    }
+    else
+    {
+        /* No added file, force remove original */
+        pC->AddedClipSettings.FileType = M4VIDEOEDITING_kFileType_Unsupported;
+        pC->bRemoveOriginal = M4OSA_TRUE;
+        pC->bHasAudio = M4OSA_FALSE;
+    }
+
+    /**
+    * Copy the video properties of the input clip to the output properties */
+    pC->ewc.uiVideoBitrate =
+        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoBitrate;
+    pC->ewc.uiVideoWidth =
+        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoWidth;
+    pC->ewc.uiVideoHeight =
+        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoHeight;
+    pC->ewc.uiVideoTimeScale =
+        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoTimeScale;
+    pC->ewc.bVideoDataPartitioning =
+        pC->pInputClipCtxt->pSettings->ClipProperties.bMPEG4dataPartition;
+
+    switch( pC->pInputClipCtxt->pSettings->ClipProperties.VideoStreamType )
+    {
+        case M4VIDEOEDITING_kH263:
+            pC->ewc.VideoStreamType = M4SYS_kH263;
+            break;
+
+        case M4VIDEOEDITING_kMPEG4_EMP:
+            pC->ewc.bActivateEmp = M4OSA_TRUE; /* no break */
+
+        case M4VIDEOEDITING_kMPEG4:
+            pC->ewc.VideoStreamType = M4SYS_kMPEG_4;
+            break;
+
+        case M4VIDEOEDITING_kH264:
+            pC->ewc.VideoStreamType = M4SYS_kH264;
+            break;
+
+        default:
+            pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
+            break;
+    }
+
+    /* Add a link to video dsi */
+    if( M4SYS_kH264 == pC->ewc.VideoStreamType )
+    {
+
+        /* For H.264 encoder case
+        * Fetch the DSI from the shell video encoder, and feed it to the writer */
+
+        M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingOpen: get DSI for H264 stream");
+
+        if( M4OSA_NULL == pC->ewc.pEncContext )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen: pC->ewc.pEncContext is NULL");
+            err = M4VSS3GPP_intAudioMixingCreateVideoEncoder(pC);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingOpen:\
+                    M4VSS3GPP_intAudioMixingCreateVideoEncoder returned error 0x%x",
+                    err);
+            }
+        }
+
+        if( M4OSA_NULL != pC->ewc.pEncContext )
+        {
+            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctGetOption(
+                pC->ewc.pEncContext, M4ENCODER_kOptionID_EncoderHeader,
+                (M4OSA_DataOption) &encHeader);
+
+            if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingOpen: failed to get the encoder header (err 0x%x)",
+                    err);
+                M4OSA_TRACE1_2(
+                    "M4VSS3GPP_intAudioMixingOpen: encHeader->pBuf=0x%x, size=0x%x",
+                    encHeader->pBuf, encHeader->Size);
+            }
+            else
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intAudioMixingOpen: send DSI for H264 stream to 3GP writer");
+
+                /**
+                * Allocate and copy the new DSI */
+                pC->ewc.pVideoOutputDsi =
+                    (M4OSA_MemAddr8)M4OSA_malloc(encHeader->Size, M4VSS3GPP,
+                    (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (H264)");
+
+                if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_intAudioMixingOpen():\
+                        unable to allocate pVideoOutputDsi (H264), returning M4ERR_ALLOC");
+                    return M4ERR_ALLOC;
+                }
+                pC->ewc.uiVideoOutputDsiSize = (M4OSA_UInt16)encHeader->Size;
+                M4OSA_memcpy(pC->ewc.pVideoOutputDsi, encHeader->pBuf,
+                    encHeader->Size);
+            }
+
+            err = M4VSS3GPP_intAudioMixingDestroyVideoEncoder(pC);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingOpen:\
+                    M4VSS3GPP_intAudioMixingDestroyVideoEncoder returned error 0x%x",
+                    err);
+            }
+        }
+        else
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen: pC->ewc.pEncContext is NULL, cannot get the DSI");
+        }
+    }
+    else
+    {
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intAudioMixingOpen: input clip video stream type = 0x%x",
+            pC->ewc.VideoStreamType);
+        pC->ewc.uiVideoOutputDsiSize =
+            (M4OSA_UInt16)pC->pInputClipCtxt->pVideoStream->
+            m_basicProperties.m_decoderSpecificInfoSize;
+        pC->ewc.pVideoOutputDsi = (M4OSA_MemAddr8)pC->pInputClipCtxt->pVideoStream->
+            m_basicProperties.m_pDecoderSpecificInfo;
+    }
+
+    /**
+    * Copy the audio properties to the output properties (from the added clip if
+    * the original audio is removed, otherwise from the input clip) */
+    if( pC->bHasAudio )
+    {
+        if( pC->bRemoveOriginal == M4OSA_TRUE )
+        {
+            pC->ewc.uiNbChannels =
+                pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels;
+            pC->ewc.uiAudioBitrate =
+                pC->pAddedClipCtxt->pSettings->ClipProperties.uiAudioBitrate;
+            pC->ewc.uiSamplingFrequency = pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiSamplingFrequency;
+            pC->ewc.uiSilencePcmSize =
+                pC->pAddedClipCtxt->pSettings->ClipProperties.uiDecodedPcmSize;
+            pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+
+            /* If the output settings differ from the added clip settings,
+            we need to re-encode the BGM */
+            if( pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+                != pSettings->outputAudioFormat
+                || pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiSamplingFrequency != outputASF
+                || pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiNbChannels
+                != pSettings->outputNBChannels
+                || pC->pAddedClipCtxt->pSettings->
+                ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3 )
+            {
+                /* Set reader DSI to NULL (unknown), we will use encoder DSI later */
+                if( pC->pAddedClipCtxt->pAudioStream->
+                    m_basicProperties.m_pDecoderSpecificInfo != M4OSA_NULL )
+                {
+
+                    /*
+                     M4OSA_free((M4OSA_MemAddr32)pC->pAddedClipCtxt->pAudioStream->\
+                       m_basicProperties.m_pDecoderSpecificInfo);
+                       */
+                    pC->pAddedClipCtxt->pAudioStream->
+                        m_basicProperties.m_decoderSpecificInfoSize = 0;
+                    pC->pAddedClipCtxt->pAudioStream->
+                        m_basicProperties.m_pDecoderSpecificInfo = M4OSA_NULL;
+                }
+
+                pC->ewc.uiNbChannels =
+                    pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+                pC->ewc.uiSamplingFrequency = pC->pInputClipCtxt->pSettings->
+                    ClipProperties.uiSamplingFrequency;
+                pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+
+                if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAMR_NB )
+                {
+                    pC->ewc.AudioStreamType = M4SYS_kAMR;
+                    pC->ewc.pSilenceFrameData =
+                        (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+                    pC->ewc.uiSilenceFrameSize =
+                        M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+                    pC->ewc.iSilenceFrameDuration =
+                        M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+                    pC->ewc.uiAudioBitrate = 12200;
+                    pC->ewc.uiSamplingFrequency = 8000;
+                    pC->ewc.uiSilencePcmSize = 320;
+                    pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+                }
+                else if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAAC )
+                {
+                    pC->ewc.AudioStreamType = M4SYS_kAAC;
+
+                    if( pSettings->outputAudioBitrate
+                        == M4VIDEOEDITING_kUndefinedBitrate )
+                    {
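+                        /* No explicit bitrate given: choose a default from the sampling frequency (doubled below for stereo) */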
+                        switch( pC->ewc.uiSamplingFrequency )
+                        {
+                            case 16000:
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k24_KBPS;
+                                break;
+
+                            case 22050:
+                            case 24000:
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k32_KBPS;
+                                break;
+
+                            case 32000:
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k48_KBPS;
+                                break;
+
+                            case 44100:
+                            case 48000:
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k64_KBPS;
+                                break;
+
+                            default:
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k64_KBPS;
+                                break;
+                        }
+
+                        if( pC->ewc.uiNbChannels == 2 )
+                        {
+                            /* The output bitrate has to be doubled for stereo */
+                            pC->ewc.uiAudioBitrate += pC->ewc.uiAudioBitrate;
+                        }
+                    }
+                    else
+                    {
+                        pC->ewc.uiAudioBitrate = pSettings->outputAudioBitrate;
+                    }
+
+                    if( pC->ewc.uiNbChannels == 1 )
+                    {
+                        pC->ewc.pSilenceFrameData =
+                            (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+                        pC->ewc.uiSilenceFrameSize =
+                            M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+                    }
+                    else
+                    {
+                        pC->ewc.pSilenceFrameData =
+                            (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+                        pC->ewc.uiSilenceFrameSize =
+                            M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+                    }
+                    pC->ewc.iSilenceFrameDuration =
+                        1024; /* AAC is always 1024/Freq sample duration */
+                }
+            }
+            else
+            {
+                switch( pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.AudioStreamType )
+                {
+                    case M4VIDEOEDITING_kAMR_NB:
+                        pC->ewc.AudioStreamType = M4SYS_kAMR;
+                        pC->ewc.pSilenceFrameData =
+                            (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+                        pC->ewc.uiSilenceFrameSize =
+                            M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+                        pC->ewc.iSilenceFrameDuration =
+                            M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+                        break;
+
+                    case M4VIDEOEDITING_kAAC:
+                    case M4VIDEOEDITING_kAACplus:
+                    case M4VIDEOEDITING_keAACplus:
+                        pC->ewc.AudioStreamType = M4SYS_kAAC;
+
+                        if( pC->ewc.uiNbChannels == 1 )
+                        {
+                            pC->ewc.pSilenceFrameData =
+                                (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+                            pC->ewc.uiSilenceFrameSize =
+                                M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+                        }
+                        else
+                        {
+                            pC->ewc.pSilenceFrameData =
+                                (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+                            pC->ewc.uiSilenceFrameSize =
+                                M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+                        }
+                        pC->ewc.iSilenceFrameDuration =
+                            1024; /* AAC is always 1024/Freq sample duration */
+                        break;
+
+                    case M4VIDEOEDITING_kEVRC:
+                        pC->ewc.AudioStreamType = M4SYS_kEVRC;
+                        pC->ewc.pSilenceFrameData = M4OSA_NULL;
+                        pC->ewc.uiSilenceFrameSize = 0;
+                        pC->ewc.iSilenceFrameDuration = 160; /* EVRC frames are 20 ms at 8000 Hz
+                                            (makes it easier to factorize amr and evrc code) */
+                        break;
+
+                    case M4VIDEOEDITING_kPCM:
+                        /* Set reader DSI to NULL (unknown), we will use encoder DSI later */
+                        pC->pAddedClipCtxt->pAudioStream->
+                            m_basicProperties.m_decoderSpecificInfoSize = 0;
+                        pC->pAddedClipCtxt->pAudioStream->
+                            m_basicProperties.m_pDecoderSpecificInfo =
+                            M4OSA_NULL;
+
+                        if( pC->pAddedClipCtxt->pSettings->
+                            ClipProperties.uiSamplingFrequency == 8000 )
+                        {
+                            pC->ewc.AudioStreamType = M4SYS_kAMR;
+                            pC->ewc.pSilenceFrameData = (M4OSA_UInt8
+                                *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+                            pC->ewc.uiSilenceFrameSize =
+                                M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+                            pC->ewc.iSilenceFrameDuration =
+                                M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+                            pC->ewc.uiAudioBitrate = M4VIDEOEDITING_k12_2_KBPS;
+                        }
+                        else if( pC->pAddedClipCtxt->pSettings->
+                            ClipProperties.uiSamplingFrequency == 16000 )
+                        {
+                            if( pC->ewc.uiNbChannels == 1 )
+                            {
+                                pC->ewc.AudioStreamType = M4SYS_kAAC;
+                                pC->ewc.pSilenceFrameData = (M4OSA_UInt8
+                                    *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+                                pC->ewc.uiSilenceFrameSize =
+                                    M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+                                pC->ewc.iSilenceFrameDuration =
+                                    1024; /* AAC is always 1024/Freq sample duration */
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k32_KBPS;
+                            }
+                            else
+                            {
+                                pC->ewc.AudioStreamType = M4SYS_kAAC;
+                                pC->ewc.pSilenceFrameData = (M4OSA_UInt8
+                                    *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+                                pC->ewc.uiSilenceFrameSize =
+                                    M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+                                pC->ewc.iSilenceFrameDuration =
+                                    1024; /* AAC is always 1024/Freq sample duration */
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k64_KBPS;
+                            }
+                        }
+                        else
+                        {
+                            pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+                        }
+                        break;
+
+                    default:
+                        pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+                        break;
+                }
+            }
+
+            /* Add a link to audio dsi */
+            pC->ewc.uiAudioOutputDsiSize =
+                (M4OSA_UInt16)pC->pAddedClipCtxt->pAudioStream->
+                m_basicProperties.m_decoderSpecificInfoSize;
+            pC->ewc.pAudioOutputDsi = (M4OSA_MemAddr8)pC->pAddedClipCtxt->pAudioStream->
+                m_basicProperties.m_pDecoderSpecificInfo;
+        }
+        else
+        {
+            pC->ewc.uiNbChannels =
+                pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+            pC->ewc.uiAudioBitrate =
+                pC->pInputClipCtxt->pSettings->ClipProperties.uiAudioBitrate;
+            pC->ewc.uiSamplingFrequency = pC->pInputClipCtxt->pSettings->
+                ClipProperties.uiSamplingFrequency;
+            pC->ewc.uiSilencePcmSize =
+                pC->pInputClipCtxt->pSettings->ClipProperties.uiDecodedPcmSize;
+            pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+
+            switch( pC->pInputClipCtxt->pSettings->
+                ClipProperties.AudioStreamType )
+            {
+                case M4VIDEOEDITING_kAMR_NB:
+                    pC->ewc.AudioStreamType = M4SYS_kAMR;
+                    pC->ewc.pSilenceFrameData =
+                        (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+                    pC->ewc.uiSilenceFrameSize =
+                        M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+                    pC->ewc.iSilenceFrameDuration =
+                        M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+                    break;
+
+                case M4VIDEOEDITING_kAAC:
+                case M4VIDEOEDITING_kAACplus:
+                case M4VIDEOEDITING_keAACplus:
+                    pC->ewc.AudioStreamType = M4SYS_kAAC;
+
+                    if( pC->ewc.uiNbChannels == 1 )
+                    {
+                        pC->ewc.pSilenceFrameData =
+                            (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+                        pC->ewc.uiSilenceFrameSize =
+                            M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+                    }
+                    else
+                    {
+                        pC->ewc.pSilenceFrameData =
+                            (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+                        pC->ewc.uiSilenceFrameSize =
+                            M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+                    }
+                    pC->ewc.iSilenceFrameDuration =
+                        1024; /* AAC is always 1024/Freq sample duration */
+                    break;
+
+                default:
+                    pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_intAudioMixingOpen: No audio track in input file.");
+                    return M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED;
+                    break;
+            }
+
+            /* Add a link to audio dsi */
+            pC->ewc.uiAudioOutputDsiSize =
+                (M4OSA_UInt16)pC->pInputClipCtxt->pAudioStream->
+                m_basicProperties.m_decoderSpecificInfoSize;
+            pC->ewc.pAudioOutputDsi = (M4OSA_MemAddr8)pC->pInputClipCtxt->pAudioStream->
+                m_basicProperties.m_pDecoderSpecificInfo;
+        }
+    }
+
+    /**
+    * Copy common 'silence frame stuff' to the input ClipContext */
+    pC->pInputClipCtxt->uiSilencePcmSize = pC->ewc.uiSilencePcmSize;
+    pC->pInputClipCtxt->pSilenceFrameData = pC->ewc.pSilenceFrameData;
+    pC->pInputClipCtxt->uiSilenceFrameSize = pC->ewc.uiSilenceFrameSize;
+    pC->pInputClipCtxt->iSilenceFrameDuration = pC->ewc.iSilenceFrameDuration;
+    pC->pInputClipCtxt->scale_audio = pC->ewc.scale_audio;
+
+    pC->pInputClipCtxt->iAudioFrameCts =
+        -pC->pInputClipCtxt->iSilenceFrameDuration; /* Reset time */
+
+    /**
+    * Copy common 'silence frame stuff' to the added ClipContext */
+    if( pC->bHasAudio )
+    {
+        pC->pAddedClipCtxt->uiSilencePcmSize = pC->ewc.uiSilencePcmSize;
+        pC->pAddedClipCtxt->pSilenceFrameData = pC->ewc.pSilenceFrameData;
+        pC->pAddedClipCtxt->uiSilenceFrameSize = pC->ewc.uiSilenceFrameSize;
+        pC->pAddedClipCtxt->iSilenceFrameDuration =
+            pC->ewc.iSilenceFrameDuration;
+        pC->pAddedClipCtxt->scale_audio = pC->ewc.scale_audio;
+
+        pC->pAddedClipCtxt->iAudioFrameCts =
+            -pC->pAddedClipCtxt->iSilenceFrameDuration; /* Reset time */
+    }
+
+    /**
+    * Check AddCts is lower than original clip duration */
+    if( ( M4OSA_NULL != pC->pInputClipCtxt->pVideoStream)
+        && (pC->iAddCts > (M4OSA_Int32)pC->pInputClipCtxt->pVideoStream->
+        m_basicProperties.m_duration) )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAudioMixingOpen(): uiAddCts is larger than video duration,\
+            returning M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION");
+        return M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION;
+    }
+
+    /**
+    * If the audio tracks are not compatible, replace the input track with silence */
+    if( M4OSA_FALSE == pC->pInputClipCtxt->pSettings->
+        ClipProperties.bAudioIsCompatibleWithMasterClip )
+    {
+        M4VSS3GPP_intClipDeleteAudioTrack(pC->pInputClipCtxt);
+    }
+
+    /**
+    * Check if audio mixing is required */
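+    /* Mixing is skipped when the added track is not editable or when the original audio is removed; in those cases the added audio is written as-is */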
+    if( ( ( pC->bHasAudio) && (M4OSA_FALSE
+        == pC->pAddedClipCtxt->pSettings->ClipProperties.bAudioIsEditable))
+        || (M4OSA_TRUE == pC->bRemoveOriginal) ) /*||
+                                                 (pSettings->uiAddVolume >= 100)) */
+    {
+        pC->bAudioMixingIsNeeded = M4OSA_FALSE;
+    }
+    else
+    {
+        pC->bAudioMixingIsNeeded = M4OSA_TRUE;
+    }
+
+    /**
+    * Check if the output audio can support silence frames.
+    * Trick: the bAudioIsCompatibleWithMasterClip field is reused to store that information */
+    if( pC->bHasAudio )
+    {
+        pC->bSupportSilence = pC->pAddedClipCtxt->pSettings->
+            ClipProperties.bAudioIsCompatibleWithMasterClip;
+
+        if( M4OSA_FALSE == pC->bSupportSilence )
+        {
+            if( pC->iAddCts > 0 )
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intAudioMixingOpen():\
+                    iAddCts should be set to 0 with this audio track !");
+                return M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK;
+            }
+
+            if( 0 < pC->uiEndLoop )
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intAudioMixingOpen():\
+                    uiEndLoop should be set to 0 with this audio track !");
+                return M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK;
+            }
+        }
+    }
+#if 0
+    /**
+    * Compute the volume factors */
+    if( (M4OSA_TRUE
+        == pC->bRemoveOriginal) )
+    {
+        /**
+        * In the remove original case, we keep only the added audio */
+        pC->fAddedFactor = 1.0F;
+        pC->fOrigFactor = 0.0F;
+    }
+    else
+    {
+        /**
+        * Compute the factor to apply to sample to do the mixing */
+        pC->fAddedFactor = pSettings->uiAddVolume / 100.0F;
+        pC->fOrigFactor = 1.0F - pC->fAddedFactor;
+    }
+#endif
+    if( pC->b_DuckingNeedeed == M4OSA_FALSE)
+    {
+        /**
+        * Compute the factor to apply to sample to do the mixing */
+        pC->fAddedFactor = 0.50F;
+        pC->fOrigFactor = 0.50F;
+    }
+
+
+    /**
+    * Check if SSRC is needed */
+    if( M4OSA_TRUE == pC->b_SSRCneeded )
+    {
+        M4OSA_UInt32 numerator, denominator, ratio, ratioBuffer;
+
+        /**
+        * Init the SSRC module */
+        SSRC_ReturnStatus_en ReturnStatus; /* Function return status */
+        LVM_INT16 NrSamplesMin = 0;  /* Minimal number of samples on the input or on the output */
+        LVM_INT32 ScratchSize;       /* The size of the scratch memory */
+        LVM_INT16 *pInputInScratch;  /* Pointer to the input in the scratch buffer */
+        LVM_INT16 *pOutputInScratch; /* Pointer to the output in the scratch buffer */
+        SSRC_Params_t ssrcParams;    /* Memory for init parameters */
+
+        switch( pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiSamplingFrequency )
+        {
+            case 8000:
+                ssrcParams.SSRC_Fs_In = LVM_FS_8000;
+                break;
+
+            case 11025:
+                ssrcParams.SSRC_Fs_In = LVM_FS_11025;
+                break;
+
+            case 12000:
+                ssrcParams.SSRC_Fs_In = LVM_FS_12000;
+                break;
+
+            case 16000:
+                ssrcParams.SSRC_Fs_In = LVM_FS_16000;
+                break;
+
+            case 22050:
+                ssrcParams.SSRC_Fs_In = LVM_FS_22050;
+                break;
+
+            case 24000:
+                ssrcParams.SSRC_Fs_In = LVM_FS_24000;
+                break;
+
+            case 32000:
+                ssrcParams.SSRC_Fs_In = LVM_FS_32000;
+                break;
+
+            case 44100:
+                ssrcParams.SSRC_Fs_In = LVM_FS_44100;
+                break;
+
+            case 48000:
+                ssrcParams.SSRC_Fs_In = LVM_FS_48000;
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingOpen: invalid added clip sampling frequency (%d Hz),\
+                    returning M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM",
+                    pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.uiSamplingFrequency);
+                return M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM;
+        }
+
+        if( 1 == pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels )
+        {
+            ssrcParams.SSRC_NrOfChannels = LVM_MONO;
+        }
+        else
+        {
+            ssrcParams.SSRC_NrOfChannels = LVM_STEREO;
+        }
+
+        switch( pC->ewc.uiSamplingFrequency )
+        {
+            case 8000:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_8000;
+                break;
+
+            case 16000:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_16000;
+                break;
+
+            case 22050:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_22050;
+                break;
+
+            case 24000:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_24000;
+                break;
+
+            case 32000:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_32000;
+                break;
+
+            case 44100:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_44100;
+                break;
+
+            case 48000:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_48000;
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingOpen: invalid output sampling frequency (%d Hz),\
+                    returning M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED",
+                    pC->ewc.uiSamplingFrequency);
+                return M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED;
+                break;
+        }
+        ReturnStatus = 0;
+
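+        /* The default block sizes below each correspond to 40 ms of audio at the given sampling rate */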
+        switch( ssrcParams.SSRC_Fs_In )
+        {
+            case LVM_FS_8000:
+                ssrcParams.NrSamplesIn = 320;
+                break;
+            case LVM_FS_11025:
+                ssrcParams.NrSamplesIn = 441;
+                break;
+            case LVM_FS_12000:
+                ssrcParams.NrSamplesIn = 480;
+                break;
+            case LVM_FS_16000:
+                ssrcParams.NrSamplesIn = 640;
+                break;
+            case LVM_FS_22050:
+                ssrcParams.NrSamplesIn = 882;
+                break;
+            case LVM_FS_24000:
+                ssrcParams.NrSamplesIn = 960;
+                break;
+            case LVM_FS_32000:
+                ssrcParams.NrSamplesIn = 1280;
+                break;
+            case LVM_FS_44100:
+                ssrcParams.NrSamplesIn = 1764;
+                break;
+            case LVM_FS_48000:
+                ssrcParams.NrSamplesIn = 1920;
+                break;
+            default:
+                ReturnStatus = -1;
+                break;
+        }
+
+        switch( ssrcParams.SSRC_Fs_Out )
+        {
+            case LVM_FS_8000:
+                ssrcParams.NrSamplesOut = 320;
+                break;
+            case LVM_FS_11025:
+                ssrcParams.NrSamplesOut = 441;
+                break;
+            case LVM_FS_12000:
+                ssrcParams.NrSamplesOut = 480;
+                break;
+            case LVM_FS_16000:
+                ssrcParams.NrSamplesOut = 640;
+                break;
+            case LVM_FS_22050:
+                ssrcParams.NrSamplesOut = 882;
+                break;
+            case LVM_FS_24000:
+                ssrcParams.NrSamplesOut = 960;
+                break;
+            case LVM_FS_32000:
+                ssrcParams.NrSamplesOut = 1280;
+                break;
+            case LVM_FS_44100:
+                ssrcParams.NrSamplesOut = 1764;
+                break;
+            case LVM_FS_48000:
+                ssrcParams.NrSamplesOut = 1920;
+                break;
+            default:
+                ReturnStatus = -1;
+                break;
+        }
+        if( ReturnStatus != SSRC_OK )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingOpen:\
+                unsupported sampling frequency for the SSRC (error code %d)",
+                ReturnStatus);
+            return M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED;
+        }
+
+        NrSamplesMin =
+            (LVM_INT16)((ssrcParams.NrSamplesIn > ssrcParams.NrSamplesOut)
+            ? ssrcParams.NrSamplesOut : ssrcParams.NrSamplesIn);
+
+        while( NrSamplesMin < M4VSS_SSRC_MINBLOCKSIZE )
+        { /* Don't take blocks smaller than the minimal block size */
+            ssrcParams.NrSamplesIn = (LVM_INT16)(ssrcParams.NrSamplesIn << 1);
+            ssrcParams.NrSamplesOut = (LVM_INT16)(ssrcParams.NrSamplesOut << 1);
+            NrSamplesMin = (LVM_INT16)(NrSamplesMin << 1);
+        }
+        /* Multiplication by NrOfChannels is done below */
+        pC->iSsrcNbSamplIn = (LVM_INT16)(ssrcParams.NrSamplesIn);
+        pC->iSsrcNbSamplOut = (LVM_INT16)(ssrcParams.NrSamplesOut);
+
+        numerator =
+            pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency
+            * pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels;
+        denominator =
+            pC->pInputClipCtxt->pSettings->ClipProperties.uiSamplingFrequency
+            * pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+
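+        /* ratioBuffer is the ceiling of the PCM data-rate ratio (added clip vs. input clip); it is used below to size the SSRC input buffer */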
+        if( numerator % denominator == 0 )
+        {
+            ratioBuffer = (M4OSA_UInt32)(numerator / denominator);
+        }
+        else
+        {
+            ratioBuffer = (M4OSA_UInt32)(numerator / denominator) + 1;
+        }
+
+        ratio =
+            (M4OSA_UInt32)(( pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+            * ratioBuffer) / (pC->iSsrcNbSamplIn * sizeof(short)
+            * pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiNbChannels));
+
+        if( ratio == 0 )
+        {
+            /* The SSRC input block size is already bigger than the required buffer */
+            pC->minimumBufferIn = pC->iSsrcNbSamplIn * sizeof(short)
+                * pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiNbChannels;
+        }
+        else
+        {
+            ratio++; /* Round up to the next integer */
+            pC->minimumBufferIn = ratio * (pC->iSsrcNbSamplIn * sizeof(short)
+                * pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiNbChannels);
+        }
+
+        /**
+        * Allocate buffer for the input of the SSRC */
+        pC->pSsrcBufferIn = (M4OSA_MemAddr8)M4OSA_malloc(
+            pC->minimumBufferIn + pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize,
+            M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferIn");
+
+        if( M4OSA_NULL == pC->pSsrcBufferIn )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen():\
+                unable to allocate pSsrcBufferIn, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
+
+        /**
+        * Allocate buffer for the output of the SSRC */
+        /* The "3" value below should be optimized ... one day ... */
+        pC->pSsrcBufferOut =
+            (M4OSA_MemAddr8)M4OSA_malloc(3 * pC->iSsrcNbSamplOut * sizeof(short)
+            * pC->ewc.uiNbChannels, M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferOut");
+
+        if( M4OSA_NULL == pC->pSsrcBufferOut )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen():\
+                unable to allocate pSsrcBufferOut, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->pPosInSsrcBufferOut = pC->pSsrcBufferOut;
+
+        /**
+        * Allocate temporary buffer needed in case of channel conversion */
+        if( pC->ChannelConversion > 0 )
+        {
+            /* The "3" value below should be optimized ... one day ... */
+            pC->pTempBuffer =
+                (M4OSA_MemAddr8)M4OSA_malloc(3 * pC->iSsrcNbSamplOut
+                * sizeof(short) * pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiNbChannels, M4VSS3GPP, (M4OSA_Char *)"pTempBuffer");
+
+            if( M4OSA_NULL == pC->pTempBuffer )
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intAudioMixingOpen():\
+                    unable to allocate pTempBuffer, returning M4ERR_ALLOC");
+                return M4ERR_ALLOC;
+            }
+            pC->pPosInTempBuffer = pC->pTempBuffer;
+        }
+    }
+    else if( pC->ChannelConversion > 0 )
+    {
+        pC->minimumBufferIn =
+            pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize;
+
+        /**
+        * Allocate buffer for the input of the SSRC */
+        pC->pSsrcBufferIn = (M4OSA_MemAddr8)M4OSA_malloc(
+            pC->minimumBufferIn + pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize,
+            M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferIn");
+
+        if( M4OSA_NULL == pC->pSsrcBufferIn )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen(): \
+                unable to allocate pSsrcBufferIn, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
+
+        /**
+        * Allocate buffer for the output of the SSRC */
+        pC->pSsrcBufferOut = (M4OSA_MemAddr8)M4OSA_malloc(
+            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize,
+            M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferOut");
+
+        if( M4OSA_NULL == pC->pSsrcBufferOut )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen():\
+                unable to allocate pSsrcBufferOut, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->pPosInSsrcBufferOut = pC->pSsrcBufferOut;
+    }
+    else if( (pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3)||
+         (pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kPCM))
+    {
+        M4OSA_UInt32 minbuffer = 0;
+
+        if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAAC )
+        {
+            pC->minimumBufferIn = 2048 * pC->ewc.uiNbChannels;
+            minbuffer = pC->minimumBufferIn;
+        }
+        else if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAMR_NB )
+        {
+            pC->minimumBufferIn = 320;
+
+            if( pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize > 320 )
+            {
+                minbuffer = pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize;
+            }
+            else
+            {
+                minbuffer = pC->minimumBufferIn; /* Not really possible ...*/
+            }
+        }
+        else
+        {
+            M4OSA_TRACE1_0("Bad output audio format in the MP3/PCM replacement case");
+            return M4ERR_PARAMETER;
+        }
+
+        /**
+        * Allocate buffer for the input of the SSRC */
+        pC->pSsrcBufferIn =
+            (M4OSA_MemAddr8)M4OSA_malloc(2 * minbuffer, M4VSS3GPP,
+            (M4OSA_Char *)"pSsrcBufferIn");
+
+        if( M4OSA_NULL == pC->pSsrcBufferIn )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen(): unable to allocate pSsrcBufferIn,\
+                returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
+
+        pC->pPosInSsrcBufferOut = pC->pPosInSsrcBufferIn;
+        pC->pSsrcBufferOut = pC->pSsrcBufferIn;
+    }
+
+    /**
+    * Check if audio encoder is needed to do audio mixing or audio resampling */
+    if( M4OSA_TRUE == pC->bAudioMixingIsNeeded || M4VIDEOEDITING_kPCM
+        == pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+        || M4VIDEOEDITING_kMP3
+        == pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+        || pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+        != pSettings->outputAudioFormat
+        || pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency
+        != outputASF
+        || pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels
+        != pSettings->outputNBChannels )
+    {
+        /**
+        * Init the audio encoder */
+        err = M4VSS3GPP_intCreateAudioEncoder(&pC->ewc, &pC->ShellAPI,
+            pC->ewc.uiAudioBitrate);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intCreateAudioEncoder() returns 0x%x",
+                err);
+            return err;
+        }
+
+        /* In case of PCM, MP3, or audio replacement with re-encoding, use the encoder DSI */
+        if( pC->ewc.uiAudioOutputDsiSize == 0 && (M4VIDEOEDITING_kPCM
+            == pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+            || M4VIDEOEDITING_kMP3 == pC->pAddedClipCtxt->pSettings->
+            ClipProperties.AudioStreamType
+            || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.AudioStreamType
+            != pSettings->outputAudioFormat
+            || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiSamplingFrequency != outputASF
+            || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiNbChannels
+            != pSettings->outputNBChannels) )
+        {
+            pC->ewc.uiAudioOutputDsiSize =
+                (M4OSA_UInt16)pC->ewc.pAudioEncDSI.infoSize;
+            pC->ewc.pAudioOutputDsi = pC->ewc.pAudioEncDSI.pInfo;
+        }
+    }
+
+    /**
+    * Init the output 3GPP file */
+    /*11/12/2008 CR3283 add the max output file size for the MMS use case in VideoArtist*/
+    err = M4VSS3GPP_intCreate3GPPOutputFile(&pC->ewc, &pC->ShellAPI,
+        pC->pOsaFileWritPtr, pSettings->pOutputClipFile,
+        pC->pOsaFileReadPtr, pSettings->pTemporaryFile, 0);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intCreate3GPPOutputFile() returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingOpen(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingWriteSilence()
+ * @brief    Write an audio silence frame into the writer
+ * @note    Mainly used when padding with silence
+ * @param    pC            (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingWriteSilence(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+
+    err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingWriteSilence:\
+         pWriterDataFcts->pStartAU(audio) returns 0x%x!", err);
+        return err;
+    }
+
+    M4OSA_TRACE2_0("A #### silence AU");
+
+    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress,
+        (M4OSA_MemAddr8)pC->ewc.pSilenceFrameData, pC->ewc.uiSilenceFrameSize);
+
+    pC->ewc.WriterAudioAU.size = pC->ewc.uiSilenceFrameSize;
+    pC->ewc.WriterAudioAU.CTS =
+        (M4OSA_Time)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
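+    /* dATo is in milliseconds; scale_audio (sampling frequency / 1000) converts it to audio timescale units, with rounding */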
+
+    M4OSA_TRACE2_2("B ---- write : cts  = %ld [ 0x%x ]",
+        (M4OSA_Int32)(pC->ewc.dATo), pC->ewc.WriterAudioAU.size);
+
+    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingWriteSilence:\
+            pWriterDataFcts->pProcessAU(silence) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingStepVideo(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Perform one step of video.
+ * @note
+ * @param    pC            (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepVideo(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_UInt16 offset;
+
+    M4OSA_TRACE2_3("  VIDEO step : dVTo = %f  state = %d  offset = %ld",
+        pC->ewc.dOutputVidCts, pC->State, pC->pInputClipCtxt->iVoffset);
+
+    /**
+    * Read the input video AU */
+    err = pC->pInputClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+        pC->pInputClipCtxt->pReaderContext,
+        (M4_StreamHandler *)pC->pInputClipCtxt->pVideoStream,
+        &pC->pInputClipCtxt->VideoAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intAudioMixingStepVideo(): m_pFctGetNextAu(video) returns 0x%x",
+            err);
+        return err;
+    }
+
+    M4OSA_TRACE2_3("C .... read  : cts  = %.0f + %ld [ 0x%x ]",
+        pC->pInputClipCtxt->VideoAU.m_CTS, pC->pInputClipCtxt->iVoffset,
+        pC->pInputClipCtxt->VideoAU.m_size);
+
+    /**
+    * Get the output AU to write into */
+    err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, &pC->ewc.WriterVideoAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingStepVideo: pWriterDataFcts->pStartAU(Video) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    offset = 0;
+    /* For an H.264 stream, do not copy the first 4 bytes: they are header indicators */
+    if( pC->pInputClipCtxt->pVideoStream->m_basicProperties.m_streamType
+        == M4DA_StreamTypeVideoMpeg4Avc )
+    {
+        M4OSA_TRACE3_0(
+            "M4VSS3GPP_intAudioMixingStepVideo(): input stream type H264");
+        offset = 4;
+    }
+    pC->pInputClipCtxt->VideoAU.m_size  -=  offset;
+    /**
+    * Check that the video AU is not larger than expected */
+    if( pC->pInputClipCtxt->VideoAU.m_size > pC->ewc.uiVideoMaxAuSize )
+    {
+        M4OSA_TRACE1_2(
+            "M4VSS3GPP_intAudioMixingStepVideo: AU size greater than MaxAuSize (%d>%d)!\
+            returning M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE",
+            pC->pInputClipCtxt->VideoAU.m_size, pC->ewc.uiVideoMaxAuSize);
+        return M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE;
+    }
+
+    /**
+    * Copy the input AU payload to the output AU */
+    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterVideoAU.dataAddress,
+        (M4OSA_MemAddr8)(pC->pInputClipCtxt->VideoAU.m_dataAddress + offset),
+        (pC->pInputClipCtxt->VideoAU.m_size));
+
+    /**
+    * Copy the input AU parameters to the output AU */
+    pC->ewc.WriterVideoAU.size = pC->pInputClipCtxt->VideoAU.m_size;
+    pC->ewc.WriterVideoAU.CTS =
+        (M4OSA_UInt32)(pC->pInputClipCtxt->VideoAU.m_CTS + 0.5);
+    pC->ewc.WriterVideoAU.attribute = pC->pInputClipCtxt->VideoAU.m_attribute;
+
+    /**
+    * Write the AU */
+    M4OSA_TRACE2_2("D ---- write : cts  = %lu [ 0x%x ]",
+        pC->ewc.WriterVideoAU.CTS, pC->ewc.WriterVideoAU.size);
+
+    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, &pC->ewc.WriterVideoAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingStepVideo: pWriterDataFcts->pProcessAU(Video) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingStepVideo(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioMix(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Perform one step of audio mixing.
+ * @note
+ * @param    pC            (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioMix(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+
+    M4OSA_TRACE2_3("  AUDIO mix  : dATo = %f  state = %d  offset = %ld",
+        pC->ewc.dATo, pC->State, pC->pInputClipCtxt->iAoffset);
+
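+    /* The audio is written in three segments: original only until AddCts, then mixed (or copied added) audio, then original again until the output duration is reached */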
+    switch( pC->State )
+    {
+        /**********************************************************/
+        case M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT:
+            {
+                err = M4VSS3GPP_intAudioMixingCopyOrig(pC);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingStepAudioMix:\
+                        M4VSS3GPP_intAudioMixingCopyOrig(1) returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Check if we reached the AddCts */
+                if( pC->ewc.dATo >= pC->iAddCts )
+                {
+                    /**
+                    * First segment is over, state transition to second and return OK */
+                    pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT;
+
+                    /* Transition from reading state to encoding state */
+                    err = M4VSS3GPP_intAudioMixingTransition(pC);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intAudioMixingStepAudioMix(): pre-encode fails err = 0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Return with no error so the step function will be called again */
+                    pC->pAddedClipCtxt->iAoffset =
+                        (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+                    M4OSA_TRACE2_0(
+                        "M4VSS3GPP_intAudioMixingStepAudioMix(): returning M4NO_ERROR (1->2)");
+
+                    return M4NO_ERROR;
+                }
+            }
+            break;
+
+            /**********************************************************/
+        case M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT:
+            {
+                if( M4OSA_TRUE == pC->bAudioMixingIsNeeded ) /**< Mix */
+                {
+                    /**
+                    * Read the added audio AU */
+                    if( pC->ChannelConversion > 0 || pC->b_SSRCneeded == M4OSA_TRUE
+                        || pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3 )
+                    {
+                        /* In case of sampling frequency conversion and/or channel conversion,
+                           the next AU will be read by the
+                           M4VSS3GPP_intAudioMixingDoMixing function */
+                    }
+                    else
+                    {
+                        err =
+                            M4VSS3GPP_intClipReadNextAudioFrame(pC->pAddedClipCtxt);
+
+                        M4OSA_TRACE2_3("E .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                            pC->pAddedClipCtxt->iAudioFrameCts
+                            / pC->pAddedClipCtxt->scale_audio,
+                            pC->pAddedClipCtxt->iAoffset
+                            / pC->pAddedClipCtxt->scale_audio,
+                            pC->pAddedClipCtxt->uiAudioFrameSize);
+
+                        if( M4WAR_NO_MORE_AU == err )
+                        {
+                            /**
+                            * Decide what to do when audio is over */
+                            if( pC->uiEndLoop > 0 )
+                            {
+                                /**
+                                * Jump at the Begin loop time */
+                                M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+                                err = pC->pAddedClipCtxt->ShellAPI.m_pReader->
+                                    m_pFctJump(
+                                    pC->pAddedClipCtxt->pReaderContext,
+                                    (M4_StreamHandler
+                                    *)pC->pAddedClipCtxt->pAudioStream,
+                                    &time);
+
+                                if( M4NO_ERROR != err )
+                                {
+                                    M4OSA_TRACE1_1(
+                                        "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                        m_pReader->m_pFctJump(audio) returns 0x%x",
+                                        err);
+                                    return err;
+                                }
+                            }
+                            else
+                            {
+                                /* Transition from encoding state to reading state */
+                                err = M4VSS3GPP_intAudioMixingTransition(pC);
+
+                                if( M4NO_ERROR != err )
+                                {
+                                    M4OSA_TRACE1_1(
+                                        "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                        pre-encode fails err = 0x%x",
+                                        err);
+                                    return err;
+                                }
+
+                                /**
+                                * Second segment is over, state transition to third and
+                                 return OK */
+                                pC->State =
+                                    M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
+
+                                /**
+                                * Return with no error so the step function will be
+                                 called again */
+                                M4OSA_TRACE2_0(
+                                    "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                    returning M4NO_ERROR (2->3) a");
+                                return M4NO_ERROR;
+                            }
+                        }
+                        else if( M4NO_ERROR != err )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                m_pFctGetNextAu(audio) returns 0x%x",
+                                err);
+                            return err;
+                        }
+                    }
+
+                    /**
+                    * Read the original audio AU */
+                    err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pInputClipCtxt);
+
+                    M4OSA_TRACE2_3("F .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                        pC->pInputClipCtxt->iAudioFrameCts
+                        / pC->pInputClipCtxt->scale_audio,
+                        pC->pInputClipCtxt->iAoffset
+                        / pC->pInputClipCtxt->scale_audio,
+                        pC->pInputClipCtxt->uiAudioFrameSize);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE3_1(
+                            "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                            m_pFctGetNextAu(audio) returns 0x%x",
+                            err);
+                        return err;
+                    }
+
+                    if( pC->ChannelConversion == 0
+                        && pC->b_SSRCneeded == M4OSA_FALSE
+                        && pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.AudioStreamType != M4VIDEOEDITING_kMP3 )
+                    {
+                        /**
+                        * Get the output AU to write into */
+                        err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                            pC->ewc.p3gpWriterContext,
+                            M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                            &pC->ewc.WriterAudioAU);
+
+                        if( M4NO_ERROR != err )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intAudioMixingStepAudioMix:\
+                                pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+                                err);
+                            return err;
+                        }
+                    }
+
+                    /**
+                    * Perform the audio mixing */
+                    err = M4VSS3GPP_intAudioMixingDoMixing(pC);
+
+                    if( err == M4VSS3GPP_WAR_END_OF_ADDED_AUDIO )
+                    {
+                        return M4NO_ERROR;
+                    }
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intAudioMixingStepAudioMix:\
+                            M4VSS3GPP_intAudioMixingDoMixing returns 0x%x!",
+                            err);
+                        return err;
+                    }
+                }
+                else /**< No mix, just copy added audio */
+                {
+                    err = M4VSS3GPP_intAudioMixingCopyAdded(pC);
+
+                    if( M4WAR_NO_MORE_AU == err )
+                    {
+                        /**
+                        * Decide what to do when audio is over */
+                        if( pC->uiEndLoop > 0 )
+                        {
+                            /**
+                            * Jump at the Begin loop time */
+                            M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+                            err =
+                                pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+                                pC->pAddedClipCtxt->pReaderContext,
+                                (M4_StreamHandler
+                                *)pC->pAddedClipCtxt->pAudioStream,
+                                &time);
+
+                            if( M4NO_ERROR != err )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                    m_pReader->m_pFctJump(audio) returns 0x%x",
+                                    err);
+                                return err;
+                            }
+
+                            /**
+                            * 'BZZZ' bug fix:
+                            * add a silence frame */
+                            err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+                            if( M4NO_ERROR != err )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                    M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+                                    err);
+                                return err;
+                            }
+
+                            /**
+                            * Return with no error so the step function will be called again to
+                              read audio data */
+                            pC->pAddedClipCtxt->iAoffset =
+                                (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio
+                                + 0.5);
+
+                            M4OSA_TRACE2_0(
+                                "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                    returning M4NO_ERROR (loop)");
+                            return M4NO_ERROR;
+                        }
+                        else
+                        {
+                            /* Transition to begin cut */
+                            err = M4VSS3GPP_intAudioMixingTransition(pC);
+
+                            if( M4NO_ERROR != err )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                    pre-encode fails err = 0x%x",
+                                    err);
+                                return err;
+                            }
+
+                            /**
+                            * Second segment is over, state transition to third */
+                            pC->State =
+                                M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
+
+                            /**
+                            * Return with no error so the step function will be called again */
+                            M4OSA_TRACE2_0(
+                                "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                returning M4NO_ERROR (2->3) b");
+                            return M4NO_ERROR;
+                        }
+                    }
+                    else if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                            M4VSS3GPP_intAudioMixingCopyAdded returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Check if we reached the end of the video */
+                if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
+                {
+                    M4OSA_TRACE3_0(
+                        "M4VSS3GPP_intAudioMixingStepAudioMix(): Video duration reached,\
+                        returning M4WAR_NO_MORE_AU");
+                    return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
+                }
+            }
+            break;
+
+            /**********************************************************/
+        case M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT:
+            {
+                err = M4VSS3GPP_intAudioMixingCopyOrig(pC);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingStepAudioMix:\
+                        M4VSS3GPP_intAudioMixingCopyOrig(3) returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Check if we reached the end of the video */
+                if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
+                {
+                    M4OSA_TRACE3_0(
+                        "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                        Video duration reached, returning M4WAR_NO_MORE_AU");
+                    return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
+                }
+            }
+            break;
+       default:
+            break;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intAudioMixingStepAudioMix(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioReplace(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Perform one step of audio replacement.
+ * @note
+ * @param    pC            (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
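+/* Replace mode overview (as implemented below): segment 1 writes silence frames until the
+ * added-track start time (iAddCts) is reached, segment 2 copies the added track AUs
+ * (looping back to uiBeginLoop when requested), and segment 3 writes silence again,
+ * provided the output audio format supports a silence frame. */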
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioReplace(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+
+    M4OSA_TRACE2_3("  AUDIO repl : dATo = %f  state = %d  offset = %ld",
+        pC->ewc.dATo, pC->State, pC->pInputClipCtxt->iAoffset);
+
+    switch( pC->State )
+    {
+        /**********************************************************/
+        case M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT:
+            {
+                /**
+                * Replace the SID (silence) payload in the writer AU */
+                err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                        M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Check if we reached the AddCts */
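+                /* iAddCts is the presentation time at which the added track starts;
+                   until then the output audio of this segment is silence only */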
+                if( pC->ewc.dATo >= pC->iAddCts )
+                {
+                    /**
+                    * First segment is over, state transition to second and return OK */
+                    pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT;
+
+                    /**
+                    * Return with no error so the step function will be called again */
+                    pC->pAddedClipCtxt->iAoffset =
+                        (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+                    M4OSA_TRACE2_0("M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                         returning M4NO_ERROR (1->2)");
+                    return M4NO_ERROR;
+                }
+            }
+            break;
+
+            /**********************************************************/
+        case M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT:
+            {
+                err = M4VSS3GPP_intAudioMixingCopyAdded(pC);
+
+                if( M4WAR_NO_MORE_AU == err )
+                {
+                    /**
+                    * Decide what to do when audio is over */
+
+                    if( pC->uiEndLoop > 0 )
+                    {
+                        /**
+                        * Jump at the Begin loop time */
+                        M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+                        err = pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+                            pC->pAddedClipCtxt->pReaderContext,
+                            (M4_StreamHandler
+                            *)pC->pAddedClipCtxt->pAudioStream, &time);
+
+                        if( M4NO_ERROR != err )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                                m_pReader->m_pFctJump(audio) returns 0x%x",
+                                err);
+                            return err;
+                        }
+
+                        /**
+                        * 'BZZZ' bug fix:
+                        * add a silence frame */
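+                        /* presumably the silence frame masks the discontinuity introduced
+                           by jumping back to the loop start, avoiding an audible click */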
+                        err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+                        if( M4NO_ERROR != err )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                                M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+                                err);
+                            return err;
+                        }
+
+                        /**
+                        * Return with no error so the step function will be called again to
+                        * read audio data */
+                        pC->pAddedClipCtxt->iAoffset =
+                            (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+                        M4OSA_TRACE2_0(
+                            "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                            returning M4NO_ERROR (loop)");
+
+                        return M4NO_ERROR;
+                    }
+                    else if( M4OSA_TRUE == pC->bSupportSilence )
+                    {
+                        /**
+                        * Second segment is over, state transition to third and return OK */
+                        pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
+
+                        /**
+                        * Return with no error so the step function will be called again */
+                        M4OSA_TRACE2_0(
+                            "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                                 returning M4NO_ERROR (2->3)");
+                        return M4NO_ERROR;
+                    }
+                    else
+                    {
+                        /**
+                        * The third segment (silence) is only done if supported.
+                        * In other case, we finish here. */
+                        pC->State = M4VSS3GPP_kAudioMixingState_FINISHED;
+
+                        /**
+                        * Return with no error so the step function will be called again */
+                        M4OSA_TRACE2_0(
+                            "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                                 returning M4NO_ERROR (2->F)");
+                        return M4NO_ERROR;
+                    }
+                }
+                else if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                        M4VSS3GPP_intAudioMixingCopyAdded(2) returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Check if we reached the end of the clip */
+                if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
+                {
+                    M4OSA_TRACE3_0(
+                        "M4VSS3GPP_intAudioMixingStepAudioReplace(): Clip duration reached,\
+                        returning M4WAR_NO_MORE_AU");
+                    return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
+                }
+            }
+            break;
+
+            /**********************************************************/
+        case M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT:
+            {
+                /**
+                * Replace the SID (silence) payload in the writer AU */
+                err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                        M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Check if we reached the end of the video */
+                if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
+                {
+                    M4OSA_TRACE3_0(
+                        "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                        Video duration reached, returning M4WAR_NO_MORE_AU");
+                    return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
+                }
+            }
+            break;
+        default:
+            break;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intAudioMixingStepAudioReplace(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCopyOrig(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Read one AU from the original audio file and write it to the output
+ * @note
+ * @param    pC    (IN) VSS audio mixing internal context
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyOrig(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+
+    /**
+    * Read the input original audio AU */
+    err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pInputClipCtxt);
+
+    M4OSA_TRACE2_3("G .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+        pC->pInputClipCtxt->iAudioFrameCts / pC->pInputClipCtxt->scale_audio,
+        pC->pInputClipCtxt->iAoffset / pC->pInputClipCtxt->scale_audio,
+        pC->pInputClipCtxt->uiAudioFrameSize);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intAudioMixingCopyOrig(): m_pFctGetNextAu(audio) returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Get the output AU to write into */
+    err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingCopyOrig: pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Copy the input AU properties to the output AU */
+    pC->ewc.WriterAudioAU.size = pC->pInputClipCtxt->uiAudioFrameSize;
+    pC->ewc.WriterAudioAU.CTS =
+        pC->pInputClipCtxt->iAudioFrameCts + pC->pInputClipCtxt->iAoffset;
+
+    /**
+    * Copy the AU itself */
+    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress,
+        pC->pInputClipCtxt->pAudioFramePtr, pC->ewc.WriterAudioAU.size);
+
+    /**
+    * Write the mixed AU */
+    M4OSA_TRACE2_2("H ---- write : cts  = %ld [ 0x%x ]",
+        (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+        pC->ewc.WriterAudioAU.size);
+
+    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingCopyOrig: pWriterDataFcts->pProcessAU(audio) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Increment the audio CTS for the next step */
+    pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingCopyOrig(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCopyAdded(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Read one AU from the added audio file and write it to the output
+ * @note
+ * @param    pC    (IN) VSS audio mixing internal context
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyAdded(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+
+    if(pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3 ||
+        pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kPCM ||
+        pC->b_SSRCneeded == M4OSA_TRUE ||
+        pC->ChannelConversion > 0)
+    {
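+        /* The added AU cannot be copied as-is in this case (MP3/PCM input, resampling or
+           channel conversion needed): decode/convert the PCM, then re-encode it into the
+           output AU below. Otherwise (else branch) the compressed AU is copied directly. */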
+        M4ENCODER_AudioBuffer pEncInBuffer; /**< Encoder input buffer for api */
+        M4ENCODER_AudioBuffer
+            pEncOutBuffer; /**< Encoder output buffer for api */
+        M4OSA_Time
+            frameTimeDelta; /**< Duration of the encoded (then written) data */
+        M4OSA_MemAddr8 tempPosBuffer;
+
+        err = M4VSS3GPP_intAudioMixingConvert(pC);
+
+        if( err == M4VSS3GPP_WAR_END_OF_ADDED_AUDIO )
+        {
+            M4OSA_TRACE2_0(
+                "M4VSS3GPP_intAudioMixingCopyAdded:\
+                M4VSS3GPP_intAudioMixingConvert end of added file");
+            return M4NO_ERROR;
+        }
+        else if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingCopyAdded:\
+                M4VSS3GPP_intAudioMixingConvert returned 0x%x", err);
+            return err;
+        }
+
+        /**
+        * Get the output AU to write into */
+        err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCopyAdded:\
+                pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /* [Mono] or [Stereo interleaved] : all is in one buffer */
+        pEncInBuffer.pTableBuffer[0] = pC->pSsrcBufferOut;
+        pEncInBuffer.pTableBufferSize[0] =
+            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+        pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+        pEncInBuffer.pTableBufferSize[1] = 0;
+
+        /* Time in ms from data size, because it is PCM16 samples */
+        frameTimeDelta = pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+            / pC->ewc.uiNbChannels;
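+        /* bytes -> 16-bit samples (/ sizeof(short)) -> sample frames (/ channel count);
+           this sample-frame count is then used as the AU duration (CTS increment) below */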
+
+        /**
+        * Prepare output buffer */
+        pEncOutBuffer.pTableBuffer[0] =
+            (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+        pEncOutBuffer.pTableBufferSize[0] = 0;
+
+        M4OSA_TRACE2_0("K **** blend AUs");
+#if 0
+
+        {
+            M4OSA_Char filename[13];
+            M4OSA_Context pGIFFileInDebug = M4OSA_NULL;
+            M4OSA_FilePosition pos = 0;
+
+            sprintf(filename, "toto.pcm");
+
+            err = pC->pOsaFileWritPtr->openWrite(&pGIFFileInDebug, filename,
+                M4OSA_kFileWrite | M4OSA_kFileAppend);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_2("Can't open debug PCM dump file %s, error: 0x%x\n",
+                    filename, err);
+                return err;
+            }
+
+            err = pC->pOsaFileWritPtr->seek(pGIFFileInDebug, M4OSA_kFileSeekEnd,
+                &pos);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_2("Can't seek debug PCM dump file %s, error: 0x%x\n",
+                    filename, err);
+                return err;
+            }
+
+            err = pC->pOsaFileWritPtr->writeData(pGIFFileInDebug,
+                pC->pSsrcBufferOut,
+                pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_2("Can't write debug PCM dump file %s, error: 0x%x\n",
+                    filename, err);
+                return err;
+            }
+
+            err = pC->pOsaFileWritPtr->closeWrite(pGIFFileInDebug);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_2("Can't close debug PCM dump file %s, error: 0x%x\n",
+                    filename, err);
+                return err;
+            }
+        }
+
+#endif
+        /**
+        * Encode the PCM audio */
+
+        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+            pC->ewc.pAudioEncCtxt, &pEncInBuffer, &pEncOutBuffer);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCopyAdded():\
+                pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                err);
+            return err;
+        }
+
+        /**
+        * Set AU cts and size */
+        pC->ewc.WriterAudioAU.size =
+            pEncOutBuffer.pTableBufferSize[0]; /**< Get the size of encoded data */
+        pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
+
+        /* Update decoded buffer here */
+        if( M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0 )
+        {
+            tempPosBuffer = pC->pSsrcBufferOut
+                + pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+            M4OSA_memmove(pC->pSsrcBufferOut, tempPosBuffer,
+                pC->pPosInSsrcBufferOut - tempPosBuffer);
+            pC->pPosInSsrcBufferOut -=
+                pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+        }
+        else
+        {
+            tempPosBuffer = pC->pSsrcBufferIn + pC->minimumBufferIn;
+            M4OSA_memmove(pC->pSsrcBufferIn, tempPosBuffer,
+                pC->pPosInSsrcBufferIn - tempPosBuffer);
+            pC->pPosInSsrcBufferIn -= pC->minimumBufferIn;
+        }
+
+        /**
+        * Write the mixed AU */
+        M4OSA_TRACE2_2("J ---- write : cts  = %ld [ 0x%x ]",
+            (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+            pC->ewc.WriterAudioAU.size);
+
+        err =
+            pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCopyAdded:\
+                pWriterDataFcts->pProcessAU(audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Increment the audio CTS for the next step */
+        pC->ewc.dATo += frameTimeDelta / pC->ewc.scale_audio;
+    }
+    else
+    {
+        /**
+        * Read the added audio AU */
+        err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pAddedClipCtxt);
+
+        M4OSA_TRACE2_3("I .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+            pC->pAddedClipCtxt->iAudioFrameCts
+            / pC->pAddedClipCtxt->scale_audio,
+            pC->pAddedClipCtxt->iAoffset / pC->pAddedClipCtxt->scale_audio,
+            pC->pAddedClipCtxt->uiAudioFrameSize);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intAudioMixingCopyAdded(): m_pFctGetNextAu(audio) returns 0x%x",
+                err);
+            return err;
+        }
+
+        /**
+        * Get the output AU to write into */
+        err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCopyAdded:\
+                pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Copy the input AU properties to the output AU */
+
+        /** The check below prevents issues linked to the pre-allocated max AU size.
+        The max AU size is set from M4VSS3GPP_AUDIO_MAX_AU_SIZE, defined in
+        M4VSS3GPP_InternalConfig.h. If this error occurs, increase the limit set in that file.
+        */
+        if( pC->pAddedClipCtxt->uiAudioFrameSize > pC->ewc.WriterAudioAU.size )
+        {
+            M4OSA_TRACE1_2(
+                "ERROR: audio AU size (%d) to copy larger than allocated one (%d) => abort",
+                pC->pAddedClipCtxt->uiAudioFrameSize,
+                pC->ewc.WriterAudioAU.size);
+            M4OSA_TRACE1_0(
+                "PLEASE CONTACT SUPPORT TO EXTEND MAX AU SIZE IN THE PRODUCT LIBRARY");
+            err = M4ERR_UNSUPPORTED_MEDIA_TYPE;
+            return err;
+        }
+        pC->ewc.WriterAudioAU.size = pC->pAddedClipCtxt->uiAudioFrameSize;
+        pC->ewc.WriterAudioAU.CTS =
+            pC->pAddedClipCtxt->iAudioFrameCts + pC->pAddedClipCtxt->iAoffset;
+
+        /**
+        * Copy the AU itself */
+        M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress,
+            pC->pAddedClipCtxt->pAudioFramePtr, pC->ewc.WriterAudioAU.size);
+
+        /**
+        * Write the mixed AU */
+        M4OSA_TRACE2_2("J ---- write : cts  = %ld [ 0x%x ]",
+            (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+            pC->ewc.WriterAudioAU.size);
+
+        err =
+            pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCopyAdded:\
+                pWriterDataFcts->pProcessAU(audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Increment the audio CTS for the next step */
+        pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingCopyAdded(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR  M4VSS3GPP_intAudioMixingConvert(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Convert the added track PCM to the target sampling frequency / number of channels
+ * @note
+ * @param    pC    (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingConvert(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+    int ssrcErr; /**< Error while ssrc processing */
+    M4OSA_UInt32 uiChannelConvertorNbSamples =
+        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize / sizeof(short)
+        / pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+    M4OSA_MemAddr8 tempPosBuffer;
+
+    M4OSA_UInt32 outFrameCount = uiChannelConvertorNbSamples;
+    /* Do we need to feed SSRC buffer In ? */
+    /**
+    * RC: This is not really optimum (memmove). We should handle this with linked list. */
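+    /* Keep decoding added-track AUs and appending their PCM to pSsrcBufferIn until at least
+       minimumBufferIn bytes are buffered, presumably so the resampler / channel converter
+       below has a full block of input to work on */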
+    while( pC->pPosInSsrcBufferIn - pC->pSsrcBufferIn < (M4OSA_Int32)pC->minimumBufferIn )
+    {
+        /* We need to get more PCM data */
+        if (pC->bNoLooping == M4OSA_TRUE)
+        {
+            err = M4WAR_NO_MORE_AU;
+        }
+        else
+        {
+            err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pAddedClipCtxt);
+        }
+        if( pC->bjumpflag )
+        {
+            /**
+            * Jump at the Begin loop time */
+            M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+            err =
+                pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump\
+                    (pC->pAddedClipCtxt->pReaderContext,
+                     (M4_StreamHandler*)pC->pAddedClipCtxt->pAudioStream, &time);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingConvert():\
+                     m_pReader->m_pFctJump(audio) returns 0x%x", err);
+                return err;
+            }
+            pC->bjumpflag = M4OSA_FALSE;
+        }
+        M4OSA_TRACE2_3("E .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+             pC->pAddedClipCtxt->iAudioFrameCts / pC->pAddedClipCtxt->scale_audio,
+                 pC->pAddedClipCtxt->iAoffset / pC->pAddedClipCtxt->scale_audio,
+                     pC->pAddedClipCtxt->uiAudioFrameSize);
+        if( M4WAR_NO_MORE_AU == err )
+        {
+            if(pC->bNoLooping == M4OSA_TRUE)
+            {
+                pC->uiEndLoop =0; /* Value 0 means no looping is required */
+            }
+            /**
+            * Decide what to do when audio is over */
+            if( pC->uiEndLoop > 0 )
+            {
+                /**
+                * Jump at the Begin loop time */
+                M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+                err = pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+                    pC->pAddedClipCtxt->pReaderContext,
+                    (M4_StreamHandler *)pC->pAddedClipCtxt->
+                    pAudioStream, &time);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingConvert():\
+                        m_pReader->m_pFctJump(audio) returns 0x%x",
+                        err);
+                    return err;
+                }
+            }
+            else
+            {
+                /* Transition from encoding state to reading state */
+                err = M4VSS3GPP_intAudioMixingTransition(pC);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingConvert(): pre-encode fails err = 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Second segment is over, state transition to third and return OK */
+                pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
+
+                /**
+                * Return with no error so the step function will be called again */
+                M4OSA_TRACE2_0(
+                    "M4VSS3GPP_intAudioMixingConvert():\
+                    returning M4VSS3GPP_WAR_END_OF_ADDED_AUDIO (2->3) a");
+                return M4VSS3GPP_WAR_END_OF_ADDED_AUDIO;
+            }
+        }
+        else if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingConvert(): m_pFctGetNextAu(audio) returns 0x%x",
+                err);
+            return err;
+        }
+
+        err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pAddedClipCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingConvert:\
+                M4VSS3GPP_intClipDecodeCurrentAudioFrame(added) returns 0x%x",
+                err);
+            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+        }
+
+        /* Copy decoded data into SSRC buffer in */
+        M4OSA_memcpy(pC->pPosInSsrcBufferIn,
+            pC->pAddedClipCtxt->AudioDecBufferOut.m_dataAddress,
+            pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize);
+        /* Update position pointer into SSRC buffer In */
+
+        pC->pPosInSsrcBufferIn +=
+            pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize;
+    }
+
+    /* Do the resampling / channel conversion if needed (=feed buffer out) */
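+    /* Each resampler pass below consumes iSsrcNbSamplIn input samples per channel from
+       pSsrcBufferIn and produces iSsrcNbSamplOut output samples per channel (see the
+       pointer updates after each LVAudioresample_LowQuality() call) */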
+    if( pC->b_SSRCneeded == M4OSA_TRUE )
+    {
+        pC->ChannelConversion = 0;
+        if( pC->ChannelConversion > 0 )
+        {
+            while( pC->pPosInTempBuffer - pC->pTempBuffer
+                < (M4OSA_Int32)(pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+                *pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels)
+                / pC->ChannelConversion )
+                /* We use the ChannelConversion variable because in case 2
+                   (mono to stereo) we need half as much data */
+            {
+                ssrcErr = 0;
+                M4OSA_memset(pC->pPosInTempBuffer,
+                    (pC->iSsrcNbSamplOut * sizeof(short) * pC->ewc.uiNbChannels),0);
+
+                LVAudioresample_LowQuality((short*)pC->pPosInTempBuffer,
+                    (short*)pC->pSsrcBufferIn,
+                    pC->iSsrcNbSamplOut,
+                    pC->pLVAudioResampler);
+                if( 0 != ssrcErr )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingConvert: SSRC_Process returns 0x%x, returning ",
+                        ssrcErr);
+                    return ssrcErr;
+                }
+
+                pC->pPosInTempBuffer += pC->iSsrcNbSamplOut * sizeof(short)
+                    * pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.uiNbChannels;
+
+                /* Update SSRC bufferIn */
+                tempPosBuffer =
+                    pC->pSsrcBufferIn + (pC->iSsrcNbSamplIn * sizeof(short)
+                    * pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.uiNbChannels);
+                M4OSA_memmove(pC->pSsrcBufferIn, tempPosBuffer,
+                    pC->pPosInSsrcBufferIn - tempPosBuffer);
+                pC->pPosInSsrcBufferIn -= pC->iSsrcNbSamplIn * sizeof(short)
+                    * pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.uiNbChannels;
+            }
+        }
+        else
+        {
+            while( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+                < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+            {
+                ssrcErr = 0;
+                M4OSA_memset(pC->pPosInSsrcBufferOut,
+                    (pC->iSsrcNbSamplOut * sizeof(short) * pC->ewc.uiNbChannels),0);
+
+                LVAudioresample_LowQuality((short*)pC->pPosInSsrcBufferOut,
+                    (short*)pC->pSsrcBufferIn,
+                    pC->iSsrcNbSamplOut,
+                    pC->pLVAudioResampler);
+                if( 0 != ssrcErr )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingConvert: SSRC_Process returns 0x%x, returning ",
+                        ssrcErr);
+                    return ssrcErr;
+                }
+                pC->pPosInSsrcBufferOut +=
+                    pC->iSsrcNbSamplOut * sizeof(short) * pC->ewc.uiNbChannels;
+
+                /* Update SSRC bufferIn */
+                tempPosBuffer =
+                    pC->pSsrcBufferIn + (pC->iSsrcNbSamplIn * sizeof(short)
+                    * pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.uiNbChannels);
+                M4OSA_memmove(pC->pSsrcBufferIn, tempPosBuffer,
+                    pC->pPosInSsrcBufferIn - tempPosBuffer);
+                pC->pPosInSsrcBufferIn -= pC->iSsrcNbSamplIn * sizeof(short)
+                    * pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.uiNbChannels;
+            }
+        }
+
+        /* Convert Stereo<->Mono */
+        switch( pC->ChannelConversion )
+        {
+            case 0: /* No channel conversion */
+                break;
+
+            case 1: /* stereo to mono */
+                if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+                    < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+                {
+                    From2iToMono_16((short *)pC->pTempBuffer,
+                        (short *)pC->pSsrcBufferOut,
+                        (short)(uiChannelConvertorNbSamples));
+                    /* Update pTempBuffer */
+                    tempPosBuffer = pC->pTempBuffer
+                        + (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.
+                        uiNbChannels); /* Buffer is in bytes */
+                    M4OSA_memmove(pC->pTempBuffer, tempPosBuffer,
+                        pC->pPosInTempBuffer - tempPosBuffer);
+                    pC->pPosInTempBuffer -=
+                        (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.uiNbChannels);
+                    pC->pPosInSsrcBufferOut +=
+                        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+                }
+                break;
+
+            case 2: /* mono to stereo */
+                if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+                    < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+                {
+                    MonoTo2I_16((short *)pC->pTempBuffer,
+                        (short *)pC->pSsrcBufferOut,
+                        (short)uiChannelConvertorNbSamples);
+                    tempPosBuffer = pC->pTempBuffer
+                        + (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.uiNbChannels);
+                    M4OSA_memmove(pC->pTempBuffer, tempPosBuffer,
+                        pC->pPosInTempBuffer - tempPosBuffer);
+                    pC->pPosInTempBuffer -=
+                        (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.uiNbChannels);
+                    pC->pPosInSsrcBufferOut +=
+                        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+                }
+                break;
+        }
+    }
+    else if( pC->ChannelConversion > 0 )
+    {
+        //M4OSA_UInt32 uiChannelConvertorNbSamples =
+        // pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize / sizeof(short) /
+        // pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+        /* Convert Stereo<->Mono */
+        switch( pC->ChannelConversion )
+        {
+            case 0: /* No channel conversion */
+                break;
+
+            case 1: /* stereo to mono */
+                if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+                    < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+                {
+                    From2iToMono_16((short *)pC->pSsrcBufferIn,
+                        (short *)pC->pSsrcBufferOut,
+                        (short)(uiChannelConvertorNbSamples));
+                    /* Update pTempBuffer */
+                    tempPosBuffer = pC->pSsrcBufferIn
+                        + (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.
+                        uiNbChannels); /* Buffer is in bytes */
+                    M4OSA_memmove(pC->pSsrcBufferIn, tempPosBuffer,
+                        pC->pPosInSsrcBufferIn - tempPosBuffer);
+                    pC->pPosInSsrcBufferIn -=
+                        (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.uiNbChannels);
+                    pC->pPosInSsrcBufferOut +=
+                        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+                }
+                break;
+
+            case 2: /* mono to stereo */
+                if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+                    < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+                {
+                    MonoTo2I_16((short *)pC->pSsrcBufferIn,
+                        (short *)pC->pSsrcBufferOut,
+                        (short)uiChannelConvertorNbSamples);
+                    tempPosBuffer = pC->pSsrcBufferIn
+                        + (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.uiNbChannels);
+                    M4OSA_memmove(pC->pSsrcBufferIn, tempPosBuffer,
+                        pC->pPosInSsrcBufferIn - tempPosBuffer);
+                    pC->pPosInSsrcBufferIn -=
+                        (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.uiNbChannels);
+                    pC->pPosInSsrcBufferOut +=
+                        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+                }
+                break;
+        }
+    }
+    else
+    {
+        /* No channel conversion nor sampl. freq. conversion needed, just buffer management */
+        pC->pPosInSsrcBufferOut = pC->pPosInSsrcBufferIn;
+    }
+
+    return M4NO_ERROR;
+}
+
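+/* Rough amplitude-to-loudness mapping used by the ducking analysis: each halving of the
+ * 16-bit peak value (one power of two) corresponds to a ~6 dB step, from ~90 dB near full
+ * scale down to 0 for silence. */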
+M4OSA_Int32 M4VSS3GPP_getDecibelSound( M4OSA_UInt32 value )
+{
+    int dbSound = 1;
+
+    if( value == 0 )
+        return 0;
+
+    if( value > 0x4000 && value <= 0x8000 )      // 32768
+        dbSound = 90;
+
+    else if( value > 0x2000 && value <= 0x4000 ) // 16384
+        dbSound = 84;
+
+    else if( value > 0x1000 && value <= 0x2000 ) // 8192
+        dbSound = 78;
+
+    else if( value > 0x0800 && value <= 0x1000 ) // 4096
+        dbSound = 72;
+
+    else if( value > 0x0400 && value <= 0x0800 ) // 2048
+        dbSound = 66;
+
+    else if( value > 0x0200 && value <= 0x0400 ) // 1024
+        dbSound = 60;
+
+    else if( value > 0x0100 && value <= 0x0200 ) // 512
+        dbSound = 54;
+
+    else if( value > 0x0080 && value <= 0x0100 ) // 256
+        dbSound = 48;
+
+    else if( value > 0x0040 && value <= 0x0080 ) // 128
+        dbSound = 42;
+
+    else if( value > 0x0020 && value <= 0x0040 ) // 64
+        dbSound = 36;
+
+    else if( value > 0x0010 && value <= 0x0020 ) // 32
+        dbSound = 30;
+
+    else if( value > 0x0008 && value <= 0x0010 ) //16
+        dbSound = 24;
+
+    else if( value > 0x0007 && value <= 0x0008 ) //8
+        dbSound = 24;
+
+    else if( value > 0x0003 && value <= 0x0007 ) // 4
+        dbSound = 18;
+
+    else if( value > 0x0001 && value <= 0x0003 ) //2
+        dbSound = 12;
+
+    else if( value > 0x000 && value <= 0x0001 )  // 1
+        dbSound = 6;
+
+    else
+        dbSound = 0;
+
+    return dbSound;
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR  M4VSS3GPP_intAudioMixingDoMixing(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Mix the current audio AUs (decode, mix, encode)
+ * @note
+ * @param    pC    (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingDoMixing(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_Int16 *pPCMdata1;
+    M4OSA_Int16 *pPCMdata2;
+    M4OSA_UInt32 uiPCMsize;
+
+    M4ENCODER_AudioBuffer pEncInBuffer;  /**< Encoder input buffer for api */
+    M4ENCODER_AudioBuffer pEncOutBuffer; /**< Encoder output buffer for api */
+    M4OSA_Time
+        frameTimeDelta; /**< Duration of the encoded (then written) data */
+    M4OSA_MemAddr8 tempPosBuffer;
+    /* ducking variable */
+    M4OSA_UInt16 loopIndex = 0;
+    M4OSA_Int16 *pPCM16Sample = M4OSA_NULL;
+    M4OSA_Int32 peakDbValue = 0;
+    M4OSA_Int32 previousDbValue = 0;
+    M4OSA_UInt32 i;
+
+    /**
+    * Decode original audio track AU */
+
+    err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pInputClipCtxt);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingDoMixing:\
+            M4VSS3GPP_intClipDecodeCurrentAudioFrame(orig) returns 0x%x",
+            err);
+        return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+    }
+
+    if( M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0
+        || pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+        == M4VIDEOEDITING_kMP3 )
+    {
+        err = M4VSS3GPP_intAudioMixingConvert(pC);
+
+        if( err == M4VSS3GPP_WAR_END_OF_ADDED_AUDIO )
+        {
+            return err;
+        }
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingDoMixing: M4VSS3GPP_intAudioMixingConvert returned 0x%x",
+                err);
+            return M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE;
+        }
+
+        /**
+        * Get the output AU to write into */
+        err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingDoMixing:\
+                pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        pPCMdata2 = (M4OSA_Int16 *)pC->pSsrcBufferOut;
+    }
+    else
+    {
+        /**
+        * Decode added audio track AU */
+        err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pAddedClipCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingDoMixing:\
+                M4VSS3GPP_intClipDecodeCurrentAudioFrame(added) returns 0x%x",
+                err);
+            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+        }
+
+        /**
+        * Check both clips decoded the same amount of PCM samples */
+        if( pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+            != pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingDoMixing:\
+                both clips AU must have the same decoded PCM size!");
+            return M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE;
+        }
+        pPCMdata2 = (M4OSA_Int16 *)pC->pAddedClipCtxt->AudioDecBufferOut.m_dataAddress;
+    }
+
+    /**
+    * Mix the two decoded PCM audios */
+    pPCMdata1 =
+        (M4OSA_Int16 *)pC->pInputClipCtxt->AudioDecBufferOut.m_dataAddress;
+    uiPCMsize = pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+        / 2; /* buffer size (bytes) to number of samples (int16) */
+
+    if( pC->b_DuckingNeedeed )
+    {
+        loopIndex = 0;
+        peakDbValue = 0;
+        previousDbValue = peakDbValue;
+
+        pPCM16Sample = (M4OSA_Int16 *)pC->pInputClipCtxt->
+            AudioDecBufferOut.m_dataAddress;
+
+        //Calculate the peak value
+        while( loopIndex
+            < pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+            / sizeof(M4OSA_Int16) )
+        {
+            if( pPCM16Sample[loopIndex] >= 0 )
+            {
+                peakDbValue = previousDbValue > pPCM16Sample[loopIndex]
+                ? previousDbValue : pPCM16Sample[loopIndex];
+                previousDbValue = peakDbValue;
+            }
+            else
+            {
+                peakDbValue = previousDbValue > -pPCM16Sample[loopIndex]
+                ? previousDbValue : -pPCM16Sample[loopIndex];
+                previousDbValue = peakDbValue;
+            }
+            loopIndex++;
+        }
+
+        pC->audioVolumeArray[pC->audVolArrIndex] =
+            M4VSS3GPP_getDecibelSound(peakDbValue);
+
+        /* WINDOW_SIZE is 10 by default and check for threshold is done after 10 cycles */
+        if( pC->audVolArrIndex >= WINDOW_SIZE - 1 )
+        {
+            pC->bDoDucking =
+                M4VSS3GPP_isThresholdBreached((M4OSA_Int32 *)&(pC->audioVolumeArray),
+                pC->audVolArrIndex, pC->InDucking_threshold);
+
+            pC->audVolArrIndex = 0;
+        }
+        else
+        {
+            pC->audVolArrIndex++;
+        }
+
+        /*
+        * The logic below controls the mixing weight of the Background Track and the
+        * Primary Track over the window under analysis, fading the Background out and
+        * the Primary in while ducking is active (and the reverse once it stops).
+        *
+        * The fading factor is changed in equal steps over the defined window size.
+        *
+        * Example window size: 25 = 500 ms (window under analysis) / 20 ms (frame duration).
+        */
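+        /* Hypothetical example: with InDucking_lowVolume = 0.1, the factor would move in
+           0.1 steps, so a full fade (1.0 <-> 0.1) spans roughly 9 consecutive mixed frames */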
+
+        if( pC->bDoDucking )
+        {
+            if( pC->duckingFactor
+                > pC->InDucking_lowVolume ) // FADE OUT BG Track
+            {
+                // decrement the ducking factor in steps of InDucking_lowVolume
+                // until the low volume level is reached
+                pC->duckingFactor -= (pC->InDucking_lowVolume);
+            }
+            else
+            {
+                pC->duckingFactor = pC->InDucking_lowVolume;
+            }
+        }
+        else
+        {
+            if( pC->duckingFactor < 1.0 ) // FADE IN BG Track
+            {
+                // increment the ducking factor in steps of InDucking_lowVolume
+                // until the original volume level is reached
+                pC->duckingFactor += (pC->InDucking_lowVolume);
+            }
+            else
+            {
+                pC->duckingFactor = 1.0;
+            }
+        }
+        /* endif - ducking_enable */
+
+        /* Mixing Logic */
+
+        while( uiPCMsize-- > 0 )
+        {
+            M4OSA_Int32 temp;
+
+            /* set vol factor for BT and PT */
+            *pPCMdata2 = (M4OSA_Int16)(*pPCMdata2 * pC->fBTVolLevel);
+
+            *pPCMdata1 = (M4OSA_Int16)(*pPCMdata1 * pC->fPTVolLevel);
+
+            /* mix the two samples */
+
+            *pPCMdata2 = (M4OSA_Int16)(( *pPCMdata2) * (pC->duckingFactor));
+            *pPCMdata1 = (M4OSA_Int16)(*pPCMdata2 / 2 + *pPCMdata1 / 2);
+
+
+            if( *pPCMdata1 < 0 )
+            {
+                temp = -( *pPCMdata1)
+                    * 2; // bring back to the original amplitude level (halved before mixing)
+
+                if( temp > 32767 )
+                {
+                    *pPCMdata1 = -32766; // less than max allowed value
+                }
+                else
+                {
+                    *pPCMdata1 = (M4OSA_Int16)(-temp);
+                }
+            }
+            else
+            {
+                temp = ( *pPCMdata1)
+                    * 2; // bring back to the original amplitude level (halved before mixing)
+
+                if( temp > 32768 )
+                {
+                    *pPCMdata1 = 32767; // less than max allowed value
+                }
+                else
+                {
+                    *pPCMdata1 = (M4OSA_Int16)temp;
+                }
+            }
+
+            pPCMdata2++;
+            pPCMdata1++;
+        }
+    }
+    else
+    {
+        while( uiPCMsize-- > 0 )
+        {
+            /* mix the two samples, weighted by the mix factors and per-track volume levels */
+            *pPCMdata1 = (M4OSA_Int16)(*pPCMdata1 * pC->fOrigFactor * pC->fPTVolLevel
+                + *pPCMdata2 * pC->fAddedFactor * pC->fBTVolLevel );
+
+            pPCMdata1++;
+            pPCMdata2++;
+        }
+    }
+
+    /* Update pC->pSsrcBufferOut buffer */
+
+    if( M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0 )
+    {
+        tempPosBuffer = pC->pSsrcBufferOut
+            + pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+        M4OSA_memmove(pC->pSsrcBufferOut, tempPosBuffer,
+            pC->pPosInSsrcBufferOut - tempPosBuffer);
+        pC->pPosInSsrcBufferOut -=
+            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+    }
+    else if( pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+        == M4VIDEOEDITING_kMP3 )
+    {
+        tempPosBuffer = pC->pSsrcBufferIn
+            + pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+        M4OSA_memmove(pC->pSsrcBufferIn, tempPosBuffer,
+            pC->pPosInSsrcBufferIn - tempPosBuffer);
+        pC->pPosInSsrcBufferIn -=
+            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+    }
+
+    /* [Mono] or [Stereo interleaved] : all is in one buffer */
+    pEncInBuffer.pTableBuffer[0] =
+        pC->pInputClipCtxt->AudioDecBufferOut.m_dataAddress;
+    pEncInBuffer.pTableBufferSize[0] =
+        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+    pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+    pEncInBuffer.pTableBufferSize[1] = 0;
+
+    /* Time in ms from data size, because it is PCM16 samples */
+    frameTimeDelta =
+        pEncInBuffer.pTableBufferSize[0] / sizeof(short) / pC->ewc.uiNbChannels;
+
+    /**
+    * Prepare output buffer */
+    pEncOutBuffer.pTableBuffer[0] =
+        (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+    pEncOutBuffer.pTableBufferSize[0] = 0;
+
+    M4OSA_TRACE2_0("K **** blend AUs");
+
+    /**
+    * Encode the PCM audio */
+    err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(pC->ewc.pAudioEncCtxt,
+        &pEncInBuffer, &pEncOutBuffer);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingDoMixing(): pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Set AU cts and size */
+    pC->ewc.WriterAudioAU.size =
+        pEncOutBuffer.pTableBufferSize[0]; /**< Get the size of encoded data */
+    pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
+
+    /**
+    * Write the AU */
+    M4OSA_TRACE2_2("L ---- write : cts  = %ld [ 0x%x ]",
+        (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+        pC->ewc.WriterAudioAU.size);
+
+    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingDoMixing: pWriterDataFcts->pProcessAU returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Increment the audio CTS for the next step */
+    pC->ewc.dATo += frameTimeDelta / pC->ewc.scale_audio;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingDoMixing(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR  M4VSS3GPP_intAudioMixingTransition(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Decode/encode a few AUs backward to prime the encoder for the upcoming Mix segment.
+ * @note
+ * @param    pC    (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingTransition(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+
+    M4ENCODER_AudioBuffer pEncInBuffer;  /**< Encoder input buffer for api */
+    M4ENCODER_AudioBuffer pEncOutBuffer; /**< Encoder output buffer for api */
+    M4OSA_Time
+        frameTimeDelta = 0; /**< Duration of the encoded (then written) data */
+
+    M4OSA_Int32 iTargetCts, iCurrentCts;
+
+    /**
+    * 'BZZZ' bug fix:
+    * add a silence frame */
+    err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingTransition():\
+            M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+            err);
+        return err;
+    }
+
+    iCurrentCts = (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+    /* Do not do pre-encode step if there is no mixing (remove, 100 %, or not editable) */
+    if( M4OSA_FALSE == pC->bAudioMixingIsNeeded )
+    {
+        /**
+        * Advance in the original audio stream to reach the current time
+        * (We don't want iAudioCTS to be modified by the jump function,
+        * so we have to use a local variable). */
+        err = M4VSS3GPP_intClipJumpAudioAt(pC->pInputClipCtxt, &iCurrentCts);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingTransition:\
+             M4VSS3GPP_intClipJumpAudioAt() returns 0x%x!", err);
+            return err;
+        }
+    }
+    else
+    {
+        /**< don't try to pre-decode if clip is at its beginning... */
+        if( iCurrentCts > 0 )
+        {
+            /**
+            * Get the output AU to write into */
+            err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                &pC->ewc.WriterAudioAU);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingTransition:\
+                    pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+                    err);
+                return err;
+            }
+
+            /**
+            * Jump a few AUs backward */
+            iTargetCts = iCurrentCts - M4VSS3GPP_NB_AU_PREFETCH
+                * pC->ewc.iSilenceFrameDuration;
+
+            if( iTargetCts < 0 )
+            {
+                iTargetCts = 0; /**< Sanity check */
+            }
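+            /* Decoding/encoding the M4VSS3GPP_NB_AU_PREFETCH AUs preceding iCurrentCts
+               presumably warms up the encoder state before the mixed segment starts; the
+               resulting AU is still written below so the audio CTS stays continuous */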
+
+            err = M4VSS3GPP_intClipJumpAudioAt(pC->pInputClipCtxt, &iTargetCts);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingTransition: DECODE_ENCODE-prefetch:\
+                    M4VSS3GPP_intClipJumpAudioAt returns 0x%x!",
+                    err);
+                return err;
+            }
+
+            /**
+            * Decode/encode up to the wanted position */
+            while( pC->pInputClipCtxt->iAudioFrameCts < iCurrentCts )
+            {
+                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pInputClipCtxt);
+
+                M4OSA_TRACE2_3("M .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                    pC->pInputClipCtxt->iAudioFrameCts
+                    / pC->pInputClipCtxt->scale_audio,
+                    pC->pInputClipCtxt->iAoffset
+                    / pC->pInputClipCtxt->scale_audio,
+                    pC->pInputClipCtxt->uiAudioFrameSize);
+
+                if( M4OSA_ERR_IS_ERROR(err) )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingTransition: DECODE_ENCODE-prefetch:\
+                        M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(
+                    pC->pInputClipCtxt);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingTransition: DECODE_ENCODE-prefetch:\
+                        M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /* [Mono] or [Stereo interleaved] : all is in one buffer */
+                pEncInBuffer.pTableBuffer[0] =
+                    pC->pInputClipCtxt->AudioDecBufferOut.m_dataAddress;
+                pEncInBuffer.pTableBufferSize[0] =
+                    pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+                pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+                pEncInBuffer.pTableBufferSize[1] = 0;
+
+                /* Time in ms from data size, because it is PCM16 samples */
+                frameTimeDelta =
+                    pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+                    / pC->ewc.uiNbChannels;
+
+                /**
+                * Prepare output buffer */
+                pEncOutBuffer.pTableBuffer[0] =
+                    (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+                pEncOutBuffer.pTableBufferSize[0] = 0;
+
+                M4OSA_TRACE2_0("N **** pre-encode");
+
+                /**
+                * Encode the PCM audio */
+                err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+                    pC->ewc.pAudioEncCtxt, &pEncInBuffer, &pEncOutBuffer);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingTransition():\
+                        pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                        err);
+                    return err;
+                }
+            }
+
+            /**
+            * Set AU cts and size */
+            pC->ewc.WriterAudioAU.size =
+                pEncOutBuffer.pTableBufferSize[0]; /**< Get the size of encoded data */
+            pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
+
+            /**
+            * Write the AU */
+            M4OSA_TRACE2_2("O ---- write : cts  = %ld [ 0x%x ]",
+                (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+                pC->ewc.WriterAudioAU.size);
+
+            err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+                pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                &pC->ewc.WriterAudioAU);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingTransition:\
+                    pWriterDataFcts->pProcessAU returns 0x%x!", err);
+                return err;
+            }
+
+            /**
+            * Increment the audio CTS for the next step */
+            pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCreateVideoEncoder()
+ * @brief    Creates the video encoder
+ * @note
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCreateVideoEncoder(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+    M4ENCODER_AdvancedParams EncParams;
+
+    /**
+    * Simulate a writer interface with our specific function */
+    pC->ewc.OurWriterDataInterface.pProcessAU =
+        M4VSS3GPP_intProcessAU; /**< This function is VSS 3GPP specific,
+                                but it follows the writer interface */
+    pC->ewc.OurWriterDataInterface.pStartAU =
+        M4VSS3GPP_intStartAU; /**< This function is VSS 3GPP specific,
+                              but it follows the writer interface */
+    pC->ewc.OurWriterDataInterface.pWriterContext =
+        (M4WRITER_Context)
+        pC; /**< We give the internal context as writer context */
+
+    /**
+    * Get the encoder interface, if not already done */
+    if( M4OSA_NULL == pC->ShellAPI.pVideoEncoderGlobalFcts )
+    {
+        err = M4VSS3GPP_setCurrentVideoEncoder(&pC->ShellAPI,
+            pC->ewc.VideoStreamType);
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingCreateVideoEncoder: setCurrentEncoder returns 0x%x",
+            err);
+        M4ERR_CHECK_RETURN(err);
+    }
+
+    /**
+    * Set encoder shell parameters according to VSS settings */
+
+    /* Common parameters */
+    EncParams.InputFormat = M4ENCODER_kIYUV420;
+    EncParams.FrameWidth = pC->ewc.uiVideoWidth;
+    EncParams.FrameHeight = pC->ewc.uiVideoHeight;
+    EncParams.uiTimeScale = pC->ewc.uiVideoTimeScale;
+
+    /* No strict regulation in video editor */
+    /* Because of the effects and transitions we should allow more flexibility */
+    /* It also prevents dropping important frames
+      (with a bad result on scheduling and block effects) */
+    EncParams.bInternalRegulation = M4OSA_FALSE;
+    EncParams.FrameRate = M4ENCODER_kVARIABLE_FPS;
+
+    /**
+    * Other encoder settings (defaults) */
+    EncParams.uiHorizontalSearchRange = 0;     /* use default */
+    EncParams.uiVerticalSearchRange = 0;       /* use default */
+    EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
+    EncParams.uiIVopPeriod = 0;                /* use default */
+    EncParams.uiMotionEstimationTools = 0;     /* M4V_MOTION_EST_TOOLS_ALL */
+    EncParams.bAcPrediction = M4OSA_TRUE;      /* use AC prediction */
+    EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
+    EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */
+
+    switch( pC->ewc.VideoStreamType )
+    {
+        case M4SYS_kH263:
+
+            EncParams.Format = M4ENCODER_kH263;
+
+            EncParams.uiStartingQuantizerValue = 10;
+            EncParams.uiRateFactor = 1; /* default */
+
+            EncParams.bErrorResilience = M4OSA_FALSE;
+            EncParams.bDataPartitioning = M4OSA_FALSE;
+            break;
+
+        case M4SYS_kMPEG_4:
+
+            EncParams.Format = M4ENCODER_kMPEG4;
+
+            EncParams.uiStartingQuantizerValue = 8;
+            EncParams.uiRateFactor = 1;
+
+            if( M4OSA_FALSE == pC->ewc.bVideoDataPartitioning )
+            {
+                EncParams.bErrorResilience = M4OSA_FALSE;
+                EncParams.bDataPartitioning = M4OSA_FALSE;
+            }
+            else
+            {
+                EncParams.bErrorResilience = M4OSA_TRUE;
+                EncParams.bDataPartitioning = M4OSA_TRUE;
+            }
+            break;
+
+        case M4SYS_kH264:
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingCreateVideoEncoder: M4SYS_H264");
+
+            EncParams.Format = M4ENCODER_kH264;
+
+            EncParams.uiStartingQuantizerValue = 10;
+            EncParams.uiRateFactor = 1; /* default */
+
+            EncParams.bErrorResilience = M4OSA_FALSE;
+            EncParams.bDataPartitioning = M4OSA_FALSE;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCreateVideoEncoder: Unknown videoStreamType 0x%x",
+                pC->ewc.VideoStreamType);
+            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
+    }
+
+    /* In case of EMP we overwrite certain parameters */
+    if( M4OSA_TRUE == pC->ewc.bActivateEmp )
+    {
+        EncParams.uiHorizontalSearchRange = 15;    /* set value */
+        EncParams.uiVerticalSearchRange = 15;      /* set value */
+        EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
+        EncParams.uiIVopPeriod = 15; /* one I frame every 15 frames */
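+        /* (e.g. at 15 fps that is roughly one I frame per second) */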
+        EncParams.uiMotionEstimationTools = 1; /* M4V_MOTION_EST_TOOLS_NO_4MV */
+        EncParams.bAcPrediction = M4OSA_FALSE;     /* no AC prediction */
+        EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
+        EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */
+    }
+
+    EncParams.Bitrate =
+        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoBitrate;
+
+    M4OSA_TRACE1_0(
+        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: calling encoder pFctInit");
+    /**
+    * Init the video encoder (advanced settings version of the encoder Open function) */
+    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctInit(&pC->ewc.pEncContext,
+        &pC->ewc.OurWriterDataInterface, M4VSS3GPP_intVPP, pC,
+        pC->ShellAPI.pCurrentVideoEncoderExternalAPI,
+        pC->ShellAPI.pCurrentVideoEncoderUserData);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingCreateVideoEncoder:\
+            pVideoEncoderGlobalFcts->pFctInit returns 0x%x",
+            err);
+        return err;
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
+    M4OSA_TRACE1_0(
+        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: calling encoder pFctOpen");
+
+    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctOpen(pC->ewc.pEncContext,
+        &pC->ewc.WriterVideoAU, &EncParams);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingCreateVideoEncoder:\
+            pVideoEncoderGlobalFcts->pFctOpen returns 0x%x",
+            err);
+        return err;
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
+    M4OSA_TRACE1_0(
+        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: calling encoder pFctStart");
+
+    if( M4OSA_NULL != pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart )
+    {
+        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart(
+            pC->ewc.pEncContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCreateVideoEncoder:\
+                pVideoEncoderGlobalFcts->pFctStart returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderRunning;
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingDestroyVideoEncoder()
+ * @brief    Destroy the video encoder
+ * @note
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingDestroyVideoEncoder(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    if( M4OSA_NULL != pC->ewc.pEncContext )
+    {
+        if( M4VSS3GPP_kEncoderRunning == pC->ewc.encoderState )
+        {
+            if( pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
+            {
+                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop(
+                    pC->ewc.pEncContext);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingDestroyVideoEncoder:\
+                        pVideoEncoderGlobalFcts->pFctStop returns 0x%x",
+                        err);
+                }
+            }
+
+            pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
+        }
+
+        /* Has the encoder actually been opened? Don't close it if that's not the case. */
+        if( M4VSS3GPP_kEncoderStopped == pC->ewc.encoderState )
+        {
+            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctClose(
+                pC->ewc.pEncContext);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingDestroyVideoEncoder:\
+                    pVideoEncoderGlobalFcts->pFctClose returns 0x%x",
+                    err);
+            }
+
+            pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
+        }
+
+        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctCleanup(
+            pC->ewc.pEncContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingDestroyVideoEncoder:\
+                pVideoEncoderGlobalFcts->pFctCleanup returns 0x%x!",
+                err);
+            /**< We do not return the error here because we still have stuff to free */
+        }
+
+        pC->ewc.encoderState = M4VSS3GPP_kNoEncoder;
+        /**
+        * Reset variable */
+        pC->ewc.pEncContext = M4OSA_NULL;
+    }
+
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_intAudioMixingDestroyVideoEncoder: returning 0x%x", err);
+    return err;
+}
+
+M4OSA_Bool M4VSS3GPP_isThresholdBreached( M4OSA_Int32 *averageValue,
+                                         M4OSA_Int32 storeCount, M4OSA_Int32 thresholdValue )
+{
+    M4OSA_Bool result = 0;
+    int i;
+    int finalValue = 0;
+
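+    /* Note: callers are expected to pass storeCount > 0; the average below divides by it. */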
+    for ( i = 0; i < storeCount; i++ )
+        finalValue += averageValue[i];
+
+    finalValue = finalValue / storeCount;
+
+    if( finalValue > thresholdValue )
+        result = M4OSA_TRUE;
+    else
+        result = M4OSA_FALSE;
+
+    return result;
+}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_Clip.c b/libvideoeditor/vss/src/M4VSS3GPP_Clip.c
new file mode 100755
index 0000000..0a3b737
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_Clip.c
@@ -0,0 +1,2035 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_Clip.c
+ * @brief    Implementation of functions related to input clip management.
+ * @note    All functions in this file are static, i.e. non public
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ *    Our headers */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+
+/**
+ *    OSAL headers */
+#include "M4OSA_Memory.h" /* OSAL memory management */
+#include "M4OSA_Debug.h"  /* OSAL debug management */
+
+
+/**
+ * Common headers (for aac) */
+#include "M4_Common.h"
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#include "M4VD_EXTERNAL_Interface.h"
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+/* OSAL character string header */
+#include "M4OSA_CharStar.h"
+
+/**
+ ******************************************************************************
+ * define    Static function prototypes
+ ******************************************************************************
+ */
+
+static M4OSA_ERR M4VSS3GPP_intClipPrepareAudioDecoder(
+    M4VSS3GPP_ClipContext *pClipCtxt );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipInit()
+ * @brief    Allocates and initializes an internal clip context.
+ * @note
+ * @param   hClipCtxt            (OUT) Return the internal clip context
+ * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
+ * @return    M4NO_ERROR:                No error
+ * @return    M4ERR_ALLOC:            There is no more available memory
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4VSS3GPP_intClipInit( M4VSS3GPP_ClipContext ** hClipCtxt,
+                                M4OSA_FileReadPointer *pFileReadPtrFct )
+{
+    M4VSS3GPP_ClipContext *pClipCtxt;
+    M4OSA_ERR err;
+
+    M4OSA_DEBUG_IF2((M4OSA_NULL == hClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipInit: hClipCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipInit: pFileReadPtrFct is M4OSA_NULL");
+
+    /**
+    * Allocate the clip context */
+    *hClipCtxt =
+        (M4VSS3GPP_ClipContext *)M4OSA_malloc(sizeof(M4VSS3GPP_ClipContext),
+        M4VSS3GPP, (M4OSA_Char *)"M4VSS3GPP_ClipContext");
+
+    if( M4OSA_NULL == *hClipCtxt )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intClipInit(): unable to allocate M4VSS3GPP_ClipContext,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+    M4OSA_TRACE3_1("M4VSS3GPP_intClipInit(): clipCtxt=0x%x", *hClipCtxt);
+
+
+    /**
+    * Use this shortcut to simplify the code */
+    pClipCtxt = *hClipCtxt;
+
+    /* Initialization of context variables */
+    M4OSA_memset((M4OSA_MemAddr8)pClipCtxt, sizeof(M4VSS3GPP_ClipContext), 0);
+
+    pClipCtxt->pSettings = M4OSA_NULL;
+
+    /**
+    * Init the clip context */
+    pClipCtxt->Vstatus = M4VSS3GPP_kClipStatus_READ;
+    pClipCtxt->Astatus = M4VSS3GPP_kClipStatus_READ;
+
+    pClipCtxt->pReaderContext = M4OSA_NULL;
+    pClipCtxt->pVideoStream = M4OSA_NULL;
+    pClipCtxt->pAudioStream = M4OSA_NULL;
+    pClipCtxt->VideoAU.m_dataAddress = M4OSA_NULL;
+    pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
+
+    pClipCtxt->pViDecCtxt = M4OSA_NULL;
+    pClipCtxt->bVideoAuAvailable = M4OSA_FALSE;
+    pClipCtxt->bFirstAuWritten = M4OSA_FALSE;
+
+    pClipCtxt->bMpeg4GovState = M4OSA_FALSE;
+
+    pClipCtxt->bAudioFrameAvailable = M4OSA_FALSE;
+    pClipCtxt->pAudioFramePtr = M4OSA_NULL;
+    pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+
+    pClipCtxt->pFileReadPtrFct = pFileReadPtrFct;
+
+    /*
+    * Reset pointers for media and codecs interfaces */
+    err = M4VSS3GPP_clearInterfaceTables(&pClipCtxt->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /*
+    *  Call the media and codecs subscription module */
+    err = M4VSS3GPP_subscribeMediaAndCodec(&pClipCtxt->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    return M4NO_ERROR;
+}
+
+/* Note: if the clip is opened in fast mode, it can only be used for analysis and nothing else. */
+M4OSA_ERR M4VSS3GPP_intClipOpen( M4VSS3GPP_ClipContext *pClipCtxt,
+                                M4VSS3GPP_ClipSettings *pClipSettings, M4OSA_Bool bSkipAudioTrack,
+                                M4OSA_Bool bFastOpenMode, M4OSA_Bool bAvoidOpeningVideoDec )
+{
+    M4OSA_ERR err;
+    M4READER_MediaFamily mediaFamily;
+    M4_StreamHandler *pStreamHandler;
+    M4OSA_Int32 iDuration;
+    M4OSA_Void *decoderUserData;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    M4DECODER_MPEG4_DecoderConfigInfo dummy;
+    M4DECODER_VideoSize videoSizeFromDSI;
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    M4DECODER_OutputFilter FilterOption;
+    M4OSA_Char pTempFile[100];
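+    /* Note: pTempFile is 100 bytes; for PCM background tracks the clip path plus the
+       "_<samplingFreq>_<nbChannels>.pcm" suffix built below must fit in this buffer. */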
+
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipOpen: pClipCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipOpen: pClipSettings is M4OSA_NULL");
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_intClipOpen: called with pClipCtxt: 0x%x, bAvoidOpeningVideoDec=0x%x",
+        pClipCtxt, bAvoidOpeningVideoDec);
+    /**
+    * Keep a pointer to the clip settings. Remember that we don't possess it! */
+    pClipCtxt->pSettings = pClipSettings;
+
+    /**
+    * Get the correct reader interface */
+    err = M4VSS3GPP_setCurrentReader(&pClipCtxt->ShellAPI,
+        pClipCtxt->pSettings->FileType);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Init the 3GPP or MP3 reader */
+    err =
+        pClipCtxt->ShellAPI.m_pReader->m_pFctCreate(&pClipCtxt->pReaderContext);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctCreate returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Link the reader interface to the reader context (used by the decoder to know the reader) */
+    pClipCtxt->ShellAPI.m_pReaderDataIt->m_readerContext =
+        pClipCtxt->pReaderContext;
+
+    /**
+    * Set the OSAL read function set */
+    err = pClipCtxt->ShellAPI.m_pReader->m_pFctSetOption(
+        pClipCtxt->pReaderContext,
+        M4READER_kOptionID_SetOsaFileReaderFctsPtr,
+        (M4OSA_DataOption)(pClipCtxt->pFileReadPtrFct));
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctSetOption returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Set the fast open mode if asked (3GPP only) */
+    if( M4VIDEOEDITING_kFileType_3GPP == pClipCtxt->pSettings->FileType )
+    {
+        if( M4OSA_TRUE == bFastOpenMode )
+        {
+            err = pClipCtxt->ShellAPI.m_pReader->m_pFctSetOption(
+                pClipCtxt->pReaderContext,
+                M4READER_3GP_kOptionID_FastOpenMode, M4OSA_NULL);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipOpen():\
+                    m_pReader->m_pFctSetOption(FastOpenMode) returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+
+        /**
+        * Set the skip audio option if asked */
+        if( M4OSA_TRUE == bSkipAudioTrack )
+        {
+            err = pClipCtxt->ShellAPI.m_pReader->m_pFctSetOption(
+                pClipCtxt->pReaderContext,
+                M4READER_3GP_kOptionID_VideoOnly, M4OSA_NULL);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctSetOption(VideoOnly) returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+    }
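+    /* For raw PCM background tracks, the reader is opened on a derived file name of the form
+       "<pFile>_<samplingFreq>_<nbChannels>.pcm", built below from the clip properties
+       (e.g. a 44.1 kHz stereo track "bg.pcm" would be opened as "bg.pcm_44100_2.pcm";
+       the "bg.pcm" name is only an illustrative example). */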
+    if(pClipCtxt->pSettings->FileType == M4VIDEOEDITING_kFileType_PCM)
+    {
+        M4OSA_chrNCopy(pTempFile,pClipSettings->pFile,M4OSA_chrLength(pClipSettings->pFile));
+
+        switch (pClipCtxt->pSettings->ClipProperties.uiSamplingFrequency)
+        {
+            case 8000:
+                M4OSA_chrNCat(pTempFile,(M4OSA_Char *)"_8000",6);
+                break;
+            case 11025:
+                M4OSA_chrNCat(pTempFile,(M4OSA_Char *)"_11025",6);
+                break;
+            case 12000:
+                M4OSA_chrNCat(pTempFile,(M4OSA_Char *)"_12000",6);
+                break;
+            case 16000:
+                M4OSA_chrNCat(pTempFile,(M4OSA_Char *)"_16000",6);
+                break;
+            case 22050:
+                M4OSA_chrNCat(pTempFile,(M4OSA_Char *)"_22050",6);
+                break;
+            case 24000:
+                M4OSA_chrNCat(pTempFile,(M4OSA_Char *)"_24000",6);
+                break;
+            case 32000:
+                M4OSA_chrNCat(pTempFile,(M4OSA_Char *)"_32000",6);
+                break;
+            case 44100:
+                M4OSA_chrNCat(pTempFile,(M4OSA_Char *)"_44100",6);
+                break;
+            case 48000:
+                M4OSA_chrNCat(pTempFile,(M4OSA_Char *)"_48000",6);
+                break;
+            default:
+                M4OSA_TRACE1_1("M4VSS3GPP_intClipOpen: invalid BG track sampling \
+                    frequency (%d Hz), returning \
+                    M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY",
+                    pClipCtxt->pSettings->ClipProperties.uiSamplingFrequency );
+                return M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY;
+        }
+
+        //M4OSA_chrNCat(pTempFile,
+        //    itoa(pClipCtxt->pSettings->ClipProperties.uiSamplingFrequency),5);
+        switch(pClipCtxt->pSettings->ClipProperties.uiNbChannels)
+        {
+            case 1:
+                M4OSA_chrNCat(pTempFile,(M4OSA_Char *)"_1.pcm",6);
+                break;
+            case 2:
+                M4OSA_chrNCat(pTempFile,(M4OSA_Char *)"_2.pcm",6);
+                break;
+            default:
+                M4OSA_TRACE1_1("M4VSS3GPP_intClipOpen: invalid BG track number \
+                    of channels (%d), returning \
+                    M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS",
+                    pClipCtxt->pSettings->ClipProperties.uiNbChannels);
+                return M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS;
+        }
+        //M4OSA_chrNCat(pTempFile,itoa(pClipCtxt->pSettings->ClipProperties.uiNbChannels),1);
+
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctOpen( pClipCtxt->pReaderContext, pTempFile);
+
+    }
+    else
+    {
+        /**
+        * Open the 3GPP/MP3 clip file */
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctOpen( pClipCtxt->pReaderContext,
+             pClipSettings->pFile);
+    }
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_UInt32 uiDummy, uiCoreId;
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctOpen returns 0x%x", err);
+
+        /**
+        * If the error is from the core reader, we change it to a public VSS3GPP error */
+        M4OSA_ERR_SPLIT(err, uiDummy, uiCoreId, uiDummy);
+
+        if( M4MP4_READER == uiCoreId )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intClipOpen(): returning M4VSS3GPP_ERR_INVALID_3GPP_FILE");
+            return M4VSS3GPP_ERR_INVALID_3GPP_FILE;
+        }
+        return err;
+    }
+
+    /**
+    * Get the audio and video streams */
+    while( err == M4NO_ERROR )
+    {
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctGetNextStream(
+            pClipCtxt->pReaderContext, &mediaFamily, &pStreamHandler);
+
+        /*in case we found a BIFS stream or something else...*/
+        if( ( err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE))
+            || (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)) )
+        {
+            err = M4NO_ERROR;
+            continue;
+        }
+
+        if( M4NO_ERROR == err ) /**< One stream found */
+        {
+            /**
+            * Found a video stream */
+            if( ( mediaFamily == M4READER_kMediaFamilyVideo)
+                && (M4OSA_NULL == pClipCtxt->pVideoStream) )
+            {
+                if( ( M4DA_StreamTypeVideoH263 == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeVideoMpeg4
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeVideoMpeg4Avc
+                    == pStreamHandler->m_streamType) )
+                {
+                    M4OSA_TRACE3_1(
+                        "M4VSS3GPP_intClipOpen():\
+                        Found a H263 or MPEG-4 or H264 video stream in input 3gpp clip; %d",
+                        pStreamHandler->m_streamType);
+
+                    /**
+                    * Keep pointer to the video stream */
+                    pClipCtxt->pVideoStream =
+                        (M4_VideoStreamHandler *)pStreamHandler;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+
+                    /**
+                    * Reset the stream reader */
+                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctReset(
+                        pClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pClipCtxt->pVideoStream);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctReset(video) returns 0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Initializes an access Unit */
+                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctFillAuStruct(
+                        pClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pClipCtxt->pVideoStream,
+                        &pClipCtxt->VideoAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipOpen():\
+                            m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+                else /**< Not H263, MPEG-4 or H264 */
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intClipOpen():\
+                        Found an unsupported video stream (0x%x) in input 3gpp clip",
+                        pStreamHandler->m_streamType);
+
+                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+                }
+            }
+            /**
+            * Found an audio stream */
+            else if( ( mediaFamily == M4READER_kMediaFamilyAudio)
+                && (M4OSA_NULL == pClipCtxt->pAudioStream) )
+            {
+                if( ( M4DA_StreamTypeAudioAmrNarrowBand
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioAac == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioMp3
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioEvrc
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioPcm
+                    == pStreamHandler->m_streamType) )
+                {
+                    M4OSA_TRACE3_1(
+                        "M4VSS3GPP_intClipOpen(): \
+                        Found an AMR-NB or AAC or MP3 audio stream in input clip; %d",
+                        pStreamHandler->m_streamType);
+
+                    /**
+                    * Keep pointer to the audio stream */
+                    pClipCtxt->pAudioStream =
+                        (M4_AudioStreamHandler *)pStreamHandler;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+
+                    /**
+                    * Reset the stream reader */
+                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctReset(
+                        pClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pClipCtxt->pAudioStream);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctReset(audio) returns 0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Initializes an access Unit */
+                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctFillAuStruct(
+                        pClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pClipCtxt->pAudioStream,
+                        &pClipCtxt->AudioAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipOpen():\
+                            m_pReader->m_pFctFillAuStruct(audio) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+                else /**< Not AMR-NB or AAC (AMR-WB...) */
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intClipOpen():\
+                        Found an unsupported audio stream (0x%x) in input 3gpp/mp3 clip",
+                        pStreamHandler->m_streamType);
+
+                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+                }
+            }
+        }
+        else if( M4OSA_ERR_IS_ERROR(err) )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctGetNextStream() returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Init Video decoder */
+    if( M4OSA_NULL != pClipCtxt->pVideoStream )
+    {
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+  /* If external decoders are possible, it is best not to open the decoder when the clip is only
+  going to be used for analysis: the analysis will not rely on it when an external decoder is a
+  possibility, and at this point there may be no decoder at all, or only the HW decoder, which
+  we do not want to open just for that. See the comments in intBuildAnalysis for more details. */
+
+  /* CHANGEME: temporarily only done for MPEG-4, since for now only MPEG-4 external decoders are
+  supported and the code below would not work for H263; otherwise a release where external
+  decoders are possible but not used would break for H263 clips. */
+
+        if( bAvoidOpeningVideoDec && M4DA_StreamTypeVideoMpeg4
+            == pClipCtxt->pVideoStream->m_basicProperties.m_streamType )
+        {
+            /* Oops! The mere act of opening the decoder also results in the image size being
+            filled in the video stream! Compensate for this by using ParseVideoDSI to fill
+            this info. */
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipOpen: Mpeg4 stream; vid dec not started");
+            err = M4DECODER_EXTERNAL_ParseVideoDSI(pClipCtxt->pVideoStream->
+                m_basicProperties.m_pDecoderSpecificInfo,
+                pClipCtxt->pVideoStream->
+                m_basicProperties.m_decoderSpecificInfoSize,
+                &dummy, &videoSizeFromDSI);
+
+            pClipCtxt->pVideoStream->m_videoWidth = videoSizeFromDSI.m_uiWidth;
+            pClipCtxt->pVideoStream->m_videoHeight =
+                videoSizeFromDSI.m_uiHeight;
+        }
+        else
+        {
+
+#endif
+
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipOpen: Mp4/H263/H264 stream; set current vid dec");
+            err = M4VSS3GPP_setCurrentVideoDecoder(&pClipCtxt->ShellAPI,
+                pClipCtxt->pVideoStream->m_basicProperties.m_streamType);
+            M4ERR_CHECK_RETURN(err);
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+            decoderUserData =
+                pClipCtxt->ShellAPI.m_pCurrentVideoDecoderUserData;
+
+#else
+
+            decoderUserData = M4OSA_NULL;
+
+#endif
+
+            err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctCreate(
+                &pClipCtxt->pViDecCtxt,
+                &pClipCtxt->pVideoStream->m_basicProperties,
+                pClipCtxt->ShellAPI.m_pReaderDataIt,
+                &pClipCtxt->VideoAU, decoderUserData);
+
+            if( ( ((M4OSA_UInt32)M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED) == err)
+                || (((M4OSA_UInt32)M4ERR_DECODER_H263_NOT_BASELINE) == err) )
+            {
+                /**
+                * Our decoder is not compatible with H263 profile other than 0.
+                * So it returns this internal error code.
+                * We translate it to our own error code */
+                return M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipOpen: m_pVideoDecoder->m_pFctCreate returns 0x%x",
+                    err);
+                return err;
+            }
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intClipOpen: Vid dec started; pViDecCtxt=0x%x",
+                pClipCtxt->pViDecCtxt);
+
+            if( M4DA_StreamTypeVideoMpeg4Avc
+                == pClipCtxt->pVideoStream->m_basicProperties.m_streamType )
+            {
+                FilterOption.m_pFilterFunction =
+                    (M4OSA_Void *) &M4VIFI_ResizeBilinearYUV420toYUV420;
+                FilterOption.m_pFilterUserData = M4OSA_NULL;
+                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
+                    pClipCtxt->pViDecCtxt, M4DECODER_kOptionID_OutputFilter,
+                    (M4OSA_DataOption) &FilterOption);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intClipOpen: m_pVideoDecoder->m_pFctSetOption returns 0x%x",
+                        err);
+                    return err;
+                }
+                else
+                {
+                    M4OSA_TRACE3_0(
+                        "M4VSS3GPP_intClipOpen: m_pVideoDecoder->m_pFctSetOption\
+                        M4DECODER_kOptionID_OutputFilter OK");
+                }
+            }
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+        }
+
+#endif
+
+    }
+
+    /**
+    * Init Audio decoder */
+    if( M4OSA_NULL != pClipCtxt->pAudioStream )
+    {
+        err = M4VSS3GPP_intClipPrepareAudioDecoder(pClipCtxt);
+        M4ERR_CHECK_RETURN(err);
+        M4OSA_TRACE3_1("M4VSS3GPP_intClipOpen: Audio dec started; context=0x%x",
+            pClipCtxt->pAudioDecCtxt);
+    }
+    else
+    {
+        pClipCtxt->AudioAU.m_streamID = 0;
+        pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
+        pClipCtxt->AudioAU.m_size = 0;
+        pClipCtxt->AudioAU.m_CTS = 0;
+        pClipCtxt->AudioAU.m_DTS = 0;
+        pClipCtxt->AudioAU.m_attribute = 0;
+        pClipCtxt->AudioAU.m_maxsize = 0;
+        pClipCtxt->AudioAU.m_structSize = sizeof(pClipCtxt->AudioAU);
+    }
+
+    /**
+    * Get the duration of the longest stream */
+    if( M4OSA_TRUE == pClipCtxt->pSettings->ClipProperties.bAnalysed )
+    {
+        /* If it was already computed, reuse the previous value: */
+        /* fast open and full open can return different durations,
+           which could mismatch the user settings */
+        /* The video track matters more than the audio track (if the video track is shorter
+           than the audio track, using the audio duration can lead to a cut larger than expected) */
+        iDuration = pClipCtxt->pSettings->ClipProperties.uiClipVideoDuration;
+
+        if( iDuration == 0 )
+        {
+            iDuration = pClipCtxt->pSettings->ClipProperties.uiClipDuration;
+        }
+    }
+    else
+    {
+        /* Else compute it from streams */
+        iDuration = 0;
+
+        if( M4OSA_NULL != pClipCtxt->pVideoStream )
+        {
+            iDuration = (M4OSA_Int32)(
+                pClipCtxt->pVideoStream->m_basicProperties.m_duration);
+        }
+
+        if( ( M4OSA_NULL != pClipCtxt->pAudioStream) && ((M4OSA_Int32)(
+            pClipCtxt->pAudioStream->m_basicProperties.m_duration)
+            > iDuration) && iDuration == 0 )
+        {
+            iDuration = (M4OSA_Int32)(
+                pClipCtxt->pAudioStream->m_basicProperties.m_duration);
+        }
+    }
+
+    /**
+    * If end time is not used, we set it to the video track duration */
+    if( 0 == pClipCtxt->pSettings->uiEndCutTime )
+    {
+        pClipCtxt->pSettings->uiEndCutTime = (M4OSA_UInt32)iDuration;
+    }
+
+    pClipCtxt->iEndTime = pClipCtxt->pSettings->uiEndCutTime;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intClipOpen(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack()
+ * @brief    Delete the audio track. Clip will be like if it had no audio track
+ * @note
+ * @param   pClipCtxt            (IN) Internal clip context
+ ******************************************************************************
+ */
+M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack( M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    /**
+    * We do not free the audio stream here: it will be freed by the reader when it is closed */
+    pClipCtxt->pAudioStream = M4OSA_NULL;
+
+    /**
+    * We will return a constant silence AMR AU.
+    * We set it here once, instead of at each read audio step. */
+    pClipCtxt->pAudioFramePtr = (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
+    pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
+
+    /**
+    * Free the decoded audio buffer (it may need to be re-allocated later to store a silence
+      frame) */
+    if( M4OSA_NULL != pClipCtxt->AudioDecBufferOut.m_dataAddress )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pClipCtxt->AudioDecBufferOut.m_dataAddress);
+        pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+    }
+
+    return;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCts()
+ * @brief    Jump to the previous RAP and decode up to the current video time
+ * @param   pClipCtxt    (IN) Internal clip context
+ * @param   iCts        (IN) Target CTS
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCts( M4VSS3GPP_ClipContext *pClipCtxt,
+                                              M4OSA_Int32 iCts )
+{
+    M4OSA_Int32 iRapCts, iClipCts;
+    M4_MediaTime dDecodeTime;
+    M4OSA_Bool bClipJump = M4OSA_FALSE;
+    M4OSA_ERR err;
+
+    /**
+    * Compute the time in the clip base */
+    iClipCts = iCts - pClipCtxt->iVoffset;
+
+    /**
+    * If we were reading the clip, we must jump to the previous RAP
+    * to decode from that point. */
+    if( M4VSS3GPP_kClipStatus_READ == pClipCtxt->Vstatus )
+    {
+        /**
+        * Jump to the previous RAP in the clip (first get the time, then jump) */
+        iRapCts = iClipCts;
+
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctGetPrevRapTime(
+            pClipCtxt->pReaderContext,
+            (M4_StreamHandler *)pClipCtxt->pVideoStream, &iRapCts);
+
+        if( M4WAR_READER_INFORMATION_NOT_PRESENT == err )
+        {
+            /* No RAP table, jump backward and predecode */
+            iRapCts = iClipCts - M4VSS3GPP_NO_STSS_JUMP_POINT;
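+            /* i.e. without an stss table we cannot locate the exact previous sync sample, so we
+            simply go back by a fixed offset (M4VSS3GPP_NO_STSS_JUMP_POINT) and decode forward
+            from there. */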
+
+            if( iRapCts < 0 )
+                iRapCts = 0;
+        }
+        else if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipDecodeVideoUpToCts: m_pFctGetPrevRapTime returns 0x%x!",
+                err);
+            return err;
+        }
+
+        err =
+            pClipCtxt->ShellAPI.m_pReader->m_pFctJump(pClipCtxt->pReaderContext,
+            (M4_StreamHandler *)pClipCtxt->pVideoStream, &iRapCts);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipDecodeVideoUpToCts: m_pFctJump returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * The decoder must be told that we jumped */
+        bClipJump = M4OSA_TRUE;
+        pClipCtxt->iVideoDecCts = iRapCts;
+
+        /**
+        * Remember the clip reading state */
+        pClipCtxt->Vstatus = M4VSS3GPP_kClipStatus_DECODE_UP_TO;
+    }
+
+    /**
+    * If we are in decodeUpTo() process, check if we need to do
+    one more step or if decoding is finished */
+    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pClipCtxt->Vstatus )
+    {
+        /* Do a step of 500 ms decoding */
+        pClipCtxt->iVideoDecCts += 500;
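+        /* The jump back to the RAP is thus absorbed over several calls: e.g. a 2 s gap between
+        the previous RAP and the target CTS is decoded in 4 successive 500 ms steps. */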
+
+        if( pClipCtxt->iVideoDecCts > iClipCts )
+        {
+            /* Target time reached, we switch back to DECODE mode */
+            pClipCtxt->iVideoDecCts = iClipCts;
+            pClipCtxt->Vstatus = M4VSS3GPP_kClipStatus_DECODE;
+        }
+
+        M4OSA_TRACE2_1("c ,,,, decode up to : %ld", pClipCtxt->iVideoDecCts);
+    }
+    else
+    {
+        /* Just decode at current clip cts */
+        pClipCtxt->iVideoDecCts = iClipCts;
+
+        M4OSA_TRACE2_1("d ,,,, decode up to : %ld", pClipCtxt->iVideoDecCts);
+    }
+
+    /**
+    * Decode up to the target */
+    dDecodeTime = (M4OSA_Double)pClipCtxt->iVideoDecCts;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_intClipDecodeVideoUpToCts: Decoding upTo CTS %.3f, pClipCtxt=0x%x",
+        dDecodeTime, pClipCtxt);
+
+    pClipCtxt->isRenderDup = M4OSA_FALSE;
+    err =
+        pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctDecode(pClipCtxt->pViDecCtxt,
+        &dDecodeTime, bClipJump);
+
+    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err)
+        && (err != M4WAR_VIDEORENDERER_NO_NEW_FRAME) )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipDecodeVideoUpToCts: m_pFctDecode returns 0x%x!",
+            err);
+        return err;
+    }
+
+    if( err == M4WAR_VIDEORENDERER_NO_NEW_FRAME )
+    {
+        pClipCtxt->isRenderDup = M4OSA_TRUE;
+    }
+
+    /**
+    * Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intClipDecodeVideoUpToCts: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame()
+ * @brief    Read one AU frame in the clip
+ * @note
+ * @param   pClipCtxt            (IN) Internal clip context
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame(
+    M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err;
+
+    /* ------------------------------ */
+    /* ---------- NO AUDIO ---------- */
+    /* ------------------------------ */
+
+    if( M4OSA_NULL == pClipCtxt->pAudioStream )
+    {
+        /* If there is no audio track, we return silence AUs */
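+        /* pSilenceFrameData / uiSilenceFrameSize / iSilenceFrameDuration are assumed to have
+        been set up when the clip was prepared, with one pre-encoded silent frame matching the
+        output audio format. */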
+        pClipCtxt->pAudioFramePtr =
+            (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
+        pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
+        pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
+
+        M4OSA_TRACE2_0("b #### blank track");
+    }
+
+    /* ---------------------------------- */
+    /* ---------- AMR-NB, EVRC ---------- */
+    /* ---------------------------------- */
+
+    else if( ( M4VIDEOEDITING_kAMR_NB
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType)
+        || (M4VIDEOEDITING_kEVRC
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType) )
+    {
+        if( M4OSA_FALSE == pClipCtxt->bAudioFrameAvailable )
+        {
+            /**
+            * No AU available, so we must read one from the original track reader */
+            err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                pClipCtxt->pReaderContext,
+                (M4_StreamHandler *)pClipCtxt->pAudioStream,
+                &pClipCtxt->AudioAU);
+
+            if( M4NO_ERROR == err )
+            {
+                /**
+                * Set the current AMR frame position at the beginning of the read AU */
+                pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
+
+                /**
+                * Set the AMR frame CTS */
+                pClipCtxt->iAudioFrameCts =
+                    (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS
+                    * pClipCtxt->scale_audio + 0.5);
+            }
+            else if( ( M4WAR_NO_MORE_AU == err) && (M4VIDEOEDITING_kAMR_NB
+                == pClipCtxt->pSettings->ClipProperties.AudioStreamType) )
+            {
+                /**
+                * If there is less audio than the stream duration indicated,
+                * we return silence at the end of the stream. */
+                pClipCtxt->pAudioFramePtr =
+                    (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
+                pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
+                pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
+
+                M4OSA_TRACE2_0("a #### silence AU");
+
+                /**
+                * Return with M4WAR_NO_MORE_AU */
+                M4OSA_TRACE3_0(
+                    "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: \
+                    returning M4WAR_NO_MORE_AU (silence)");
+                return M4WAR_NO_MORE_AU;
+            }
+            else /**< fatal error (or no silence in EVRC) */
+            {
+                M4OSA_TRACE3_1(
+                    "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: m_pFctGetNextAu() returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+        else /* bAudioFrameAvailable */
+        {
+            /**
+            * Go to the next AMR frame in the AU */
+            pClipCtxt->pAudioFramePtr += pClipCtxt->uiAudioFrameSize;
+
+            /**
+            * Increment CTS: one AMR frame is 20 ms long */
+            pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
+        }
+
+        /**
+        * Get the size of the pointed AMR frame */
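+        /* For AMR-NB and EVRC the frame size is derived from the frame-type field carried in the
+        first byte of the frame (e.g. an AMR-NB 12.2 kbps frame is 32 bytes including that byte). */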
+        switch( pClipCtxt->pSettings->ClipProperties.AudioStreamType )
+        {
+            case M4VIDEOEDITING_kAMR_NB:
+                pClipCtxt->uiAudioFrameSize =
+                    (M4OSA_UInt16)M4VSS3GPP_intGetFrameSize_AMRNB(
+                    pClipCtxt->pAudioFramePtr);
+                break;
+
+            case M4VIDEOEDITING_kEVRC:
+                pClipCtxt->uiAudioFrameSize =
+                    (M4OSA_UInt16)M4VSS3GPP_intGetFrameSize_EVRC(
+                    pClipCtxt->pAudioFramePtr);
+                break;
+            default:
+                break;
+        }
+
+        if( 0 == pClipCtxt->uiAudioFrameSize )
+        {
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: AU frame size == 0,\
+                returning M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AMR_AU");
+            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+        }
+        else if( pClipCtxt->uiAudioFrameSize > pClipCtxt->AudioAU.m_size )
+        {
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: AU frame size greater than AU size!,\
+                returning M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AMR_AU");
+            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+        }
+
+        /**
+        * Check if the end of the current AU has been reached or not */
+        if( ( pClipCtxt->pAudioFramePtr + pClipCtxt->uiAudioFrameSize)
+            < (pClipCtxt->AudioAU.m_dataAddress + pClipCtxt->AudioAU.m_size) )
+        {
+            pClipCtxt->bAudioFrameAvailable = M4OSA_TRUE;
+        }
+        else
+        {
+            pClipCtxt->bAudioFrameAvailable =
+                M4OSA_FALSE; /**< will be used for next call */
+        }
+    }
+
+    /* ------------------------- */
+    /* ---------- AAC ---------- */
+    /* ------------------------- */
+
+    else if( ( M4VIDEOEDITING_kAAC
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType)
+        || (M4VIDEOEDITING_kAACplus
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType)
+        || (M4VIDEOEDITING_keAACplus
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType) )
+    {
+        err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+            pClipCtxt->pReaderContext,
+            (M4_StreamHandler *)pClipCtxt->pAudioStream,
+            &pClipCtxt->AudioAU);
+
+        if( M4NO_ERROR == err )
+        {
+            pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
+            pClipCtxt->uiAudioFrameSize =
+                (M4OSA_UInt16)pClipCtxt->AudioAU.m_size;
+            pClipCtxt->iAudioFrameCts =
+                (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS * pClipCtxt->scale_audio
+                + 0.5);
+
+            /* Patch because m_CTS is unfortunately rounded in 3gp reader shell */
+            /* (cts is not an integer with frequency 24 kHz for example) */
+            pClipCtxt->iAudioFrameCts = ( ( pClipCtxt->iAudioFrameCts
+                + pClipCtxt->iSilenceFrameDuration / 2)
+                / pClipCtxt->iSilenceFrameDuration)
+                * pClipCtxt->iSilenceFrameDuration;
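+            /* Example: an AAC frame is typically 1024 samples, i.e. 1024/24000 = 42.666... ms at
+            24 kHz; since the reader returns an integer CTS, re-aligning it to the nearest multiple
+            of the nominal frame duration (iSilenceFrameDuration) avoids accumulating drift. */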
+        }
+        else if( M4WAR_NO_MORE_AU == err )
+        {
+            /**
+            * If there is less audio than the stream duration indicated,
+            * we return silence at the end of the stream. */
+            pClipCtxt->pAudioFramePtr =
+                (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
+            pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
+            pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
+
+            M4OSA_TRACE2_0("a #### silence AU");
+
+            /**
+            * Return with M4WAR_NO_MORE_AU */
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-AAC:\
+                returning M4WAR_NO_MORE_AU (silence)");
+            return M4WAR_NO_MORE_AU;
+        }
+        else /**< fatal error */
+        {
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-AAC: m_pFctGetNextAu() returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /* --------------------------------- */
+    /* ---------- MP3, others ---------- */
+    /* --------------------------------- */
+
+    else
+    {
+        err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+            pClipCtxt->pReaderContext,
+            (M4_StreamHandler *)pClipCtxt->pAudioStream,
+            &pClipCtxt->AudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-MP3: m_pFctGetNextAu() returns 0x%x",
+                err);
+            return err;
+        }
+
+        pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
+        pClipCtxt->uiAudioFrameSize = (M4OSA_UInt16)pClipCtxt->AudioAU.m_size;
+        pClipCtxt->iAudioFrameCts =
+            (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS * pClipCtxt->scale_audio
+            + 0.5);
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intClipReadNextAudioFrame(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipPrepareAudioDecoder()
+ * @brief    Creates and initialize the audio decoder for the clip.
+ * @note
+ * @param   pClipCtxt        (IN) internal clip context
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intClipPrepareAudioDecoder(
+    M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4_StreamType audiotype;
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+    M4_AACType iAacType = 0;
+
+#endif
+
+    /**
+    * Set the proper audio decoder */
+
+    audiotype = pClipCtxt->pAudioStream->m_basicProperties.m_streamType;
+
+    //EVRC
+    if( M4DA_StreamTypeAudioEvrc
+        != audiotype ) /* decoder not supported yet, but null encoding is allowed */
+    {
+        err = M4VSS3GPP_setCurrentAudioDecoder(&pClipCtxt->ShellAPI, audiotype);
+    }
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Creates the audio decoder */
+    if( M4OSA_NULL == pClipCtxt->ShellAPI.m_pAudioDecoder )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intClipPrepareAudioDecoder(): Fails to initiate the audio decoder.");
+        return M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED;
+    }
+
+    if( M4OSA_NULL == pClipCtxt->pAudioDecCtxt )
+    {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+        if( M4OSA_TRUE == pClipCtxt->ShellAPI.bAllowFreeingOMXCodecInterface )
+        {
+            /* NXP SW codec interface is used*/
+            if( M4DA_StreamTypeAudioAac == audiotype )
+                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+                &(pClipCtxt->AacProperties));
+            else
+                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+                M4OSA_NULL /* to be changed with HW interfaces */);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipPrepareAudioDecoder: m_pAudioDecoder->m_pFctCreateAudioDec\
+                    returns 0x%x", err);
+                return err;
+            }
+        }
+        else
+        {
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                Creating external audio decoder of type 0x%x", audiotype);
+            /* External OMX codecs are used*/
+            if( M4DA_StreamTypeAudioAac == audiotype )
+            {
+                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                    &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+                    pClipCtxt->ShellAPI.pCurrentAudioDecoderUserData);
+
+                if( M4NO_ERROR == err )
+                {
+                    /* AAC properties*/
+                    /*get from Reader; temporary, till Audio decoder shell API
+                      available to get the AAC properties*/
+                    pClipCtxt->AacProperties.aNumChan =
+                        pClipCtxt->pAudioStream->m_nbChannels;
+                    pClipCtxt->AacProperties.aSampFreq =
+                        pClipCtxt->pAudioStream->m_samplingFrequency;
+
+                    err = pClipCtxt->ShellAPI.m_pAudioDecoder->
+                        m_pFctGetOptionAudioDec(pClipCtxt->pAudioDecCtxt,
+                        M4AD_kOptionID_StreamType,
+                        (M4OSA_DataOption) &iAacType);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                            m_pAudioDecoder->m_pFctGetOptionAudioDec returns err 0x%x", err);
+                        iAacType = M4_kAAC; //set to default
+                        err = M4NO_ERROR;
+                    }
+                    else {
+                        M4OSA_TRACE3_1(
+                        "M4VSS3GPP_intClipPrepareAudioDecoder: \
+                        m_pAudioDecoder->m_pFctGetOptionAudioDec returns streamType %d",
+                        iAacType);
+                    }
+                    switch( iAacType )
+                    {
+                        case M4_kAAC:
+                            pClipCtxt->AacProperties.aSBRPresent = 0;
+                            pClipCtxt->AacProperties.aPSPresent = 0;
+                            break;
+
+                        case M4_kAACplus:
+                            pClipCtxt->AacProperties.aSBRPresent = 1;
+                            pClipCtxt->AacProperties.aPSPresent = 0;
+                            pClipCtxt->AacProperties.aExtensionSampFreq =
+                                pClipCtxt->pAudioStream->m_samplingFrequency;
+                            break;
+
+                        case M4_keAACplus:
+                            pClipCtxt->AacProperties.aSBRPresent = 1;
+                            pClipCtxt->AacProperties.aPSPresent = 1;
+                            pClipCtxt->AacProperties.aExtensionSampFreq =
+                                pClipCtxt->pAudioStream->m_samplingFrequency;
+                            break;
+                        default:
+                            break;
+                    }
+                    M4OSA_TRACE3_2(
+                        "M4VSS3GPP_intClipPrepareAudioDecoder: AAC NBChans=%d, SamplFreq=%d",
+                        pClipCtxt->AacProperties.aNumChan,
+                        pClipCtxt->AacProperties.aSampFreq);
+                }
+            }
+            else
+                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+                pClipCtxt->ShellAPI.pCurrentAudioDecoderUserData);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                    m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+
+#else
+        /* Workaround: pUserData is used to retrieve the AAC properties,
+           pending a better implementation... */
+
+        if( M4DA_StreamTypeAudioAac == audiotype )
+            err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+            &pClipCtxt->pAudioDecCtxt,
+            pClipCtxt->pAudioStream, &(pClipCtxt->AacProperties));
+        else
+            err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+            &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+            M4OSA_NULL /* to be changed with HW interfaces */);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
+                err);
+            return err;
+        }
+
+#endif
+
+    }
+
+    if( M4DA_StreamTypeAudioAmrNarrowBand == audiotype ) {
+        /* AMR DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else if( M4DA_StreamTypeAudioEvrc == audiotype ) {
+        /* EVRC DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else if( M4DA_StreamTypeAudioMp3 == audiotype ) {
+        /* MP3 DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else if( M4DA_StreamTypeAudioAac == audiotype )
+    {
+        /* AAC DECODER CONFIGURATION */
+
+        /* Decode high quality aac but disable PS and SBR */
+        /* Because we may have to mix different kinds of AAC, we take the lowest common capability */
+        /* In the MCS this was not needed because there is only one stream */
+        M4_AacDecoderConfig AacDecParam;
+
+        AacDecParam.m_AACDecoderProfile = AAC_kAAC;
+        AacDecParam.m_DownSamplingMode = AAC_kDS_OFF;
+
+        if( M4ENCODER_kMono == pClipCtxt->pAudioStream->m_nbChannels )
+        {
+            AacDecParam.m_OutputMode = AAC_kMono;
+        }
+        else
+        {
+            AacDecParam.m_OutputMode = AAC_kStereo;
+        }
+
+        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec(
+            pClipCtxt->pAudioDecCtxt,
+            M4AD_kOptionID_UserParam, (M4OSA_DataOption) &AacDecParam);
+    }
+
+    if( M4OSA_NULL != pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStartAudioDec )
+    {
+        /* Not implemented in all decoders */
+        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStartAudioDec(
+            pClipCtxt->pAudioDecCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                m_pAudioDecoder->m_pFctStartAudioDec returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Allocate output buffer for the audio decoder */
+    pClipCtxt->AudioDecBufferOut.m_bufferSize =
+        pClipCtxt->pAudioStream->m_byteFrameLength
+        * pClipCtxt->pAudioStream->m_byteSampleSize
+        * pClipCtxt->pAudioStream->m_nbChannels;
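+    /* Illustrative sizing note (example figures only, not taken from any particular
+       stream): if m_byteFrameLength were 1024, m_byteSampleSize 2 and m_nbChannels 2,
+       m_bufferSize would be 1024 * 2 * 2 = 4096, and the allocation below would then
+       reserve 4096 * sizeof(M4OSA_Int16) bytes. */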
+    pClipCtxt->AudioDecBufferOut.m_dataAddress =
+        (M4OSA_MemAddr8)M4OSA_malloc(pClipCtxt->AudioDecBufferOut.m_bufferSize
+        * sizeof(M4OSA_Int16),
+        M4VSS3GPP, (M4OSA_Char *)"AudioDecBufferOut.m_bufferSize");
+
+    if( M4OSA_NULL == pClipCtxt->AudioDecBufferOut.m_dataAddress )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intClipPrepareAudioDecoder():\
+            unable to allocate AudioDecBufferOut.m_dataAddress, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame()
+ * @brief    Decode the current AUDIO frame.
+ * @note
+ * @param   pClipCtxt        (IN) internal clip context
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame(
+    M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err;
+
+    /**
+    * Silence mode */
+    if( pClipCtxt->pSilenceFrameData
+        == (M4OSA_UInt8 *)pClipCtxt->pAudioFramePtr )
+    {
+        if( pClipCtxt->AudioDecBufferOut.m_dataAddress == M4OSA_NULL )
+        {
+            /**
+            * Allocate output buffer for the audio decoder */
+            pClipCtxt->AudioDecBufferOut.m_bufferSize =
+                pClipCtxt->uiSilencePcmSize;
+            pClipCtxt->AudioDecBufferOut.m_dataAddress =
+                (M4OSA_MemAddr8)M4OSA_malloc(
+                pClipCtxt->AudioDecBufferOut.m_bufferSize
+                * sizeof(M4OSA_Int16),
+                M4VSS3GPP,(M4OSA_Char *) "AudioDecBufferOut.m_bufferSize");
+
+            if( M4OSA_NULL == pClipCtxt->AudioDecBufferOut.m_dataAddress )
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intClipDecodeCurrentAudioFrame():\
+                    unable to allocate AudioDecBufferOut.m_dataAddress, returning M4ERR_ALLOC");
+                return M4ERR_ALLOC;
+            }
+        }
+
+        /* Fill it with 0 (= pcm silence) */
+        M4OSA_memset(pClipCtxt->AudioDecBufferOut.m_dataAddress,
+             pClipCtxt->AudioDecBufferOut.m_bufferSize * sizeof(M4OSA_Int16), 0);
+    }
+    else if (pClipCtxt->pSettings->FileType == M4VIDEOEDITING_kFileType_PCM)
+    {
+        pClipCtxt->AudioDecBufferIn.m_dataAddress = (M4OSA_MemAddr8) pClipCtxt->pAudioFramePtr;
+        pClipCtxt->AudioDecBufferIn.m_bufferSize  = pClipCtxt->uiAudioFrameSize;
+
+        M4OSA_memcpy(pClipCtxt->AudioDecBufferOut.m_dataAddress,
+            pClipCtxt->AudioDecBufferIn.m_dataAddress, pClipCtxt->AudioDecBufferIn.m_bufferSize);
+        pClipCtxt->AudioDecBufferOut.m_bufferSize = pClipCtxt->AudioDecBufferIn.m_bufferSize;
+        /**
+        * Return with no error */
+
+        M4OSA_TRACE3_0("M4VSS3GPP_intClipDecodeCurrentAudioFrame(): returning M4NO_ERROR");
+        return M4NO_ERROR;
+    }
+    /**
+    * Standard decoding mode */
+    else
+    {
+        /**
+        * Decode current AMR frame */
+        pClipCtxt->AudioDecBufferIn.m_dataAddress =
+            (M4OSA_MemAddr8)pClipCtxt->pAudioFramePtr;
+        pClipCtxt->AudioDecBufferIn.m_bufferSize = pClipCtxt->uiAudioFrameSize;
+
+        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStepAudioDec(
+            pClipCtxt->pAudioDecCtxt,
+            &pClipCtxt->AudioDecBufferIn, &pClipCtxt->AudioDecBufferOut,
+            M4OSA_FALSE);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipDecodeCurrentAudioFrame():\
+                m_pAudioDecoder->m_pFctStepAudio returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intClipDecodeCurrentAudioFrame(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt()
+ * @brief    Jump in the audio track of the clip.
+ * @note
+ * @param   pClipCtxt            (IN) internal clip context
+ * @param   pJumpCts            (IN/OUT) in:target CTS, out: reached CTS
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt( M4VSS3GPP_ClipContext *pClipCtxt,
+                                       M4OSA_Int32 *pJumpCts )
+{
+    M4OSA_ERR err;
+    M4OSA_Int32 iTargetCts;
+    M4OSA_Int32 iJumpCtsMs;
+
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipJumpAudioAt: pClipCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pJumpCts), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipJumpAudioAt: pJumpCts is M4OSA_NULL");
+
+    iTargetCts = *pJumpCts;
+
+    /**
+    * If there is no audio stream, we simulate a jump at the target jump CTS */
+    if( M4OSA_NULL == pClipCtxt->pAudioStream )
+    {
+        /**
+        * the target CTS will be reached at the next ReadFrame call (hence the subtraction of one silence frame duration) */
+        *pJumpCts = iTargetCts - pClipCtxt->iSilenceFrameDuration;
+
+        /* Patch because m_CTS is unfortunately rounded in 3gp reader shell */
+        /* (cts is not an integer with frequency 24 kHz for example) */
+        *pJumpCts = ( ( *pJumpCts + pClipCtxt->iSilenceFrameDuration / 2)
+            / pClipCtxt->iSilenceFrameDuration)
+            * pClipCtxt->iSilenceFrameDuration;
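+        /* Illustrative note: this rounds *pJumpCts to the nearest multiple of the
+           silence frame duration. Assuming, for example, a 20 ms silence frame
+           (typical for AMR-NB, but only an assumption here), a target of 1013 ms
+           becomes ((1013 + 10) / 20) * 20 = 1020 ms. */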
+        pClipCtxt->iAudioFrameCts =
+            *pJumpCts; /* simulate a read at the jump position for later silence AUs */
+    }
+    else
+    {
+        M4OSA_Int32 current_time = 0;
+        M4OSA_Int32 loop_counter = 0;
+
+        if( (M4DA_StreamTypeAudioMp3
+            == pClipCtxt->pAudioStream->m_basicProperties.m_streamType) )
+        {
+            while( ( loop_counter < M4VSS3GPP_MP3_JUMPED_AU_NUMBER_MAX)
+                && (current_time < iTargetCts) )
+            {
+                err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                    pClipCtxt->pReaderContext,
+                    (M4_StreamHandler *)pClipCtxt->pAudioStream,
+                    &pClipCtxt->AudioAU);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE3_1(
+                        "M4VSS3GPP_intClipJumpAudioAt: m_pFctGetNextAu() returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                current_time = (M4OSA_Int32)pClipCtxt->AudioAU.m_CTS;
+                loop_counter++;
+            }
+
+            /**
+            * The current AU is stored */
+            pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
+            pClipCtxt->uiAudioFrameSize =
+                (M4OSA_UInt16)pClipCtxt->AudioAU.m_size;
+            pClipCtxt->iAudioFrameCts =
+                (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS * pClipCtxt->scale_audio
+                + 0.5);
+
+            *pJumpCts = pClipCtxt->iAudioFrameCts;
+        }
+        else
+        {
+            /**
+            * Jump in the audio stream */
+            iJumpCtsMs =
+                (M4OSA_Int32)(*pJumpCts / pClipCtxt->scale_audio + 0.5);
+
+            err = pClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+                pClipCtxt->pReaderContext,
+                (M4_StreamHandler *)pClipCtxt->pAudioStream,
+                &iJumpCtsMs);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipJumpAudioAt(): m_pFctJump() returns 0x%x",
+                    err);
+                return err;
+            }
+
+            *pJumpCts =
+                (M4OSA_Int32)(iJumpCtsMs * pClipCtxt->scale_audio + 0.5);
+
+            /* Patch because m_CTS is unfortunately rounded in 3gp reader shell */
+            /* (cts is not an integer with frequency 24 kHz for example) */
+            *pJumpCts = ( ( *pJumpCts + pClipCtxt->iSilenceFrameDuration / 2)
+                / pClipCtxt->iSilenceFrameDuration)
+                * pClipCtxt->iSilenceFrameDuration;
+            pClipCtxt->iAudioFrameCts = 0; /* No frame read yet */
+
+            /**
+            * To catch possible bugs, reset all of these after a jump */
+            pClipCtxt->bAudioFrameAvailable = M4OSA_FALSE;
+            pClipCtxt->pAudioFramePtr = M4OSA_NULL;
+
+            /**
+            * In AMR we have to manage multi-framed AUs,
+            * and in AAC the jump can land one AU too far backward */
+            if( *pJumpCts < iTargetCts )
+            {
+                /**
+                * Jump doesn't read any AU, we must read at least one */
+                err = M4VSS3GPP_intClipReadNextAudioFrame(pClipCtxt);
+
+                if( M4OSA_ERR_IS_ERROR(err) )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intClipJumpAudioAt():\
+                        M4VSS3GPP_intClipReadNextAudioFrame(a) returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Read AUs until we reach the one just before the target CTS
+                * (so that the target is reached when the user calls ReadNextAudioFrame). */
+                while( pClipCtxt->iAudioFrameCts
+                    < (iTargetCts - pClipCtxt->iSilenceFrameDuration) )
+                {
+                    err = M4VSS3GPP_intClipReadNextAudioFrame(pClipCtxt);
+
+                    if( M4OSA_ERR_IS_ERROR(err) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipJumpAudioAt():\
+                            M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Return the CTS that will be reached at next ReadFrame */
+                *pJumpCts = pClipCtxt->iAudioFrameCts
+                    + pClipCtxt->iSilenceFrameDuration;
+            }
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intClipJumpAudioAt(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipClose()
+ * @brief    Close a clip. Destroy the context.
+ * @note
+ * @param   pClipCtxt            (IN) Internal clip context
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipClose( M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err;
+
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipClose: pClipCtxt is M4OSA_NULL");
+
+    /**
+    * Free the video decoder context */
+    if( M4OSA_NULL != pClipCtxt->pViDecCtxt )
+    {
+        pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctDestroy(
+            pClipCtxt->pViDecCtxt);
+        pClipCtxt->pViDecCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the audio decoder context  */
+    if( M4OSA_NULL != pClipCtxt->pAudioDecCtxt )
+    {
+        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctDestroyAudioDec(
+            pClipCtxt->pAudioDecCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipClose: m_pAudioDecoder->m_pFctDestroyAudioDec returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pClipCtxt->pAudioDecCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the decoded audio buffer */
+    if( M4OSA_NULL != pClipCtxt->AudioDecBufferOut.m_dataAddress )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pClipCtxt->AudioDecBufferOut.m_dataAddress);
+        pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+    }
+
+    /**
+    * The audio AU is allocated by the reader.
+    * If there is no audio track, the audio AU is set to a static 'silence' (SID) frame by the VSS.
+    * As a consequence, if the audio AU points to that static 'silence' data,
+    * it must not be freed; the pointer is simply reset to NULL */
+    if( ( (M4OSA_MemAddr8)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048
+        == pClipCtxt->AudioAU.m_dataAddress)
+        || ((M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData
+        == pClipCtxt->AudioAU.m_dataAddress) )
+    {
+        pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pClipCtxt->pReaderContext )
+    {
+        /**
+        * Close the 3GPP or MP3 reader */
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctClose(
+            pClipCtxt->pReaderContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipClose(): m_pReader->m_pFctClose returns 0x%x",
+                err);
+        }
+
+        /**
+        * Destroy the 3GPP or MP3 reader context */
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctDestroy(
+            pClipCtxt->pReaderContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipClose(): m_pReader->m_pFctDestroy returns 0x%x",
+                err);
+        }
+
+        pClipCtxt->pReaderContext = M4OSA_NULL;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_1("M4VSS3GPP_intClipClose(Ctxt=0x%x): returning M4NO_ERROR",
+        pClipCtxt);
+    return M4NO_ERROR;
+}
+
+M4OSA_ERR M4VSS3GPP_intClipCleanUp( M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err = M4NO_ERROR, err2;
+
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipCleanUp: pClipCtxt is M4OSA_NULL");
+
+    /**
+    * Free the video decoder context */
+    if( M4OSA_NULL != pClipCtxt->pViDecCtxt )
+    {
+        pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctDestroy(
+            pClipCtxt->pViDecCtxt);
+        pClipCtxt->pViDecCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the audio decoder context  */
+    if( M4OSA_NULL != pClipCtxt->pAudioDecCtxt )
+    {
+        err2 = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctDestroyAudioDec(
+            pClipCtxt->pAudioDecCtxt);
+
+        if( M4NO_ERROR != err2 )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipCleanUp: m_pAudioDecoder->m_pFctDestroyAudioDec returns 0x%x",
+                err2);
+            /**< don't return, we still have stuff to free */
+            if( M4NO_ERROR == err )
+                err = err2;
+        }
+
+        pClipCtxt->pAudioDecCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the decoded audio buffer */
+    if( M4OSA_NULL != pClipCtxt->AudioDecBufferOut.m_dataAddress )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pClipCtxt->AudioDecBufferOut.m_dataAddress);
+        pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+    }
+
+    /**
+    * The audio AU is allocated by the reader.
+    * If there is no audio track, the audio AU is set to a static 'silence' (SID) frame by the VSS.
+    * As a consequence, if the audio AU points to that static 'silence' data,
+    * it must not be freed; the pointer is simply reset to NULL */
+    if( ( (M4OSA_MemAddr8)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048
+        == pClipCtxt->AudioAU.m_dataAddress)
+        || ((M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData
+        == pClipCtxt->AudioAU.m_dataAddress) )
+    {
+        pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pClipCtxt->pReaderContext )
+    {
+        /**
+        * Close the 3GPP or MP3 reader */
+        err2 = pClipCtxt->ShellAPI.m_pReader->m_pFctClose(
+            pClipCtxt->pReaderContext);
+
+        if( M4NO_ERROR != err2 )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipCleanUp(): m_pReader->m_pFctClose returns 0x%x",
+                err2);
+
+            if( M4NO_ERROR == err )
+                err = err2;
+        }
+
+        /**
+        * Destroy the 3GPP or MP3 reader context */
+        err2 = pClipCtxt->ShellAPI.m_pReader->m_pFctDestroy(
+            pClipCtxt->pReaderContext);
+
+        if( M4NO_ERROR != err2 )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipCleanUp(): m_pReader->m_pFctDestroy returns 0x%x",
+                err2);
+
+            if( M4NO_ERROR == err )
+                err = err2;
+        }
+
+        pClipCtxt->pReaderContext = M4OSA_NULL;
+    }
+
+    /**
+    * Free the shells interfaces */
+    M4VSS3GPP_unRegisterAllWriters(&pClipCtxt->ShellAPI);
+    M4VSS3GPP_unRegisterAllEncoders(&pClipCtxt->ShellAPI);
+    M4VSS3GPP_unRegisterAllReaders(&pClipCtxt->ShellAPI);
+    M4VSS3GPP_unRegisterAllDecoders(&pClipCtxt->ShellAPI);
+
+    M4OSA_TRACE3_1("M4VSS3GPP_intClipCleanUp: pClipCtxt=0x%x", pClipCtxt);
+    /**
+    * Free the clip context */
+    M4OSA_free((M4OSA_MemAddr32)pClipCtxt);
+
+    return err;
+}
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+M4OSA_ERR
+M4VSS3GPP_intClipRegisterExternalVideoDecoder( M4VSS3GPP_ClipContext *pClipCtxt,
+                                              M4VD_VideoType decoderType,
+                                              M4VD_Interface *pDecoderInterface,
+                                              M4OSA_Void *pUserData )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4DECODER_VideoInterface *shellInterface;
+    M4DECODER_VideoType nativeType;
+    M4DECODER_EXTERNAL_UserDataType shellUserData;
+
+    switch( decoderType )
+    {
+        case M4VD_kMpeg4VideoDec:
+        case M4VD_kH263VideoDec:
+            nativeType = M4DECODER_kVideoTypeMPEG4;
+            break;
+
+        case M4VD_kH264VideoDec:
+            nativeType = M4DECODER_kVideoTypeAVC;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipRegisterExternalVideoDecoder: unknown decoderType %d",
+                decoderType);
+            return M4ERR_PARAMETER;
+            break;
+    }
+
+    shellUserData =
+        (M4DECODER_EXTERNAL_UserDataType)M4OSA_malloc(sizeof(*shellUserData),
+        M4VSS3GPP, (M4OSA_Char *)"userData structure for the external shell decoder");
+
+    if( M4OSA_NULL == shellUserData )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intClipRegisterExternalVideoDecoder:\
+            failed to allocate userData structure for the external shell decoder");
+        return M4ERR_ALLOC;
+    }
+
+    shellUserData->externalFuncs = pDecoderInterface;
+    shellUserData->externalUserData = pUserData;
+
+    err = M4DECODER_EXTERNAL_getInterface(&shellInterface);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipRegisterExternalVideoDecoder:\
+            M4DECODER_EXTERNAL_getInterface failed with error 0x%08X",
+            err);
+        M4OSA_free((M4OSA_MemAddr32)shellUserData);
+        return err;
+    }
+
+    err = M4VSS3GPP_registerVideoDecoder(&(pClipCtxt->ShellAPI), nativeType,
+        shellInterface);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipRegisterExternalVideoDecoder:\
+            M4VSS3GPP_registerVideoDecoder failed with error 0x%08X",
+            err);
+        M4OSA_free((M4OSA_MemAddr32)shellInterface);
+        M4OSA_free((M4OSA_MemAddr32)shellUserData);
+        return err;
+    }
+
+    pClipCtxt->ShellAPI.m_pVideoDecoderUserDataTable[nativeType] =
+        shellUserData;
+
+    return M4NO_ERROR;
+}
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB()
+ * @brief   Return the length, in bytes, of the AMR Narrow-Band frame contained in the given buffer
+ * @note
+ * @param   pAudioFrame   (IN) AMRNB frame
+ * @return  The AMR-NB frame size in bytes (including the frame-type byte), or 0 for an invalid frame type
+ ******************************************************************************
+ */
+
+M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB( M4OSA_MemAddr8 pAudioFrame )
+{
+    M4OSA_UInt32 frameSize = 0;
+    M4OSA_UInt32 frameType = ( ( *pAudioFrame) &(0xF << 3)) >> 3;
+
+    switch( frameType )
+    {
+        case 0:
+            frameSize = 95;
+            break; /*  4750 bps */
+
+        case 1:
+            frameSize = 103;
+            break; /*  5150 bps */
+
+        case 2:
+            frameSize = 118;
+            break; /*  5900 bps */
+
+        case 3:
+            frameSize = 134;
+            break; /*  6700 bps */
+
+        case 4:
+            frameSize = 148;
+            break; /*  7400 bps */
+
+        case 5:
+            frameSize = 159;
+            break; /*  7950 bps */
+
+        case 6:
+            frameSize = 204;
+            break; /* 10200 bps */
+
+        case 7:
+            frameSize = 244;
+            break; /* 12000 bps */
+
+        case 8:
+            frameSize = 39;
+            break; /* SID (Silence) */
+
+        case 15:
+            frameSize = 0;
+            break; /* No data */
+
+        default:
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intGetFrameSize_AMRNB(): Corrupted AMR frame! returning 0.");
+            return 0;
+    }
+
+    return (1 + (( frameSize + 7) / 8));
+}
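+
+/* Worked example (illustrative only): for frame type 7 (12.2 kbps), the payload is
+   244 bits, so the function returns 1 + ((244 + 7) / 8) = 32 bytes including the
+   frame-type byte; for frame type 8 (SID), 39 bits give 1 + 5 = 6 bytes. */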
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC()
+ * @brief   Return the length, in bytes, of the EVRC frame contained in the given buffer
+ * @note
+ *     0 1 2 3
+ *    +-+-+-+-+
+ *    |fr type|              RFC 3558
+ *    +-+-+-+-+
+ *
+ * Frame Type: 4 bits
+ *    The frame type indicates the type of the corresponding codec data
+ *    frame in the RTP packet.
+ *
+ * For EVRC and SMV codecs, the frame type values and size of the
+ * associated codec data frame are described in the table below:
+ *
+ * Value   Rate      Total codec data frame size (in octets)
+ * ---------------------------------------------------------
+ *   0     Blank      0    (0 bit)
+ *   1     1/8        2    (16 bits)
+ *   2     1/4        5    (40 bits; not valid for EVRC)
+ *   3     1/2       10    (80 bits)
+ *   4     1         22    (171 bits; 5 padded at end with zeros)
+ *   5     Erasure    0    (SHOULD NOT be transmitted by sender)
+ *
+ * @param   pAudioFrame   (IN) EVRC frame
+ * @return  The EVRC frame size in bytes (including the frame-type byte), or 0 for an invalid frame type
+ ******************************************************************************
+ */
+M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC( M4OSA_MemAddr8 pAudioFrame )
+{
+    M4OSA_UInt32 frameSize = 0;
+    M4OSA_UInt32 frameType = ( *pAudioFrame) &0x0F;
+
+    switch( frameType )
+    {
+        case 0:
+            frameSize = 0;
+            break; /*  blank */
+
+        case 1:
+            frameSize = 16;
+            break; /*  1/8 */
+
+        case 2:
+            frameSize = 40;
+            break; /*  1/4 */
+
+        case 3:
+            frameSize = 80;
+            break; /*  1/2 */
+
+        case 4:
+            frameSize = 171;
+            break; /*  1 */
+
+        case 5:
+            frameSize = 0;
+            break; /*  erasure */
+
+        default:
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intGetFrameSize_EVRC(): Corrupted EVRC frame! returning 0.");
+            return 0;
+    }
+
+    return (1 + (( frameSize + 7) / 8));
+}
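+
+/* Worked example (illustrative only): a full-rate frame (type 4) carries 171 bits,
+   so the function returns 1 + ((171 + 7) / 8) = 23 bytes including the frame-type
+   byte; a 1/8-rate frame (type 1) carries 16 bits and yields 1 + 2 = 3 bytes. */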
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_ClipAnalysis.c b/libvideoeditor/vss/src/M4VSS3GPP_ClipAnalysis.c
new file mode 100755
index 0000000..471cb6d
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_ClipAnalysis.c
@@ -0,0 +1,1388 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_ClipAnalysis.c
+ * @brief    Implementation of functions related to analysis of input clips
+ * @note    Contains both public clip analysis API functions and internal helpers
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ *    Our headers */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#include "M4VD_EXTERNAL_Interface.h"
+
+#endif
+
+/**
+ *    OSAL headers */
+#include "M4OSA_Memory.h" /* OSAL memory management */
+#include "M4OSA_Debug.h"  /* OSAL debug management */
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editAnalyseClip()
+ * @brief    This function allows checking if a clip is compatible with VSS 3GPP editing
+ * @note    It also fills a ClipAnalysis structure, which can be used to check if two
+ *        clips are compatible
+ * @param    pClip                (IN) File descriptor of the input 3GPP/MP3 clip file.
+ * @param    FileType            (IN) Type of the input file (.3gp, .amr, .mp3)
+ * @param    pClipProperties        (OUT) Pointer to a valid ClipProperties structure to be filled.
+ * @param    pFileReadPtrFct        (IN) Pointer to the OSAL file reader functions.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return   M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED
+ * @return   M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return   M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED
+ * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE
+ * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE
+ * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC
+ * @return   M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
+ * @return   M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE
+ * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT
+ * @return   M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editAnalyseClip( M4OSA_Void *pClip,
+                                    M4VIDEOEDITING_FileType FileType,
+                                    M4VIDEOEDITING_ClipProperties *pClipProperties,
+                                    M4OSA_FileReadPointer *pFileReadPtrFct )
+{
+    M4OSA_ERR err;
+    M4VSS3GPP_ClipContext *pClipContext;
+    M4VSS3GPP_ClipSettings ClipSettings;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_editAnalyseClip called with pClip=0x%x, pClipProperties=0x%x",
+        pClip, pClipProperties);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClip), M4ERR_PARAMETER,
+        "M4VSS3GPP_editAnalyseClip: pClip is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipProperties), M4ERR_PARAMETER,
+        "M4VSS3GPP_editAnalyseClip: pClipProperties is M4OSA_NULL");
+
+    /**
+    * Build dummy clip settings, in order to use the editClipOpen function */
+    ClipSettings.pFile = pClip;
+    ClipSettings.FileType = FileType;
+    ClipSettings.uiBeginCutTime = 0;
+    ClipSettings.uiEndCutTime = 0;
+
+    /* Clip properties are not built yet; set at least this flag */
+    ClipSettings.ClipProperties.bAnalysed = M4OSA_FALSE;
+
+    /**
+    * Open the clip in fast open mode */
+    err = M4VSS3GPP_intClipInit(&pClipContext, pFileReadPtrFct);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editAnalyseClip: M4VSS3GPP_intClipInit() returns 0x%x!",
+            err);
+
+        /**
+        * Free the clip */
+        if( M4OSA_NULL != pClipContext )
+        {
+            M4VSS3GPP_intClipCleanUp(pClipContext);
+        }
+        return err;
+    }
+
+    err = M4VSS3GPP_intClipOpen(pClipContext, &ClipSettings, M4OSA_FALSE,
+        M4OSA_TRUE, M4OSA_TRUE);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editAnalyseClip: M4VSS3GPP_intClipOpen() returns 0x%x!",
+            err);
+
+        M4VSS3GPP_intClipCleanUp(pClipContext);
+
+        /**
+        * Here it is better to return the Editing specific error code */
+        if( ( ((M4OSA_UInt32)M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED) == err)
+            || (((M4OSA_UInt32)M4ERR_DECODER_H263_NOT_BASELINE) == err) )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editAnalyseClip:\
+                M4VSS3GPP_intClipOpen() returns M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED");
+            return M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED;
+        }
+        return err;
+    }
+
+    /**
+    * Analyse the clip */
+    err = M4VSS3GPP_intBuildAnalysis(pClipContext, pClipProperties);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editAnalyseClip: M4VSS3GPP_intBuildAnalysis() returns 0x%x!",
+            err);
+
+        /**
+        * Free the clip */
+        M4VSS3GPP_intClipCleanUp(pClipContext);
+        return err;
+    }
+
+    /**
+    * Free the clip */
+    err = M4VSS3GPP_intClipClose(pClipContext);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editAnalyseClip: M4VSS_intClipClose() returns 0x%x!",
+            err);
+        M4VSS3GPP_intClipCleanUp(pClipContext);
+        return err;
+    }
+
+    M4VSS3GPP_intClipCleanUp(pClipContext);
+
+    /**
+    * Check the clip is compatible with VSS editing */
+    err = M4VSS3GPP_intCheckClipCompatibleWithVssEditing(pClipProperties);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editAnalyseClip:\
+            M4VSS3GPP_intCheckClipCompatibleWithVssEditing() returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_editAnalyseClip(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility()
+ * @brief    This function allows checking if two clips are compatible with each other for
+ *        VSS 3GPP editing assembly feature.
+ * @note
+ * @param    pClip1Properties        (IN) Clip analysis of the first clip
+ * @param    pClip2Properties        (IN) Clip analysis of the second clip
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return    M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return    M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT
+ * @return    M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE
+ * @return    M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE
+ * @return    M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING
+ * @return  M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY
+ * @return  M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility( M4VIDEOEDITING_ClipProperties *pClip1Properties,
+                                                M4VIDEOEDITING_ClipProperties *pClip2Properties )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_ERR video_err = M4NO_ERROR;
+    M4OSA_ERR audio_err = M4NO_ERROR;
+
+    M4OSA_Bool bClip1IsAAC = M4OSA_FALSE;
+    M4OSA_Bool bClip2IsAAC = M4OSA_FALSE;
+
+    M4OSA_TRACE3_2("M4VSS3GPP_editCheckClipCompatibility called with pClip1Analysis=0x%x,\
+                   pClip2Analysis=0x%x", pClip1Properties, pClip2Properties);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClip1Properties), M4ERR_PARAMETER,
+        "M4VSS3GPP_editCheckClipCompatibility: pClip1Properties is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClip2Properties), M4ERR_PARAMETER,
+        "M4VSS3GPP_editCheckClipCompatibility: pClip2Properties is M4OSA_NULL");
+
+    /**
+    * Check that each clip is, on its own, compatible with VSS 3GPP.
+    *
+    * Note: if a clip is not compatible with VSS 3GPP, M4VSS3GPP_editAnalyseClip()
+    * already returned an error to the integrator, who should therefore not call
+    * M4VSS3GPP_editCheckClipCompatibility with that clip analysis.
+    * Still, it is safer to redo the test here;
+    * M4VSS3GPP_intCheckClipCompatibleWithVssEditing is cheap to execute. */
+    err = M4VSS3GPP_intCheckClipCompatibleWithVssEditing(pClip1Properties);
+
+    if( err != M4NO_ERROR )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editCheckClipCompatibility: Clip1 not compatible with VSS3GPP,\
+            returning 0x%x", err);
+        return err;
+    }
+    err = M4VSS3GPP_intCheckClipCompatibleWithVssEditing(pClip2Properties);
+
+    if( err != M4NO_ERROR )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editCheckClipCompatibility: Clip2 not compatible with VSS3GPP,\
+            returning 0x%x", err);
+        return err;
+    }
+
+    if( ( M4VIDEOEDITING_kFileType_MP3 == pClip1Properties->FileType)
+        || (M4VIDEOEDITING_kFileType_AMR == pClip1Properties->FileType) )
+    {
+        if( pClip1Properties != pClip2Properties )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editCheckClipCompatibility: MP3 CAN ONLY BE CUT,\
+                returning M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY");
+            return M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY;
+        }
+        else
+        {
+            /* We are in VSS Splitter mode */
+            goto audio_analysis;
+        }
+    }
+
+    /********** Video ************/
+
+    /**
+    * Check both clips have same video stream type */
+    if( pClip1Properties->VideoStreamType != pClip2Properties->VideoStreamType )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_editCheckClipCompatibility: Clips don't have the same video format");
+        video_err = M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT;
+        goto audio_analysis;
+    }
+
+    /**
+    * Check both clips have the same video frame size */
+    if( ( pClip1Properties->uiVideoWidth != pClip2Properties->uiVideoWidth)
+        || (pClip1Properties->uiVideoHeight
+        != pClip2Properties->uiVideoHeight) )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_editCheckClipCompatibility: Clips don't have the same video frame size");
+        video_err = M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE;
+        goto audio_analysis;
+    }
+
+    switch( pClip1Properties->VideoStreamType )
+    {
+        case M4VIDEOEDITING_kH263:
+        case M4VIDEOEDITING_kH264:
+            /**< nothing to check here */
+            break;
+
+        case M4VIDEOEDITING_kMPEG4_EMP:
+        case M4VIDEOEDITING_kMPEG4:
+            /**
+            * Check both streams have the same time scale */
+            if( pClip1Properties->uiVideoTimeScale
+                != pClip2Properties->uiVideoTimeScale )
+            {
+                M4OSA_TRACE1_2(
+                    "M4VSS3GPP_editCheckClipCompatibility: Clips don't have the same video time\
+                    scale (%d != %d), returning M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE",
+                    pClip1Properties->uiVideoTimeScale,
+                    pClip2Properties->uiVideoTimeScale);
+                video_err = M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE;
+                goto audio_analysis;
+            }
+            /**
+            * Check both streams have the same use of data partitioning */
+            if( pClip1Properties->bMPEG4dataPartition
+                != pClip2Properties->bMPEG4dataPartition )
+            {
+                M4OSA_TRACE1_2(
+                    "M4VSS3GPP_editCheckClipCompatibility:\
+                    Clips don't have the same use of data partitioning (%d != %d),\
+                    returning M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING",
+                    pClip1Properties->bMPEG4dataPartition,
+                    pClip2Properties->bMPEG4dataPartition);
+                video_err = M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING;
+                goto audio_analysis;
+            }
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editCheckClipCompatibility: unknown video stream type (0x%x),\
+                returning M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT",
+                pClip1Properties->VideoStreamType);
+            video_err =
+                M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT; /**< this error should never happen,
+                                                              it's here for code safety only... */
+            goto audio_analysis;
+    }
+
+    pClip2Properties->bVideoIsCompatibleWithMasterClip = M4OSA_TRUE;
+
+    /********** Audio ************/
+
+audio_analysis:
+    if( M4VIDEOEDITING_kNoneAudio != pClip1Properties->
+        AudioStreamType ) /**< if there is an audio stream */
+    {
+        /**
+        * Check audio format is AAC */
+        switch( pClip1Properties->AudioStreamType )
+        {
+            case M4VIDEOEDITING_kAAC:
+            case M4VIDEOEDITING_kAACplus:
+            case M4VIDEOEDITING_keAACplus:
+                bClip1IsAAC = M4OSA_TRUE;
+                break;
+            default:
+                break;
+        }
+    }
+
+    if( M4VIDEOEDITING_kNoneAudio != pClip2Properties->
+        AudioStreamType ) /**< if there is an audio stream */
+    {
+        /**
+        * Check audio format is AAC */
+        switch( pClip2Properties->AudioStreamType )
+        {
+            case M4VIDEOEDITING_kAAC:
+            case M4VIDEOEDITING_kAACplus:
+            case M4VIDEOEDITING_keAACplus:
+                bClip2IsAAC = M4OSA_TRUE;
+                break;
+            default:
+                break;
+        }
+    }
+
+    /**
+    * If one of the clips has no audio, they are considered compatible... */
+    if( ( pClip1Properties->AudioStreamType != M4VIDEOEDITING_kNoneAudio)
+        && (pClip2Properties->AudioStreamType != M4VIDEOEDITING_kNoneAudio) )
+    {
+        /**
+        * Check both clips have the same audio stream type;
+        * AAC, AAC+ and eAAC+ are considered mixable with each other */
+        if( ( pClip1Properties->AudioStreamType
+            != pClip2Properties->AudioStreamType)
+            && (( M4OSA_FALSE == bClip1IsAAC) || (M4OSA_FALSE == bClip2IsAAC)) )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editCheckClipCompatibility:\
+                Clips don't have the same Audio Stream Type");
+
+            audio_err = M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE;
+            goto analysis_done;
+        }
+
+        /**
+        * Check both clips have same number of channels */
+        if( pClip1Properties->uiNbChannels != pClip2Properties->uiNbChannels )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editCheckClipCompatibility: Clips don't have the same Nb of Channels");
+            audio_err = M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS;
+            goto analysis_done;
+        }
+
+        /**
+        * Check both clips have same sampling frequency */
+        if( pClip1Properties->uiSamplingFrequency
+            != pClip2Properties->uiSamplingFrequency )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editCheckClipCompatibility:\
+                Clips don't have the same Sampling Frequency");
+            audio_err = M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY;
+            goto analysis_done;
+        }
+    }
+
+    pClip2Properties->bAudioIsCompatibleWithMasterClip = M4OSA_TRUE;
+
+    /**
+    * Return with no error */
+
+analysis_done:
+    if( video_err != M4NO_ERROR )
+        return video_err;
+
+    if( audio_err != M4NO_ERROR )
+        return audio_err;
+
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_editCheckClipCompatibility(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intBuildAnalysis()
+ * @brief    Get video and audio properties from the clip streams
+ * @note    This function must return fatal errors only (errors that should not happen
+ *        in the final integrated product).
+ * @param   pClipCtxt            (IN) internal clip context
+ * @param    pClipProperties        (OUT) Pointer to a valid ClipProperties structure.
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intBuildAnalysis( M4VSS3GPP_ClipContext *pClipCtxt,
+                                     M4VIDEOEDITING_ClipProperties *pClipProperties )
+{
+    M4OSA_ERR err;
+    M4DECODER_MPEG4_DecoderConfigInfo DecConfigInfo;
+    M4DECODER_VideoSize dummySize;
+    M4DECODER_AVCProfileLevel AVCProfle;
+
+    pClipProperties->bAnalysed = M4OSA_FALSE;
+
+    /**
+    * Reset video characteristics */
+    pClipProperties->VideoStreamType = M4VIDEOEDITING_kNoneVideo;
+    pClipProperties->uiClipVideoDuration = 0;
+    pClipProperties->uiVideoBitrate = 0;
+    pClipProperties->uiVideoMaxAuSize = 0;
+    pClipProperties->uiVideoWidth = 0;
+    pClipProperties->uiVideoHeight = 0;
+    pClipProperties->uiVideoTimeScale = 0;
+    pClipProperties->fAverageFrameRate = 0.0;
+    pClipProperties->ProfileAndLevel =
+        M4VIDEOEDITING_kProfile_and_Level_Out_Of_Range;
+    pClipProperties->uiH263level = 0;
+    pClipProperties->uiVideoProfile = 0;
+    pClipProperties->bMPEG4dataPartition = M4OSA_FALSE;
+    pClipProperties->bMPEG4rvlc = M4OSA_FALSE;
+    pClipProperties->bMPEG4resynchMarker = M4OSA_FALSE;
+
+    M4OSA_memset((M4OSA_MemAddr8) &pClipProperties->ftyp,
+        sizeof(pClipProperties->ftyp), 0);
+
+    /**
+    * Video Analysis */
+    if( M4OSA_NULL != pClipCtxt->pVideoStream )
+    {
+        pClipProperties->uiVideoWidth = pClipCtxt->pVideoStream->m_videoWidth;
+        pClipProperties->uiVideoHeight = pClipCtxt->pVideoStream->m_videoHeight;
+        pClipProperties->fAverageFrameRate =
+            pClipCtxt->pVideoStream->m_averageFrameRate;
+
+        switch( pClipCtxt->pVideoStream->m_basicProperties.m_streamType )
+        {
+            case M4DA_StreamTypeVideoMpeg4:
+
+                pClipProperties->VideoStreamType = M4VIDEOEDITING_kMPEG4;
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+   /* A clip can be analysed outside of any edit context (for instance to check two clips
+   for compatibility before allocating an edit context for them). In that case there is no
+   way to pass an external video decoder to this function, since external decoders work by
+   being registered in an existing context. Using a full decoder (let alone a hardware one)
+   just to get the clip config info would be overkill anyway: for a HW decoder the shell
+   builds the config info itself, so only that detached piece of functionality is needed.
+   Therefore, when HW/external decoders may be present, we directly use the DSI parsing
+   function of the external decoder shell (which is known to be present, since HW decoders
+   are possible) to get the config info. This path is used even if the software decoder is
+   present and will actually be used later: figuring out the config involves no real
+   decoding and nothing decoder specific (only the fact that the stream is MPEG-4 matters),
+   so it is functionally equivalent to the previous behaviour and cheap enough not to be a
+   performance concern. */
+
+                err = M4DECODER_EXTERNAL_ParseVideoDSI(pClipCtxt->pVideoStream->
+                    m_basicProperties.m_pDecoderSpecificInfo,
+                    pClipCtxt->pVideoStream->
+                    m_basicProperties.m_decoderSpecificInfoSize,
+                    &DecConfigInfo, &dummySize);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intBuildAnalysis():\
+                        M4DECODER_EXTERNAL_ParseVideoDSI returns 0x%08X", err);
+                    return err;
+                }
+
+    #else /* an external decoder cannot be present, so we can rely on the
+                software decoder to be installed already */
+                /* Get MPEG-4 decoder config. */
+
+                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctGetOption(
+                    pClipCtxt->pViDecCtxt,
+                    M4DECODER_MPEG4_kOptionID_DecoderConfigInfo,
+                    &DecConfigInfo);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intBuildAnalysis(): m_pFctGetOption(DecConfigInfo)\
+                        returns 0x%x", err);
+                    return err;
+                }
+
+    #endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+                pClipProperties->uiVideoProfile = DecConfigInfo.uiProfile;
+                pClipProperties->uiVideoTimeScale = DecConfigInfo.uiTimeScale;
+                pClipProperties->bMPEG4dataPartition =
+                    DecConfigInfo.bDataPartition;
+                pClipProperties->bMPEG4rvlc = DecConfigInfo.bUseOfRVLC;
+                pClipProperties->bMPEG4resynchMarker =
+                    DecConfigInfo.uiUseOfResynchMarker;
+
+                /* Supported enum value for profile and level */
+                switch( pClipProperties->uiVideoProfile )
+                {
+                    case 0x08:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_0;
+                        break;
+
+                    case 0x09:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_0b;
+                        break;
+
+                    case 0x01:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_1;
+                        break;
+
+                    case 0x02:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_2;
+                        break;
+
+                    case 0x03:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_3;
+                        break;
+
+                    case 0x04:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_4a;
+                        break;
+
+                    case 0x05:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_5;
+                        break;
+                }
+                break;
+
+            case M4DA_StreamTypeVideoH263:
+
+                pClipProperties->VideoStreamType = M4VIDEOEDITING_kH263;
+
+                /* Get the H263 level, which is the sixth byte of the DSI */
+                pClipProperties->uiH263level = pClipCtxt->pVideoStream->
+                    m_basicProperties.m_pDecoderSpecificInfo[5];
+                /* Get the H263 profile, which is the seventh byte of the DSI */
+                pClipProperties->uiVideoProfile = pClipCtxt->pVideoStream->
+                    m_basicProperties.m_pDecoderSpecificInfo[6];
+                /* H263 time scale is always 30000 */
+                pClipProperties->uiVideoTimeScale = 30000;
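+                /* Reference note (assumption, not derived from this file): in the 3GPP
+                   'd263' sample entry the decoder specific info starts with a 4-byte
+                   vendor code and a 1-byte decoder version, which is why the H.263
+                   level and profile are read at offsets [5] and [6] above. */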
+
+                /* Supported enum value for profile and level */
+                if( pClipProperties->uiVideoProfile == 0 )
+                {
+                    switch( pClipProperties->uiH263level )
+                    {
+                        case 10:
+                            pClipProperties->ProfileAndLevel =
+                                M4VIDEOEDITING_kH263_Profile_0_Level_10;
+                            break;
+
+                        case 20:
+                            pClipProperties->ProfileAndLevel =
+                                M4VIDEOEDITING_kH263_Profile_0_Level_20;
+                            break;
+
+                        case 30:
+                            pClipProperties->ProfileAndLevel =
+                                M4VIDEOEDITING_kH263_Profile_0_Level_30;
+                            break;
+
+                        case 40:
+                            pClipProperties->ProfileAndLevel =
+                                M4VIDEOEDITING_kH263_Profile_0_Level_40;
+                            break;
+
+                        case 45:
+                            pClipProperties->ProfileAndLevel =
+                                M4VIDEOEDITING_kH263_Profile_0_Level_45;
+                            break;
+                    }
+                }
+                break;
+
+            case M4DA_StreamTypeVideoMpeg4Avc:
+
+                pClipProperties->VideoStreamType = M4VIDEOEDITING_kH264;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+                err = M4DECODER_EXTERNAL_ParseAVCDSI(pClipCtxt->pVideoStream->
+                    m_basicProperties.m_pDecoderSpecificInfo,
+                    pClipCtxt->pVideoStream->
+                    m_basicProperties.m_decoderSpecificInfoSize,
+                    &AVCProfle);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intBuildAnalysis(): \
+                         M4DECODER_EXTERNAL_ParseAVCDSI returns 0x%08X",
+                         err);
+                    return err;
+                }
+
+#else /* an external decoder cannot be present, so we can rely on the
+                software decoder to be installed already */
+
+                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctGetOption(
+                    pClipCtxt->pViDecCtxt,
+                    M4DECODER_kOptionID_AVCProfileAndLevel, &AVCProfle);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intBuildAnalysis(): m_pFctGetOption(AVCProfileInfo)\
+                            returns 0x%x", err);
+                    return err;
+                }
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+                switch( AVCProfle )
+                {
+                    case M4DECODER_AVC_kProfile_0_Level_1:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_1;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_1b:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_1b;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_1_1:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_1_1;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_1_2:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_1_2;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_1_3:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_1_3;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_2:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_2;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_2_1:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_2_1;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_2_2:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_2_2;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_3:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_3;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_3_1:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_3_1;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_3_2:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_3_2;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_4:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_4;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_4_1:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_4_1;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_4_2:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_4_2;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_5:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_5;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_5_1:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_5_1;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_and_Level_Out_Of_Range:
+                    default:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kProfile_and_Level_Out_Of_Range;
+                }
+
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intBuildAnalysis: unknown input video format (0x%x),\
+                    returning M4NO_ERROR",pClipCtxt->pVideoStream->m_basicProperties.m_streamType);
+                return
+                    M4NO_ERROR; /**< We do not return error here.
+                                The video format compatibility check will be done later */
+        }
+
+        pClipProperties->uiClipVideoDuration =
+            (M4OSA_UInt32)pClipCtxt->pVideoStream->m_basicProperties.m_duration;
+        pClipProperties->uiVideoMaxAuSize =
+            pClipCtxt->pVideoStream->m_basicProperties.m_maxAUSize;
+
+        /* If the video bitrate is not available, estimate it from the overall bitrate */
+        pClipProperties->uiVideoBitrate =
+            (M4OSA_UInt32)pClipCtxt->pVideoStream->
+            m_basicProperties.m_averageBitRate;
+
+        if( 0 == pClipProperties->uiVideoBitrate )
+        {
+            pClipCtxt->ShellAPI.m_pReader->m_pFctGetOption(
+                pClipCtxt->pReaderContext, M4READER_kOptionID_Bitrate,
+                &pClipProperties->uiVideoBitrate);
+
+            if( M4OSA_NULL != pClipCtxt->pAudioStream )
+            {
+                /* we get the overall bitrate, subtract the audio bitrate if any */
+                pClipProperties->uiVideoBitrate -=
+                    pClipCtxt->pAudioStream->m_basicProperties.m_averageBitRate;
+            }
+        }
+    }
+
+    /**
+    * Reset audio characteristics */
+    pClipProperties->AudioStreamType = M4VIDEOEDITING_kNoneAudio;
+    pClipProperties->uiClipAudioDuration = 0;
+    pClipProperties->uiAudioBitrate = 0;
+    pClipProperties->uiAudioMaxAuSize = 0;
+    pClipProperties->uiNbChannels = 0;
+    pClipProperties->uiSamplingFrequency = 0;
+    pClipProperties->uiExtendedSamplingFrequency = 0;
+    pClipProperties->uiDecodedPcmSize = 0;
+
+    /**
+    * Audio Analysis */
+    if( M4OSA_NULL != pClipCtxt->pAudioStream )
+    {
+        switch( pClipCtxt->pAudioStream->m_basicProperties.m_streamType )
+        {
+            case M4DA_StreamTypeAudioAmrNarrowBand:
+
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_kAMR_NB;
+                break;
+
+            case M4DA_StreamTypeAudioAac:
+
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_kAAC;
+                break;
+
+            case M4DA_StreamTypeAudioMp3:
+
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_kMP3;
+                break;
+
+            case M4DA_StreamTypeAudioEvrc:
+
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_kEVRC;
+                break;
+
+            case M4DA_StreamTypeAudioPcm:
+
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_kPCM;
+                break;
+
+            default:
+
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intBuildAnalysis: unknown input audio format (0x%x),\
+                    returning M4NO_ERROR!",
+                    pClipCtxt->pAudioStream->m_basicProperties.m_streamType);
+                return
+                    M4NO_ERROR; /**< We do not return error here.
+                                The audio format compatibility check will be done later */
+        }
+
+        pClipProperties->uiAudioMaxAuSize =
+            pClipCtxt->pAudioStream->m_basicProperties.m_maxAUSize;
+        pClipProperties->uiClipAudioDuration =
+            (M4OSA_UInt32)pClipCtxt->pAudioStream->m_basicProperties.m_duration;
+
+        pClipProperties->uiNbChannels = pClipCtxt->pAudioStream->m_nbChannels;
+        pClipProperties->uiSamplingFrequency =
+            pClipCtxt->pAudioStream->m_samplingFrequency;
+        pClipProperties->uiDecodedPcmSize =
+            pClipCtxt->pAudioStream->m_byteFrameLength
+            * pClipCtxt->pAudioStream->m_byteSampleSize
+            * pClipCtxt->pAudioStream->m_nbChannels;
+
+        /**
+        * Bugfix P4ME00001128: With some IMTC files, the AMR bit rate is 0 kbps
+        according to the GetProperties function */
+        pClipProperties->uiAudioBitrate =
+            (M4OSA_UInt32)pClipCtxt->pAudioStream->
+            m_basicProperties.m_averageBitRate;
+
+        if( 0 == pClipProperties->uiAudioBitrate )
+        {
+            if( M4VIDEOEDITING_kAMR_NB == pClipProperties->AudioStreamType )
+            {
+                /**
+                * Better to return a guessed 12.2 kbps value than a sure-to-be-false 0 kbps value! */
+                pClipProperties->uiAudioBitrate = M4VSS3GPP_AMR_DEFAULT_BITRATE;
+            }
+            else if( M4VIDEOEDITING_kEVRC == pClipProperties->AudioStreamType )
+            {
+                /**
+                * Better to return a guessed 9.2 kbps value than a sure-to-be-false 0 kbps value! */
+                pClipProperties->uiAudioBitrate =
+                    M4VSS3GPP_EVRC_DEFAULT_BITRATE;
+            }
+            else
+            {
+                pClipCtxt->ShellAPI.m_pReader->m_pFctGetOption(
+                    pClipCtxt->pReaderContext, M4READER_kOptionID_Bitrate,
+                    &pClipProperties->uiAudioBitrate);
+
+                if( M4OSA_NULL != pClipCtxt->pVideoStream )
+                {
+                    /* we get the overall bitrate, subtract the video bitrate if any */
+                    pClipProperties->uiAudioBitrate -= pClipCtxt->pVideoStream->
+                        m_basicProperties.m_averageBitRate;
+                }
+            }
+        }
+
+        /* New aac properties */
+        if( M4DA_StreamTypeAudioAac
+            == pClipCtxt->pAudioStream->m_basicProperties.m_streamType )
+        {
+            pClipProperties->uiNbChannels = pClipCtxt->AacProperties.aNumChan;
+            pClipProperties->uiSamplingFrequency =
+                pClipCtxt->AacProperties.aSampFreq;
+
+            if( pClipCtxt->AacProperties.aSBRPresent )
+            {
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_kAACplus;
+                pClipProperties->uiExtendedSamplingFrequency =
+                    pClipCtxt->AacProperties.aExtensionSampFreq;
+            }
+
+            if( pClipCtxt->AacProperties.aPSPresent )
+            {
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_keAACplus;
+            }
+        }
+    }
+
+    /* Get 'ftyp' atom */
+    err = pClipCtxt->ShellAPI.m_pReader->m_pFctGetOption(
+        pClipCtxt->pReaderContext,
+        M4READER_kOptionID_3gpFtypBox, &pClipProperties->ftyp);
+
+    if( M4NO_ERROR == err )
+    {
+        M4OSA_UInt8 i;
+
+        for ( i = 0; i < pClipProperties->ftyp.nbCompatibleBrands; i++ )
+            if( M4VIDEOEDITING_BRAND_EMP
+                == pClipProperties->ftyp.compatible_brands[i] )
+                pClipProperties->VideoStreamType = M4VIDEOEDITING_kMPEG4_EMP;
+    }
+
+    /**
+    * We write the VSS 3GPP version in the clip analysis to be sure the integrator doesn't
+    * mix older analysis results with newer libraries */
+    pClipProperties->Version[0] = M4VIDEOEDITING_VERSION_MAJOR;
+    pClipProperties->Version[1] = M4VIDEOEDITING_VERSION_MINOR;
+    pClipProperties->Version[2] = M4VIDEOEDITING_VERSION_REVISION;
+
+    pClipProperties->FileType = pClipCtxt->pSettings->FileType;
+
+    if( pClipProperties->uiClipVideoDuration
+        > pClipProperties->uiClipAudioDuration )
+        pClipProperties->uiClipDuration = pClipProperties->uiClipVideoDuration;
+    else
+        pClipProperties->uiClipDuration = pClipProperties->uiClipAudioDuration;
+
+    /* Reset compatibility chart */
+    pClipProperties->bVideoIsEditable = M4OSA_FALSE;
+    pClipProperties->bAudioIsEditable = M4OSA_FALSE;
+    pClipProperties->bVideoIsCompatibleWithMasterClip = M4OSA_FALSE;
+    pClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
+
+    /* Analysis successfully completed */
+    pClipProperties->bAnalysed = M4OSA_TRUE;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intBuildAnalysis(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing()
+ * @brief    Check if the clip is compatible with VSS editing
+ * @note
+ * @param   pClipCtxt            (IN) internal clip context
+ * @param    pClipProperties     (OUT) Pointer to a valid ClipProperties structure.
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing(
+    M4VIDEOEDITING_ClipProperties *pClipProperties )
+{
+    M4OSA_UInt32 uiNbOfValidStreams = 0;
+    M4OSA_ERR video_err = M4NO_ERROR;
+    M4OSA_ERR audio_err = M4NO_ERROR;
+
+    /**
+    * Check that analysis has been generated by this version of the VSS3GPP library */
+    if( ( pClipProperties->Version[0] != M4VIDEOEDITING_VERSION_MAJOR)
+        || (pClipProperties->Version[1] != M4VIDEOEDITING_VERSION_MINOR)
+        || (pClipProperties->Version[2]
+    != M4VIDEOEDITING_VERSION_REVISION) )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing: The clip analysis has been generated\
+            by another version, returning M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION");
+        return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION;
+    }
+
+    /********* file type *********/
+
+    if( M4VIDEOEDITING_kFileType_AMR == pClipProperties->FileType )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing:\
+            returning M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED");
+        return M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED;
+    }
+
+    if( M4VIDEOEDITING_kFileType_MP3 == pClipProperties->FileType )
+    {
+        M4OSA_TRACE3_0(
+            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): returning M4NO_ERROR");
+        return M4NO_ERROR;
+    }
+
+    /********* Video *********/
+
+    if( M4VIDEOEDITING_kNoneVideo
+        != pClipProperties->VideoStreamType ) /**< if there is a video stream */
+    {
+        /**
+        * Check video format is MPEG-4 or H263 */
+        switch( pClipProperties->VideoStreamType )
+        {
+            case M4VIDEOEDITING_kH263:
+                if( M4VIDEOEDITING_kProfile_and_Level_Out_Of_Range
+                    == pClipProperties->ProfileAndLevel )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_intCheckClipCompatibleWithVssEditing():\
+                        unsupported H263 profile");
+                    video_err = M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE;
+                    break;
+                }
+                uiNbOfValidStreams++;
+                pClipProperties->bVideoIsEditable = M4OSA_TRUE;
+                break;
+
+            case M4VIDEOEDITING_kMPEG4_EMP:
+            case M4VIDEOEDITING_kMPEG4:
+                if( M4VIDEOEDITING_kProfile_and_Level_Out_Of_Range
+                    == pClipProperties->ProfileAndLevel )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_intCheckClipCompatibleWithVssEditing():\
+                        unsupported MPEG-4 profile");
+                    video_err = M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE;
+                    break;
+                }
+
+                if( M4OSA_TRUE == pClipProperties->bMPEG4rvlc )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_intCheckClipCompatibleWithVssEditing():\
+                        unsupported MPEG-4 RVLC tool");
+                    video_err = M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC;
+                    break;
+                }
+                uiNbOfValidStreams++;
+                pClipProperties->bVideoIsEditable = M4OSA_TRUE;
+                break;
+
+            case M4VIDEOEDITING_kH264:
+
+                uiNbOfValidStreams++;
+                pClipProperties->bVideoIsEditable = M4OSA_TRUE;
+                break;
+
+            default: /*< KO, we return error */
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): unsupported video format");
+                video_err = M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
+                break;
+        }
+    }
+    else
+    {
+        /**
+        * Audio-only streams are currently not supported by the VSS editing feature
+        (except in the MP3 case) */
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): No video stream in clip");
+        video_err = M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE;
+    }
+
+    /********* Audio *********/
+    if( M4VIDEOEDITING_kNoneAudio != pClipProperties->
+        AudioStreamType ) /**< if there is an audio stream */
+    {
+        /**
+        * Check audio format is AMR-NB, EVRC or AAC */
+        switch( pClipProperties->AudioStreamType )
+        {
+            case M4VIDEOEDITING_kAMR_NB:
+                pClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                uiNbOfValidStreams++;
+                break;
+
+            case M4VIDEOEDITING_kAAC:
+            case M4VIDEOEDITING_kAACplus:
+            case M4VIDEOEDITING_keAACplus:
+                switch( pClipProperties->uiSamplingFrequency )
+                {
+                case 8000:
+                case 16000:
+                case 22050:
+                case 24000:
+                case 32000:
+                case 44100:
+                case 48000:
+                    pClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                    break;
+
+                default:
+                    break;
+                }
+                uiNbOfValidStreams++;
+                break;
+
+            case M4VIDEOEDITING_kEVRC:
+                /*< OK, we proceed, no return */
+                uiNbOfValidStreams++;
+                break;
+
+            default: /*< KO, we return error */
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): unsupported audio format");
+                audio_err = M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
+                break;
+        }
+    }
+    else
+    {
+        /* Silence is always editable */
+        pClipProperties->bAudioIsEditable = M4OSA_TRUE;
+    }
+
+    /**
+    * Check there is at least one valid stream in the file... */
+    if( video_err != M4NO_ERROR )
+        return video_err;
+
+    if( audio_err != M4NO_ERROR )
+        return audio_err;
+
+    if( 0 == uiNbOfValidStreams )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): File contains no supported stream,\
+            returning M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE");
+        return M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
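+
+/* Illustrative sketch (not part of the original source): how a hypothetical
+   caller might react to the flags set by the compatibility check above. The
+   variable "clipProps" and the surrounding logic are assumptions for
+   illustration only.
+
+       M4VIDEOEDITING_ClipProperties clipProps;
+       M4OSA_ERR err;
+
+       ... fill clipProps with the result of the clip analysis ...
+
+       err = M4VSS3GPP_intCheckClipCompatibleWithVssEditing(&clipProps);
+
+       if( ( M4NO_ERROR == err) && (M4OSA_TRUE == clipProps.bVideoIsEditable) )
+       {
+           ... the video track can be edited by the VSS ...
+       }
+*/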
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCompatibility()
+ * @brief    This function checks whether two clips are compatible with each other for
+ *        the VSS 3GPP audio mixing feature.
+ * @note
+ * @param    pC                            (IN) Context of the audio mixer
+ * @param    pInputClipProperties        (IN) Clip analysis of the first clip
+ * @param    pAddedClipProperties        (IN) Clip analysis of the second clip
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return    M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return  M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP
+ ******************************************************************************
+ */
+M4OSA_ERR
+M4VSS3GPP_intAudioMixingCompatibility( M4VSS3GPP_InternalAudioMixingContext
+                                      *pC, M4VIDEOEDITING_ClipProperties *pInputClipProperties,
+                                      M4VIDEOEDITING_ClipProperties *pAddedClipProperties )
+{
+    M4OSA_Bool bClip1IsAAC = M4OSA_FALSE;
+    M4OSA_Bool bClip2IsAAC = M4OSA_FALSE;
+
+    /**
+    * Reset settings */
+    pInputClipProperties->bAudioIsEditable = M4OSA_FALSE;
+    pAddedClipProperties->bAudioIsEditable = M4OSA_FALSE;
+    pInputClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
+    pAddedClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
+
+    /**
+    * Check that analysis has been generated by this version of the VSS3GPP library */
+    if( ( pInputClipProperties->Version[0] != M4VIDEOEDITING_VERSION_MAJOR)
+        || (pInputClipProperties->Version[1] != M4VIDEOEDITING_VERSION_MINOR)
+        || (pInputClipProperties->Version[2]
+    != M4VIDEOEDITING_VERSION_REVISION) )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAudioMixingCompatibility: The clip analysis has been generated\
+            by another version, returning M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION");
+        return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION;
+    }
+
+    if( ( pAddedClipProperties->Version[0] != M4VIDEOEDITING_VERSION_MAJOR)
+        || (pAddedClipProperties->Version[1] != M4VIDEOEDITING_VERSION_MINOR)
+        || (pAddedClipProperties->Version[2]
+    != M4VIDEOEDITING_VERSION_REVISION) )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAudioMixingCompatibility: The clip analysis has been generated\
+            by another version, returning M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION");
+        return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION;
+    }
+
+    /********* input file type *********/
+
+    if( M4VIDEOEDITING_kFileType_3GPP != pInputClipProperties->FileType )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAudioMixingCompatibility:\
+            returning M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP");
+        return M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP;
+    }
+
+    /********* input audio *********/
+
+    if( M4VIDEOEDITING_kNoneAudio != pInputClipProperties->
+        AudioStreamType ) /**< if there is an audio stream */
+    {
+        /**
+        * Check audio format is AMR-NB or AAC */
+        switch( pInputClipProperties->AudioStreamType )
+        {
+            case M4VIDEOEDITING_kAMR_NB:
+                pInputClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                break;
+
+            case M4VIDEOEDITING_kAAC:
+            case M4VIDEOEDITING_kAACplus:
+            case M4VIDEOEDITING_keAACplus:
+                switch( pInputClipProperties->uiSamplingFrequency )
+                {
+                case 8000:
+                case 16000:
+                case 22050:
+                case 24000:
+                case 32000:
+                case 44100:
+                case 48000:
+                    pInputClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                    break;
+
+                default:
+                    break;
+                }
+                bClip1IsAAC = M4OSA_TRUE;
+                break;
+
+            default:
+                break;
+        }
+    }
+    else
+    {
+        /* Silence is always editable */
+        pInputClipProperties->bAudioIsEditable = M4OSA_TRUE;
+    }
+
+    /********* added audio *********/
+
+    if( M4VIDEOEDITING_kNoneAudio != pAddedClipProperties->
+        AudioStreamType ) /**< if there is an audio stream */
+    {
+        /**
+        * Check audio format is AMR-NB or AAC */
+        switch( pAddedClipProperties->AudioStreamType )
+        {
+            case M4VIDEOEDITING_kAMR_NB:
+                pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+                    M4OSA_TRUE; /* this field is used to know if silence is supported */
+                break;
+
+            case M4VIDEOEDITING_kAAC:
+            case M4VIDEOEDITING_kAACplus:
+            case M4VIDEOEDITING_keAACplus:
+                switch( pAddedClipProperties->uiSamplingFrequency )
+                {
+                case 8000:
+                case 16000:
+                case 22050:
+                case 24000:
+                case 32000:
+                case 44100:
+                case 48000:
+                    pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                    break;
+
+                default:
+                    break;
+                }
+                pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+                    M4OSA_TRUE; /* this field is used to know if silence is supported */
+                bClip2IsAAC = M4OSA_TRUE;
+                break;
+
+            case M4VIDEOEDITING_kEVRC:
+                break;
+
+            case M4VIDEOEDITING_kPCM:
+                pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+                    M4OSA_TRUE; /* this field is used to know if silence is supported */
+
+                if( pAddedClipProperties->uiSamplingFrequency == 16000 )
+                {
+                    bClip2IsAAC = M4OSA_TRUE;
+                }
+                break;
+
+            case M4VIDEOEDITING_kMP3: /*RC*/
+                pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+                    M4OSA_TRUE; /* this field is used to know if silence is supported */
+                break;
+
+            default:
+                /* The writer cannot write this format into a 3GPP file */
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intAudioMixingCompatibility:\
+                    returning M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM");
+                return M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM;
+        }
+    }
+    else
+    {
+        /* Silence is always editable */
+        pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+        pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+            M4OSA_TRUE; /* this field is used to know if silence is supported */
+    }
+
+    if( pC->bRemoveOriginal == M4OSA_FALSE )
+    {
+        if( pInputClipProperties->uiSamplingFrequency
+            != pAddedClipProperties->uiSamplingFrequency )
+        {
+            /* We need to call the SSRC in order to align the audio sampling
+               frequencies and/or the number of channels */
+            /* Moreover, the audio encoder may be needed in case of audio replacement... */
+            pC->b_SSRCneeded = M4OSA_TRUE;
+        }
+
+        if( pInputClipProperties->uiNbChannels
+            < pAddedClipProperties->uiNbChannels )
+        {
+            /* Stereo to Mono */
+            pC->ChannelConversion = 1;
+        }
+        else if( pInputClipProperties->uiNbChannels
+            > pAddedClipProperties->uiNbChannels )
+        {
+            /* Mono to Stereo */
+            pC->ChannelConversion = 2;
+        }
+    }
+
+    pInputClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_TRUE;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intAudioMixingCompatibility(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_Codecs.c b/libvideoeditor/vss/src/M4VSS3GPP_Codecs.c
new file mode 100755
index 0000000..547b099
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_Codecs.c
@@ -0,0 +1,1052 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file   M4VSS3GPP_Codecs.c
+ * @brief  VSS implementation
+ * @note   This file contains all functions related to audio/video
+ *            codec manipulations.
+ *************************************************************************
+ */
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4OSA_Debug.h"             /**< Include for OSAL debug services */
+#include "M4VSS3GPP_ErrorCodes.h"
+#include "M4VSS3GPP_InternalTypes.h" /**< Internal types of the VSS */
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_clearInterfaceTables()
+ * @brief    Clear the encoder, decoder, reader and writer interface tables
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    The context is null
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_clearInterfaceTables( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+    M4OSA_UInt8 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+
+    /* Initialisation that allows checking for double registration */
+    pC->pWriterGlobalFcts = M4OSA_NULL;
+    pC->pWriterDataFcts = M4OSA_NULL;
+    pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
+    pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
+    pC->pCurrentAudioEncoderUserData = M4OSA_NULL;
+    pC->pCurrentAudioDecoderUserData = M4OSA_NULL;
+
+    pC->pCurrentVideoEncoderExternalAPI = M4OSA_NULL;
+    pC->pCurrentVideoEncoderUserData = M4OSA_NULL;
+
+    for ( i = 0; i < M4WRITER_kType_NB; i++ )
+    {
+        pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
+        pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
+    }
+
+    for ( i = 0; i < M4ENCODER_kVideo_NB; i++ )
+    {
+        pC->pVideoEncoderInterface[i] = M4OSA_NULL;
+        pC->pVideoEncoderExternalAPITable[i] = M4OSA_NULL;
+        pC->pVideoEncoderUserDataTable[i] = M4OSA_NULL;
+    }
+
+    for ( i = 0; i < M4ENCODER_kAudio_NB; i++ )
+    {
+        pC->pAudioEncoderInterface[i] = M4OSA_NULL;
+        pC->pAudioEncoderFlag[i] = M4OSA_FALSE;
+        pC->pAudioEncoderUserDataTable[i] = M4OSA_NULL;
+    }
+
+    /* Initialisation that allows checking for double registration */
+    pC->m_pReader = M4OSA_NULL;
+    pC->m_pReaderDataIt = M4OSA_NULL;
+    pC->m_uiNbRegisteredReaders = 0;
+
+    for ( i = 0; i < M4READER_kMediaType_NB; i++ )
+    {
+        pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
+        pC->m_pReaderDataItTable[i] = M4OSA_NULL;
+    }
+
+    pC->m_pVideoDecoder = M4OSA_NULL;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    pC->m_pCurrentVideoDecoderUserData = M4OSA_NULL;
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    pC->m_uiNbRegisteredVideoDec = 0;
+
+    for ( i = 0; i < M4DECODER_kVideoType_NB; i++ )
+    {
+        pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+        pC->m_pVideoDecoderUserDataTable[i] = M4OSA_NULL;
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    }
+
+    pC->m_pAudioDecoder = M4OSA_NULL;
+
+    for ( i = 0; i < M4AD_kType_NB; i++ )
+    {
+        pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
+        pC->m_pAudioDecoderFlagTable[i] = M4OSA_FALSE;
+        pC->pAudioDecoderUserDataTable[i] = M4OSA_NULL;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerWriter()
+ * @brief    This function will register a specific file format writer.
+ * @note    Depending on the media type, this function stores the writer interface
+ *        in the internal context.
+ * @param    pContext:    (IN) Execution context.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER    pContext, pWtrGlobalInterface or pWtrDataInterface is M4OSA_NULL
+ *                          (debug only), or invalid MediaType
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerWriter( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                   M4WRITER_OutputFileType MediaType,
+                                   M4WRITER_GlobalInterface *pWtrGlobalInterface,
+                                   M4WRITER_DataInterface *pWtrDataInterface )
+{
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((pC == M4OSA_NULL), M4ERR_PARAMETER,
+        "VSS: context is M4OSA_NULL in M4VSS3GPP_registerWriter");
+    M4OSA_DEBUG_IF2((pWtrGlobalInterface == M4OSA_NULL), M4ERR_PARAMETER,
+        "pWtrGlobalInterface is M4OSA_NULL in M4VSS3GPP_registerWriter");
+    M4OSA_DEBUG_IF2((pWtrDataInterface == M4OSA_NULL), M4ERR_PARAMETER,
+        "pWtrDataInterface is M4OSA_NULL in M4VSS3GPP_registerWriter");
+
+    M4OSA_TRACE3_3(
+        "VSS: M4VSS3GPP_registerWriter called with pContext=0x%x, pWtrGlobalInterface=0x%x,\
+        pWtrDataInterface=0x%x",
+        pC, pWtrGlobalInterface, pWtrDataInterface);
+
+    if( ( MediaType == M4WRITER_kUnknown) || (MediaType >= M4WRITER_kType_NB) )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->WriterInterface[MediaType].pGlobalFcts != M4OSA_NULL )
+    {
+        /* a writer corresponding to this media type has already been registered !*/
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "This media type has already been registered");
+        return M4ERR_PARAMETER;
+    }
+
+    /*
+    * Save writer interface in context */
+    pC->WriterInterface[MediaType].pGlobalFcts = pWtrGlobalInterface;
+    pC->WriterInterface[MediaType].pDataFcts = pWtrDataInterface;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerVideoEncoder()
+ * @brief    This function will register a specific video encoder.
+ * @note    Depending on the media type, this function stores the encoder interface
+ *        in the internal context.
+ * @param    pContext:    (IN) Execution context.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER    pContext or pEncGlobalInterface is M4OSA_NULL (debug only),
+ *                          or invalid MediaType
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerVideoEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                         M4ENCODER_Format MediaType,
+                                         M4ENCODER_GlobalInterface *pEncGlobalInterface )
+{
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((pC == M4OSA_NULL), M4ERR_PARAMETER,
+        "VSS: context is M4OSA_NULL in M4VSS3GPP_registerVideoEncoder");
+    M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL), M4ERR_PARAMETER,
+        "pEncGlobalInterface is M4OSA_NULL in M4VSS3GPP_registerVideoEncoder");
+
+    M4OSA_TRACE3_3(
+        "VSS: M4VSS3GPP_registerEncoder called with pContext=0x%x, pEncGlobalInterface=0x%x,\
+        MediaType=0x%x",
+        pC, pEncGlobalInterface, MediaType);
+
+    if( MediaType >= M4ENCODER_kVideo_NB )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "Invalid video encoder type");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->pVideoEncoderInterface[MediaType] != M4OSA_NULL )
+    {
+        /* can be legitimate, in cases where we have one version that can use external encoders
+        but which still has the built-in one to be able to work without an external encoder; in
+        this case the new encoder simply replaces the old one (i.e. we unregister it first). */
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+        if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+        {
+
+#endif
+
+            M4OSA_free((M4OSA_MemAddr32)pC->pVideoEncoderInterface[MediaType]);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+        }
+
+#endif
+
+        pC->pVideoEncoderInterface[MediaType] = M4OSA_NULL;
+    }
+
+    /*
+    * Save encoder interface in context */
+    pC->pVideoEncoderInterface[MediaType] = pEncGlobalInterface;
+    /* The actual userData and external API will be set by the registration function in the case
+    of an external encoder (add it as a parameter to this function in the long run?) */
+    pC->pVideoEncoderUserDataTable[MediaType] = M4OSA_NULL;
+    pC->pVideoEncoderExternalAPITable[MediaType] = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerAudioEncoder()
+ * @brief    This function will register a specific audio encoder.
+ * @note    Depending on the media type, this function stores the encoder interface
+ *        in the internal context.
+ * @param    pContext:                (IN) Execution context.
+ * @param    mediaType:                (IN) The media type.
+ * @param    pEncGlobalInterface:    (IN) The encoder interface functions.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext or pEncGlobalInterface is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerAudioEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                         M4ENCODER_AudioFormat MediaType,
+                                         M4ENCODER_AudioGlobalInterface *pEncGlobalInterface )
+{
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((pC == M4OSA_NULL), M4ERR_PARAMETER,
+        "VSS: context is M4OSA_NULL in M4VSS3GPP_registerAudioEncoder");
+    M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL), M4ERR_PARAMETER,
+        "pEncGlobalInterface is M4OSA_NULL in M4VSS3GPP_registerAudioEncoder");
+
+    M4OSA_TRACE3_3(
+        "VSS: M4VSS3GPP_registerAudioEncoder called pContext=0x%x, pEncGlobalInterface=0x%x,\
+        MediaType=0x%x",
+        pC, pEncGlobalInterface, MediaType);
+
+    if( MediaType >= M4ENCODER_kAudio_NB )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "Invalid audio encoder type");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->pAudioEncoderInterface[MediaType] != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pAudioEncoderInterface[MediaType]);
+        pC->pAudioEncoderInterface[MediaType] = M4OSA_NULL;
+    }
+    /*
+    * Save encoder interface in context */
+    pC->pAudioEncoderInterface[MediaType] = pEncGlobalInterface;
+    pC->pAudioEncoderFlag[MediaType] = M4OSA_FALSE; /* internal encoder */
+    pC->pAudioEncoderUserDataTable[MediaType] = M4OSA_NULL;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_registerAudioEncoder: pC->pAudioEncoderInterface[0x%x] = 0x%x",
+        MediaType, pC->pAudioEncoderInterface[MediaType]);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerReader()
+ * @brief    Register reader.
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerReader( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                   M4READER_MediaType mediaType,
+                                   M4READER_GlobalInterface *pRdrGlobalInterface,
+                                   M4READER_DataInterface *pRdrDataInterface )
+{
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrGlobalInterface), M4ERR_PARAMETER,
+        "M4VSS3GPP_registerReader: invalid pointer on global interface");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrDataInterface), M4ERR_PARAMETER,
+        "M4VSS3GPP_registerReader: invalid pointer on data interface");
+
+    if( mediaType == M4READER_kMediaTypeUnknown
+        || mediaType >= M4READER_kMediaType_NB )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->m_pReaderGlobalItTable[mediaType] != M4OSA_NULL )
+    {
+        /* a reader corresponding to this media type has already been registered !*/
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "This media type has already been registered");
+        return M4ERR_PARAMETER;
+    }
+
+    pC->m_pReaderGlobalItTable[mediaType] = pRdrGlobalInterface;
+    pC->m_pReaderDataItTable[mediaType] = pRdrDataInterface;
+
+    pC->m_uiNbRegisteredReaders++;
+
+    return M4NO_ERROR;
+}
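+
+/* Illustrative sketch (not part of the original source): how a hypothetical
+   shell could chain the functions above to register a 3GPP reader. The
+   variables "codecCtxt", "p3gpReaderGlobalIt" and "p3gpReaderDataIt" are
+   assumptions for illustration only; the real interface pointers are supplied
+   by the media and codec subscription module.
+
+       M4VSS3GPP_MediaAndCodecCtxt codecCtxt;
+       M4READER_GlobalInterface *p3gpReaderGlobalIt = ...;
+       M4READER_DataInterface *p3gpReaderDataIt = ...;
+       M4OSA_ERR err;
+
+       err = M4VSS3GPP_clearInterfaceTables(&codecCtxt);
+
+       if( M4NO_ERROR == err )
+       {
+           err = M4VSS3GPP_registerReader(&codecCtxt, M4READER_kMediaType3GPP,
+               p3gpReaderGlobalIt, p3gpReaderDataIt);
+       }
+*/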
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerVideoDecoder()
+ * @brief    Register video decoder
+ * @param    pContext                (IN/OUT) VSS context.
+ * @param    decoderType            (IN) Decoder type
+ * @param    pDecoderInterface    (IN) Decoder interface.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only),
+ *                                or the decoder type is invalid
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerVideoDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                         M4DECODER_VideoType decoderType,
+                                         M4DECODER_VideoInterface *pDecoderInterface )
+{
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
+        "M4VSS3GPP_registerVideoDecoder: invalid pointer on decoder interface");
+
+    if( decoderType >= M4DECODER_kVideoType_NB )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "Invalid video decoder type");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->m_pVideoDecoderItTable[decoderType] != M4OSA_NULL )
+    {
+#ifndef M4VSS_ENABLE_EXTERNAL_DECODERS
+        /* a decoder corresponding to this media type has already been registered !*/
+
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "Decoder has already been registered");
+        return M4ERR_PARAMETER;
+
+#else /* external decoders are possible */
+        /* can be legitimate, in cases where we have one version that can use external decoders
+        but which still has the built-in one to be able to work without an external decoder; in
+        this case the new decoder simply replaces the old one (i.e. we unregister it first). */
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+        if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+        {
+
+#endif
+
+            M4OSA_free(
+                (M4OSA_MemAddr32)pC->m_pVideoDecoderItTable[decoderType]);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+        }
+
+#endif
+
+        pC->m_pVideoDecoderItTable[decoderType] = M4OSA_NULL;
+        /* oh, and don't forget the user data, too. */
+        if( pC->m_pVideoDecoderUserDataTable[decoderType] != M4OSA_NULL )
+        {
+            M4OSA_free(
+                (M4OSA_MemAddr32)pC->m_pVideoDecoderUserDataTable[decoderType]);
+            pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
+        }
+#endif /* are external decoders possible? */
+
+    }
+
+    pC->m_pVideoDecoderItTable[decoderType] = pDecoderInterface;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
+    /* The actual userData will be set by the registration function in the case
+    of an external decoder (add it as a parameter to this function in the long run?) */
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    pC->m_uiNbRegisteredVideoDec++;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerAudioDecoder()
+ * @brief    Register audio decoder
+ * @note    This function is used internally by the VSS to register NXP audio decoders.
+ * @param    context                (IN/OUT) VSS context.
+ * @param    decoderType            (IN) Audio decoder type
+ * @param    pDecoderInterface    (IN) Audio decoder interface.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:   A parameter is null, or the decoder type is invalid (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerAudioDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                         M4AD_Type decoderType, M4AD_Interface *pDecoderInterface)
+{
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
+        "M4VSS3GPP_registerAudioDecoder: invalid pointer on decoder interface");
+
+    if( decoderType >= M4AD_kType_NB )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "Invalid audio decoder type");
+        return M4ERR_PARAMETER;
+    }
+    if( M4OSA_NULL != pC->m_pAudioDecoderItTable[decoderType] )
+    {
+        /* A decoder of this type has already been registered: free the old
+        interface before replacing it with the new one */
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pAudioDecoderItTable[decoderType]);
+        pC->m_pAudioDecoderItTable[decoderType] = M4OSA_NULL;
+    }
+
+    pC->m_pAudioDecoderItTable[decoderType] = pDecoderInterface;
+    pC->m_pAudioDecoderFlagTable[decoderType] =
+        M4OSA_FALSE; /* internal decoder */
+    pC->pAudioDecoderUserDataTable[decoderType] = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_unRegisterAllWriters()
+ * @brief    Unregister all writers
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_unRegisterAllWriters( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+    M4OSA_Int32 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+
+    for ( i = 0; i < M4WRITER_kType_NB; i++ )
+    {
+        if( pC->WriterInterface[i].pGlobalFcts != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->WriterInterface[i].pGlobalFcts);
+            pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
+        }
+
+        if( pC->WriterInterface[i].pDataFcts != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->WriterInterface[i].pDataFcts);
+            pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
+        }
+    }
+
+    pC->pWriterGlobalFcts = M4OSA_NULL;
+    pC->pWriterDataFcts = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_unRegisterAllEncoders()
+ * @brief    Unregister all encoders
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_unRegisterAllEncoders( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+    M4OSA_Int32 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_TRACE3_1("M4VSS3GPP_unRegisterAllEncoders: pC=0x%x", pC);
+
+    for ( i = 0; i < M4ENCODER_kVideo_NB; i++ )
+    {
+        if( pC->pVideoEncoderInterface[i] != M4OSA_NULL )
+        {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+            {
+
+#endif
+
+                M4OSA_free((M4OSA_MemAddr32)pC->pVideoEncoderInterface[i]);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            }
+
+#endif
+
+            pC->pVideoEncoderInterface[i] = M4OSA_NULL;
+        }
+    }
+
+    for ( i = 0; i < M4ENCODER_kAudio_NB; i++ )
+    {
+        if( pC->pAudioEncoderInterface[i] != M4OSA_NULL )
+        {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+            {
+
+#endif
+                /* Don't free external audio encoder interfaces */
+
+                if( M4OSA_FALSE == pC->pAudioEncoderFlag[i] )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)pC->pAudioEncoderInterface[i]);
+                }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            }
+
+#endif
+
+            pC->pAudioEncoderInterface[i] = M4OSA_NULL;
+        }
+    }
+
+    pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
+    pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_unRegisterAllReaders()
+ * @brief    Unregister all readers
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_unRegisterAllReaders( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+    M4OSA_Int32 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+
+    for ( i = 0; i < M4READER_kMediaType_NB; i++ )
+    {
+        if( pC->m_pReaderGlobalItTable[i] != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pReaderGlobalItTable[i]);
+            pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
+        }
+
+        if( pC->m_pReaderDataItTable[i] != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pReaderDataItTable[i]);
+            pC->m_pReaderDataItTable[i] = M4OSA_NULL;
+        }
+    }
+
+    pC->m_uiNbRegisteredReaders = 0;
+    pC->m_pReader = M4OSA_NULL;
+    pC->m_pReaderDataIt = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_unRegisterAllDecoders()
+ * @brief    Unregister all decoders
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_unRegisterAllDecoders( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+    M4OSA_Int32 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_TRACE3_1("M4VSS3GPP_unRegisterAllDecoders: pC=0x%x", pC);
+
+    for ( i = 0; i < M4DECODER_kVideoType_NB; i++ )
+    {
+        if( pC->m_pVideoDecoderItTable[i] != M4OSA_NULL )
+        {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+            {
+
+#endif
+
+                M4OSA_free((M4OSA_MemAddr32)pC->m_pVideoDecoderItTable[i]);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            }
+
+#endif
+
+            pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#if 0 /* This is to avoid freeing OMX core context, passed as user data */
+
+            if( pC->m_pVideoDecoderUserDataTable[i] != M4OSA_NULL )
+            {
+                M4OSA_free(
+                    (M4OSA_MemAddr32)pC->m_pVideoDecoderUserDataTable[i]);
+                /* there ought to be a better pattern... right? */
+                pC->m_pVideoDecoderUserDataTable[i] = M4OSA_NULL;
+            }
+
+#endif
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+        }
+    }
+
+    for ( i = 0; i < M4AD_kType_NB; i++ )
+    {
+        if( pC->m_pAudioDecoderItTable[i] != M4OSA_NULL )
+        {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+            {
+
+#endif
+                /* Don't free external audio decoder interfaces */
+
+                if( M4OSA_FALSE == pC->m_pAudioDecoderFlagTable[i] )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)pC->m_pAudioDecoderItTable[i]);
+                }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            }
+
+#endif
+
+            pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
+        }
+    }
+
+    pC->m_uiNbRegisteredVideoDec = 0;
+    pC->m_pVideoDecoder = M4OSA_NULL;
+
+    pC->m_pAudioDecoder = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentWriter()
+ * @brief    Set current writer
+ * @param    pContext            (IN/OUT) VSS context.
+ * @param    mediaType            (IN) Media type.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentWriter( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                     M4VIDEOEDITING_FileType mediaType )
+{
+    M4WRITER_OutputFileType writerType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+
+    switch( mediaType )
+    {
+        case M4VIDEOEDITING_kFileType_3GPP:
+            writerType = M4WRITER_k3GPP;
+            break;
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
+                "Writer type not supported");
+            return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
+    }
+
+    pC->pWriterGlobalFcts = pC->WriterInterface[writerType].pGlobalFcts;
+    pC->pWriterDataFcts = pC->WriterInterface[writerType].pDataFcts;
+
+    if( pC->pWriterGlobalFcts == M4OSA_NULL
+        || pC->pWriterDataFcts == M4OSA_NULL )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
+            "Writer type not supported");
+        M4OSA_TRACE1_0("Writer type not supported");
+        return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
+    }
+
+    pC->pWriterDataFcts->pWriterContext = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentVideoEncoder()
+ * @brief    Set a video encoder
+ * @param    pContext            (IN/OUT) VSS context.
+ * @param    MediaType           (IN) Encoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentVideoEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                           M4SYS_StreamType mediaType )
+{
+    M4ENCODER_Format encoderType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_TRACE3_2("M4VSS3GPP_setCurrentVideoEncoder: pC=0x%x, mediaType=0x%x",
+        pC, mediaType);
+
+    switch( mediaType )
+    {
+        case M4SYS_kH263:
+            encoderType = M4ENCODER_kH263;
+            break;
+
+        case M4SYS_kMPEG_4:
+            encoderType = M4ENCODER_kMPEG4;
+            break;
+
+        case M4SYS_kH264:
+            encoderType = M4ENCODER_kH264;
+            break;
+
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE,
+                M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT,
+                "Video encoder type not supported");
+            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
+    }
+
+    pC->pVideoEncoderGlobalFcts = pC->pVideoEncoderInterface[encoderType];
+    pC->pCurrentVideoEncoderExternalAPI =
+        pC->pVideoEncoderExternalAPITable[encoderType];
+    pC->pCurrentVideoEncoderUserData =
+        pC->pVideoEncoderUserDataTable[encoderType];
+
+    if( pC->pVideoEncoderGlobalFcts == M4OSA_NULL )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE,
+            M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT,
+            "Video encoder type not supported");
+        M4OSA_TRACE1_0("Video encoder type not supported");
+        return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentAudioEncoder()
+ * @brief    Set an audio encoder
+ * @param    context            (IN/OUT) VSS context.
+ * @param    MediaType        (IN) Encoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentAudioEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                           M4SYS_StreamType mediaType )
+{
+    M4ENCODER_AudioFormat encoderType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_TRACE3_2("M4VSS3GPP_setCurrentAudioEncoder: pC=0x%x, mediaType=0x%x",
+        pC, mediaType);
+
+    switch( mediaType )
+    {
+        case M4SYS_kAMR:
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_setCurrentAudioEncoder: encoder type AMR");
+            encoderType = M4ENCODER_kAMRNB;
+            break;
+
+        case M4SYS_kAAC:
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_setCurrentAudioEncoder: encoder type AAC");
+            encoderType = M4ENCODER_kAAC;
+            break;
+
+       default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE,
+                M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT,
+                "Audio encoder type not supported");
+            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
+    }
+
+    pC->pAudioEncoderGlobalFcts = pC->pAudioEncoderInterface[encoderType];
+    pC->pCurrentAudioEncoderUserData =
+        pC->pAudioEncoderUserDataTable[encoderType];
+
+    M4OSA_TRACE3_3(
+        "M4VSS3GPP_setCurrentAudioEncoder: pC->pAudioEncoderInterface[0x%x]=0x%x,\
+        pC->pAudioEncoderGlobalFcts = 0x%x",
+        encoderType, pC->pAudioEncoderInterface[encoderType],
+        pC->pAudioEncoderGlobalFcts);
+
+    if( pC->pAudioEncoderGlobalFcts == M4OSA_NULL )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE,
+            M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT,
+            "Audio encoder type not supported");
+        M4OSA_TRACE1_0("Audio encoder type not supported");
+        return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentReader()
+ * @brief    Set current reader
+ * @param    pContext            (IN/OUT) VSS context.
+ * @param    mediaType            (IN) Media type.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4VSS3GPP_ERR_INVALID_FILE_TYPE:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentReader( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                     M4VIDEOEDITING_FileType mediaType )
+{
+    M4READER_MediaType readerType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+
+    switch( mediaType )
+    {
+        case M4VIDEOEDITING_kFileType_3GPP:
+
+        case M4VIDEOEDITING_kFileType_MP4:
+            readerType = M4READER_kMediaType3GPP;
+            break;
+
+        case M4VIDEOEDITING_kFileType_AMR:
+            readerType = M4READER_kMediaTypeAMR;
+            break;
+
+        case M4VIDEOEDITING_kFileType_MP3:
+            readerType = M4READER_kMediaTypeMP3;
+            break;
+
+        case M4VIDEOEDITING_kFileType_PCM:
+            readerType = M4READER_kMediaTypePCM;
+            break;
+
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
+                "Reader type not supported");
+            return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
+    }
+
+    pC->m_pReader = pC->m_pReaderGlobalItTable[readerType];
+    pC->m_pReaderDataIt = pC->m_pReaderDataItTable[readerType];
+
+    if( pC->m_pReader == M4OSA_NULL || pC->m_pReaderDataIt == M4OSA_NULL )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
+            "Reader type not supported");
+        M4OSA_TRACE1_0("Reader type not supported");
+        return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
+    }
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentVideoDecoder()
+ * @brief    Set a video decoder
+ * @param    pC                (IN/OUT) VSS context.
+ * @param    mediaType        (IN) Media type of the video stream to decode
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ * @return    M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentVideoDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                           M4_StreamType mediaType )
+{
+    M4DECODER_VideoType decoderType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_TRACE3_2("M4VSS3GPP_setCurrentVideoDecoder: pC=0x%x, mediaType=0x%x",
+        pC, mediaType);
+
+    switch( mediaType )
+    {
+        case M4DA_StreamTypeVideoMpeg4:
+        case M4DA_StreamTypeVideoH263:
+            decoderType = M4DECODER_kVideoTypeMPEG4;
+            break;
+
+        case M4DA_StreamTypeVideoMpeg4Avc:
+            decoderType = M4DECODER_kVideoTypeAVC;
+            break;
+
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE,
+                M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT,
+                "Video decoder type not supported");
+            return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
+    }
+
+    pC->m_pVideoDecoder = pC->m_pVideoDecoderItTable[decoderType];
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    pC->m_pCurrentVideoDecoderUserData =
+        pC->m_pVideoDecoderUserDataTable[decoderType];
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    if( pC->m_pVideoDecoder == M4OSA_NULL )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE,
+            M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT,
+            "Video decoder type not supported");
+        M4OSA_TRACE1_0("Video decoder type not supported");
+        return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentAudioDecoder()
+ * @brief    Set an audio decoder
+ * @param    pC                (IN/OUT) VSS context.
+ * @param    mediaType        (IN) Media type of the audio stream to decode
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ * @return    M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentAudioDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                           M4_StreamType mediaType )
+{
+    M4AD_Type decoderType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_TRACE3_2("M4VSS3GPP_setCurrentAudioDecoder: pC=0x%x, mediaType=0x%x",
+        pC, mediaType);
+
+    switch( mediaType )
+    {
+        case M4DA_StreamTypeAudioAmrNarrowBand:
+            decoderType = M4AD_kTypeAMRNB;
+            break;
+
+        case M4DA_StreamTypeAudioAac:
+        case M4DA_StreamTypeAudioAacADTS:
+        case M4DA_StreamTypeAudioAacADIF:
+            decoderType = M4AD_kTypeAAC;
+            break;
+
+        case M4DA_StreamTypeAudioMp3:
+            decoderType = M4AD_kTypeMP3;
+            break;
+
+        case M4DA_StreamTypeAudioPcm:
+            decoderType = M4AD_kTypePCM;
+            break;
+
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE,
+                M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT,
+                "Audio decoder type not supported");
+            return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT;
+    }
+
+    pC->m_pAudioDecoder = pC->m_pAudioDecoderItTable[decoderType];
+    pC->pCurrentAudioDecoderUserData =
+        pC->pAudioDecoderUserDataTable[decoderType];
+
+    if( pC->m_pAudioDecoder == M4OSA_NULL )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE,
+            M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT,
+            "Audio decoder type not supported");
+        M4OSA_TRACE1_0("Audio decoder type not supported");
+        return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT;
+    }
+
+    return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_Edit.c b/libvideoeditor/vss/src/M4VSS3GPP_Edit.c
new file mode 100755
index 0000000..7bd1a99
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_Edit.c
@@ -0,0 +1,4130 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_Edit.c
+ * @brief    Video Studio Service 3GPP edit API implementation.
+ * @note
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our headers */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h"   /**< OSAL memory management */
+#include "M4OSA_Debug.h"    /**< OSAL debug management */
+#include "M4OSA_CharStar.h" /**< OSAL string management */
+
+#ifdef WIN32
+#include "string.h"         /**< for strcpy (Don't want to get dependencies
+                                 with M4OSA_String...) */
+
+#endif                      /* WIN32 */
+
+/************************************************************************/
+/* Static local functions                                               */
+/************************************************************************/
+static M4OSA_ERR M4VSS3GPP_intClipSettingsSanityCheck(
+    M4VSS3GPP_ClipSettings *pClip );
+static M4OSA_ERR M4VSS3GPP_intTransitionSettingsSanityCheck(
+    M4VSS3GPP_TransitionSettings *pTransition );
+static M4OSA_Void M4VSS3GPP_intFreeSettingsList(
+    M4VSS3GPP_InternalEditContext *pC );
+static M4OSA_ERR
+M4VSS3GPP_intCreateMP3OutputFile( M4VSS3GPP_InternalEditContext *pC,
+                                 M4OSA_Void *pOutputFile );
+static M4OSA_ERR M4VSS3GPP_intSwitchToNextClip(
+    M4VSS3GPP_InternalEditContext *pC );
+static M4OSA_ERR
+M4VSS3GPP_intComputeOutputVideoAndAudioDsi( M4VSS3GPP_InternalEditContext *pC,
+                                           M4OSA_UInt8 uiMasterClip );
+static M4OSA_Void M4VSS3GPP_intComputeOutputAverageVideoBitrate(
+    M4VSS3GPP_InternalEditContext *pC );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_GetVersion()
+ * @brief    Get the VSS 3GPP version.
+ * @note    Can be called anytime. Do not need any context.
+ * @param    pVersionInfo        (OUT) Pointer to a version info structure
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_GetVersion( M4_VersionInfo *pVersionInfo )
+{
+    M4OSA_TRACE3_1("M4VSS3GPP_GetVersion called with pVersionInfo=0x%x",
+        pVersionInfo);
+
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pVersionInfo), M4ERR_PARAMETER,
+        "M4VSS3GPP_GetVersion: pVersionInfo is M4OSA_NULL");
+
+    pVersionInfo->m_major = M4VSS_VERSION_MAJOR;
+    pVersionInfo->m_minor = M4VSS_VERSION_MINOR;
+    pVersionInfo->m_revision = M4VSS_VERSION_REVISION;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editInit()
+ * @brief    Initializes the VSS 3GPP edit operation (allocates an execution context).
+ * @note
+ * @param    pContext            (OUT) Pointer to the VSS 3GPP edit context to allocate
+ * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
+ * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editInit( M4VSS3GPP_EditContext *pContext,
+                             M4OSA_FileReadPointer *pFileReadPtrFct,
+                             M4OSA_FileWriterPointer *pFileWritePtrFct )
+{
+    M4VSS3GPP_InternalEditContext *pC;
+    M4OSA_ERR err;
+    M4OSA_UInt32 i;
+
+    M4OSA_TRACE3_3(
+        "M4VSS3GPP_editInit called with pContext=0x%x, \
+        pFileReadPtrFct=0x%x, pFileWritePtrFct=0x%x",
+        pContext, pFileReadPtrFct, pFileWritePtrFct);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_editInit: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_editInit: pFileReadPtrFct is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWritePtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_editInit: pFileWritePtrFct is M4OSA_NULL");
+
+    /**
+    * Allocate the VSS context and return it to the user */
+    pC = (M4VSS3GPP_InternalEditContext
+        *)M4OSA_malloc(sizeof(M4VSS3GPP_InternalEditContext),
+        M4VSS3GPP, (M4OSA_Char *)"M4VSS3GPP_InternalContext");
+
+    if( M4OSA_NULL == pC )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_editInit(): unable to allocate M4VSS3GPP_InternalContext,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    *pContext = pC;
+    /* Initialization of context variables */
+    M4OSA_memset((M4OSA_MemAddr8)pC, sizeof(M4VSS3GPP_InternalEditContext), 0);
+
+
+    /* Init the context. */
+    pC->pClipList = M4OSA_NULL;
+    pC->pTransitionList = M4OSA_NULL;
+    pC->pEffectsList = M4OSA_NULL;
+    pC->pActiveEffectsList = M4OSA_NULL;
+    pC->pActiveEffectsList1 = M4OSA_NULL;
+    pC->pC1 = M4OSA_NULL;
+    pC->pC2 = M4OSA_NULL;
+    pC->yuv1[0].pac_data = pC->yuv1[1].pac_data = pC->
+        yuv1[2].pac_data = M4OSA_NULL;
+    pC->yuv2[0].pac_data = pC->yuv2[1].pac_data = pC->
+        yuv2[2].pac_data = M4OSA_NULL;
+    pC->yuv3[0].pac_data = pC->yuv3[1].pac_data = pC->
+        yuv3[2].pac_data = M4OSA_NULL;
+    pC->yuv4[0].pac_data = pC->yuv4[1].pac_data = pC->
+        yuv4[2].pac_data = M4OSA_NULL;
+    pC->bClip1AtBeginCut = M4OSA_FALSE;
+    pC->bTransitionEffect = M4OSA_FALSE;
+    pC->bSupportSilence = M4OSA_FALSE;
+
+    /**
+    * Init PC->ewc members */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
+    pC->ewc.bVideoDataPartitioning = M4OSA_FALSE;
+    pC->ewc.pVideoOutputDsi = M4OSA_NULL;
+    pC->ewc.bActivateEmp = M4OSA_FALSE;
+    pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+    pC->ewc.uiNbChannels = 1;
+    pC->ewc.pAudioOutputDsi = M4OSA_NULL;
+    pC->ewc.pAudioEncCtxt = M4OSA_NULL;
+    pC->ewc.pAudioEncDSI.pInfo = M4OSA_NULL;
+    pC->ewc.pSilenceFrameData = M4OSA_NULL;
+    pC->ewc.pEncContext = M4OSA_NULL;
+    pC->ewc.pDummyAuBuffer = M4OSA_NULL;
+    pC->ewc.encoderState = M4VSS3GPP_kNoEncoder;
+    pC->ewc.p3gpWriterContext = M4OSA_NULL;
+    /**
+    * Keep the OSAL file functions pointer set in our context */
+    pC->pOsaFileReadPtr = pFileReadPtrFct;
+    pC->pOsaFileWritPtr = pFileWritePtrFct;
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    for ( i = 0; i < M4VD_kVideoType_NB; i++ )
+    {
+        pC->registeredExternalDecs[i].pDecoderInterface = M4OSA_NULL;
+        pC->registeredExternalDecs[i].pUserData = M4OSA_NULL;
+        pC->registeredExternalDecs[i].registered = M4OSA_FALSE;
+    }
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+    for ( i = 0; i < M4VSS3GPP_kCodecType_NB; i++ )
+    {
+        pC->m_codecInterface[i] = M4OSA_NULL;
+    }
+    pC->pOMXUserData = M4OSA_NULL;
+#endif /* M4VSS_SUPPORT_OMX_CODECS */
+    /*
+    * Reset pointers for media and codecs interfaces */
+
+    err = M4VSS3GPP_clearInterfaceTables(&pC->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /*
+    *  Call the media and codecs subscription module */
+    err = M4VSS3GPP_subscribeMediaAndCodec(&pC->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Update main state automaton */
+    pC->State = M4VSS3GPP_kEditState_CREATED;
+    pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+    pC->Astate = M4VSS3GPP_kEditAudioState_READ_WRITE;
+
+    pC->bIsMMS = M4OSA_FALSE;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_editInit(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCreateClipSettings()
+ * @brief    Allows filling a clip settings structure with default values
+ *
+ * @note    WARNING: pClipSettings->Effects[ ] will be allocated in this function.
+ *                   pClipSettings->pFile      will be allocated in this function.
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param   pFile               (IN) Clip file name
+ * @param   filePathSize        (IN) Clip path size (needed for UTF 16 conversion)
+ * @param    nbEffects           (IN) Nb of effect settings to allocate
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR
+M4VSS3GPP_editCreateClipSettings( M4VSS3GPP_ClipSettings *pClipSettings,
+                                 M4OSA_Void *pFile, M4OSA_UInt32 filePathSize,
+                                 M4OSA_UInt8 nbEffects )
+{
+    M4OSA_UInt8 uiFx;
+
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_editCreateClipSettings called with pClipSettings=0x%p",
+        pClipSettings);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_editCreateClipSettings: pClipSettings is NULL");
+
+    /**
+    * Set the clip settings to default */
+    pClipSettings->pFile = M4OSA_NULL;        /**< no file */
+    pClipSettings->FileType =
+        M4VIDEOEDITING_kFileType_Unsupported; /**< undefined */
+
+    if( M4OSA_NULL != pFile )
+    {
+        //pClipSettings->pFile = (M4OSA_Char*) M4OSA_malloc(M4OSA_chrLength(pFile)+1, M4VSS3GPP,
+        // "pClipSettings->pFile");
+        /*FB: add clip path size because of utf 16 conversion*/
+        pClipSettings->pFile =
+            (M4OSA_Void *)M4OSA_malloc(filePathSize + 1, M4VSS3GPP,
+            (M4OSA_Char *)"pClipSettings->pFile");
+
+        if( M4OSA_NULL == pClipSettings->pFile )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editCreateClipSettings : ERROR allocating filename");
+            return M4ERR_ALLOC;
+        }
+        //M4OSA_memcpy(pClipSettings->pFile, pFile, M4OSA_chrLength(pFile)+1);
+        /*FB: add clip path size because of utf 16 conversion*/
+        M4OSA_memcpy(pClipSettings->pFile, pFile, filePathSize + 1);
+    }
+
+    /*FB: add file path size to support UTF16 conversion*/
+    pClipSettings->filePathSize = filePathSize + 1;
+    /**/
+    pClipSettings->ClipProperties.bAnalysed = M4OSA_FALSE;
+    pClipSettings->ClipProperties.FileType = 0;
+    pClipSettings->ClipProperties.Version[0] = 0;
+    pClipSettings->ClipProperties.Version[1] = 0;
+    pClipSettings->ClipProperties.Version[2] = 0;
+    pClipSettings->ClipProperties.uiClipDuration = 0;
+
+    pClipSettings->uiBeginCutTime = 0; /**< no begin cut */
+    pClipSettings->uiEndCutTime = 0;   /**< no end cut */
+
+    /**
+    * Reset video characteristics */
+    pClipSettings->ClipProperties.VideoStreamType = M4VIDEOEDITING_kNoneVideo;
+    pClipSettings->ClipProperties.uiClipVideoDuration = 0;
+    pClipSettings->ClipProperties.uiVideoBitrate = 0;
+    pClipSettings->ClipProperties.uiVideoMaxAuSize = 0;
+    pClipSettings->ClipProperties.uiVideoWidth = 0;
+    pClipSettings->ClipProperties.uiVideoHeight = 0;
+    pClipSettings->ClipProperties.uiVideoTimeScale = 0;
+    pClipSettings->ClipProperties.fAverageFrameRate = 0.0;
+    pClipSettings->ClipProperties.ProfileAndLevel =
+        M4VIDEOEDITING_kProfile_and_Level_Out_Of_Range;
+    pClipSettings->ClipProperties.uiH263level = 0;
+    pClipSettings->ClipProperties.uiVideoProfile = 0;
+    pClipSettings->ClipProperties.bMPEG4dataPartition = M4OSA_FALSE;
+    pClipSettings->ClipProperties.bMPEG4rvlc = M4OSA_FALSE;
+    pClipSettings->ClipProperties.bMPEG4resynchMarker = M4OSA_FALSE;
+
+    /**
+    * Reset audio characteristics */
+    pClipSettings->ClipProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
+    pClipSettings->ClipProperties.uiClipAudioDuration = 0;
+    pClipSettings->ClipProperties.uiAudioBitrate = 0;
+    pClipSettings->ClipProperties.uiAudioMaxAuSize = 0;
+    pClipSettings->ClipProperties.uiNbChannels = 0;
+    pClipSettings->ClipProperties.uiSamplingFrequency = 0;
+    pClipSettings->ClipProperties.uiExtendedSamplingFrequency = 0;
+    pClipSettings->ClipProperties.uiDecodedPcmSize = 0;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_editSetDefaultSettings(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editDuplicateClipSettings()
+ * @brief    Duplicates a clip settings structure, performing allocations if required
+ *
+ * @param    pClipSettingsDest    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param    pClipSettingsOrig    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param   bCopyEffects        (IN) Flag to know if we have to duplicate effects
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR
+M4VSS3GPP_editDuplicateClipSettings( M4VSS3GPP_ClipSettings *pClipSettingsDest,
+                                    M4VSS3GPP_ClipSettings *pClipSettingsOrig,
+                                    M4OSA_Bool bCopyEffects )
+{
+    M4OSA_UInt8 uiFx;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_editDuplicateClipSettings called with dest=0x%p src=0x%p",
+        pClipSettingsDest, pClipSettingsOrig);
+
+    /* Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsDest), M4ERR_PARAMETER,
+        "M4VSS3GPP_editDuplicateClipSettings: pClipSettingsDest is NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsOrig), M4ERR_PARAMETER,
+        "M4VSS3GPP_editDuplicateClipSettings: pClipSettingsOrig is NULL");
+
+    /* Copy plain structure */
+    M4OSA_memcpy((M4OSA_MemAddr8)pClipSettingsDest,
+        (M4OSA_MemAddr8)pClipSettingsOrig, sizeof(M4VSS3GPP_ClipSettings));
+
+    /* Duplicate filename */
+    if( M4OSA_NULL != pClipSettingsOrig->pFile )
+    {
+        //pClipSettingsDest->pFile =
+        // (M4OSA_Char*) M4OSA_malloc(M4OSA_chrLength(pClipSettingsOrig->pFile)+1, M4VSS3GPP,
+        // "pClipSettingsDest->pFile");
+        /*FB: clip path size is needed for utf 16 conversion*/
+        /*FB 2008/10/16: bad allocation size which raises a crash*/
+        pClipSettingsDest->pFile =
+            (M4OSA_Char *)M4OSA_malloc(pClipSettingsOrig->filePathSize + 1,
+            M4VSS3GPP, (M4OSA_Char *)"pClipSettingsDest->pFile");
+
+        if( M4OSA_NULL == pClipSettingsDest->pFile )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editDuplicateClipSettings : ERROR allocating filename");
+            return M4ERR_ALLOC;
+        }
+        /*FB: clip path size is needed for utf 16 conversion*/
+        //M4OSA_memcpy(pClipSettingsDest->pFile, pClipSettingsOrig->pFile,
+        // M4OSA_chrLength(pClipSettingsOrig->pFile)+1);
+        /*FB 2008/10/16: bad allocation size which raises a crash*/
+        M4OSA_memcpy(pClipSettingsDest->pFile, pClipSettingsOrig->pFile,
+            pClipSettingsOrig->filePathSize/*+1*/);
+        ( (M4OSA_Char
+            *)pClipSettingsDest->pFile)[pClipSettingsOrig->filePathSize] = '\0';
+    }
+
+    /* Duplicate effects */
+#if 0
+
+    if( M4OSA_TRUE == bCopyEffects )
+    {
+        if( pClipSettingsOrig->nbEffects > 0 )
+        {
+            pClipSettingsDest->Effects = (M4VSS3GPP_EffectSettings
+                *)M4OSA_malloc(sizeof(M4VSS3GPP_EffectSettings)
+                * pClipSettingsOrig->nbEffects,
+                M4VSS3GPP, "pClipSettingsDest->Effects");
+
+            if( M4OSA_NULL == pClipSettingsDest->Effects )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editDuplicateClipSettings : ERROR allocating effects, nb=%lu",
+                    pClipSettingsOrig->nbEffects);
+                pClipSettingsDest->nbEffects = 0;
+                return M4ERR_ALLOC;
+            }
+
+            for ( uiFx = 0; uiFx < pClipSettingsOrig->nbEffects; uiFx++ )
+            {
+                /* Copy plain structure */
+                M4OSA_memcpy(
+                    (M4OSA_MemAddr8) &(pClipSettingsDest->Effects[uiFx]),
+                    (M4OSA_MemAddr8) &(pClipSettingsOrig->Effects[uiFx]),
+                    sizeof(M4VSS3GPP_EffectSettings));
+            }
+        }
+    }
+    else
+    {
+        pClipSettingsDest->nbEffects = 0;
+        pClipSettingsDest->Effects = M4OSA_NULL;
+    }
+
+#endif /* RC */
+    /* Return with no error */
+
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_editDuplicateClipSettings(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editFreeClipSettings()
+ * @brief    Free the pointers allocated in the ClipSetting structure (pFile, Effects).
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editFreeClipSettings(
+    M4VSS3GPP_ClipSettings *pClipSettings )
+{
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_editFreeClipSettings: pClipSettings is NULL");
+
+    /* free filename */
+    if( M4OSA_NULL != pClipSettings->pFile )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pClipSettings->pFile);
+        pClipSettings->pFile = M4OSA_NULL;
+    }
+
+    /* free effects settings */
+    /*    if(M4OSA_NULL != pClipSettings->Effects)
+    {
+    M4OSA_free((M4OSA_MemAddr32)pClipSettings->Effects);
+    pClipSettings->Effects = M4OSA_NULL;
+    pClipSettings->nbEffects = 0;
+    } RC */
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editOpen()
+ * @brief     Set the VSS input and output files.
+ * @note      It opens the input file, but the output file may not be created yet.
+ * @param     pContext           (IN) VSS edit context
+ * @param     pSettings           (IN) Edit settings
+ * @return    M4NO_ERROR:       No error
+ * @return    M4ERR_PARAMETER:  At least one parameter is M4OSA_NULL (debug only)
+ * @return    M4ERR_STATE:      VSS is not in an appropriate state for this function to be called
+ * @return    M4ERR_ALLOC:      There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editOpen( M4VSS3GPP_EditContext pContext,
+                             M4VSS3GPP_EditSettings *pSettings )
+{
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+
+    M4OSA_ERR err;
+    M4OSA_Int32 i;
+    M4VIDEOEDITING_FileType outputFileType =
+        M4VIDEOEDITING_kFileType_Unsupported; /**< 3GPP or MP3 (we don't do AMR output) */
+    M4OSA_UInt32 uiC1duration, uiC2duration;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_editOpen called with pContext=0x%x, pSettings=0x%x",
+        pContext, pSettings);
+
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_editOpen: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_editOpen: pSettings is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pSettings->pClipList), M4ERR_PARAMETER,
+        "M4VSS3GPP_editOpen: pSettings->pClipList is M4OSA_NULL");
+    M4OSA_DEBUG_IF2(( pSettings->uiClipNumber > 1)
+        && (M4OSA_NULL == pSettings->pTransitionList), M4ERR_PARAMETER,
+        "M4VSS3GPP_editOpen: pSettings->pTransitionList is M4OSA_NULL");
+
+    /**
+    * Check state automaton */
+    if( ( pC->State != M4VSS3GPP_kEditState_CREATED)
+        && (pC->State != M4VSS3GPP_kEditState_CLOSED) )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editOpen: State error (0x%x)! Returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /**
+    * Free any previously allocated internal settings list */
+    M4VSS3GPP_intFreeSettingsList(pC);
+
+    /**
+    * Copy the user settings in our context */
+    pC->uiClipNumber = pSettings->uiClipNumber;
+
+    /**
+    * Copy the clip list */
+    pC->pClipList =
+        (M4VSS3GPP_ClipSettings *)M4OSA_malloc(sizeof(M4VSS3GPP_ClipSettings)
+        * pC->uiClipNumber, M4VSS3GPP, (M4OSA_Char *)"pC->pClipList");
+
+    if( M4OSA_NULL == pC->pClipList )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_editOpen: unable to allocate pC->Settings.pClipList,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    for ( i = 0; i < pSettings->uiClipNumber; i++ )
+    {
+        M4VSS3GPP_editDuplicateClipSettings(&(pC->pClipList[i]),
+            pSettings->pClipList[i], M4OSA_TRUE);
+    }
+
+    /**
+    * Copy effects list RC */
+
+    /*FB bug fix 19.03.2008 if the number of effects is 0 -> crash*/
+    if( pSettings->nbEffects > 0 )
+    {
+        pC->nbEffects = pSettings->nbEffects;
+        pC->pEffectsList = (M4VSS3GPP_EffectSettings
+            *)M4OSA_malloc(sizeof(M4VSS3GPP_EffectSettings) * pC->nbEffects,
+            M4VSS3GPP, (M4OSA_Char *)"pC->pEffectsList");
+
+        if( M4OSA_NULL == pC->pEffectsList )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editOpen: unable to allocate pC->pEffectsList, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+        for ( i = 0; i < pC->nbEffects; i++ )
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8) &(pC->pEffectsList[i]),
+                (M4OSA_MemAddr8) &(pSettings->Effects[i]),
+                sizeof(M4VSS3GPP_EffectSettings));
+        }
+
+        /**
+        * Allocate active effects list RC */
+        pC->pActiveEffectsList =
+            (M4OSA_UInt8 *)M4OSA_malloc(sizeof(M4OSA_UInt8) * pC->nbEffects,
+            M4VSS3GPP, (M4OSA_Char *)"pC->pActiveEffectsList");
+
+        if( M4OSA_NULL == pC->pActiveEffectsList )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editOpen: unable to allocate pC->pActiveEffectsList,\
+                returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        /**
+         * Allocate active effects list */
+        pC->pActiveEffectsList1 =
+            (M4OSA_UInt8 *)M4OSA_malloc(sizeof(M4OSA_UInt8) * pC->nbEffects,
+            M4VSS3GPP, (M4OSA_Char *)"pC->pActiveEffectsList1");
+        if (M4OSA_NULL == pC->pActiveEffectsList1)
+        {
+            M4OSA_TRACE1_0("M4VSS3GPP_editOpen: unable to allocate pC->pActiveEffectsList1, \
+                           returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+    }
+    else
+    {
+        pC->nbEffects = 0;
+        pC->nbActiveEffects = 0;
+        pC->nbActiveEffects1 = 0;
+        pC->pEffectsList = M4OSA_NULL;
+        pC->pActiveEffectsList = M4OSA_NULL;
+        pC->pActiveEffectsList1 = M4OSA_NULL;
+    }
+
+    /**
+    * Test the clip analysis data; if it is not provided, analyse the clips ourselves. */
+    for ( i = 0; i < pC->uiClipNumber; i++ )
+    {
+        if( M4OSA_FALSE == pC->pClipList[i].ClipProperties.bAnalysed )
+        {
+            /**< Analysis not provided by the integrator */
+            err = M4VSS3GPP_editAnalyseClip(pC->pClipList[i].pFile,
+                pC->pClipList[i].FileType, &pC->pClipList[i].ClipProperties,
+                pC->pOsaFileReadPtr);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editOpen: M4VSS3GPP_editAnalyseClip returns 0x%x!",
+                    err);
+                return err;
+            }
+        }
+    }
+
+    /**
+    * Check clip compatibility */
+    for ( i = 0; i < pC->uiClipNumber; i++ )
+    {
+        /**
+        * Check all the clips are compatible with VSS 3GPP */
+        err = M4VSS3GPP_intCheckClipCompatibleWithVssEditing(
+            &pC->pClipList[i].ClipProperties);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_2(
+                "M4VSS3GPP_editOpen:\
+                M4VSS3GPP_intCheckClipCompatibleWithVssEditing(%d) returns 0x%x!",
+                i, err);
+            return err;
+        }
+
+        /**
+        * Check the master clip versus all the other ones
+        * (including the master clip with itself, otherwise the variables for
+        * the master clip are not properly set). */
+        err = M4VSS3GPP_editCheckClipCompatibility(
+            &pC->pClipList[pSettings->uiMasterClip].ClipProperties,
+            &pC->pClipList[i].ClipProperties);
+        /* in case of warning regarding audio incompatibility, editing continues */
+        if( M4OSA_ERR_IS_ERROR(err) )
+        {
+            M4OSA_TRACE1_2(
+                "M4VSS3GPP_editOpen: M4VSS3GPP_editCheckClipCompatibility(%d) returns 0x%x!",
+                i, err);
+            return err;
+        }
+    }
+
+    /* Search for audio tracks that cannot be edited:
+    *   - delete all audio effects for the clip
+    *   - if the master clip is editable, keep the transition
+    *     (the bad track will later be replaced with silence)
+    *   - if the master clip is not editable, switch to a dummy transition (copy/paste only) */
+    for ( i = 0; i < pC->uiClipNumber; i++ )
+    {
+        if( M4OSA_FALSE == pC->pClipList[i].ClipProperties.bAudioIsEditable )
+        {
+            M4OSA_UInt8 uiFx;
+
+            for ( uiFx = 0; uiFx < pC->nbEffects; uiFx++ )
+            {
+                pC->pEffectsList[uiFx].AudioEffectType
+                    = M4VSS3GPP_kAudioEffectType_None;
+            }
+
+            if( ( i < (pC->uiClipNumber - 1))
+                && (M4OSA_NULL != pSettings->pTransitionList[i])
+                && (M4OSA_FALSE == pC->pClipList[pSettings->
+                uiMasterClip].ClipProperties.bAudioIsEditable) )
+            {
+                pSettings->pTransitionList[i]->AudioTransitionType
+                    = M4VSS3GPP_kAudioTransitionType_None;
+            }
+        }
+    }
+
+    /**
+    * We add a transition of duration 0 at the end of the last clip.
+    * It removes a whole bunch of tests later in the processing... */
+    pC->pTransitionList = (M4VSS3GPP_TransitionSettings
+        *)M4OSA_malloc(sizeof(M4VSS3GPP_TransitionSettings)
+        * (pC->uiClipNumber), M4VSS3GPP, (M4OSA_Char *)"pC->pTransitionList");
+
+    if( M4OSA_NULL == pC->pTransitionList )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_editOpen: unable to allocate pC->Settings.pTransitionList,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /**< copy transition settings */
+    for ( i = 0; i < (pSettings->uiClipNumber - 1); i++ )
+    {
+        M4OSA_memcpy((M4OSA_MemAddr8) &(pC->pTransitionList[i]),
+            (M4OSA_MemAddr8)pSettings->pTransitionList[i],
+            sizeof(M4VSS3GPP_TransitionSettings));
+    }
+
+    /**< We fill the last "dummy" transition */
+    pC->pTransitionList[pC->uiClipNumber - 1].uiTransitionDuration = 0;
+    pC->pTransitionList[pC->uiClipNumber
+        - 1].VideoTransitionType = M4VSS3GPP_kVideoTransitionType_None;
+    pC->pTransitionList[pC->uiClipNumber
+        - 1].AudioTransitionType = M4VSS3GPP_kAudioTransitionType_None;
+
+    /**
+    * Avoid weird clip settings */
+    for ( i = 0; i < pSettings->uiClipNumber; i++ )
+    {
+        err = M4VSS3GPP_intClipSettingsSanityCheck(&pC->pClipList[i]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editOpen: M4VSS3GPP_intClipSettingsSanityCheck returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    for ( i = 0; i < (pSettings->uiClipNumber - 1); i++ )
+    {
+        /**
+        * Maximum transition duration between clip n and clip n+1 is the duration
+        * of the shortest clip */
+        if( 0 == pC->pClipList[i].uiEndCutTime )
+        {
+            uiC1duration = pC->pClipList[i].ClipProperties.uiClipVideoDuration;
+        }
+        else
+        {
+            /**< duration of clip n is the end cut time */
+            uiC1duration = pC->pClipList[i].uiEndCutTime;
+        }
+
+        /**< Subtract the begin cut */
+        uiC1duration -= pC->pClipList[i].uiBeginCutTime;
+
+        /**< Check that the transition is shorter than clip n */
+        if( pC->pTransitionList[i].uiTransitionDuration > uiC1duration )
+        {
+            pC->pTransitionList[i].uiTransitionDuration = uiC1duration - 1;
+        }
+
+        if( 0 == pC->pClipList[i + 1].uiEndCutTime )
+        {
+            uiC2duration =
+                pC->pClipList[i + 1].ClipProperties.uiClipVideoDuration;
+        }
+        else
+        {
+            /**< duration of clip n+1 is the end cut time */
+            uiC2duration = pC->pClipList[i + 1].uiEndCutTime;
+        }
+
+        /**< Subtract the begin cut */
+        uiC2duration -= pC->pClipList[i + 1].uiBeginCutTime;
+
+        /**< Check that the transition is shorter than clip n+1 */
+        if( pC->pTransitionList[i].uiTransitionDuration > uiC2duration )
+        {
+            pC->pTransitionList[i].uiTransitionDuration = uiC2duration - 1;
+        }
+
+        /**
+        * Avoid weird transition settings */
+        err =
+            M4VSS3GPP_intTransitionSettingsSanityCheck(&pC->pTransitionList[i]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editOpen: M4VSS3GPP_intClipSettingsSanityCheck returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Check that two transitions are not overlapping
+          (no overlapping possible for first clip) */
+        if( i > 0 )
+        {
+            /**
+            * There is a transition overlap if the sum of the durations of
+            * two consecutive transitions is greater than the duration of
+            * the clip in between. */
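+            /* For example, with two 2500 ms transitions around a clip whose
+             * effective duration is 4000 ms, 2500 + 2500 >= 4000, so the
+             * settings are rejected. */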
+            if( ( pC->pTransitionList[i - 1].uiTransitionDuration
+                + pC->pTransitionList[i].uiTransitionDuration) >= uiC1duration )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editOpen: Overlapping transitions on clip %d,\
+                    returning M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS",
+                    i);
+                return M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS;
+            }
+        }
+    }
+
+    /**
+    * Output clip duration */
+    for ( i = 0; i < pC->uiClipNumber; i++ )
+    {
+        /**
+        * Compute the sum of the clip duration */
+        if( 0 == pC->pClipList[i].uiEndCutTime )
+        {
+            pC->ewc.iOutputDuration +=
+                pC->pClipList[i].ClipProperties.uiClipVideoDuration;
+            /* Only the video track duration matters, to avoid deviation
+               if the audio track is longer */
+        }
+        else
+        {
+            pC->ewc.iOutputDuration +=
+                pC->pClipList[i].uiEndCutTime; /**< Add end cut */
+        }
+
+        pC->ewc.iOutputDuration -=
+            pC->pClipList[i].uiBeginCutTime; /**< Remove begin cut */
+
+        /**
+        * Remove the duration of the transition (it is counted twice) */
+        pC->ewc.iOutputDuration -= pC->pTransitionList[i].uiTransitionDuration;
+    }
+
+    /**
+    * Copy the video properties of the master clip to the output properties */
+    pC->ewc.uiVideoWidth =
+        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiVideoWidth;
+    pC->ewc.uiVideoHeight =
+        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiVideoHeight;
+    pC->ewc.uiVideoTimeScale =
+        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiVideoTimeScale;
+    pC->ewc.bVideoDataPartitioning = pC->pClipList[pSettings->
+        uiMasterClip].ClipProperties.bMPEG4dataPartition;
+
+    switch( pC->pClipList[pSettings->uiMasterClip].ClipProperties.VideoStreamType )
+    {
+        case M4VIDEOEDITING_kH263:
+            pC->ewc.VideoStreamType = M4SYS_kH263;
+            break;
+
+        case M4VIDEOEDITING_kMPEG4_EMP:
+            pC->ewc.bActivateEmp = M4OSA_TRUE; /* no break */
+
+        case M4VIDEOEDITING_kMPEG4:
+            pC->ewc.VideoStreamType = M4SYS_kMPEG_4;
+            break;
+
+        case M4VIDEOEDITING_kH264:
+            pC->ewc.VideoStreamType = M4SYS_kH264;
+            break;
+
+        default:
+            pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
+            break;
+    }
+
+    /**
+    * Copy the audio properties of the master clip to the output properties */
+    pC->ewc.uiNbChannels =
+        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiNbChannels;
+    pC->ewc.uiAudioBitrate =
+        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiAudioBitrate;
+    pC->ewc.uiSamplingFrequency = pC->pClipList[pSettings->
+        uiMasterClip].ClipProperties.uiSamplingFrequency;
+    pC->ewc.uiSilencePcmSize =
+        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiDecodedPcmSize;
+    pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+
+    switch( pC->pClipList[pSettings->uiMasterClip].ClipProperties.AudioStreamType )
+    {
+        case M4VIDEOEDITING_kAMR_NB:
+            pC->ewc.AudioStreamType = M4SYS_kAMR;
+            pC->ewc.pSilenceFrameData =
+                (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+            pC->ewc.uiSilenceFrameSize =
+                M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+            pC->ewc.iSilenceFrameDuration =
+                M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+            pC->bSupportSilence = M4OSA_TRUE;
+            break;
+
+        case M4VIDEOEDITING_kAAC:
+        case M4VIDEOEDITING_kAACplus:
+        case M4VIDEOEDITING_keAACplus:
+            pC->ewc.AudioStreamType = M4SYS_kAAC;
+
+            if( pC->ewc.uiNbChannels == 1 )
+            {
+                pC->ewc.pSilenceFrameData =
+                    (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+                pC->ewc.uiSilenceFrameSize = M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+                pC->bSupportSilence = M4OSA_TRUE;
+            }
+            else
+            {
+                pC->ewc.pSilenceFrameData =
+                    (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+                pC->ewc.uiSilenceFrameSize =
+                    M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+                pC->bSupportSilence = M4OSA_TRUE;
+            }
+            pC->ewc.iSilenceFrameDuration =
+                1024; /* AAC is always 1024/Freq sample duration */
+            break;
+
+        case M4VIDEOEDITING_kMP3:
+            pC->ewc.AudioStreamType = M4SYS_kMP3;
+            pC->ewc.pSilenceFrameData = M4OSA_NULL;
+            pC->ewc.uiSilenceFrameSize = 0;
+            pC->ewc.iSilenceFrameDuration = 0;
+            /* Special case: the mp3 core reader returns a time in ms */
+            pC->ewc.scale_audio = 1.0;
+            break;
+
+        case M4VIDEOEDITING_kEVRC:
+            pC->ewc.AudioStreamType = M4SYS_kEVRC;
+            pC->ewc.pSilenceFrameData = M4OSA_NULL;
+            pC->ewc.uiSilenceFrameSize = 0;
+            pC->ewc.iSilenceFrameDuration = 160; /* EVRC frames are 20 ms at 8000 Hz
+                                             (makes it easier to factorize amr and evrc code) */
+            break;
+
+        default:
+            pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+            break;
+    }
+
+    /**
+    * We produce a 3gpp file, unless it is mp3 */
+    if( M4VIDEOEDITING_kMP3 == pC->
+        pClipList[pSettings->uiMasterClip].ClipProperties.AudioStreamType )
+        outputFileType = M4VIDEOEDITING_kFileType_MP3;
+    else
+        outputFileType = M4VIDEOEDITING_kFileType_3GPP;
+
+    /**
+    * Beware, a null duration would lead to a divide by zero error (better safe than sorry...) */
+    if( 0 == pC->ewc.iOutputDuration )
+    {
+        pC->ewc.iOutputDuration = 1;
+    }
+
+    /**
+    * Open first clip */
+    pC->uiCurrentClip = 0;
+
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    pC->ewc.dInputVidCts  = 0.0;
+    pC->ewc.dOutputVidCts = 0.0;
+    pC->ewc.dATo = 0.0;
+
+    err = M4VSS3GPP_intSwitchToNextClip(pC);
+    /* RC: to know when a file has been processed */
+    if( M4NO_ERROR != err && err != M4VSS3GPP_WAR_SWITCH_CLIP )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editOpen: M4VSS3GPP_intSwitchToNextClip() returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Do the video stuff in 3GPP Audio/Video case */
+    if( M4VIDEOEDITING_kFileType_3GPP == outputFileType )
+    {
+        /**
+        * Compute the Decoder Specific Info for the output video and audio streams */
+        err = M4VSS3GPP_intComputeOutputVideoAndAudioDsi(pC,
+            pSettings->uiMasterClip);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editOpen: M4VSS3GPP_intComputeOutputVideoAndAudioDsi() returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Compute the time increment for the transition file */
+        switch( pSettings->videoFrameRate )
+        {
+            case M4VIDEOEDITING_k5_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 5.0;
+                break;
+
+            case M4VIDEOEDITING_k7_5_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 7.5;
+                break;
+
+            case M4VIDEOEDITING_k10_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 10.0;
+                break;
+
+            case M4VIDEOEDITING_k12_5_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 12.5;
+                break;
+
+            case M4VIDEOEDITING_k15_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 15.0;
+                break;
+
+            case M4VIDEOEDITING_k20_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 20.0;
+                break;
+
+            case M4VIDEOEDITING_k25_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 25.0;
+                break;
+
+            case M4VIDEOEDITING_k30_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 30.0;
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editOpen(): invalid videoFrameRate (0x%x),\
+                    returning M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE",
+                    pSettings->videoFrameRate);
+                return M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE;
+        }
+
+        if( M4SYS_kMPEG_4 == pC->ewc.VideoStreamType )
+        {
+            M4OSA_UInt32 uiAlpha;
+            /**
+            * MPEG-4 case.
+            * The time scale of the transition encoder must be the same as the
+            * time scale of the input files.
+            * So the frame duration must be compatible with this time scale,
+            * without being too short.
+            * For that, we must compute alpha (integer) so that:
+            *             (alpha x 1000)/EncoderTimeScale > MinFrameDuration
+            **/
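+            /* For example, with a 30000 Hz time scale and a 15 fps target
+             * (66.67 ms per frame), uiAlpha = round(66.67 * 30000 / 1000) = 2000
+             * and the frame duration becomes 2000 * 1000 / 30000 = 66.67 ms. */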
+
+            uiAlpha = (M4OSA_UInt32)(( pC->dOutputFrameDuration
+                * pC->ewc.uiVideoTimeScale) / 1000.0 + 0.5);
+
+            if( uiAlpha > 0 )
+            {
+                pC->dOutputFrameDuration =
+                    ( uiAlpha * 1000.0) / pC->ewc.uiVideoTimeScale;
+            }
+        }
+        else if( M4SYS_kH263 == pC->ewc.VideoStreamType )
+        {
+            switch( pSettings->videoFrameRate )
+            {
+                case M4VIDEOEDITING_k12_5_FPS:
+                case M4VIDEOEDITING_k20_FPS:
+                case M4VIDEOEDITING_k25_FPS:
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_editOpen(): invalid videoFrameRate for H263,\
+                        returning M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE");
+                    return M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE;
+                default:
+                    break;
+            }
+        }
+    }
+
+    /**
+    * Create the MP3 output file */
+    if( M4VIDEOEDITING_kFileType_MP3 == outputFileType )
+    {
+        M4READER_Buffer mp3tagBuffer;
+        err = M4VSS3GPP_intCreateMP3OutputFile(pC, pSettings->pOutputFile);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editOpen: M4VSS3GPP_intCreateMP3OutputFile returns 0x%x",
+                err);
+            return err;
+        }
+
+        /* The ID3v2 tag can be located anywhere in the mp3 file               */
+        /* The mp3 reader only checks a few bytes at the beginning of the
+           stream to look for an ID3v2 tag  */
+        /* Consequently, if the ID3v2 tag is not at the beginning of the file,
+           the reader behaves as if this metadata were absent */
+
+        /* Retrieve the data of the ID3v2 Tag */
+        err = pC->pC1->ShellAPI.m_pReader->m_pFctGetOption(
+            pC->pC1->pReaderContext, M4READER_kOptionID_Mp3Id3v2Tag,
+            (M4OSA_DataOption) &mp3tagBuffer);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1("M4VSS3GPP_editOpen: M4MP3R_getOption returns 0x%x",
+                err);
+            return err;
+        }
+
+        /* Write the data of the ID3v2 Tag in the output file */
+        if( 0 != mp3tagBuffer.m_uiBufferSize )
+        {
+            err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
+                (M4OSA_MemAddr8)mp3tagBuffer.m_pData, mp3tagBuffer.m_uiBufferSize);
+
+            /**
+            * Free before the error checking anyway */
+            M4OSA_free((M4OSA_MemAddr32)mp3tagBuffer.m_pData);
+
+            /**
+            * Error checking */
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editOpen: WriteData(ID3v2Tag) returns 0x%x",
+                    err);
+                return err;
+            }
+
+            mp3tagBuffer.m_uiBufferSize = 0;
+            mp3tagBuffer.m_pData = M4OSA_NULL;
+        }
+    }
+    /**
+    * Create the 3GPP output file */
+    else if( M4VIDEOEDITING_kFileType_3GPP == outputFileType )
+    {
+        /* Compute an average bitrate from mixed bitrates of the input clips */
+        M4VSS3GPP_intComputeOutputAverageVideoBitrate(pC);
+
+        /**
+        * 11/12/2008 CR3283 MMS use case in VideoArtist: Set max output file size if needed */
+        if( pC->bIsMMS == M4OSA_TRUE )
+        {
+            err = M4VSS3GPP_intCreate3GPPOutputFile(&pC->ewc, &pC->ShellAPI,
+                pC->pOsaFileWritPtr, pSettings->pOutputFile,
+                pC->pOsaFileReadPtr, pSettings->pTemporaryFile,
+                pSettings->xVSS.outputFileSize);
+        }
+        else
+        {
+            err = M4VSS3GPP_intCreate3GPPOutputFile(&pC->ewc, &pC->ShellAPI,
+                pC->pOsaFileWritPtr, pSettings->pOutputFile,
+                pC->pOsaFileReadPtr, pSettings->pTemporaryFile, 0);
+        }
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editOpen: M4VSS3GPP_intCreate3GPPOutputFile returns 0x%x",
+                err);
+            return err;
+        }
+    }
+    /**
+    * Default error case */
+    else
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editOpen: invalid outputFileType = 0x%x,\
+            returning M4VSS3GPP_ERR_OUTPUT_FILE_TYPE_ERROR",
+            outputFileType);
+        return
+            M4VSS3GPP_ERR_OUTPUT_FILE_TYPE_ERROR; /**< this is an internal error code
+                                                  unknown to the user */
+    }
+
+    /**
+    * Initialize state */
+    if( M4SYS_kMP3 == pC->ewc.AudioStreamType )
+    {
+        /**
+        * In the MP3 case we use a special audio state */
+        pC->State = M4VSS3GPP_kEditState_MP3_JUMP;
+    }
+    else
+    {
+        /**
+        * We start with the video processing */
+        pC->State = M4VSS3GPP_kEditState_VIDEO;
+    }
+
+    /**
+    * Initialize state.
+    * The first clip is independent of the "virtual previous clips",
+    * so it is as if we were in Read/Write mode before it. */
+    pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+    pC->Astate = M4VSS3GPP_kEditAudioState_READ_WRITE;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_editOpen(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editStep()
+ * @brief    Perform one step of editing.
+ * @note
+ * @param     pContext           (IN) VSS 3GPP edit context
+ * @param     pProgress          (OUT) Progress percentage (0 to 100) of the editing operation
+ * @return    M4NO_ERROR:        No error
+ * @return    M4ERR_PARAMETER:   pContext is M4OSA_NULL (debug only)
+ * @return    M4ERR_STATE:       VSS 3GPP is not in an appropriate state for this
+ *                               function to be called
+ * @return    M4VSS3GPP_WAR_EDITING_DONE: Edition is done, user should now call
+ *            M4VSS3GPP_editClose()
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editStep( M4VSS3GPP_EditContext pContext,
+                             M4OSA_UInt8 *pProgress )
+{
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    M4OSA_UInt32 uiProgressAudio, uiProgressVideo, uiProgress;
+    M4OSA_ERR err;
+
+    M4OSA_TRACE3_1("M4VSS3GPP_editStep called with pContext=0x%x", pContext);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_editStep: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pProgress), M4ERR_PARAMETER,
+        "M4VSS3GPP_editStep: pProgress is M4OSA_NULL");
+
+    /**
+    * Check state automaton and select correct processing */
+    switch( pC->State )
+    {
+        case M4VSS3GPP_kEditState_VIDEO:
+            err = M4VSS3GPP_intEditStepVideo(pC);
+            break;
+
+        case M4VSS3GPP_kEditState_AUDIO:
+            err = M4VSS3GPP_intEditStepAudio(pC);
+            break;
+
+        case M4VSS3GPP_kEditState_MP3:
+            err = M4VSS3GPP_intEditStepMP3(pC);
+            break;
+
+        case M4VSS3GPP_kEditState_MP3_JUMP:
+            err = M4VSS3GPP_intEditJumpMP3(pC);
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editStep(): invalid internal state (0x%x), returning M4ERR_STATE",
+                pC->State);
+            return M4ERR_STATE;
+    }
+
+    /**
+    * Compute progress.
+    * We do the computation with 32-bit precision because in some (very) extreme cases we may
+    * get values higher than 256 (...) */
+    uiProgressAudio =
+        ( (M4OSA_UInt32)(pC->ewc.dATo * 100)) / pC->ewc.iOutputDuration;
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    uiProgressVideo = ((M4OSA_UInt32)(pC->ewc.dInputVidCts * 100)) / pC->ewc.iOutputDuration;
+
+    uiProgress = uiProgressAudio + uiProgressVideo;
+
+    if( ( pC->ewc.AudioStreamType != M4SYS_kAudioUnknown)
+        && (pC->ewc.VideoStreamType != M4SYS_kVideoUnknown) )
+        uiProgress /= 2;
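+    /* For example, with audio progress at 40% and video progress at 60%,
+     * the overall progress reported is (40 + 60) / 2 = 50% when both
+     * audio and video streams are present. */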
+
+    /**
+    * Sanity check */
+    if( uiProgress > 100 )
+    {
+        *pProgress = 100;
+    }
+    else
+    {
+        *pProgress = (M4OSA_UInt8)uiProgress;
+    }
+
+    /**
+    * Return the error */
+    M4OSA_TRACE3_1("M4VSS3GPP_editStep(): returning 0x%x", err);
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editClose()
+ * @brief    Finish the VSS edit operation.
+ * @note    The output 3GPP file is ready to be played after this call
+ * @param    pContext           (IN) VSS edit context
+ * @return    M4NO_ERROR:       No error
+ * @return    M4ERR_PARAMETER:  pContext is M4OSA_NULL (debug only)
+ * @return    M4ERR_STATE:      VSS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editClose( M4VSS3GPP_EditContext pContext )
+{
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    M4OSA_ERR err;
+    M4OSA_ERR returnedError = M4NO_ERROR;
+    M4OSA_UInt32 lastCTS;
+
+    M4OSA_TRACE3_1("M4VSS3GPP_editClose called with pContext=0x%x", pContext);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_editClose: pContext is M4OSA_NULL");
+
+    /**
+    * Check state automaton.
+    * In "theory", we should not authorize closing if we are in CREATED state.
+    * But in practice, in case the opening failed, it may have been partially done.
+    * In that case we have to free some opened ressources by calling Close. */
+    if( M4VSS3GPP_kEditState_CLOSED == pC->State )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editClose: Wrong state (0x%x), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /**
+    * There may be an encoder to destroy */
+    err = M4VSS3GPP_intDestroyVideoEncoder(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editClose: M4VSS3GPP_editDestroyVideoEncoder() returns 0x%x!",
+            err);
+        /**< We do not return the error here because we still have stuff to free */
+        returnedError = err;
+    }
+
+    /**
+    * Close the output file */
+    if( M4SYS_kMP3 == pC->ewc.AudioStreamType )
+    {
+        /**
+        * MP3 case */
+        if( M4OSA_NULL != pC->ewc.p3gpWriterContext )
+        {
+            err = pC->pOsaFileWritPtr->closeWrite(pC->ewc.p3gpWriterContext);
+            pC->ewc.p3gpWriterContext = M4OSA_NULL;
+        }
+    }
+    else
+    {
+        /**
+        * Close the output 3GPP clip, if it has been opened */
+        if( M4OSA_NULL != pC->ewc.p3gpWriterContext )
+        {
+            /* Update last Video CTS */
+            lastCTS = pC->ewc.iOutputDuration;
+
+            err = pC->ShellAPI.pWriterGlobalFcts->pFctSetOption(
+                pC->ewc.p3gpWriterContext,
+                (M4OSA_UInt32)M4WRITER_kMaxFileDuration, &lastCTS);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editClose: SetOption(M4WRITER_kMaxFileDuration) returns 0x%x",
+                    err);
+            }
+
+            err = pC->ShellAPI.pWriterGlobalFcts->pFctCloseWrite(
+                pC->ewc.p3gpWriterContext);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editClose: pFctCloseWrite(OUT) returns 0x%x!",
+                    err);
+                /**< We do not return the error here because we still have stuff to free */
+                if( M4NO_ERROR
+                    == returnedError ) /**< we return the first error that happened */
+                {
+                    returnedError = err;
+                }
+            }
+            pC->ewc.p3gpWriterContext = M4OSA_NULL;
+        }
+    }
+
+    /**
+    * Free the output video DSI, if it has been created */
+    if( M4OSA_NULL != pC->ewc.pVideoOutputDsi )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->ewc.pVideoOutputDsi);
+        pC->ewc.pVideoOutputDsi = M4OSA_NULL;
+    }
+
+    /**
+    * Free the output audio DSI, if it has been created */
+    if( M4OSA_NULL != pC->ewc.pAudioOutputDsi )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->ewc.pAudioOutputDsi);
+        pC->ewc.pAudioOutputDsi = M4OSA_NULL;
+    }
+
+    /**
+    * Close clip1, if needed */
+    if( M4OSA_NULL != pC->pC1 )
+    {
+        err = M4VSS3GPP_intClipCleanUp(pC->pC1);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editClose: M4VSS3GPP_intClipCleanUp(C1) returns 0x%x!",
+                err);
+            /**< We do not return the error here because we still have stuff to free */
+            if( M4NO_ERROR
+                == returnedError ) /**< we return the first error that happened */
+            {
+                returnedError = err;
+            }
+        }
+        pC->pC1 = M4OSA_NULL;
+    }
+
+    /**
+    * Close clip2, if needed */
+    if( M4OSA_NULL != pC->pC2 )
+    {
+        err = M4VSS3GPP_intClipCleanUp(pC->pC2);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editClose: M4VSS3GPP_intClipCleanUp(C2) returns 0x%x!",
+                err);
+            /**< We do not return the error here because we still have stuff to free */
+            if( M4NO_ERROR
+                == returnedError ) /**< we return the first error that happened */
+            {
+                returnedError = err;
+            }
+        }
+        pC->pC2 = M4OSA_NULL;
+    }
+
+    /**
+    * Free the temporary YUV planes */
+    if( M4OSA_NULL != pC->yuv1[0].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv1[0].pac_data);
+        pC->yuv1[0].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv1[1].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv1[1].pac_data);
+        pC->yuv1[1].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv1[2].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv1[2].pac_data);
+        pC->yuv1[2].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv2[0].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv2[0].pac_data);
+        pC->yuv2[0].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv2[1].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv2[1].pac_data);
+        pC->yuv2[1].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv2[2].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv2[2].pac_data);
+        pC->yuv2[2].pac_data = M4OSA_NULL;
+    }
+
+    /* RC */
+    if( M4OSA_NULL != pC->yuv3[0].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv3[0].pac_data);
+        pC->yuv3[0].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv3[1].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv3[1].pac_data);
+        pC->yuv3[1].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv3[2].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv3[2].pac_data);
+        pC->yuv3[2].pac_data = M4OSA_NULL;
+    }
+
+    /* RC */
+    if( M4OSA_NULL != pC->yuv4[0].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv4[0].pac_data);
+        pC->yuv4[0].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv4[1].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv4[1].pac_data);
+        pC->yuv4[1].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv4[2].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv4[2].pac_data);
+        pC->yuv4[2].pac_data = M4OSA_NULL;
+    }
+
+    /**
+    * RC Free effects list */
+    if( pC->pEffectsList != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pEffectsList);
+        pC->pEffectsList = M4OSA_NULL;
+    }
+
+    /**
+    * RC Free active effects list */
+    if( pC->pActiveEffectsList != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pActiveEffectsList);
+        pC->pActiveEffectsList = M4OSA_NULL;
+    }
+    /**
+     *  Free active effects list */
+    if(pC->pActiveEffectsList1 != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pActiveEffectsList1);
+        pC->pActiveEffectsList1 = M4OSA_NULL;
+    }
+    /**
+    * Update state automaton */
+    pC->State = M4VSS3GPP_kEditState_CLOSED;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_1("M4VSS3GPP_editClose(): returning 0x%x", returnedError);
+    return returnedError;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCleanUp()
+ * @brief    Free all resources used by the VSS edit operation.
+ * @note    The context is no longer valid after this call
+ * @param    pContext            (IN) VSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editCleanUp( M4VSS3GPP_EditContext pContext )
+{
+    M4OSA_ERR err;
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+
+    M4OSA_TRACE3_1("M4VSS3GPP_editCleanUp called with pContext=0x%x", pContext);
+
+    /**
+    *    Check input parameter */
+    if( M4OSA_NULL == pContext )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_editCleanUp(): pContext is M4OSA_NULL, returning M4ERR_PARAMETER");
+        return M4ERR_PARAMETER;
+    }
+
+    /**
+    * Close, if needed.
+    * In "theory", we should not close if we are in CREATED state.
+    * But in practice, in case the opening failed, it may have been partially done.
+    * In that case we have to free some opened ressources by calling Close. */
+    if( M4VSS3GPP_kEditState_CLOSED != pC->State )
+    {
+        M4OSA_TRACE3_0("M4VSS3GPP_editCleanUp(): calling M4VSS3GPP_editClose");
+        err = M4VSS3GPP_editClose(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editCleanUp(): M4VSS3GPP_editClose returns 0x%x",
+                err);
+        }
+    }
+
+    /**
+    * Free the video encoder dummy AU */
+    if( M4OSA_NULL != pC->ewc.pDummyAuBuffer )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->ewc.pDummyAuBuffer);
+        pC->ewc.pDummyAuBuffer = M4OSA_NULL;
+    }
+
+    /**
+    * Free the Audio encoder context */
+    if( M4OSA_NULL != pC->ewc.pAudioEncCtxt )
+    {
+        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctClose(
+            pC->ewc.pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editCleanUp: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctCleanUp(
+            pC->ewc.pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editCleanUp: pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pC->ewc.pAudioEncCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the shells interfaces */
+    M4VSS3GPP_unRegisterAllWriters(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllEncoders(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllReaders(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllDecoders(&pC->ShellAPI);
+
+    /**
+    * Free the settings copied in the internal context */
+    M4VSS3GPP_intFreeSettingsList(pC);
+
+    /**
+    * Finally, Free context */
+    M4OSA_free((M4OSA_MemAddr32)pC);
+    pC = M4OSA_NULL;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_editCleanUp(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+M4OSA_ERR
+M4VSS3GPP_editRegisterExternalVideoDecoder( M4VSS3GPP_EditContext pContext,
+                                           M4VD_VideoType decoderType,
+                                           M4VD_Interface *pDecoderInterface,
+                                           M4OSA_Void *pUserData )
+{
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    /* Here the situation is a bit special: we need to record the registrations that are made,
+    so that we can replay them for each clip we create. */
+
+    if( decoderType >= M4VD_kVideoType_NB )
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    pC->registeredExternalDecs[decoderType].pDecoderInterface
+        = pDecoderInterface;
+    pC->registeredExternalDecs[decoderType].pUserData = pUserData;
+    pC->registeredExternalDecs[decoderType].registered = M4OSA_TRUE;
+
+    /* Notice it overwrites any HW decoder that may already have been registered for this type;
+    this is normal.*/
+
+    return M4NO_ERROR;
+
+#else
+
+    return M4ERR_NOT_IMPLEMENTED;
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+}
+
+M4OSA_ERR
+M4VSS3GPP_editRegisterExternalVideoEncoder( M4VSS3GPP_EditContext pContext,
+                                           M4VE_EncoderType encoderType,
+                                           M4VE_Interface *pEncoderInterface,
+                                           M4OSA_Void *pUserData )
+{
+#ifdef M4VSS_ENABLE_EXTERNAL_ENCODERS
+
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4ENCODER_GlobalInterface *shellInterface;
+    M4ENCODER_Format nativeType;
+
+    switch( encoderType )
+    {
+        case M4VE_kH263VideoEnc:
+            err = M4EGE_H263_getInterfaces(&nativeType, &shellInterface,
+                M4ENCODER_OPEN_ADVANCED);
+
+            break;
+
+        case M4VE_kMpeg4VideoEnc:
+            err = M4EGE_MPEG4_getInterfaces(&nativeType, &shellInterface,
+                M4ENCODER_OPEN_ADVANCED);
+            break;
+
+        case M4VE_kH264VideoEnc:
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editRegisterExternalVideoEncoder:\
+                H264 encoder type not implemented yet");
+            return M4ERR_NOT_IMPLEMENTED;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editRegisterExternalVideoEncoder: unknown encoderType %d",
+                encoderType);
+            return M4ERR_PARAMETER;
+            break;
+    }
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editRegisterExternalVideoEncoder:\
+            M4EGE_getInterface failed with error 0x%08X",
+            err);
+        return err;
+    }
+
+    err = M4VSS3GPP_registerVideoEncoder(&(pC->ShellAPI), nativeType,
+        shellInterface);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editRegisterExternalVideoEncoder:\
+            M4VSS3GPP_registerVideoEncoder failed with error 0x%08X",
+            err);
+        M4OSA_free((M4OSA_MemAddr32)shellInterface);
+        return err;
+    }
+
+    pC->ShellAPI.pVideoEncoderExternalAPITable[nativeType] = pEncoderInterface;
+    pC->ShellAPI.pVideoEncoderUserDataTable[nativeType] = pUserData;
+
+    return M4NO_ERROR;
+
+#else
+
+    return M4ERR_NOT_IMPLEMENTED;
+
+#endif
+
+}
+
+#ifdef WIN32
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_GetErrorMessage()
+ * @brief    Return a string describing the given error code
+ * @note    The input string must already be allocated (and long enough!)
+ * @param    err                (IN) Error code to get the description from
+ * @param    sMessage        (IN/OUT) Allocated string in which the description will be copied
+ * @return    M4NO_ERROR:        Input error is from the VSS3GPP module
+ * @return    M4ERR_PARAMETER: Input error is not from the VSS3GPP module
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4VSS3GPP_GetErrorMessage( M4OSA_ERR err, M4OSA_Char *sMessage )
+{
+    switch( err )
+    {
+        case M4VSS3GPP_WAR_EDITING_DONE:
+            strcpy(sMessage, "M4VSS3GPP_WAR_EDITING_DONE");
+            break;
+
+        case M4VSS3GPP_WAR_END_OF_AUDIO_MIXING:
+            strcpy(sMessage, "M4VSS3GPP_WAR_END_OF_AUDIO_MIXING");
+            break;
+
+        case M4VSS3GPP_WAR_END_OF_EXTRACT_PICTURE:
+            strcpy(sMessage, "M4VSS3GPP_WAR_END_OF_EXTRACT_PICTURE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_FILE_TYPE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_FILE_TYPE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_EFFECT_KIND:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_EFFECT_KIND");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE");
+            break;
+
+        case M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL");
+            break;
+
+        case M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL");
+            break;
+
+        case M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION:
+            strcpy(sMessage, "M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION");
+            break;
+
+        case M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT");
+            break;
+
+        case M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS:
+            strcpy(sMessage, "M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_3GPP_FILE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_3GPP_FILE");
+            break;
+
+        case M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT");
+            break;
+
+        case M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT");
+            break;
+
+        case M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED:
+            strcpy(sMessage, "M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED");
+            break;
+
+        case M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE");
+            break;
+
+        case M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE");
+            break;
+
+        case M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU");
+            break;
+
+        case M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR:
+            strcpy(sMessage, "M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE:
+            strcpy(sMessage,
+                "M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE:
+            strcpy(sMessage,
+                "M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION");
+            break;
+
+        case M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT");
+            break;
+
+        case M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE");
+            break;
+
+        case M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE");
+            break;
+
+        case M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING:
+            strcpy(sMessage,
+                "M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING");
+            break;
+
+        case M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY:
+            strcpy(sMessage, "M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY");
+            break;
+
+        case M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE:
+            strcpy(sMessage, "M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE");
+            break;
+
+        case M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS:
+            strcpy(sMessage, "M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS");
+            break;
+
+        case M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY:
+            strcpy(sMessage,
+                "M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY");
+            break;
+
+        case M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE");
+            break;
+
+        case M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO:
+            strcpy(sMessage, "M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO");
+            break;
+
+        case M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION:
+            strcpy(sMessage, "M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION");
+            break;
+
+        case M4VSS3GPP_ERR_UNDEFINED_AUDIO_TRACK_FILE_FORMAT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_UNDEFINED_AUDIO_TRACK_FILE_FORMAT");
+            break;
+
+        case M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM:
+            strcpy(sMessage, "M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM");
+            break;
+
+        case M4VSS3GPP_ERR_AUDIO_MIXING_UNSUPPORTED:
+            strcpy(sMessage, "M4VSS3GPP_ERR_AUDIO_MIXING_UNSUPPORTED");
+            break;
+
+        case M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK:
+            strcpy(sMessage,
+                "M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK");
+            break;
+
+        case M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED:
+            strcpy(sMessage, "M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED");
+            break;
+
+        case M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP");
+            break;
+
+        case M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP:
+            strcpy(sMessage, "M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP");
+            break;
+
+        case M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED:
+            strcpy(sMessage, "M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED");
+            break;
+
+        case M4VSS3GPP_ERR_NO_SUPPORTED_VIDEO_STREAM_IN_FILE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_NO_SUPPORTED_VIDEO_STREAM_IN_FILE");
+            break;
+
+        default: /**< Not a VSS3GPP error */
+            strcpy(sMessage, "");
+            return M4ERR_PARAMETER;
+    }
+    return M4NO_ERROR;
+}
+
+#endif /* WIN32 */
+
+/********************************************************/
+/********************************************************/
+/********************************************************/
+/****************   STATIC FUNCTIONS   ******************/
+/********************************************************/
+/********************************************************/
+/********************************************************/
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipSettingsSanityCheck()
+ * @brief    Simplify the given clip settings
+ * @note    This function may modify the given structure
+ * @param   pClip    (IN/OUT) Clip settings
+ * @return    M4NO_ERROR:            No error
+ * @return    M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL:
+ ******************************************************************************
+ */
+
+static M4OSA_ERR M4VSS3GPP_intClipSettingsSanityCheck(
+    M4VSS3GPP_ClipSettings *pClip )
+{
+    M4OSA_UInt8 uiFx;
+    M4OSA_UInt32
+        uiClipActualDuration; /**< the clip duration once the cuts are done */
+    M4OSA_UInt32 uiDuration;
+    M4VSS3GPP_EffectSettings *pFx;
+
+    /**
+    * If begin cut is too far, return an error */
+    uiDuration = pClip->ClipProperties.uiClipDuration;
+
+    if( pClip->uiBeginCutTime > uiDuration )
+    {
+        M4OSA_TRACE1_2(
+            "M4VSS3GPP_intClipSettingsSanityCheck: %d > %d,\
+            returning M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION",
+            pClip->uiBeginCutTime, uiDuration);
+        return M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION;
+    }
+
+    /**
+    * If end cut is too far, set to zero (it means no end cut) */
+    if( pClip->uiEndCutTime > uiDuration )
+    {
+        pClip->uiEndCutTime = 0;
+    }
+
+    /**
+    * Compute actual clip duration (once cuts are done) */
+    if( 0 == pClip->uiEndCutTime )
+    {
+        /**
+        * No end cut */
+        uiClipActualDuration = uiDuration - pClip->uiBeginCutTime;
+    }
+    else
+    {
+        if( pClip->uiBeginCutTime >= pClip->uiEndCutTime )
+        {
+            M4OSA_TRACE1_2(
+                "M4VSS3GPP_intClipSettingsSanityCheck: %d > %d,\
+                returning M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT",
+                pClip->uiBeginCutTime, pClip->uiEndCutTime);
+            return M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT;
+        }
+        uiClipActualDuration = pClip->uiEndCutTime - pClip->uiBeginCutTime;
+    }
+
+    if( M4VIDEOEDITING_kMP3 != pClip->ClipProperties.AudioStreamType )
+    {
+#if 0 /*RC*/
+        /**
+        * Check the three effects */
+
+        for ( uiFx = 0; uiFx < pClip->nbEffects; uiFx++ )
+        {
+            pFx = &(pClip->Effects[uiFx]); /**< shortcut */
+
+            /**
+            * No effect cases */
+            if( 0 == pFx->uiDuration )
+            {
+                pFx->VideoEffectType = M4VSS3GPP_kVideoEffectType_None;
+                pFx->AudioEffectType = M4VSS3GPP_kAudioEffectType_None;
+            }
+            else if( ( M4VSS3GPP_kVideoEffectType_None == pFx->VideoEffectType)
+                && (M4VSS3GPP_kAudioEffectType_None == pFx->AudioEffectType) )
+            {
+                pFx->uiStartTime = 0;
+                pFx->uiDuration = 0;
+            }
+
+            /**
+            * We convert all the effects into middle effects, computing the corresponding
+            * start time and duration */
+            if( M4VSS3GPP_kEffectKind_Begin == pFx->EffectKind )
+            {
+                pFx->uiStartTime = 0;
+            }
+            else if( M4VSS3GPP_kEffectKind_End == pFx->EffectKind )
+            {
+                /**
+                * Duration sanity check */
+                if( pFx->uiDuration > uiClipActualDuration )
+                {
+                    pFx->uiDuration = uiClipActualDuration;
+                }
+                /**
+                * Start time computing */
+                pFx->uiStartTime = uiClipActualDuration - pFx->uiDuration;
+            }
+            else if( M4VSS3GPP_kEffectKind_Middle == pFx->EffectKind )
+            {
+                /**
+                * Duration sanity check */
+                if( pFx->uiDuration + pFx->uiStartTime > uiClipActualDuration )
+                {
+                    pFx->uiDuration = uiClipActualDuration - pFx->uiStartTime;
+                }
+            }
+            else
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipSettingsSanityCheck: unknown effect kind (0x%x),\
+                    returning M4VSS3GPP_ERR_INVALID_EFFECT_KIND",
+                    pFx->EffectKind);
+                return M4VSS3GPP_ERR_INVALID_EFFECT_KIND;
+            }
+
+            /**
+            * Check external effect function is set */
+            if( ( pFx->VideoEffectType >= M4VSS3GPP_kVideoEffectType_External)
+                && (M4OSA_NULL == pFx->ExtVideoEffectFct) )
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intClipSettingsSanityCheck:\
+                    returning M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL");
+                return M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL;
+            }
+        }
+
+#endif
+
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intTransitionSettingsSanityCheck()
+ * @brief    Simplify the given transition settings
+ * @note     This function may modify the given structure
+ * @param    pTransition    (IN/OUT) Transition settings
+ * @return    M4NO_ERROR:            No error
+ * @return    M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL:
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intTransitionSettingsSanityCheck(
+    M4VSS3GPP_TransitionSettings *pTransition )
+{
+    /**
+    * No transition */
+    if( 0 == pTransition->uiTransitionDuration )
+    {
+        pTransition->VideoTransitionType = M4VSS3GPP_kVideoTransitionType_None;
+        pTransition->AudioTransitionType = M4VSS3GPP_kAudioTransitionType_None;
+    }
+    else if( ( M4VSS3GPP_kVideoTransitionType_None
+        == pTransition->VideoTransitionType)
+        && (M4VSS3GPP_kAudioTransitionType_None
+        == pTransition->AudioTransitionType) )
+    {
+        pTransition->uiTransitionDuration = 0;
+    }
+
+    /**
+    * Check external transition function is set */
+    if( ( pTransition->VideoTransitionType
+        >= M4VSS3GPP_kVideoTransitionType_External)
+        && (M4OSA_NULL == pTransition->ExtVideoTransitionFct) )
+    {
+        return M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL;
+    }
+
+    /**
+    * Set minimal transition duration */
+    if( ( pTransition->uiTransitionDuration > 0)
+        && (pTransition->uiTransitionDuration
+        < M4VSS3GPP_MINIMAL_TRANSITION_DURATION) )
+    {
+        pTransition->uiTransitionDuration =
+            M4VSS3GPP_MINIMAL_TRANSITION_DURATION;
+    }
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intFreeSettingsList()
+ * @brief    Free the settings copied in the internal context
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intFreeSettingsList(
+    M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_UInt32 i;
+
+    /**
+    * Free the settings list */
+    if( M4OSA_NULL != pC->pClipList )
+    {
+        for ( i = 0; i < pC->uiClipNumber; i++ )
+        {
+            M4VSS3GPP_editFreeClipSettings(&(pC->pClipList[i]));
+        }
+
+        M4OSA_free((M4OSA_MemAddr32)pC->pClipList);
+        pC->pClipList = M4OSA_NULL;
+    }
+
+    /**
+    * Free the transition list */
+    if( M4OSA_NULL != pC->pTransitionList )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pTransitionList);
+        pC->pTransitionList = M4OSA_NULL;
+    }
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreateMP3OutputFile()
+ * @brief        Creates and prepares the output MP3 file
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intCreateMP3OutputFile( M4VSS3GPP_InternalEditContext *pC,
+                                 M4OSA_Void *pOutputFile )
+{
+    M4OSA_ERR err;
+
+    err =
+        pC->pOsaFileWritPtr->openWrite(&pC->ewc.p3gpWriterContext, pOutputFile,
+        M4OSA_kFileWrite);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreateMP3OutputFile: WriteOpen returns 0x%x!", err);
+        return err;
+    }
+
+    return M4NO_ERROR;
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreate3GPPOutputFile()
+ * @brief   Creates and prepares the output 3GPP file
+ * @note    Creates the writer, creates the output file, adds the streams,
+ *          and readies the writing process
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR
+M4VSS3GPP_intCreate3GPPOutputFile( M4VSS3GPP_EncodeWriteContext *pC_ewc,
+                                  M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
+                                  M4OSA_FileWriterPointer *pOsaFileWritPtr,
+                                  M4OSA_Void *pOutputFile,
+                                  M4OSA_FileReadPointer *pOsaFileReadPtr,
+                                  M4OSA_Void *pTempFile,
+                                  M4OSA_UInt32 maxOutputFileSize )
+{
+    M4OSA_ERR err;
+    M4OSA_UInt32 uiVersion;
+    M4SYS_StreamIDValue temp;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_intCreate3GPPOutputFile called with pC_ewc=0x%x, pOutputFile=0x%x",
+        pC_ewc, pOutputFile);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pC_ewc), M4ERR_PARAMETER,
+        "M4VSS3GPP_intCreate3GPPOutputFile: pC_ewc is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pOutputFile), M4ERR_PARAMETER,
+        "M4VSS3GPP_intCreate3GPPOutputFile: pOutputFile is M4OSA_NULL");
+
+    /* Set writer */
+    err =
+        M4VSS3GPP_setCurrentWriter(pC_ShellAPI, M4VIDEOEDITING_kFileType_3GPP);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Create the output file */
+    err = pC_ShellAPI->pWriterGlobalFcts->pFctOpen(&pC_ewc->p3gpWriterContext,
+        pOutputFile, pOsaFileWritPtr, pTempFile, pOsaFileReadPtr);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreate3GPPOutputFile: pWriterGlobalFcts->pFctOpen returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Set the signature option of the writer */
+    err =
+        pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(pC_ewc->p3gpWriterContext,
+        M4WRITER_kEmbeddedString, (M4OSA_DataOption)"NXP-SW : VSS    ");
+
+    if( ( M4NO_ERROR != err) && (((M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+        != err) ) /* this option may not be implemented by some writers */
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreate3GPPOutputFile:\
+            pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedString) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /*11/12/2008 CR3283 MMS use case for VideoArtist:
+    Set the max output file size option in the writer so that the output file will be
+    smaller than the given file size limitation*/
+    if( maxOutputFileSize > 0 )
+    {
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+            pC_ewc->p3gpWriterContext,
+            M4WRITER_kMaxFileSize, &maxOutputFileSize);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                writer set option M4WRITER_kMaxFileSize returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Set the version option of the writer */
+    uiVersion =
+        (M4VIDEOEDITING_VERSION_MAJOR * 100 + M4VIDEOEDITING_VERSION_MINOR * 10
+        + M4VIDEOEDITING_VERSION_REVISION);
+    err =
+        pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(pC_ewc->p3gpWriterContext,
+        M4WRITER_kEmbeddedVersion, (M4OSA_DataOption) &uiVersion);
+
+    if( ( M4NO_ERROR != err) && (((M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+        != err) ) /* this option may not be implemented by some writers */
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreate3GPPOutputFile:\
+            pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedVersion) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * In case of EMP, we have to explicitly give an EMP ftyp to the writer */
+    if( M4OSA_TRUE == pC_ewc->bActivateEmp )
+    {
+        M4VIDEOEDITING_FtypBox ftyp;
+
+        ftyp.major_brand = M4VIDEOEDITING_BRAND_3GP4;
+        ftyp.minor_version = M4VIDEOEDITING_BRAND_0000;
+        ftyp.nbCompatibleBrands = 2;
+        ftyp.compatible_brands[0] = M4VIDEOEDITING_BRAND_3GP4;
+        ftyp.compatible_brands[1] = M4VIDEOEDITING_BRAND_EMP;
+
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+            pC_ewc->p3gpWriterContext,
+            M4WRITER_kSetFtypBox, (M4OSA_DataOption) &ftyp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctSetOption(M4WRITER_kSetFtypBox) returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    if( M4SYS_kVideoUnknown != pC_ewc->VideoStreamType )
+    {
+        /**
+        * Set the video stream properties */
+        pC_ewc->WriterVideoStreamInfo.height = pC_ewc->uiVideoHeight;
+        pC_ewc->WriterVideoStreamInfo.width = pC_ewc->uiVideoWidth;
+        pC_ewc->WriterVideoStreamInfo.fps =
+            0.0; /**< Not used by the shell/core writer */
+        pC_ewc->WriterVideoStreamInfo.Header.pBuf =
+            pC_ewc->pVideoOutputDsi; /**< Previously computed output DSI */
+        pC_ewc->WriterVideoStreamInfo.Header.Size = pC_ewc->
+            uiVideoOutputDsiSize; /**< Previously computed output DSI size */
+
+        pC_ewc->WriterVideoStream.streamType = pC_ewc->VideoStreamType;
+
+        switch( pC_ewc->VideoStreamType )
+        {
+            case M4SYS_kMPEG_4:
+            case M4SYS_kH263:
+            case M4SYS_kH264:
+                /**< We HAVE to put a value here... */
+                pC_ewc->WriterVideoStream.averageBitrate =
+                    pC_ewc->uiVideoBitrate;
+                pC_ewc->WriterVideoStream.maxBitrate = pC_ewc->uiVideoBitrate;
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intCreate3GPPOutputFile: unknown input video format (0x%x),\
+                    returning M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT!",
+                    pC_ewc->VideoStreamType);
+                return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
+        }
+
+        pC_ewc->WriterVideoStream.streamID = M4VSS3GPP_WRITER_VIDEO_STREAM_ID;
+        pC_ewc->WriterVideoStream.timeScale =
+            0; /**< Not used by the shell/core writer */
+        pC_ewc->WriterVideoStream.profileLevel =
+            0; /**< Not used by the shell/core writer */
+        pC_ewc->WriterVideoStream.duration =
+            0; /**< Not used by the shell/core writer */
+
+        pC_ewc->WriterVideoStream.decoderSpecificInfoSize =
+            sizeof(M4WRITER_StreamVideoInfos);
+        pC_ewc->WriterVideoStream.decoderSpecificInfo =
+            (M4OSA_MemAddr32) &(pC_ewc->WriterVideoStreamInfo);
+
+        /**
+        * Add the video stream */
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctAddStream(
+            pC_ewc->p3gpWriterContext, &pC_ewc->WriterVideoStream);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctAddStream(video) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Update AU properties for video stream */
+        pC_ewc->WriterVideoAU.attribute = AU_RAP;
+        pC_ewc->WriterVideoAU.CTS = 0;
+        pC_ewc->WriterVideoAU.DTS = 0;    /** Reset time */
+        pC_ewc->WriterVideoAU.frag = M4OSA_NULL;
+        pC_ewc->WriterVideoAU.nbFrag = 0; /** No fragment */
+        pC_ewc->WriterVideoAU.size = 0;
+        pC_ewc->WriterVideoAU.dataAddress = M4OSA_NULL;
+        pC_ewc->WriterVideoAU.stream = &(pC_ewc->WriterVideoStream);
+
+        /**
+        * Set the writer max video AU size */
+        pC_ewc->uiVideoMaxAuSize = (M4OSA_UInt32)(1.5F
+            *(M4OSA_Float)(pC_ewc->WriterVideoStreamInfo.width
+            * pC_ewc->WriterVideoStreamInfo.height)
+            * M4VSS3GPP_VIDEO_MIN_COMPRESSION_RATIO);
+        temp.streamID = M4VSS3GPP_WRITER_VIDEO_STREAM_ID;
+        temp.value = pC_ewc->uiVideoMaxAuSize;
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+            pC_ewc->p3gpWriterContext, (M4OSA_UInt32)M4WRITER_kMaxAUSize,
+            (M4OSA_DataOption) &temp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, video) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Set the writer max video chunk size */
+        temp.streamID = M4VSS3GPP_WRITER_VIDEO_STREAM_ID;
+        temp.value = (M4OSA_UInt32)(pC_ewc->uiVideoMaxAuSize \
+            * M4VSS3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO); /**< from max AU size to
+                                                                  max chunk size */
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+            pC_ewc->p3gpWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
+            (M4OSA_DataOption) &temp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxChunckSize, video) returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    if( M4SYS_kAudioUnknown != pC_ewc->AudioStreamType )
+    {
+        M4WRITER_StreamAudioInfos streamAudioInfo;
+
+        streamAudioInfo.nbSamplesPerSec = 0; /**< unused by our shell writer */
+        streamAudioInfo.nbBitsPerSample = 0; /**< unused by our shell writer */
+        streamAudioInfo.nbChannels = 1;      /**< unused by our shell writer */
+
+        if( pC_ewc->pAudioOutputDsi != M4OSA_NULL )
+        {
+            /* If we copy the stream from the input, we copy its DSI */
+            streamAudioInfo.Header.Size = pC_ewc->uiAudioOutputDsiSize;
+            streamAudioInfo.Header.pBuf = pC_ewc->pAudioOutputDsi;
+        }
+        else
+        {
+            /* Writer will put a default DSI */
+            streamAudioInfo.Header.Size = 0;
+            streamAudioInfo.Header.pBuf = M4OSA_NULL;
+        }
+
+        pC_ewc->WriterAudioStream.streamID = M4VSS3GPP_WRITER_AUDIO_STREAM_ID;
+        pC_ewc->WriterAudioStream.streamType = pC_ewc->AudioStreamType;
+        pC_ewc->WriterAudioStream.duration =
+            0; /**< Not used by the shell/core writer */
+        pC_ewc->WriterAudioStream.profileLevel =
+            0; /**< Not used by the shell/core writer */
+        pC_ewc->WriterAudioStreamInfo.nbSamplesPerSec =
+            pC_ewc->uiSamplingFrequency;
+        pC_ewc->WriterAudioStream.timeScale = pC_ewc->uiSamplingFrequency;
+        pC_ewc->WriterAudioStreamInfo.nbChannels =
+            (M4OSA_UInt16)pC_ewc->uiNbChannels;
+        pC_ewc->WriterAudioStreamInfo.nbBitsPerSample =
+            0; /**< Not used by the shell/core writer */
+
+        /**
+        * Add the audio stream */
+        switch( pC_ewc->AudioStreamType )
+        {
+            case M4SYS_kAMR:
+                pC_ewc->WriterAudioStream.averageBitrate =
+                    0; /**< It is not used by the shell, the DSI is taken into account instead */
+                pC_ewc->WriterAudioStream.maxBitrate =
+                    0; /**< Not used by the shell/core writer */
+                break;
+
+            case M4SYS_kAAC:
+                pC_ewc->WriterAudioStream.averageBitrate =
+                    pC_ewc->uiAudioBitrate;
+                pC_ewc->WriterAudioStream.maxBitrate = pC_ewc->uiAudioBitrate;
+                break;
+
+            case M4SYS_kEVRC:
+                pC_ewc->WriterAudioStream.averageBitrate =
+                    0; /**< It is not used by the shell, the DSI is taken into account instead */
+                pC_ewc->WriterAudioStream.maxBitrate =
+                    0; /**< Not used by the shell/core writer */
+                break;
+
+            case M4SYS_kMP3: /**< there can't be MP3 track in 3GPP file -> error */
+            default:
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intCreate3GPPOutputFile: unknown output audio format (0x%x),\
+                    returning M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT!",
+                    pC_ewc->AudioStreamType);
+                return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT;
+        }
+
+        /**
+        * Our writer shell interface is a little tricky: we put M4WRITER_StreamAudioInfos
+        in the DSI pointer... */
+        pC_ewc->WriterAudioStream.decoderSpecificInfo =
+            (M4OSA_MemAddr32) &streamAudioInfo;
+
+        /**
+        * Link the AU and the stream */
+        pC_ewc->WriterAudioAU.stream = &(pC_ewc->WriterAudioStream);
+        pC_ewc->WriterAudioAU.dataAddress = M4OSA_NULL;
+        pC_ewc->WriterAudioAU.size = 0;
+        pC_ewc->WriterAudioAU.CTS =
+            -pC_ewc->iSilenceFrameDuration; /** Reset time */
+        pC_ewc->WriterAudioAU.DTS = 0;
+        pC_ewc->WriterAudioAU.attribute = 0;
+        pC_ewc->WriterAudioAU.nbFrag = 0; /** No fragment */
+        pC_ewc->WriterAudioAU.frag = M4OSA_NULL;
+
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctAddStream(
+            pC_ewc->p3gpWriterContext, &pC_ewc->WriterAudioStream);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctAddStream(audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Set the writer max audio AU size */
+        pC_ewc->uiAudioMaxAuSize = M4VSS3GPP_AUDIO_MAX_AU_SIZE;
+        temp.streamID = M4VSS3GPP_WRITER_AUDIO_STREAM_ID;
+        temp.value = pC_ewc->uiAudioMaxAuSize;
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+            pC_ewc->p3gpWriterContext, (M4OSA_UInt32)M4WRITER_kMaxAUSize,
+            (M4OSA_DataOption) &temp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Set the writer max audio chunk size */
+        temp.streamID = M4VSS3GPP_WRITER_AUDIO_STREAM_ID;
+        temp.value = M4VSS3GPP_AUDIO_MAX_CHUNCK_SIZE;
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+            pC_ewc->p3gpWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
+            (M4OSA_DataOption) &temp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxChunckSize, audio) returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * All streams added, we're now ready to write */
+    err = pC_ShellAPI->pWriterGlobalFcts->pFctStartWriting(
+        pC_ewc->p3gpWriterContext);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreate3GPPOutputFile:\
+            pWriterGlobalFcts->pFctStartWriting() returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intCreate3GPPOutputFile(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR  M4VSS3GPP_intComputeOutputVideoAndAudioDsi()
+ * @brief    Generate a H263 or MPEG-4 decoder specific info compatible with all input video
+ *            tracks. Copy audio dsi from master clip.
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intComputeOutputVideoAndAudioDsi( M4VSS3GPP_InternalEditContext *pC,
+                                           M4OSA_UInt8 uiMasterClip )
+{
+    M4OSA_UInt8 uiCurrentLevel, uiNewLevel;
+    M4OSA_UInt8 uiCurrentProf, uiNewProf;
+    M4OSA_Int32 iResynchMarkerDsiIndex;
+    M4_StreamHandler *pStreamForDsi;
+    M4VSS3GPP_ClipContext *pClip;
+    M4OSA_ERR err;
+    M4OSA_UInt32 i;
+
+    M4ENCODER_Header *encHeader;
+    M4SYS_StreamIDmemAddr streamHeader;
+
+    pStreamForDsi = M4OSA_NULL;
+    pClip = M4OSA_NULL;
+
+    /**
+    * H263 case */
+    if( M4SYS_kH263 == pC->ewc.VideoStreamType )
+    {
+        /**
+        * H263 output DSI is always 7 bytes */
+        pC->ewc.uiVideoOutputDsiSize = 7;
+        pC->ewc.pVideoOutputDsi =
+            (M4OSA_MemAddr8)M4OSA_malloc(pC->ewc.uiVideoOutputDsiSize,
+            M4VSS3GPP, (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (H263)");
+
+        if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
+                unable to allocate pVideoOutputDsi (H263), returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+        /**
+        * (We override the input vendor info.
+        * At least we know that nothing special will be tried with PHLP-stamped
+        * edited streams...) */
+        pC->ewc.pVideoOutputDsi[0] = 'P';
+        pC->ewc.pVideoOutputDsi[1] = 'H';
+        pC->ewc.pVideoOutputDsi[2] = 'L';
+        pC->ewc.pVideoOutputDsi[3] = 'P';
+
+        /**
+        * Decoder version is 0 */
+        pC->ewc.pVideoOutputDsi[4] = 0;
+
+        /**
+        * We take the max level of all input streams, but 10 is the minimum */
+        uiCurrentLevel = 10;
+
+        for ( i = 0; i < pC->uiClipNumber; i++ )
+        {
+            uiNewLevel = pC->pClipList[i].ClipProperties.uiH263level;
+
+            if( uiNewLevel > uiCurrentLevel )
+            {
+                uiCurrentLevel = uiNewLevel;
+            }
+        }
+
+        /**
+        * Level is the sixth byte in the DSI */
+        pC->ewc.pVideoOutputDsi[5] = uiCurrentLevel;
+
+        /**
+        * Profile is always 0, and it's the seventh byte in the DSI */
+        pC->ewc.pVideoOutputDsi[6] = 0;
+    }
+
+    /**
+    * MPEG-4 case */
+    else if( M4SYS_kMPEG_4 == pC->ewc.VideoStreamType )
+    {
+        /**
+        * Profile combination rules:
+        *   8 and x -> x
+        *   1, 2 or 3 -> max
+        *   9 and 1 -> 2
+        *   9 and 2 -> 2
+        *   9 and 3 -> 3
+        */
+
+        /**
+        * Note:
+        *   The parts of the output video encoded by the VSS3GPP
+        *   have a profile of 8.
+        *   Since 8 is the least "strong" profile (8 and x --> x),
+        *   we can check only the input clips to compute the
+        *   profile of the output combined clip.
+        */
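+
+        /**
+        * Worked example (illustrative): with input clips whose profiles are
+        * {8, 9, 3}, the loop below starts at 8, "8 and 9 -> 9" keeps 9, and
+        * "9 and 3 -> 3" gives a combined output profile of 3 */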
+
+        /**
+        * Start with profile of the first clip */
+        uiCurrentProf = pC->pClipList[0].ClipProperties.uiVideoProfile;
+
+        /**
+        * Combine current profile with the one of the next clip */
+        for ( i = 1; i < pC->uiClipNumber; i++ )
+        {
+            uiNewProf = pC->pClipList[i].ClipProperties.uiVideoProfile;
+
+            switch( uiNewProf )
+            {
+                case 8:
+                    /**< 8 + x --> x */
+                    /**< uiCurrentProf is not updated */
+                    break;
+
+                case 1:
+                case 2:
+                case 3:
+                    switch( uiCurrentProf )
+                    {
+                        case 1:
+                        case 2:
+                        case 3:
+                        case 4:
+                        case 5:
+                            /**< 1, 2, 3, 4 or 5 -> max */
+                            uiCurrentProf = (uiCurrentProf > uiNewProf)
+                                ? uiCurrentProf : uiNewProf;
+                            break;
+
+                        case 8: /**< 8 + x -> x */
+                            uiCurrentProf = uiNewProf;
+                            break;
+
+                        case 9:
+                            /**< 9 and 1 -> 2 */
+                            /**< 9 and 2 -> 2 */
+                            /**< 9 and 3 -> 3 */
+                            /**< 9 and 4 -> 4 */
+                            /**< 9 and 5 -> 5 */
+                            uiCurrentProf = (uiNewProf > 2) ? uiNewProf : 2;
+                            break;
+                    }
+                    break;
+
+                case 9:
+                    switch( uiCurrentProf )
+                    {
+                        case 1:
+                        case 2:
+                        case 3:
+                            /**< 9 and 1 -> 2 */
+                            /**< 9 and 2 -> 2 */
+                            /**< 9 and 3 -> 3 */
+                            uiCurrentProf =
+                                (uiCurrentProf > 2) ? uiCurrentProf : 2;
+                            break;
+
+                        case 9: /**< 9 + x -> x */
+                        case 8: /**< 8 + x -> x */
+                            uiCurrentProf = uiNewProf;
+                            break;
+                }
+            }
+        }
+
+        /**
+        * Look for the DSI of an input video stream which would use the Resynch. Marker tool */
+        i = 0;
+        iResynchMarkerDsiIndex =
+            0; /**< By default we take the first DSI (if we find no Resynch Marker DSI) */
+
+        while( i < pC->uiClipNumber )
+        {
+            if( M4OSA_TRUE
+                == pC->pClipList[i].ClipProperties.bMPEG4resynchMarker )
+            {
+                iResynchMarkerDsiIndex = i;
+                break; /**< we found it, get out of the while loop */
+            }
+            i++;
+        }
+
+        /**
+        * Get the DSI of the clip found. If it is the first clip, it is already opened.
+        * Else we must open it (and later close it...) */
+        if( 0 == iResynchMarkerDsiIndex )
+        {
+            pStreamForDsi = &(pC->pC1->pVideoStream->m_basicProperties);
+        }
+        else
+        {
+            /**
+            * We can use the fast open mode and the skip audio mode to get the DSI */
+            err = M4VSS3GPP_intClipInit(&pClip, pC->pOsaFileReadPtr);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intClipInit() returns 0x%x!",
+                    err);
+
+                if( M4OSA_NULL != pClip )
+                {
+                    M4VSS3GPP_intClipCleanUp(pClip);
+                }
+                return err;
+            }
+
+            err = M4VSS3GPP_intClipOpen(pClip,
+                &pC->pClipList[iResynchMarkerDsiIndex], M4OSA_TRUE,
+                M4OSA_TRUE, M4OSA_TRUE);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intClipOpen() returns 0x%x!",
+                    err);
+                M4VSS3GPP_intClipCleanUp(pClip);
+                return err;
+            }
+
+            pStreamForDsi = &(pClip->pVideoStream->m_basicProperties);
+        }
+
+        /**
+        * Allocate and copy the new DSI */
+        pC->ewc.pVideoOutputDsi = (M4OSA_MemAddr8)M4OSA_malloc(
+            pStreamForDsi->m_decoderSpecificInfoSize,
+            M4VSS3GPP, (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (MPEG4)");
+
+        if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
+                unable to allocate pVideoOutputDsi (MPEG4), returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->ewc.uiVideoOutputDsiSize =
+            (M4OSA_UInt16)pStreamForDsi->m_decoderSpecificInfoSize;
+        M4OSA_memcpy(pC->ewc.pVideoOutputDsi,
+            (M4OSA_MemAddr8)pStreamForDsi->m_pDecoderSpecificInfo,
+            pC->ewc.uiVideoOutputDsiSize);
+
+        /**
+        * We rewrite the profile in the output DSI because it may not be the right one.
+        * The profile and level indication is always at byte number 4 */
+        (pC->ewc.pVideoOutputDsi)[4] = uiCurrentProf;
+
+        /**
+        * If a clip has been temporarily opened to get its DSI, close it */
+        if( M4OSA_NULL != pClip )
+        {
+            err = M4VSS3GPP_intClipCleanUp(pClip);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intClipCleanUp() returns 0x%x!",
+                    err);
+                return err;
+            }
+        }
+    }
+    else if( M4SYS_kH264 == pC->ewc.VideoStreamType )
+    {
+
+        /* For H.264 encoder case
+        * Fetch the DSI from the shell video encoder, and feed it to the writer before
+        closing it. */
+
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intComputeOutputVideoAndAudioDsi: get DSI for H264 stream");
+
+        if( M4OSA_NULL == pC->ewc.pEncContext )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi: pC->ewc.pEncContext is NULL");
+            err = M4VSS3GPP_intCreateVideoEncoder(pC);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intCreateVideoEncoder returned error 0x%x",
+                    err);
+            }
+        }
+
+        if( M4OSA_NULL != pC->ewc.pEncContext )
+        {
+            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctGetOption(
+                pC->ewc.pEncContext, M4ENCODER_kOptionID_EncoderHeader,
+                (M4OSA_DataOption) &encHeader);
+
+            if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    failed to get the encoder header (err 0x%x)",
+                    err);
+                M4OSA_TRACE1_2(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi: encHeader->pBuf=0x%x, size=0x%x",
+                    encHeader->pBuf, encHeader->Size);
+            }
+            else
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    send DSI for H264 stream to 3GP writer");
+
+                /**
+                * Allocate and copy the new DSI */
+                pC->ewc.pVideoOutputDsi =
+                    (M4OSA_MemAddr8)M4OSA_malloc(encHeader->Size, M4VSS3GPP,
+                    (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (H264)");
+
+                if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
+                        unable to allocate pVideoOutputDsi (H264), returning M4ERR_ALLOC");
+                    return M4ERR_ALLOC;
+                }
+                pC->ewc.uiVideoOutputDsiSize = (M4OSA_UInt16)encHeader->Size;
+                M4OSA_memcpy(pC->ewc.pVideoOutputDsi, encHeader->pBuf,
+                    encHeader->Size);
+            }
+
+            err = M4VSS3GPP_intDestroyVideoEncoder(pC);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intDestroyVideoEncoder returned error 0x%x",
+                    err);
+            }
+        }
+        else
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                pC->ewc.pEncContext is NULL, cannot get the DSI");
+        }
+    }
+
+    pStreamForDsi = M4OSA_NULL;
+    pClip = M4OSA_NULL;
+
+    /* Compute Audio DSI */
+    if( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType )
+    {
+        if( uiMasterClip == 0 )
+        {
+            /* Clip is already opened */
+            pStreamForDsi = &(pC->pC1->pAudioStream->m_basicProperties);
+        }
+        else
+        {
+            /**
+            * We can use the fast open mode to get the DSI */
+            err = M4VSS3GPP_intClipInit(&pClip, pC->pOsaFileReadPtr);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intClipInit() returns 0x%x!",
+                    err);
+
+                if( pClip != M4OSA_NULL )
+                {
+                    M4VSS3GPP_intClipCleanUp(pClip);
+                }
+                return err;
+            }
+
+            err = M4VSS3GPP_intClipOpen(pClip, &pC->pClipList[uiMasterClip],
+                M4OSA_FALSE, M4OSA_TRUE, M4OSA_TRUE);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intClipOpen() returns 0x%x!",
+                    err);
+                M4VSS3GPP_intClipCleanUp(pClip);
+                return err;
+            }
+
+            pStreamForDsi = &(pClip->pAudioStream->m_basicProperties);
+        }
+
+        /**
+        * Allocate and copy the new DSI */
+        pC->ewc.pAudioOutputDsi = (M4OSA_MemAddr8)M4OSA_malloc(
+            pStreamForDsi->m_decoderSpecificInfoSize,
+            M4VSS3GPP, (M4OSA_Char *)"pC->ewc.pAudioOutputDsi");
+
+        if( M4OSA_NULL == pC->ewc.pAudioOutputDsi )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
+                unable to allocate pAudioOutputDsi, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->ewc.uiAudioOutputDsiSize =
+            (M4OSA_UInt16)pStreamForDsi->m_decoderSpecificInfoSize;
+        M4OSA_memcpy(pC->ewc.pAudioOutputDsi,
+            (M4OSA_MemAddr8)pStreamForDsi->m_pDecoderSpecificInfo,
+            pC->ewc.uiAudioOutputDsiSize);
+
+        /**
+        * If a clip has been temporarily opened to get its DSI, close it */
+        if( M4OSA_NULL != pClip )
+        {
+            err = M4VSS3GPP_intClipCleanUp(pClip);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intClipCleanUp() returns 0x%x!",
+                    err);
+                return err;
+            }
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intComputeOutputVideoAndAudioDsi(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intSwitchToNextClip()
+ * @brief    Switch from the current clip to the next one
+ * @param   pC            (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intSwitchToNextClip(
+    M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+
+    if( M4OSA_NULL != pC->pC1 )
+    {
+        /**
+        * Close the current first clip */
+        err = M4VSS3GPP_intClipCleanUp(pC->pC1);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSwitchToNextClip: M4VSS3GPP_intClipCleanUp(C1) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        *  increment clip counter */
+        pC->uiCurrentClip++;
+    }
+
+    /**
+    * Check if we reached the last clip */
+    if( pC->uiCurrentClip >= pC->uiClipNumber )
+    {
+        pC->pC1 = M4OSA_NULL;
+        pC->State = M4VSS3GPP_kEditState_FINISHED;
+
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intSwitchToNextClip:\
+            last clip reached, returning M4VSS3GPP_WAR_EDITING_DONE");
+        return M4VSS3GPP_WAR_EDITING_DONE;
+    }
+
+    /**
+    * If the next clip has already been opened, set it as the first clip */
+    if( M4OSA_NULL != pC->pC2 )
+    {
+        pC->pC1 = pC->pC2;
+        pC->pC2 = M4OSA_NULL;
+    }
+    /**
+    * else open it */
+    else
+    {
+        err = M4VSS3GPP_intOpenClip(pC, &pC->pC1,
+            &pC->pClipList[pC->uiCurrentClip]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSwitchToNextClip: M4VSS3GPP_intOpenClip() returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * If the second clip has not been opened yet, there has been no transition,
+        * so both the output video and audio times are valid,
+        * and we can set both the video and audio offsets of this clip */
+
+        /**
+        * Add current video output CTS to the clip video offset */
+
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+        pC->pC1->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts;
+        /**
+        * Add current audio output CTS to the clip audio offset */
+        pC->pC1->iAoffset +=
+            (M4OSA_UInt32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
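+        /**
+        * Illustrative example: if the output video CTS is at 12000 ms when this clip
+        * becomes the current clip, iVoffset grows by 12000, so a frame with local CTS t
+        * is written at output time t + iVoffset; iAoffset receives the equivalent offset
+        * expressed in audio timescale ticks */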
+
+        /**
+        * 2005-03-24: Bug fix for audio-video synchronization:
+        * Up to a fraction of an audio AU duration of desynchronization can accumulate at
+        * each assembly, which becomes audible when many clips are assembled.
+        * This fix resynchronizes the audio track whenever the delta is larger than one
+        * audio AU duration:
+        * we step one AU forward in the second clip and adjust the audio offset accordingly. */
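+        /**
+        * For example (illustrative numbers): if the accumulated audio offset exceeds the
+        * video offset by 1.2 silence-frame durations, the condition below is true, one
+        * extra audio AU is consumed and one frame duration is subtracted from iAoffset,
+        * leaving only a 0.2-frame residual desynchronization */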
+        if( ( pC->pC1->iAoffset
+            - (M4OSA_Int32)(pC->pC1->iVoffset *pC->pC1->scale_audio + 0.5))
+        > pC->ewc.iSilenceFrameDuration )
+        {
+            /**
+            * Advance one AMR frame */
+            err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+            if( M4OSA_ERR_IS_ERROR(err) )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intSwitchToNextClip:\
+                    M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
+                    err);
+                return err;
+            }
+            /**
+            * Update audio offset accordingly*/
+            pC->pC1->iAoffset -= pC->ewc.iSilenceFrameDuration;
+        }
+    }
+
+    /**
+    * Init starting state for this clip processing */
+    if( M4SYS_kMP3 == pC->ewc.AudioStreamType )
+    {
+        /**
+        * In the MP3 case we use a special audio state */
+        pC->State = M4VSS3GPP_kEditState_MP3_JUMP;
+    }
+    else
+    {
+        /**
+        * We start with the video processing */
+        pC->State = M4VSS3GPP_kEditState_VIDEO;
+
+        if( pC->Vstate != M4VSS3GPP_kEditVideoState_TRANSITION )
+        {
+            /* if not a transition then reset previous video state */
+            pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+
+            if( pC->bIsMMS == M4OSA_FALSE ) /* RC */
+            {
+                /* There may be an encoder to destroy */
+                err = M4VSS3GPP_intDestroyVideoEncoder(pC);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intSwitchToNextClip:\
+                        M4VSS3GPP_editDestroyVideoEncoder() returns 0x%x!",
+                        err);
+                    return err;
+                }
+            }
+        }
+    }
+
+    /**
+    * Return with the switch-clip warning */
+    M4OSA_TRACE3_0("M4VSS3GPP_intSwitchToNextClip(): returning M4VSS3GPP_WAR_SWITCH_CLIP");
+    /* RC: to know when a file has been processed */
+    return M4VSS3GPP_WAR_SWITCH_CLIP;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo()
+ * @brief    Handle reaching the end of a clip's video track
+ * @note    If the current clip has audio, process it; otherwise switch to the next clip
+ * @param   pC            (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+
+    /**
+    * Video is done for this clip, now we do the audio */
+    if( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType )
+    {
+        pC->State = M4VSS3GPP_kEditState_AUDIO;
+    }
+    else
+    {
+        /**
+        * Clip done, do the next one */
+        err = M4VSS3GPP_intSwitchToNextClip(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intReachedEndOfVideo: M4VSS3GPP_intSwitchToNextClip() returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intReachedEndOfVideo(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio()
+ * @brief    Handle reaching the end of a clip's audio track
+ * @param   pC            (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+
+    /**
+    * Clip done, do the next one */
+    err = M4VSS3GPP_intSwitchToNextClip(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intReachedEndOfAudio: M4VSS3GPP_intSwitchToNextClip() returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Start with the video */
+    if( M4SYS_kVideoUnknown != pC->ewc.VideoStreamType )
+    {
+        pC->State = M4VSS3GPP_kEditState_VIDEO;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intReachedEndOfAudio(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intOpenClip()
+ * @brief    Open next clip
+ * @param   pC            (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intOpenClip( M4VSS3GPP_InternalEditContext *pC,
+                                M4VSS3GPP_ClipContext ** hClip,
+                                M4VSS3GPP_ClipSettings *pClipSettings )
+{
+    M4OSA_ERR err;
+    M4VSS3GPP_ClipContext *pClip; /**< shortcut */
+    M4VIDEOEDITING_ClipProperties *pClipProperties;
+    M4OSA_Int32 iCts;
+    M4OSA_UInt32 i;
+
+    M4OSA_TRACE2_1("M4VSS3GPP_intOpenClip: \"%s\"",
+        (M4OSA_Char *)pClipSettings->pFile);
+
+    err = M4VSS3GPP_intClipInit(hClip, pC->pOsaFileReadPtr);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipInit() returns 0x%x!",
+            err);
+
+        if( *hClip != M4OSA_NULL )
+        {
+            M4VSS3GPP_intClipCleanUp(*hClip);
+        }
+        return err;
+    }
+
+    /**
+    * Set shortcut */
+    pClip = *hClip;
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+    /* replay recorded external decoder registrations on the clip */
+
+    for ( i = 0; i < M4VD_kVideoType_NB; i++ )
+    {
+        if( pC->registeredExternalDecs[i].registered )
+        {
+            err = M4VSS3GPP_intClipRegisterExternalVideoDecoder(pClip, i,
+                pC->registeredExternalDecs[i].pDecoderInterface,
+                pC->registeredExternalDecs[i].pUserData);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intOpenClip:\
+                    M4VSS3GPP_intClipRegisterExternalVideoDecoder() returns 0x%x!",
+                    err);
+                M4VSS3GPP_intClipCleanUp(pClip);
+                return err;
+            }
+        }
+    }
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+    M4OSA_TRACE3_1("M4VSS3GPP_intOpenClip: pClip->ShellAPI = 0x%x",
+        &pClip->ShellAPI);
+    err = M4VSS3GPP_intSubscribeExternalCodecs((M4VSS3GPP_EditContext *)pC,
+        (M4OSA_Context) &pClip->ShellAPI);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intOpenClip: M4VSS3GPP_intSubscribeExternalCodecs returned err 0x%x",
+            err);
+    }
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_intOpenClip: M4VSS3GPP_intSubscribeExternalCodecs returned 0x%x",
+        err);
+#endif /* M4VSS_SUPPORT_OMX_CODECS */
+
+    err = M4VSS3GPP_intClipOpen(pClip, pClipSettings, M4OSA_FALSE, M4OSA_FALSE,
+        M4OSA_FALSE);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipOpen() returns 0x%x!",
+            err);
+        M4VSS3GPP_intClipCleanUp(pClip);
+        *hClip = M4OSA_NULL;
+        return err;
+    }
+
+    pClipProperties = &pClip->pSettings->ClipProperties;
+
+    /**
+    * Copy common 'silence frame stuff' to ClipContext */
+    pClip->uiSilencePcmSize = pC->ewc.uiSilencePcmSize;
+    pClip->pSilenceFrameData = pC->ewc.pSilenceFrameData;
+    pClip->uiSilenceFrameSize = pC->ewc.uiSilenceFrameSize;
+    pClip->iSilenceFrameDuration = pC->ewc.iSilenceFrameDuration;
+    pClip->scale_audio = pC->ewc.scale_audio;
+
+    pClip->iAudioFrameCts = -pClip->iSilenceFrameDuration; /* Reset time */
+
+    /**
+    * If the audio track is not compatible with the output audio format,
+    * we remove it, so it will be replaced by silence */
+    if( M4OSA_FALSE == pClipProperties->bAudioIsCompatibleWithMasterClip )
+    {
+        M4VSS3GPP_intClipDeleteAudioTrack(pClip);
+    }
+
+    /**
+    * Actual begin cut */
+    if( 0 == pClipSettings->uiBeginCutTime )
+    {
+        pClip->iVoffset = 0;
+        pClip->iAoffset = 0;
+        pClip->iActualVideoBeginCut = 0;
+        pClip->iActualAudioBeginCut = 0;
+    }
+    else
+    {
+        if( M4SYS_kVideoUnknown != pC->ewc.VideoStreamType )
+        {
+            /**
+            * Jump the video to the target begin cut to get the actual begin cut value */
+            pClip->iActualVideoBeginCut =
+                (M4OSA_Int32)pClipSettings->uiBeginCutTime;
+            iCts = pClip->iActualVideoBeginCut;
+
+            err = pClip->ShellAPI.m_pReader->m_pFctJump(pClip->pReaderContext,
+                (M4_StreamHandler *)pClip->pVideoStream, &iCts);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intOpenClip: m_pFctJump(V) returns 0x%x!", err);
+                return err;
+            }
+
+            /**
+            * Update clip offset with the video begin cut */
+            pClip->iVoffset = -pClip->iActualVideoBeginCut;
+        }
+
+        if( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType )
+        {
+            /**
+            * Jump the audio to the actual video begin cut */
+            if( M4VIDEOEDITING_kMP3 != pClipProperties->AudioStreamType )
+            {
+                pClip->iActualAudioBeginCut = pClip->iActualVideoBeginCut;
+                iCts = (M4OSA_Int32)(pClip->iActualAudioBeginCut
+                    * pClip->scale_audio + 0.5);
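+                /**
+                * Illustrative example: scale_audio converts milliseconds to audio timescale
+                * ticks (cts / scale_audio gives ms elsewhere in this file), so, assuming an
+                * 8 kHz track with scale_audio = 8, a 1500 ms begin cut yields iCts = 12000,
+                * the +0.5 rounding to the nearest tick */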
+
+                err = M4VSS3GPP_intClipJumpAudioAt(pClip, &iCts);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipJumpAudioAt(A) returns 0x%x!",
+                        err);
+                    return err;
+                }
+                /**
+                * Update clip offset with the audio begin cut */
+                pClip->iAoffset = -iCts;
+            }
+            else
+            {
+                /**
+                * For MP3 the jump is not performed because, with VBR,
+                * it might not be accurate enough */
+                pClip->iActualAudioBeginCut =
+                    (M4OSA_Int32)pClipSettings->uiBeginCutTime;
+            }
+        }
+    }
+
+    if( M4SYS_kVideoUnknown != pC->ewc.VideoStreamType )
+    {
+        /**
+        * Read the first Video AU of the clip */
+        err = pClip->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+            pClip->pReaderContext,
+            (M4_StreamHandler *)pClip->pVideoStream, &pClip->VideoAU);
+
+        if( M4WAR_NO_MORE_AU == err )
+        {
+            /**
+            * If we (already!) reached the end of the clip, we filter the error.
+            * It will be handled correctly at the first step. */
+            err = M4NO_ERROR;
+        }
+        else if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intOpenClip: m_pReaderDataIt->m_pFctGetNextAu() returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * The video is currently in reading mode */
+        pClip->Vstatus = M4VSS3GPP_kClipStatus_READ;
+    }
+
+    if( ( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType)
+        && (M4VIDEOEDITING_kMP3 != pClipProperties->AudioStreamType) )
+    {
+        /**
+        * Read the first Audio AU of the clip */
+        err = M4VSS3GPP_intClipReadNextAudioFrame(pClip);
+
+        if( M4OSA_ERR_IS_ERROR(err) )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * The audio is currently in reading mode */
+        pClip->Astatus = M4VSS3GPP_kClipStatus_READ;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intOpenClip(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR  M4VSS3GPP_intComputeOutputAverageVideoBitrate()
+ * @brief    Average bitrate of the output file, computed from input bitrates,
+ *          durations, transitions and cuts.
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intComputeOutputAverageVideoBitrate(
+    M4VSS3GPP_InternalEditContext *pC )
+{
+    M4VSS3GPP_ClipSettings *pCS_0, *pCS_1, *pCS_2;
+    M4VSS3GPP_TransitionSettings *pT0, *pT2;
+    M4OSA_Int32 i;
+
+    M4OSA_UInt32 t0_duration, t2_duration;
+    M4OSA_UInt32 t0_bitrate, t2_bitrate;
+    M4OSA_UInt32 c1_duration;
+
+    M4OSA_UInt32 total_duration;
+    M4OSA_UInt32 total_bitsum;
+
+    total_duration = 0;
+    total_bitsum = 0;
+
+    /* Loop on the number of clips */
+    for ( i = 0; i < pC->uiClipNumber; i++ )
+    {
+        pCS_1 = &pC->pClipList[i];
+
+        t0_duration = 0;
+        t0_bitrate = pCS_1->ClipProperties.uiVideoBitrate;
+        t2_duration = 0;
+        t2_bitrate = pCS_1->ClipProperties.uiVideoBitrate;
+
+        /* Transition with the previous clip */
+        if( i > 0 )
+        {
+            pCS_0 = &pC->pClipList[i - 1];
+            pT0 = &pC->pTransitionList[i - 1];
+
+            if( pT0->VideoTransitionType
+                != M4VSS3GPP_kVideoTransitionType_None )
+            {
+                t0_duration = pT0->uiTransitionDuration;
+
+                if( pCS_0->ClipProperties.uiVideoBitrate > t0_bitrate )
+                {
+                    t0_bitrate = pCS_0->ClipProperties.uiVideoBitrate;
+                }
+            }
+        }
+
+        /* Transition with the next clip */
+        if( i < pC->uiClipNumber - 1 )
+        {
+            pCS_2 = &pC->pClipList[i + 1];
+            pT2 = &pC->pTransitionList[i];
+
+            if( pT2->VideoTransitionType
+                != M4VSS3GPP_kVideoTransitionType_None )
+            {
+                t2_duration = pT2->uiTransitionDuration;
+
+                if( pCS_2->ClipProperties.uiVideoBitrate > t2_bitrate )
+                {
+                    t2_bitrate = pCS_2->ClipProperties.uiVideoBitrate;
+                }
+            }
+        }
+
+        /* Check for cut times */
+        if( pCS_1->uiEndCutTime > 0 )
+            c1_duration = pCS_1->uiEndCutTime;
+        else
+            c1_duration = pCS_1->ClipProperties.uiClipVideoDuration;
+
+        if( pCS_1->uiBeginCutTime > 0 )
+            c1_duration -= pCS_1->uiBeginCutTime;
+
+        c1_duration -= t0_duration + t2_duration;
+
+        /* Compute bitsum and duration */
+        total_duration += c1_duration + t0_duration / 2 + t2_duration / 2;
+
+        total_bitsum +=
+            c1_duration * (pCS_1->ClipProperties.uiVideoBitrate / 1000)
+            + (t0_bitrate / 1000) * t0_duration / 2
+            + (t2_bitrate / 1000) * t2_duration / 2;
+    }
+
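+    /* Worked example (illustrative numbers): two clips of 10 s each after cuts, encoded
+     * at 400 and 800 kbps, joined by a 2 s cross-fade. Each clip contributes 8 s at its
+     * own bitrate plus half of the transition at the higher bitrate (800 kbps), giving
+     * (8*400 + 1*800 + 8*800 + 1*800) kbit over 18 s, i.e. an average of about 622 kbps. */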
+    pC->ewc.uiVideoBitrate = ( total_bitsum / total_duration) * 1000;
+}
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editRegisterExternalCodec(M4VSS3GPP_EditContext pContext,
+ *                                               M4VSS3GPP_codecType   codecType,
+ *                                               M4OSA_Context pCodecInterface,
+ *                                               M4OSA_Void* pUserData)
+ * @brief    Registers an external Video/Audio codec with VSS3GPP
+ * @note This differs significantly from the other external codec registration API,
+ *      in order to cope with the specific requirements of the OMX codec implementation.
+ *
+ * @param  pContext           (IN) VSS3GPP context
+ * @param  codecType        (IN) Type of codec (MPEG4 ...)
+ * @param  pCodecInterface  (IN) Codec interface
+ * @param  pUserData          (IN) Pointer on a user data to give to external codec
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:       VSS3GPP is not in an appropriate state for
+ *                             this function to be called
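+ *
+ * Typical usage (sketch; 'myAvcDecoderInterface' and 'pAppData' are application-supplied
+ * placeholders, not defined by this library):
+ *      M4VSS3GPP_editRegisterExternalCodec(pContext, M4VSS3GPP_kVideoDecH264,
+ *          (M4OSA_Context)&myAvcDecoderInterface, pAppData);
+ *      M4VSS3GPP_editSubscribeExternalCodecs(pContext);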
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4VSS3GPP_editRegisterExternalCodec( M4VSS3GPP_EditContext pContext,
+                                              M4VSS3GPP_codecType codecType,
+                                              M4OSA_Context pCodecInterface,
+                                              M4OSA_Void *pUserData )
+{
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    if( ( M4OSA_NULL == pContext) || (M4OSA_NULL == pCodecInterface) )
+    {
+        M4OSA_TRACE1_2(
+            "M4VSS3GPP_editRegisterExternalCodec: NULL input parameter; pContext=0x%x,\
+            pCodecInterface=0x%x",
+            pContext, pCodecInterface);
+        return M4ERR_PARAMETER;
+    }
+
+    if( codecType >= M4VSS3GPP_kCodecType_NB )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editRegisterExternalCodec: invalid codec Type; codecType=0x%x",
+            codecType);
+        return M4ERR_PARAMETER;
+    }
+
+    pC->m_codecInterface[codecType] = pCodecInterface;
+    pC->pOMXUserData = pUserData;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_editRegisterExternalCodec: pC->m_codecInterface[%d] = 0x%x",
+        codecType, pCodecInterface);
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_editRegisterExternalCodec: pC->pOMXUserDatat = 0x%x",
+        pUserData);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editSubscribeExternalCodecs(M4VSS3GPP_EditContext    pContext)
+ * @brief    Subscribes to the previously registered external Video/Audio codecs
+ * @note This differs significantly from the other external codec registration API,
+ *       in order to cope with the specific requirements of the OMX codec implementation.
+ *
+ * @param  pContext           (IN) VSS3GPP context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:       VSS3GPP is not in an appropriate state for
+ *                             this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editSubscribeExternalCodecs(
+    M4VSS3GPP_EditContext pContext )
+{
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    if( M4OSA_NULL == pContext )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editSubscribeExternalCodecs: NULL input parameter; pContext=0x%x",
+            pContext);
+        return M4ERR_PARAMETER;
+    }
+
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_editSubscribeExternalCodecs: &pC->ShellAPI = 0x%x",
+        &pC->ShellAPI);
+    err = M4VSS3GPP_intSubscribeExternalCodecs(pContext,
+        (M4OSA_Context) &pC->ShellAPI);
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_editSubscribeExternalCodecs:\
+        M4VSS3GPP_intSubscribeExternalCodecs returns 0x%x",
+        err);
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intSubscribeExternalCodecs(M4VSS3GPP_EditContext    pContext,
+ *                                                 M4OSA_Context pShellCtxt)
+ * @brief    Subscribes to the previously registered external Video/Audio codecs
+ * @note This differs significantly from the other external codec registration API,
+ *       in order to cope with the specific requirements of the OMX codec implementation.
+ *
+ * @param  pContext           (IN) VSS3GPP context
+ * @param  pShellCtxt        (IN) Media and codec shell context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:       VSS3GPP is not in an appropriate state for
+ *                             this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intSubscribeExternalCodecs( M4VSS3GPP_EditContext pContext,
+                                               M4OSA_Context pShellCtxt )
+{
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    M4VSS3GPP_MediaAndCodecCtxt *pShellContext =
+        (M4VSS3GPP_MediaAndCodecCtxt *)pShellCtxt;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    if( ( M4OSA_NULL == pContext) || (M4OSA_NULL == pShellContext) )
+    {
+        M4OSA_TRACE1_2(
+            "M4VSS3GPP_intSubscribeExternalCodecs: NULL input parameter; pContext=0x%x,\
+            pShellContext=0x%x",
+            pContext, pShellContext);
+        return M4ERR_PARAMETER;
+    }
+
+    M4OSA_TRACE3_1("M4VSS3GPP_intSubscribeExternalCodecs: pShellContext=0x%x",
+        pShellContext);
+
+#ifdef M4VSS_SUPPORT_VIDEC_3GP
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kVideoDecMPEG4] )
+    {
+        err = M4VSS3GPP_registerVideoDecoder(pShellContext,
+            M4DECODER_kVideoTypeMPEG4, (M4DECODER_VideoInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoDecMPEG4]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerVideoDecoder(Mpeg4) returned err 0x%x",
+                err);
+            return err;
+        }
+        /** Provide the application user data back to the interface functions. **
+        **/
+        pShellContext->m_pVideoDecoderUserDataTable[M4DECODER_kVideoTypeMPEG4] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs:\
+             M4VSS3GPP_registerVideoDecoder(Mpeg4) OK: 0x%x",
+            (M4DECODER_VideoInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoDecMPEG4]);
+    }
+
+#endif /* M4VSS_SUPPORT_VIDEC_3GP */
+
+#ifdef M4VSS_SUPPORT_VIDEO_AVC
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kVideoDecH264] )
+    {
+        err = M4VSS3GPP_registerVideoDecoder(pShellContext,
+            M4DECODER_kVideoTypeAVC, (M4DECODER_VideoInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoDecH264]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerVideoDecoder(AVC) returned err 0x%x",
+                err);
+            return err;
+        }
+        /** Provide the application user data back to the interface functions. **
+        **/
+        pShellContext->m_pVideoDecoderUserDataTable[M4DECODER_kVideoTypeAVC] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs: M4VSS3GPP_registerVideoDecoder(H264) OK: 0x%x",
+            (M4DECODER_VideoInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoDecH264]);
+    }
+
+#endif /* M4VSS_SUPPORT_VIDEO_AVC*/
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kVideoEncMPEG4] )
+    {
+        err = M4VSS3GPP_registerVideoEncoder(pShellContext, M4ENCODER_kMPEG4,
+            (M4ENCODER_GlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoEncMPEG4]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerVideoEncoder(Mpeg4) returned err 0x%x",
+                err);
+            return err;
+        }
+        /** Provide the application user data back to the interface functions. **
+        **/
+        pShellContext->pVideoEncoderUserDataTable[M4ENCODER_kMPEG4] =
+            pC->pOMXUserData;
+        pShellContext->pVideoEncoderExternalAPITable[M4ENCODER_kMPEG4] =
+            pC->m_codecInterface[M4VSS3GPP_kVideoEncMPEG4];
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs:\
+            M4VSS3GPP_registerVideoEncoder(Mpeg4) OK: 0x%x",
+            (M4ENCODER_GlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoEncMPEG4]);
+    }
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kVideoEncH263] )
+    {
+        err = M4VSS3GPP_registerVideoEncoder(pShellContext, M4ENCODER_kH263,
+            (M4ENCODER_GlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoEncH263]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerVideoEncoder(H263) returned err 0x%x",
+                err);
+            return err;
+        }
+        /** Provide the application user data back to the interface functions. **
+        **/
+        pShellContext->pVideoEncoderUserDataTable[M4ENCODER_kH263] =
+            pC->pOMXUserData;
+        pShellContext->pVideoEncoderExternalAPITable[M4ENCODER_kH263] =
+            pC->m_codecInterface[M4VSS3GPP_kVideoEncH263];
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs: M4VSS3GPP_registerVideoEncoder(H263) OK: 0x%x",
+            (M4ENCODER_GlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoEncH263]);
+    }
+
+#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AVC
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kVideoEncH264] )
+    {
+        err = M4VSS3GPP_registerVideoEncoder(pShellContext, M4ENCODER_kH264,
+            (M4ENCODER_GlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoEncH264]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerVideoEncoder(H264) returned err 0x%x",
+                err);
+            return err;
+        }
+        /** Provide the application user data back to the interface functions. **
+        **/
+        pShellContext->pVideoEncoderUserDataTable[M4ENCODER_kH264] =
+            pC->pOMXUserData;
+        pShellContext->pVideoEncoderExternalAPITable[M4ENCODER_kH264] =
+            pC->m_codecInterface[M4VSS3GPP_kVideoEncH264];
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs: M4VSS3GPP_registerVideoEncoder(H264) OK: 0x%x",
+            (M4ENCODER_GlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoEncH264]);
+    }
+
+#endif /* M4VSS_SUPPORT_ENCODER_AVC */
+
+#ifdef M4VSS_SUPPORT_AUDEC_AAC
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kAudioDecAAC] )
+    {
+        err = M4VSS3GPP_registerAudioDecoder(pShellContext, M4AD_kTypeAAC,
+            (M4AD_Interface *)pC->m_codecInterface[M4VSS3GPP_kAudioDecAAC]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerAudioDecoder(AAC) returned err 0x%x",
+                err);
+            return err;
+        }
+        pShellContext->pAudioDecoderUserDataTable[M4AD_kTypeAAC] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs: M4VSS3GPP_registerAudioDecoder(AAC) OK: 0x%x",
+            (M4AD_Interface *)pC->m_codecInterface[M4VSS3GPP_kAudioDecAAC]);
+    }
+
+#endif /* M4VSS_SUPPORT_AUDEC_AAC*/
+
+#ifdef M4VSS_SUPPORT_AUDEC_AMRNB
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kAudioDecAMRNB] )
+    {
+        err = M4VSS3GPP_registerAudioDecoder(pShellContext, M4AD_kTypeAMRNB,
+            (M4AD_Interface
+            *)pC->m_codecInterface[M4VSS3GPP_kAudioDecAMRNB]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerAudioDecoder(AMRNB) returned err 0x%x",
+                err);
+            return err;
+        }
+        pShellContext->pAudioDecoderUserDataTable[M4AD_kTypeAMRNB] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs:\
+            M4VSS3GPP_registerAudioDecoder(AMRNB) OK: 0x%x",
+            (M4AD_Interface *)pC->m_codecInterface[M4VSS3GPP_kAudioDecAMRNB]);
+    }
+
+#endif /* M4VSS_SUPPORT_AUDEC_AMRNB*/
+
+#ifdef M4VSS_SUPPORT_AUDEC_MP3
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kAudioDecMP3] )
+    {
+        err = M4VSS3GPP_registerAudioDecoder(pShellContext, M4AD_kTypeMP3,
+            (M4AD_Interface *)pC->m_codecInterface[M4VSS3GPP_kAudioDecMP3]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerAudioDecoder(MP3) returned err 0x%x",
+                err);
+            return err;
+        }
+        pShellContext->pAudioDecoderUserDataTable[M4AD_kTypeMP3] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs: M4VSS3GPP_registerAudioDecoder(MP3) OK: 0x%x",
+            (M4AD_Interface *)pC->m_codecInterface[M4VSS3GPP_kAudioDecMP3]);
+    }
+
+#endif /* M4VSS_SUPPORT_AUDEC_MP3*/
+
+#ifdef M4VSS_SUPPORT_ENCODER_AAC
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kAudioEncAAC] )
+    {
+        err = M4VSS3GPP_registerAudioEncoder(pShellContext, M4ENCODER_kAAC,
+            (M4ENCODER_AudioGlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kAudioEncAAC]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerAudioEncoder(AAC) returned err 0x%x",
+                err);
+            return err;
+        }
+        pShellContext->pAudioEncoderUserDataTable[M4ENCODER_kAAC] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs: M4VSS3GPP_registerAudioEncoder(AAC) OK: 0x%x",
+            (M4ENCODER_AudioGlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kAudioEncAAC]);
+    }
+
+#endif /* M4VSS_SUPPORT_ENCODER_AAC*/
+
+#ifdef M4VSS_SUPPORT_ENCODER_AMR
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kAudioEncAMRNB] )
+    {
+        err = M4VSS3GPP_registerAudioEncoder(pShellContext, M4ENCODER_kAMRNB,
+            (M4ENCODER_AudioGlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kAudioEncAMRNB]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerAudioEncoder(AMRNB) returned err 0x%x",
+                err);
+            return err;
+        }
+        pShellContext->pAudioEncoderUserDataTable[M4ENCODER_kAMRNB] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs:\
+            M4VSS3GPP_registerAudioEncoder(AMRNB) OK: 0x%x",
+            (M4ENCODER_AudioGlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kAudioEncAMRNB]);
+    }
+
+#endif /* M4VSS_SUPPORT_ENCODER_AMR*/
+
+    if( M4OSA_NULL != pC->pOMXUserData )
+    {
+        /* If external OMX codecs have already been registered with the VSS3GPP internal
+        * context and are being subscribed to by the application, set this boolean to
+        * prevent the external codec interfaces from being unregistered (i.e. reset)
+        * during the VSS3GPP step function.
+        * External OMX codecs are registered only once by the application, so the
+        * pointers should remain valid throughout the life cycle of the application. */
+
+        pShellContext->bAllowFreeingOMXCodecInterface = M4OSA_FALSE;
+    }
+
+    return M4NO_ERROR;
+}
+#endif /* M4VSS_SUPPORT_OMX_CODECS */
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_EditAudio.c b/libvideoeditor/vss/src/M4VSS3GPP_EditAudio.c
new file mode 100755
index 0000000..b118244
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_EditAudio.c
@@ -0,0 +1,2020 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_EditAudio.c
+ * @brief    Video Studio Service 3GPP edit API implementation.
+ * @note
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our header */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /**< OSAL memory management */
+#include "M4OSA_Debug.h"  /**< OSAL debug management */
+
+#define PWR_FXP_FRACT_MAX            (32768)
+
+/************************************************************************/
+/* Static local functions                                               */
+/************************************************************************/
+static M4OSA_ERR M4VSS3GPP_intCheckAudioMode( M4VSS3GPP_InternalEditContext
+                                             *pC );
+static M4OSA_Void M4VSS3GPP_intCheckAudioEffects( M4VSS3GPP_InternalEditContext
+                                                 *pC, M4OSA_UInt8 uiClipNumber );
+static M4OSA_ERR M4VSS3GPP_intApplyAudioEffect( M4VSS3GPP_InternalEditContext
+                                               *pC, M4OSA_UInt8 uiClip1orClip2,
+                                               M4OSA_Int16 *pPCMdata,
+                                               M4OSA_UInt32 uiPCMsize );
+static M4OSA_ERR M4VSS3GPP_intAudioTransition( M4VSS3GPP_InternalEditContext
+                                              *pC, M4OSA_Int16 *pPCMdata1,
+                                              M4OSA_Int16 *pPCMdata2,
+                                              M4OSA_UInt32 uiPCMsize );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditJumpMP3()
+ * @brief    One step of jump processing for the MP3 clip.
+ * @note    In one step, several AUs are jumped
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intEditJumpMP3( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+    M4VSS3GPP_ClipContext *pClip = pC->pC1; /**< shortcut */
+    M4OSA_Int32 JumpCts;
+
+    JumpCts = pClip->iActualAudioBeginCut;
+
+    err = M4VSS3GPP_intClipJumpAudioAt(pClip, &JumpCts);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipJumpAudioAt(A) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    if( JumpCts >= pClip->iActualAudioBeginCut )
+    {
+        pC->State = M4VSS3GPP_kEditState_MP3;
+
+        /**
+        * Update clip offset with the audio begin cut */
+        pClip->iAoffset = -JumpCts;
+
+        /**
+        * The audio is currently in reading mode */
+        pClip->Astatus = M4VSS3GPP_kClipStatus_READ;
+    }
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepMP3()
+ * @brief    One step of audio processing for the MP3 clip
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intEditStepMP3( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+    M4VSS3GPP_ClipContext *pClip = pC->pC1; /**< shortcut */
+
+    /**
+    * Write the input AU directly to the output file */
+    err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
+        pClip->pAudioFramePtr, (M4OSA_UInt32)pClip->uiAudioFrameSize);
+
+    /**
+    * Read the next audio frame */
+    err = M4VSS3GPP_intClipReadNextAudioFrame(pClip);
+
+    if( M4OSA_ERR_IS_ERROR(err) )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intEditStepMP3: READ_WRITE:\
+            M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",    err);
+        return err;
+    }
+    else
+    {
+        /**
+        * Update current time (to=tc+T) */
+        pC->ewc.dATo =
+            ( pClip->iAudioFrameCts + pClip->iAoffset) / pClip->scale_audio;
+
+        if( (M4OSA_Int32)(pClip->iAudioFrameCts / pClip->scale_audio + 0.5)
+            >= pClip->iEndTime )
+        {
+            M4READER_Buffer mp3tagBuffer;
+
+            /**
+            * The duration is respected more accurately if both the first AU and the last AU
+            * lie beyond the cut time */
+            err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
+                pClip->pAudioFramePtr,
+                (M4OSA_UInt32)pClip->uiAudioFrameSize);
+
+            /* The ID3v1 tag is always at the end of the MP3 file, so we wait for the end
+            of the cutting process */
+            /* before writing the metadata to the output file */
+
+            /* Retrieve the data of the ID3v1 Tag */
+            err = pClip->ShellAPI.m_pReader->m_pFctGetOption(
+                pClip->pReaderContext, M4READER_kOptionID_Mp3Id3v1Tag,
+                (M4OSA_DataOption) &mp3tagBuffer);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intEditStepMP3: M4MP3R_getOption returns 0x%x",
+                    err);
+                return err;
+            }
+
+            /* Write the data of the ID3v1 Tag in the output file */
+            if( 0 != mp3tagBuffer.m_uiBufferSize )
+            {
+                err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
+                    (M4OSA_MemAddr8)mp3tagBuffer.m_pData, mp3tagBuffer.m_uiBufferSize);
+                /**
+                * Free before the error checking anyway */
+                M4OSA_free((M4OSA_MemAddr32)mp3tagBuffer.m_pData);
+
+                /**
+                * Error checking */
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepMP3:\
+                        pOsaFileWritPtr->writeData(ID3v1Tag) returns 0x%x",    err);
+                    return err;
+                }
+
+                mp3tagBuffer.m_uiBufferSize = 0;
+                mp3tagBuffer.m_pData = M4OSA_NULL;
+            }
+
+            /* The End Cut has been reached */
+            err = M4VSS3GPP_intReachedEndOfAudio(pC);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intEditStepMP3 : M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+
+        if( ( M4WAR_NO_MORE_AU == err) && (M4OSA_FALSE
+            == pC->bSupportSilence) ) /**< Reached end of clip */
+        {
+            err = M4VSS3GPP_intReachedEndOfAudio(
+                pC); /**< Clip done, do the next one */
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intEditStepMP3: READ_WRITE:\
+                    M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intEditStepMP3: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepAudio()
+ * @brief    One step of audio processing
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intEditStepAudio( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+
+    M4ENCODER_AudioBuffer pEncInBuffer;  /**< Encoder input buffer for api */
+    M4ENCODER_AudioBuffer pEncOutBuffer; /**< Encoder output buffer for api */
+    M4OSA_Time
+        frameTimeDelta; /**< Duration of the encoded (then written) data */
+    M4OSA_Bool bStopAudio;
+
+    /**
+    * Check if we reached end cut */
+    if( ( pC->ewc.dATo - pC->pC1->iAoffset / pC->pC1->scale_audio + 0.5)
+        >= pC->pC1->iEndTime )
+    {
+        /**
+        * Audio is done for this clip */
+        err = M4VSS3GPP_intReachedEndOfAudio(pC);
+
+        /* RC: to know when a file has been processed */
+        if( M4NO_ERROR != err && err != M4VSS3GPP_WAR_SWITCH_CLIP )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intEditStepAudio: M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
+                err);
+        }
+
+        return err;
+    }
+
+    /**
+    * Check Audio Mode, depending on the current output CTS */
+    err = M4VSS3GPP_intCheckAudioMode(
+        pC); /**< This function changes the pC->Astate variable! */
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intEditStepAudio: M4VSS3GPP_intCheckAudioMode returns 0x%x!",
+            err);
+        return err;
+    }
+
+    M4OSA_TRACE2_3("  AUDIO step : dATo = %f  state = %d  offset = %ld",
+        pC->ewc.dATo, pC->Astate, pC->pC1->iAoffset);
+
+    bStopAudio = M4OSA_FALSE;
+
+    switch( pC->Astate )
+    {
+            /* _________________ */
+            /*|                 |*/
+            /*| READ_WRITE MODE |*/
+            /*|_________________|*/
+
+        case M4VSS3GPP_kEditAudioState_READ_WRITE:
+            {
+                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio READ_WRITE");
+
+                /**
+                * Get the output AU to write into */
+                err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                    &pC->ewc.WriterAudioAU);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio:\
+                        READ_WRITE: pWriterDataFcts->pStartAU returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Compute output audio CTS */
+                pC->ewc.WriterAudioAU.CTS =
+                    pC->pC1->iAudioFrameCts + pC->pC1->iAoffset;
+
+                /**
+                * BZZZ bug fix (read-write case):
+                * Replace the first AMR AU of the stream with a silence AU.
+                * It removes an annoying "BZZZ" audio glitch.
+                * It is not needed if there is a begin cut.
+                * It is not needed for the first clip.
+                * Because of another bugfix (2005-03-24), the first AU written may be
+                * the second one, whose CTS is 20. Hence the cts<21 test.
+                * (the BZZZ effect occurs even with the second AU!) */
+                if( ( M4OSA_FALSE == pC->pC1->bFirstAuWritten)
+                    && (0 != pC->uiCurrentClip) && (pC->pC1->iAudioFrameCts
+                    < (pC->ewc.iSilenceFrameDuration + 1)) )
+                {
+                    /**
+                    * Copy a silence AU to the output */
+                    pC->ewc.WriterAudioAU.size = pC->ewc.uiSilenceFrameSize;
+                    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress,
+                        (M4OSA_MemAddr8)pC->ewc.pSilenceFrameData, pC->ewc.uiSilenceFrameSize);
+                    M4OSA_TRACE2_0("A #### silence AU");
+                }
+                else if( (M4OSA_UInt32)pC->pC1->uiAudioFrameSize
+                    < pC->ewc.uiAudioMaxAuSize )
+                {
+                    /**
+                    * Copy the input AU to the output AU */
+                    pC->ewc.WriterAudioAU.size =
+                        (M4OSA_UInt32)pC->pC1->uiAudioFrameSize;
+                    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress,
+                        pC->pC1->pAudioFramePtr, pC->ewc.WriterAudioAU.size);
+                }
+                else
+                {
+                    M4OSA_TRACE1_2(
+                        "M4VSS3GPP_intEditStepAudio: READ_WRITE: AU size greater than MaxAuSize \
+                        (%d>%d)! returning M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE",
+                        pC->pC1->uiAudioFrameSize, pC->ewc.uiAudioMaxAuSize);
+                    return M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE;
+                }
+
+                /**
+                * This boolean is only used to fix the BZZ bug... */
+                pC->pC1->bFirstAuWritten = M4OSA_TRUE;
+
+                M4OSA_TRACE2_2("B ---- write : cts  = %ld [ 0x%x ]",
+                    (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+                    pC->ewc.WriterAudioAU.size);
+
+                /**
+                * Write the AU */
+                err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                    &pC->ewc.WriterAudioAU);
+
+                if( M4NO_ERROR != err )
+                {
+                    /*11/12/2008 CR 3283 MMS use case for VideoArtist
+                    the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output file
+                    size is reached
+                    The editing is then finished,
+                     the warning M4VSS3GPP_WAR_EDITING_DONE is returned*/
+                    if( M4WAR_WRITER_STOP_REQ == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepAudio: File was cut to avoid oversize");
+                        return M4VSS3GPP_WAR_EDITING_DONE;
+                    }
+                    else
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio:\
+                            READ_WRITE: pWriterDataFcts->pProcessAU returns 0x%x!",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Audio is now in read mode (an "if(status!=READ)" check could go here,
+                * but it is omitted as an optimization) */
+                pC->pC1->Astatus = M4VSS3GPP_kClipStatus_READ;
+
+                /**
+                * Read the next audio frame */
+                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+                M4OSA_TRACE2_3("C .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                    pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                    pC->pC1->iAoffset / pC->pC1->scale_audio,
+                    pC->pC1->uiAudioFrameSize);
+
+                if( M4OSA_ERR_IS_ERROR(err) )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: READ_WRITE:\
+                        M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
+                        err);
+                    return err;
+                }
+                else
+                {
+                    /**
+                    * Update current time (to=tc+T) */
+                    pC->ewc.dATo = ( pC->pC1->iAudioFrameCts + pC->pC1->iAoffset)
+                        / pC->pC1->scale_audio;
+
+                    if( ( M4WAR_NO_MORE_AU == err)
+                        && (M4OSA_FALSE == pC->bSupportSilence) )
+                    {
+                        /**
+                        * If the output is neither AMR nor AAC (e.g. EVRC),
+                        * we can't write silence into it, so we simply end here. */
+                        bStopAudio = M4OSA_TRUE;
+                    }
+                }
+            }
+            break;
+
+            /* ____________________ */
+            /*|                    |*/
+            /*| DECODE_ENCODE MODE |*/
+            /*|____________________|*/
+
+        case M4VSS3GPP_kEditAudioState_DECODE_ENCODE:
+            {
+                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio DECODE_ENCODE");
+
+                /**
+                * Get the output AU to write into */
+                err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                    &pC->ewc.WriterAudioAU);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+                        pWriterDataFcts->pStartAU returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /**
+                * If we were reading the clip, we must jump a few AUs backward and decode/encode
+                (without writing the result) from that point. */
+                if( M4VSS3GPP_kClipStatus_READ == pC->pC1->Astatus )
+                {
+                    M4OSA_Int32 iTargetCts, iCurrentCts;
+
+                    /** Don't try to pre-decode if the clip is at its beginning. */
+                    if( 0 != pC->pC1->iAudioFrameCts )
+                    {
+                        /**
+                        * Jump a few AUs backward */
+                        iCurrentCts = pC->pC1->iAudioFrameCts;
+                        iTargetCts = iCurrentCts - M4VSS3GPP_NB_AU_PREFETCH
+                            * pC->ewc.iSilenceFrameDuration;
+
+                        if( iTargetCts < 0 )
+                        {
+                            iTargetCts = 0; /**< Sanity check */
+                        }
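+                        /* Sketch of the prefetch jump (illustration only, no new behaviour):
+                         * with N = M4VSS3GPP_NB_AU_PREFETCH and D = the silence-frame
+                         * duration, a clip currently at CTS c jumps back to max(0, c - N*D),
+                         * then decodes forward without writing until it reaches c again,
+                         * so the encoder is warmed up before real output resumes. */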
+
+                        err = M4VSS3GPP_intClipJumpAudioAt(pC->pC1, &iTargetCts);
+
+                        if( M4NO_ERROR != err )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch:\
+                                M4VSS3GPP_intClipJumpAudioAt returns 0x%x!",
+                                err);
+                            return err;
+                        }
+
+                        err = M4VSS3GPP_intClipReadNextAudioFrame(
+                            pC->pC1); /**< read AU where we jumped */
+
+                        M4OSA_TRACE2_3("D .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                            pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                            pC->pC1->iAoffset / pC->pC1->scale_audio,
+                            pC->pC1->uiAudioFrameSize);
+
+                        if( M4OSA_ERR_IS_ERROR(err) )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch:\
+                                M4VSS3GPP_intClipReadNextAudioFrame(a) returns 0x%x!",
+                                err);
+                            return err;
+                        }
+
+                        /**
+                        * Decode/encode up to the wanted position */
+                        while( pC->pC1->iAudioFrameCts < iCurrentCts )
+                        {
+                            err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
+
+                            if( M4NO_ERROR != err )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch: \
+                                    M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x!",
+                                    err);
+                                return err;
+                            }
+
+                            /* [Mono] or [Stereo interleaved] : all is in one buffer */
+                            pEncInBuffer.pTableBuffer[0] =
+                                pC->pC1->AudioDecBufferOut.m_dataAddress;
+                            pEncInBuffer.pTableBufferSize[0] =
+                                pC->pC1->AudioDecBufferOut.m_bufferSize;
+                            pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+                            pEncInBuffer.pTableBufferSize[1] = 0;
+
+                            /* Time in ms from data size, because it is PCM16 samples */
+                            frameTimeDelta =
+                                pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+                                / pC->ewc.uiNbChannels;
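+                            /* Illustrative arithmetic only (values are assumptions): a
+                             * 2048-byte stereo PCM16 buffer gives
+                             *   frameTimeDelta = 2048 / sizeof(short) / 2 = 512
+                             * samples per channel, i.e. the duration of this decoded frame
+                             * expressed in samples. */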
+
+                            /**
+                            * Prepare output buffer */
+                            pEncOutBuffer.pTableBuffer[0] =
+                                (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+                            pEncOutBuffer.pTableBufferSize[0] = 0;
+
+                            M4OSA_TRACE2_0("E **** pre-encode");
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+                            /* An OMX audio decoder may be used.
+                            * The OMX audio decoder shell does internal buffering and hence
+                            * does not return a PCM buffer for every decode step call.
+                            * So the PCM buffer size might be 0; in that case do not call
+                            * the encode step. */
+
+                            if( 0 != pEncInBuffer.pTableBufferSize[0] )
+                            {
+#endif
+                                /**
+                                * Encode the PCM audio */
+
+                                err =
+                                    pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+                                    pC->ewc.pAudioEncCtxt,
+                                    &pEncInBuffer, &pEncOutBuffer);
+
+                                if( ( M4NO_ERROR != err)
+                                    && (M4WAR_NO_MORE_AU != err) )
+                                {
+                                    M4OSA_TRACE1_1(
+                                        "M4VSS3GPP_intEditStepAudio():\
+                                        pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                                        err);
+                                    return err;
+                                }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                            } //if(0 != pEncInBuffer.pTableBufferSize[0])
+
+#endif
+
+                            err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+                            M4OSA_TRACE2_3(
+                                "F .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                                pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                                pC->pC1->iAoffset / pC->pC1->scale_audio,
+                                pC->pC1->uiAudioFrameSize);
+
+                            if( M4OSA_ERR_IS_ERROR(err) )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch:\
+                                    M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x!",
+                                    err);
+                                return err;
+                            }
+                        }
+                    }
+
+                    /**
+                    * Audio is now OK for decoding */
+                    pC->pC1->Astatus = M4VSS3GPP_kClipStatus_DECODE;
+                }
+
+                /**
+                * Decode the input audio */
+                err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+                        M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Apply the effect */
+                if( pC->iClip1ActiveEffect >= 0 )
+                {
+                    err = M4VSS3GPP_intApplyAudioEffect(pC, 1, (M4OSA_Int16
+                        *)pC->pC1->AudioDecBufferOut.m_dataAddress,
+                        pC->pC1->AudioDecBufferOut.m_bufferSize);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+                            M4VSS3GPP_intEndAudioEffect returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Compute output audio CTS */
+                pC->ewc.WriterAudioAU.CTS =
+                    pC->pC1->iAudioFrameCts + pC->pC1->iAoffset;
+
+                /* May happen with corrupted input files (which have stts entries that are
+                not a multiple of SilenceFrameDuration) */
+                if( pC->ewc.WriterAudioAU.CTS < 0 )
+                {
+                    pC->ewc.WriterAudioAU.CTS = 0;
+                }
+
+                /**
+                * BZZZ bug fix (decode-encode case):
+                * (Yes, the Bzz bug may also occur when we re-encode. It doesn't
+                *  occur at the decode before the encode, but at the playback!)
+                * Replace the first AMR AU of the encoded stream with a silence AU.
+                * It removes an annoying "BZZZ" audio glitch.
+                * It is not needed if there is a begin cut.
+                * It is not needed for the first clip.
+                * Because of another bugfix (2005-03-24), the first AU written may be
+                * the second one, whose CTS is 20. Hence the "cts < 21" test.
+                * (the BZZZ effect occurs even with the second AU!) */
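+                /* Illustration of the test below (not from the original source): with a
+                 * silence-frame duration D (pC->ewc.iSilenceFrameDuration), the condition
+                 * is effectively "cts < D + 1", so it matches an AU at CTS 0 (normal first
+                 * AU) as well as an AU at CTS D (the first AU left after the 2005-03-24
+                 * resync fix skips one frame), and either one is replaced by silence. */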
+                if( ( M4OSA_FALSE == pC->pC1->bFirstAuWritten)
+                    && (0 != pC->uiCurrentClip) && (pC->pC1->iAudioFrameCts
+                    < (pC->ewc.iSilenceFrameDuration + 1)) )
+                {
+                    /**
+                    * Copy a silence AMR AU to the output */
+                    pC->ewc.WriterAudioAU.size = pC->ewc.uiSilenceFrameSize;
+                    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress,
+                        (M4OSA_MemAddr8)pC->ewc.pSilenceFrameData, pC->ewc.uiSilenceFrameSize);
+                    M4OSA_TRACE2_0("G #### silence AU");
+                }
+                else
+                {
+                    /**
+                    * Encode the filtered PCM audio directly into the output AU */
+
+                    /* [Mono] or [Stereo interleaved] : all is in one buffer */
+                    pEncInBuffer.pTableBuffer[0] =
+                        pC->pC1->AudioDecBufferOut.m_dataAddress;
+                    pEncInBuffer.pTableBufferSize[0] =
+                        pC->pC1->AudioDecBufferOut.m_bufferSize;
+                    pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+                    pEncInBuffer.pTableBufferSize[1] = 0;
+
+                    /* Time in ms from data size, because it is PCM16 samples */
+                    frameTimeDelta =
+                        pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+                        / pC->ewc.uiNbChannels;
+
+                    /**
+                    * Prepare output buffer */
+                    pEncOutBuffer.pTableBuffer[0] =
+                        (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+                    pEncOutBuffer.pTableBufferSize[0] = 0;
+
+                    M4OSA_TRACE2_0("H ++++ encode AU");
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+                    /* An OMX audio decoder may be used.
+                    * The OMX audio decoder shell does internal buffering and hence
+                    * does not return a PCM buffer for every decode step call.
+                    * So the PCM buffer size might be 0; in that case do not call
+                    * the encode step. */
+
+                    if( 0 != pEncInBuffer.pTableBufferSize[0] )
+                    {
+
+#endif
+
+                        /**
+                        * Encode the PCM audio */
+
+                        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+                            pC->ewc.pAudioEncCtxt,
+                            &pEncInBuffer, &pEncOutBuffer);
+
+                        if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio():\
+                                pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                                err);
+                            return err;
+                        }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    }
+
+#endif
+
+                    /**
+                    * Set AU size */
+
+                pC->ewc.WriterAudioAU.size =
+                    pEncOutBuffer.pTableBufferSize[0]; /**< Get the size of encoded data */
+                }
+
+                /**
+                * This boolean is only used to fix the BZZ bug... */
+                pC->pC1->bFirstAuWritten = M4OSA_TRUE;
+
+                M4OSA_TRACE2_2("I ---- write : cts  = %ld [ 0x%x ]",
+                    (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+                    pC->ewc.WriterAudioAU.size);
+
+                /**
+                * Write the AU */
+                err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                    &pC->ewc.WriterAudioAU);
+
+                if( M4NO_ERROR != err )
+                {
+                    /* 11/12/2008 CR 3283: MMS use case for VideoArtist.
+                    The warning M4WAR_WRITER_STOP_REQ is returned when the targeted output file
+                    size is reached. The editing is then finished and the warning
+                    M4VSS3GPP_WAR_EDITING_DONE is returned. */
+                    if( M4WAR_WRITER_STOP_REQ == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepAudio: File was cut to avoid oversize");
+                        return M4VSS3GPP_WAR_EDITING_DONE;
+                    }
+                    else
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+                            pWriterDataFcts->pProcessAU returns 0x%x!",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Read the next audio frame */
+                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+                M4OSA_TRACE2_3("J .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                    pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                    pC->pC1->iAoffset / pC->pC1->scale_audio,
+                    pC->pC1->uiAudioFrameSize);
+
+                if( M4OSA_ERR_IS_ERROR(err) )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+                        M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
+                        err);
+                    return err;
+                }
+                else
+                {
+                    /**
+                    * Update current time (to=tc+T) */
+                    pC->ewc.dATo = ( pC->pC1->iAudioFrameCts + pC->pC1->iAoffset)
+                        / pC->pC1->scale_audio;
+
+                    if( ( M4WAR_NO_MORE_AU == err)
+                        && (M4OSA_FALSE == pC->bSupportSilence) )
+                    {
+                        /**
+                        * If the output is neither AMR nor AAC (e.g. EVRC),
+                        * we can't write silence into it, so we simply end here. */
+                        bStopAudio = M4OSA_TRUE;
+                    }
+                }
+            }
+            break;
+
+            /* _________________ */
+            /*|                 |*/
+            /*| TRANSITION MODE |*/
+            /*|_________________|*/
+
+        case M4VSS3GPP_kEditAudioState_TRANSITION:
+            {
+                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio TRANSITION");
+
+                /**
+                * Get the output AU to write into */
+                err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                    &pC->ewc.WriterAudioAU);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                        pWriterDataFcts->pStartAU returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /**
+                * If we were reading the clip, we must jump a few AUs backward and decode/encode
+                (without writing the result) from that point. */
+                if( M4VSS3GPP_kClipStatus_READ == pC->pC1->Astatus )
+                {
+                    M4OSA_Int32 iTargetCts, iCurrentCts;
+
+                    /** Don't try to pre-decode if the clip is at its beginning. */
+                    if( 0 != pC->pC1->iAudioFrameCts )
+                    {
+                        /**
+                        * Jump a few AUs backward */
+                        iCurrentCts = pC->pC1->iAudioFrameCts;
+                        iTargetCts = iCurrentCts - M4VSS3GPP_NB_AU_PREFETCH
+                            * pC->ewc.iSilenceFrameDuration;
+
+                        if( iTargetCts < 0 )
+                        {
+                            iTargetCts = 0; /**< Sanity check */
+                        }
+
+                        err = M4VSS3GPP_intClipJumpAudioAt(pC->pC1, &iTargetCts);
+
+                        if( M4NO_ERROR != err )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
+                                M4VSS3GPP_intClipJumpAudioAt returns 0x%x!",
+                                err);
+                            return err;
+                        }
+
+                        err = M4VSS3GPP_intClipReadNextAudioFrame(
+                            pC->pC1); /**< read AU where we jumped */
+
+                        M4OSA_TRACE2_3("K .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                            pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                            pC->pC1->iAoffset / pC->pC1->scale_audio,
+                            pC->pC1->uiAudioFrameSize);
+
+                        if( M4OSA_ERR_IS_ERROR(err) )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
+                                M4VSS3GPP_intClipReadNextAudioFrame(a) returns 0x%x!",
+                                err);
+                            return err;
+                        }
+
+                        /**
+                        * Decode/encode up to the wanted position */
+                        while( pC->pC1->iAudioFrameCts < iCurrentCts )
+                        {
+                            err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
+
+                            if( M4NO_ERROR != err )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
+                                    M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x!",
+                                    err);
+                                return err;
+                            }
+
+                            /* [Mono] or [Stereo interleaved] : all is in one buffer */
+                            pEncInBuffer.pTableBuffer[0] =
+                                pC->pC1->AudioDecBufferOut.m_dataAddress;
+                            pEncInBuffer.pTableBufferSize[0] =
+                                pC->pC1->AudioDecBufferOut.m_bufferSize;
+                            pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+                            pEncInBuffer.pTableBufferSize[1] = 0;
+
+                            /* Time in ms from data size, because it is PCM16 samples */
+                            frameTimeDelta =
+                                pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+                                / pC->ewc.uiNbChannels;
+
+                            /**
+                            * Prepare output buffer */
+                            pEncOutBuffer.pTableBuffer[0] =
+                                (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+                            pEncOutBuffer.pTableBufferSize[0] = 0;
+
+                            M4OSA_TRACE2_0("L **** pre-encode");
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+                            /* An OMX audio decoder may be used.
+                            * The OMX audio decoder shell does internal buffering and hence
+                            * does not return a PCM buffer for every decode step call.
+                            * So the PCM buffer size might be 0; in that case do not call
+                            * the encode step. */
+
+                            if( 0 != pEncInBuffer.pTableBufferSize[0] )
+                            {
+
+#endif
+                                /**
+                                * Encode the PCM audio */
+
+                                err =
+                                    pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+                                    pC->ewc.pAudioEncCtxt,
+                                    &pEncInBuffer, &pEncOutBuffer);
+
+                                if( ( M4NO_ERROR != err)
+                                    && (M4WAR_NO_MORE_AU != err) )
+                                {
+                                    M4OSA_TRACE1_1(
+                                        "M4VSS3GPP_intEditStepAudio():\
+                                        pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                                        err);
+                                    return err;
+                                }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                            }
+
+#endif
+
+                            err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+                            M4OSA_TRACE2_3(
+                                "M .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                                pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                                pC->pC1->iAoffset / pC->pC1->scale_audio,
+                                pC->pC1->uiAudioFrameSize);
+
+                            if( M4OSA_ERR_IS_ERROR(err) )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
+                                    M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x!",
+                                    err);
+                                return err;
+                            }
+                        }
+                    }
+
+                    /**
+                    * Audio is now OK for decoding */
+                    pC->pC1->Astatus = M4VSS3GPP_kClipStatus_DECODE;
+                }
+
+                /**
+                * Decode the first input audio */
+                err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                        M4VSS3GPP_intClipDecodeCurrentAudioFrame(C1) returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Decode the second input audio */
+                err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC2);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                        M4VSS3GPP_intClipDecodeCurrentAudioFrame(C2) returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Check both clips decoded the same amount of PCM samples */
+                if( pC->pC1->AudioDecBufferOut.m_bufferSize
+                    != pC->pC2->AudioDecBufferOut.m_bufferSize )
+                {
+                    M4OSA_TRACE1_2(
+                        "ERR : AudioTransition: both clips' AUs must have the same decoded\
+                        PCM size! pC1 size=0x%x, pC2 size = 0x%x",
+                        pC->pC1->AudioDecBufferOut.m_bufferSize,
+                        pC->pC2->AudioDecBufferOut.m_bufferSize);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+                    /* An OMX audio decoder may be used.
+                    * The OMX audio decoder shell does internal buffering and hence does not
+                    * return a PCM buffer for every decode step call.
+                    * So the PCM buffer sizes might be 0, or different for clip1 and clip2;
+                    * there is no need to return an error in this case. */
+
+                    M4OSA_TRACE1_2(
+                        "M4VSS3GPP_intEditStepAudio: pC1 AudBuff size=0x%x,\
+                         pC2 AudBuff size = 0x%x",
+                        pC->pC1->AudioDecBufferOut.m_bufferSize,
+                        pC->pC2->AudioDecBufferOut.m_bufferSize);
+
+#else
+
+                    return M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE;
+
+#endif // M4VSS_SUPPORT_OMX_CODECS
+
+                }
+
+                /**
+                * Apply the audio effect on clip1 */
+                if( pC->iClip1ActiveEffect >= 0 )
+                {
+                    err = M4VSS3GPP_intApplyAudioEffect(pC, 1, (M4OSA_Int16
+                        *)pC->pC1->AudioDecBufferOut.m_dataAddress,
+                        pC->pC1->AudioDecBufferOut.m_bufferSize);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                            M4VSS3GPP_intApplyAudioEffect(C1) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Apply the audio effect on clip2 */
+                if( pC->iClip2ActiveEffect >= 0 )
+                {
+                    err = M4VSS3GPP_intApplyAudioEffect(pC, 2, (M4OSA_Int16
+                        *)pC->pC2->AudioDecBufferOut.m_dataAddress,
+                        pC->pC2->AudioDecBufferOut.m_bufferSize);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                            M4VSS3GPP_intApplyAudioEffect(C2) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Apply the transition effect */
+                err = M4VSS3GPP_intAudioTransition(pC,
+                    (M4OSA_Int16 *)pC->pC1->AudioDecBufferOut.m_dataAddress,
+                    (M4OSA_Int16 *)pC->pC2->AudioDecBufferOut.m_dataAddress,
+                    pC->pC1->AudioDecBufferOut.m_bufferSize);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                        M4VSS3GPP_intAudioTransition returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /* [Mono] or [Stereo interleaved] : all is in one buffer */
+                pEncInBuffer.pTableBuffer[0] =
+                    pC->pC1->AudioDecBufferOut.m_dataAddress;
+                pEncInBuffer.pTableBufferSize[0] =
+                    pC->pC1->AudioDecBufferOut.m_bufferSize;
+                pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+                pEncInBuffer.pTableBufferSize[1] = 0;
+
+                /* Time in ms from data size, because it is PCM16 samples */
+                frameTimeDelta = pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+                    / pC->ewc.uiNbChannels;
+
+                /**
+                * Prepare output buffer */
+                pEncOutBuffer.pTableBuffer[0] =
+                    (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+                pEncOutBuffer.pTableBufferSize[0] = 0;
+
+                M4OSA_TRACE2_0("N **** blend AUs");
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+                /* An OMX audio decoder may be used.
+                * The OMX audio decoder shell does internal buffering and hence
+                * does not return a PCM buffer for every decode step call.
+                * So the PCM buffer size might be 0; in that case do not call
+                * the encode step. */
+
+                if( 0 != pEncInBuffer.pTableBufferSize[0] )
+                {
+
+#endif
+
+                    /**
+                    * Encode the PCM audio */
+
+                    err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+                        pC->ewc.pAudioEncCtxt, &pEncInBuffer, &pEncOutBuffer);
+
+                    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio():\
+                            pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                            err);
+                        return err;
+                    }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                }
+
+#endif
+
+                /**
+                * Set AU cts and size */
+
+                pC->ewc.WriterAudioAU.size =
+                    pEncOutBuffer.pTableBufferSize[0]; /**< Get the size of encoded data */
+                    pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
+
+                    M4OSA_TRACE2_2("O ---- write : cts  = %ld [ 0x%x ]",
+                        (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+                        pC->ewc.WriterAudioAU.size);
+
+                    /**
+                    * Write the AU */
+                    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+                        pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                        &pC->ewc.WriterAudioAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        /* 11/12/2008 CR 3283: MMS use case for VideoArtist.
+                        The warning M4WAR_WRITER_STOP_REQ is returned when the targeted
+                        output file size is reached. The editing is then finished and the
+                        warning M4VSS3GPP_WAR_EDITING_DONE is returned. */
+                        if( M4WAR_WRITER_STOP_REQ == err )
+                        {
+                            M4OSA_TRACE1_0(
+                                "M4VSS3GPP_intEditStepAudio: File was cut to avoid oversize");
+                            return M4VSS3GPP_WAR_EDITING_DONE;
+                        }
+                        else
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                                pWriterDataFcts->pProcessAU returns 0x%x!",
+                                err);
+                            return err;
+                        }
+                    }
+
+                    /**
+                    * Read the next audio frame */
+                    err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+                    M4OSA_TRACE2_3("P .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                        pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                        pC->pC1->iAoffset / pC->pC1->scale_audio,
+                        pC->pC1->uiAudioFrameSize);
+
+                    if( M4OSA_ERR_IS_ERROR(err) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                            M4VSS3GPP_intClipReadNextAudioFrame(C1) returns 0x%x!",
+                            err);
+                        return err;
+                    }
+                    else
+                    {
+                        M4OSA_ERR secondaryError;
+
+                        /**
+                        * Update current time (to=tc+T) */
+                        pC->ewc.dATo = ( pC->pC1->iAudioFrameCts + pC->pC1->iAoffset)
+                            / pC->pC1->scale_audio;
+
+                        /**
+                        * Read the next audio frame in the second clip */
+                        secondaryError = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC2);
+
+                        M4OSA_TRACE2_3("Q .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                            pC->pC2->iAudioFrameCts / pC->pC2->scale_audio,
+                            pC->pC2->iAoffset / pC->pC2->scale_audio,
+                            pC->pC2->uiAudioFrameSize);
+
+                        if( M4OSA_ERR_IS_ERROR(secondaryError) )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                                M4VSS3GPP_intClipReadNextAudioFrame(C2) returns 0x%x!",
+                                secondaryError);
+                            return secondaryError;
+                        }
+
+                        if( ( ( M4WAR_NO_MORE_AU == err)
+                            || (M4WAR_NO_MORE_AU == secondaryError))
+                            && (M4OSA_FALSE == pC->bSupportSilence) )
+                        {
+                            /**
+                            * If the output is neither AMR nor AAC (e.g. EVRC),
+                            * we can't write silence into it, so we simply end here. */
+                            bStopAudio = M4OSA_TRUE;
+                        }
+                    }
+            }
+            break;
+
+            /* ____________ */
+            /*|            |*/
+            /*| ERROR CASE |*/
+            /*|____________|*/
+
+        default:
+
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intEditStepAudio: invalid internal state (0x%x), \
+                returning M4VSS3GPP_ERR_INTERNAL_STATE",
+                pC->Astate);
+            return M4VSS3GPP_ERR_INTERNAL_STATE;
+    }
+
+    /**
+    * Check if we are forced to stop audio */
+    if( M4OSA_TRUE == bStopAudio )
+    {
+        /**
+        * Audio is done for this clip */
+        err = M4VSS3GPP_intReachedEndOfAudio(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intEditStepAudio: M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCheckAudioMode()
+ * @brief    Check which audio process mode we must use, depending on the output CTS.
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intCheckAudioMode( M4VSS3GPP_InternalEditContext
+                                             *pC )
+{
+    M4OSA_ERR err;
+    const M4OSA_Int32 TD = pC->pTransitionList[pC->
+        uiCurrentClip].uiTransitionDuration; /**< Transition duration */
+
+    const M4VSS3GPP_EditAudioState previousAstate = pC->Astate;
+
+    /**
+    * Check if Clip1 is on its begin cut, or in its begin effect or end effect zone */
+    M4VSS3GPP_intCheckAudioEffects(pC, 1);
+
+    /**
+    * Check if we are in the transition with next clip */
+    if( ( TD > 0) && ((M4OSA_Int32)(pC->ewc.dATo - pC->pC1->iAoffset
+        / pC->pC1->scale_audio + 0.5) >= (pC->pC1->iEndTime - TD)) )
+    {
+        /**
+        * We are in a transition */
+        pC->Astate = M4VSS3GPP_kEditAudioState_TRANSITION;
+        pC->bTransitionEffect = M4OSA_TRUE;
+
+        /**
+        * Do we enter the transition section ? */
+        if( M4VSS3GPP_kEditAudioState_TRANSITION != previousAstate )
+        {
+            /**
+            * Open second clip for transition, if not yet opened */
+            if( M4OSA_NULL == pC->pC2 )
+            {
+                err = M4VSS3GPP_intOpenClip(pC, &pC->pC2,
+                    &pC->pClipList[pC->uiCurrentClip + 1]);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intCheckAudioMode: M4VSS3GPP_intOpenClip() returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /**
+                * In case of short transition and bad luck (...), there may be no video AU
+                * in the transition. In that case, the second clip has not been opened.
+                * So we must update the video offset here. */
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                /**< Add current video output CTS to the clip offset */
+                pC->pC2->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts;
+            }
+
+            /**
+            * Add current audio output CTS to the clip offset
+            * (video offset has already been set when doing the video transition) */
+            pC->pC2->iAoffset +=
+                (M4OSA_UInt32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+            /**
+            * 2005-03-24: Bugfix for audio-video synchro:
+            * Each assembly may introduce up to one audio AU duration of desynchro,
+            * which becomes audible when many clips are assembled.
+            * This bugfix resynchronizes the audio track when the delta is higher
+            * than one audio AU duration:
+            * we step one AU forward in the second clip and adjust the audio offset accordingly. */
+            if( ( pC->pC2->iAoffset
+                - (M4OSA_Int32)(pC->pC2->iVoffset *pC->pC2->scale_audio + 0.5))
+                    > pC->ewc.iSilenceFrameDuration )
+            {
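+                /* Illustration (figures are hypothetical): if clip2's audio offset leads
+                 * its video offset (converted to the audio timescale) by more than one
+                 * silence-frame duration, e.g. a 200-unit lead with 160-unit AMR frames,
+                 * one audio AU is read and skipped here and iAoffset is reduced by one
+                 * frame duration, pulling audio back within one AU of the video. */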
+                /**
+                * Advance one AMR frame */
+                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC2);
+
+                M4OSA_TRACE2_3("Z .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                    pC->pC2->iAudioFrameCts / pC->pC2->scale_audio,
+                    pC->pC2->iAoffset / pC->pC2->scale_audio,
+                    pC->pC2->uiAudioFrameSize);
+
+                if( M4OSA_ERR_IS_ERROR(err) )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intCheckAudioMode:\
+                        M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
+                        err);
+                    return err;
+                }
+                /**
+                * Update audio offset accordingly*/
+                pC->pC2->iAoffset -= pC->ewc.iSilenceFrameDuration;
+            }
+        }
+
+        /**
+        * Check begin and end effects for clip2 */
+        M4VSS3GPP_intCheckAudioEffects(pC, 2);
+    }
+    else
+    {
+        /**
+        * We are not in a transition */
+        pC->bTransitionEffect = M4OSA_FALSE;
+
+        /**
+        * Check if current mode is Read/Write or Decode/Encode */
+        if( pC->iClip1ActiveEffect >= 0 )
+        {
+            pC->Astate = M4VSS3GPP_kEditAudioState_DECODE_ENCODE;
+        }
+        else
+        {
+            pC->Astate = M4VSS3GPP_kEditAudioState_READ_WRITE;
+        }
+    }
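+    /* Summary of the mode decision above (informative only):
+     *   inside the transition window  -> TRANSITION    (decode both clips, blend, encode)
+     *   an effect is active on clip1  -> DECODE_ENCODE (decode, apply effect, re-encode)
+     *   otherwise                     -> READ_WRITE    (copy the compressed AUs as-is) */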
+
+    /**
+    * Check if we create/destroy an encoder */
+    if( ( M4VSS3GPP_kEditAudioState_READ_WRITE == previousAstate)
+        && /**< read mode */
+        (M4VSS3GPP_kEditAudioState_READ_WRITE != pC->Astate) ) /**< encode mode */
+    {
+        M4OSA_UInt32 uiAudioBitrate;
+
+        /* Compute max bitrate depending on input files bitrates and transitions */
+        if( pC->Astate == M4VSS3GPP_kEditAudioState_TRANSITION )
+        {
+            /* Max of the two blended files */
+            if( pC->pC1->pSettings->ClipProperties.uiAudioBitrate
+                > pC->pC2->pSettings->ClipProperties.uiAudioBitrate )
+                uiAudioBitrate =
+                pC->pC1->pSettings->ClipProperties.uiAudioBitrate;
+            else
+                uiAudioBitrate =
+                pC->pC2->pSettings->ClipProperties.uiAudioBitrate;
+        }
+        else
+        {
+            /* Same as input file */
+            uiAudioBitrate = pC->pC1->pSettings->ClipProperties.uiAudioBitrate;
+        }
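+        /* Example (hypothetical bitrates): if clip1 reports 12200 bps and clip2 reports
+         * 24000 bps, a transition encoder is created at 24000 bps (the max of the two);
+         * outside a transition, clip1's bitrate is simply reused. */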
+
+        /**
+        * Create the encoder */
+        err = M4VSS3GPP_intCreateAudioEncoder(&pC->ewc, &pC->ShellAPI,
+            uiAudioBitrate);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCheckAudioMode: M4VSS3GPP_intResetAudioEncoder() returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intCheckAudioMode(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intCheckAudioEffects()
+ * @brief    Check which audio effect must be applied at the current time
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intCheckAudioEffects( M4VSS3GPP_InternalEditContext
+                                                 *pC, M4OSA_UInt8 uiClipNumber )
+{
+    M4OSA_UInt8 uiClipIndex;
+    M4OSA_UInt8 uiFxIndex;
+    M4VSS3GPP_ClipContext *pClip;
+    M4VSS3GPP_EffectSettings *pFx;
+    M4OSA_Int32 BC, EC;
+    M4OSA_Int8 *piClipActiveEffect;
+    M4OSA_Int32 t;
+
+    if( 1 == uiClipNumber )
+    {
+        uiClipIndex = pC->uiCurrentClip;
+        pClip = pC->pC1;
+        piClipActiveEffect = &(pC->iClip1ActiveEffect);
+    }
+    else /**< (2 == uiClipNumber) */
+    {
+        uiClipIndex = pC->uiCurrentClip + 1;
+        pClip = pC->pC2;
+        piClipActiveEffect = &(pC->iClip2ActiveEffect);
+    }
+
+    /**
+    * Shortcuts for code readability */
+    BC = pClip->iActualAudioBeginCut;
+    EC = pClip->iEndTime;
+
+    /**
+    * Change the absolute time to clip-related time.
+    * RC: t = (M4OSA_Int32)(pC->ewc.dATo - pClip->iAoffset/pClip->scale_audio + 0.5); */
+    t = (M4OSA_Int32)(pC->ewc.dATo /*- pClip->iAoffset/pClip->scale_audio*/
+        + 0.5); /**< rounding */
+
+    /**
+    * Default: no effect active */
+    *piClipActiveEffect = -1;
+
+    /**
+    * Check the three effects */
+    // RC    for (uiFxIndex=0; uiFxIndex<pC->pClipList[uiClipIndex].nbEffects; uiFxIndex++)
+    for ( uiFxIndex = 0; uiFxIndex < pC->nbEffects; uiFxIndex++ )
+    {
+        /** Shortcut, reverse order because of priority between effects
+        ( EndEffect always clean ) */
+        pFx = &(pC->pEffectsList[pC->nbEffects - 1 - uiFxIndex]);
+
+        if( M4VSS3GPP_kAudioEffectType_None != pFx->AudioEffectType )
+        {
+            /**
+            * Check if the audio effect is actually active at the current time */
+            if( ( t >= (M4OSA_Int32)(/*BC +*/pFx->uiStartTime))
+                && /**< Are we after the start time of the effect? */
+                (t < (M4OSA_Int32)(/*BC +*/pFx->uiStartTime + pFx->
+                uiDuration)) ) /**< Are we into the effect duration? */
+            {
+                /**
+                * Set the active effect */
+                *piClipActiveEffect = pC->nbEffects - 1 - uiFxIndex;
+
+                /**
+                * The first effect has the highest priority, then the second one,
+                * then the third one.
+                * Hence, as soon as we find an active effect, we can get out of this loop */
+                uiFxIndex = pC->nbEffects; /** get out of the for loop */
+            }
+            /**
+            * Bugfix: the duration of the end effect has been set according to the
+            * announced clip duration. If the announced duration is smaller than the
+            * real one, the end effect won't be applied at the very end of the clip.
+            * To solve this issue we force the end effect. */
+#if 0
+
+            else if( ( M4VSS3GPP_kEffectKind_End == pFx->EffectKind)
+                && (t >= (M4OSA_Int32)(BC + pFx->uiStartTime)) )
+            {
+                /**
+                * Set the active effect */
+                *piClipActiveEffect =
+                    pC->pClipList[uiClipIndex].nbEffects - 1 - uiFxIndex;
+
+                /**
+                * The third effect has the highest priority, then the second one,
+                   then the first one.
+                * Hence, as soon as we found an active effect, we can get out of this loop */
+                uiFxIndex = pC->
+                    pClipList[
+                        uiClipIndex].nbEffects; /** get out of the for loop */
+            }
+
+#endif                                                    /* RC */
+
+        }
+    }
+
+    return;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intApplyAudioEffect()
+ * @brief    Apply audio effect to pPCMdata
+ * @param   pC            (IN/OUT) Internal edit context
+ * @param   uiClip1orClip2    (IN/OUT) 1 for first clip, 2 for second clip
+ * @param    pPCMdata    (IN/OUT) Input and Output PCM audio data
+ * @param    uiPCMsize    (IN)     Size of pPCMdata
+ * @return    M4NO_ERROR:                        No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intApplyAudioEffect( M4VSS3GPP_InternalEditContext
+                                               *pC, M4OSA_UInt8 uiClip1orClip2,
+                                               M4OSA_Int16 *pPCMdata,
+                                               M4OSA_UInt32 uiPCMsize )
+{
+    M4VSS3GPP_ClipContext *pClip;
+    M4VSS3GPP_ClipSettings *pClipSettings;
+    M4VSS3GPP_EffectSettings *pFx;
+    M4OSA_Int32
+        i32sample; /**< we will cast each Int16 sample into this Int32 variable */
+    M4OSA_Int32 iPos;
+    M4OSA_Int32 iDur;
+
+    M4OSA_DEBUG_IF2(( 1 != uiClip1orClip2) && (2 != uiClip1orClip2),
+        M4ERR_PARAMETER,
+        "M4VSS3GPP_intBeginAudioEffect: uiClip1orClip2 invalid");
+
+    if( 1 == uiClip1orClip2 )
+    {
+        pClip = pC->pC1;
+        pClipSettings = &(pC->pClipList[pC->
+            uiCurrentClip]); /**< Get a shortcut to the clip settings */
+        // RC        pFx = &(pClipSettings->Effects[pC->iClip1ActiveEffect]);/**< Get a shortcut
+        //                                                                to the active effect */
+        /** Get a shortcut to the active effect */
+        pFx = &(pC->pEffectsList[pC->iClip1ActiveEffect]);
+        M4OSA_DEBUG_IF2(( pC->iClip1ActiveEffect < 0)
+            || (pC->iClip1ActiveEffect > 2), M4ERR_PARAMETER,
+            "M4VSS3GPP_intApplyAudioEffect: iClip1ActiveEffect invalid");
+    }
+    else /**< if (2==uiClip1orClip2) */
+    {
+        pClip = pC->pC2;
+        pClipSettings = &(pC->pClipList[pC->uiCurrentClip
+            + 1]); /**< Get a shortcut to the clip settings */
+        // RC        pFx = &(pClipSettings->Effects[pC->iClip2ActiveEffect]);/**< Get a shortcut
+        //                                                                to the active effect */
+        /** Get a shortcut to the active effect */
+        pFx = &(pC->pEffectsList[pC->iClip2ActiveEffect]);
+        M4OSA_DEBUG_IF2(( pC->iClip2ActiveEffect < 0)
+            || (pC->iClip2ActiveEffect > 2), M4ERR_PARAMETER,
+            "M4VSS3GPP_intApplyAudioEffect: iClip2ActiveEffect invalid");
+    }
+
+    iDur = (M4OSA_Int32)pFx->uiDuration;
+
+    /**
+    * Compute how far from the beginning of the effect we are, in clip-base time.
+    * It is done with integers because the offset and begin cut have been rounded already. */
+    iPos =
+        (M4OSA_Int32)(pC->ewc.dATo + 0.5 - pClip->iAoffset / pClip->scale_audio)
+        - pClip->iActualAudioBeginCut - pFx->uiStartTime;
+
+    /**
+    * Sanity check */
+    if( iPos > iDur )
+    {
+        iPos = iDur;
+    }
+    else if( iPos < 0 )
+    {
+        iPos = 0;
+    }
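+    /* Illustrative figures: for a 1000 ms effect (iDur = 1000), a buffer decoded 250 ms
+     * after the effect start gives iPos = 250, i.e. the effect is 25% complete;
+     * the clamping above keeps iPos within [0, iDur]. */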
+
+    /**
+    * At this point, iPos is the effect progress, in a 0 to iDur base */
+    switch( pFx->AudioEffectType )
+    {
+        case M4VSS3GPP_kAudioEffectType_FadeIn:
+
+            /**
+            * Original samples are signed 16 bits.
+            * We convert them to signed 32 bits and multiply them by iPos,
+            * so we must ensure that iPos is not higher than the 16-bit maximum.
+            * iPos max value is iDur, so we test iDur. */
+            while( iDur > PWR_FXP_FRACT_MAX )
+            {
+                iDur >>= 2; /**< dividing by 2 would be more logical (instead of 4),
+                            but we have enough dynamic range */
+                iPos >>= 2; /**< idem */
+            }
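+            /* Sketch of the fade-in gain (illustrative values): each sample is scaled
+             * by iPos/iDur, e.g. with iPos = 250 and iDur = 1000 a sample of 20000
+             * becomes 20000 * 250 / 1000 = 5000; the shift loop above keeps iPos small
+             * enough that the 32-bit product cannot overflow. */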
+
+            /**
+            * From buffer size (bytes) to number of sample (int16): divide by two */
+            uiPCMsize >>= 1;
+
+            /**
+            * Loop on samples */
+            while( uiPCMsize-- > 0 ) /**< decrementing to optimize */
+            {
+                i32sample = *pPCMdata;
+                i32sample *= iPos;
+                i32sample /= iDur;
+                *pPCMdata++ = (M4OSA_Int16)i32sample;
+            }
+
+            break;
+
+        case M4VSS3GPP_kAudioEffectType_FadeOut:
+
+            /**
+            * switch from 0->Dur to Dur->0 in order to do fadeOUT instead of fadeIN */
+            iPos = iDur - iPos;
+
+            /**
+            * Original samples are signed 16 bits.
+            * We convert them to signed 32 bits and multiply them by iPos,
+            * so we must ensure that iPos is not higher than the 16-bit maximum.
+            * iPos max value is iDur, so we test iDur. */
+            while( iDur > PWR_FXP_FRACT_MAX )
+            {
+                iDur >>= 2; /**< dividing by 2 would be more logical (instead of 4),
+                            but we have enough dynamic range */
+                iPos >>= 2; /**< idem */
+            }
+
+            /**
+            * From buffer size (bytes) to number of sample (int16): divide by two */
+            uiPCMsize >>= 1;
+
+            /**
+            * Loop on samples, apply the fade factor on each */
+            while( uiPCMsize-- > 0 ) /**< decrementing counter to optimize */
+            {
+                i32sample = *pPCMdata;
+                i32sample *= iPos;
+                i32sample /= iDur;
+                *pPCMdata++ = (M4OSA_Int16)i32sample;
+            }
+
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intApplyAudioEffect: unknown audio effect type (0x%x),\
+                returning M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE",
+                pFx->AudioEffectType);
+            return M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE;
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intApplyAudioEffect: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioTransition()
+ * @brief    Apply transition effect to two PCM buffer
+ * @note    The result of the transition is put in the first buffer.
+ *          I know it's not beautiful, but it fits my current needs, and it's efficient!
+ *          So why bother with a third output buffer?
+ * @param   pC            (IN/OUT) Internal edit context
+ * @param    pPCMdata1    (IN/OUT) First input and Output PCM audio data
+ * @param    pPCMdata2    (IN) Second input PCM audio data
+ * @param    uiPCMsize    (IN) Size of both PCM buffers
+ * @return    M4NO_ERROR:                        No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioTransition( M4VSS3GPP_InternalEditContext
+                                              *pC, M4OSA_Int16 *pPCMdata1,
+                                              M4OSA_Int16 *pPCMdata2,
+                                              M4OSA_UInt32 uiPCMsize )
+{
+    M4OSA_Int32 i32sample1,
+        i32sample2; /**< we will cast each Int16 sample into this Int32 variable */
+    M4OSA_Int32 iPos1, iPos2;
+    M4OSA_Int32 iDur = (M4OSA_Int32)pC->
+        pTransitionList[pC->uiCurrentClip].uiTransitionDuration;
+
+    /**
+    * Compute how far from the end cut we are, in clip-base time.
+    * It is done with integers because the offset and begin cut have been rounded already. */
+    iPos1 = pC->pC1->iEndTime - (M4OSA_Int32)(pC->ewc.dATo
+        + 0.5 - pC->pC1->iAoffset / pC->pC1->scale_audio);
+
+    /**
+    * Sanity check */
+    if( iPos1 > iDur )
+    {
+        iPos1 = iDur;
+    }
+    else if( iPos1 < 0 )
+    {
+        iPos1 = 0;
+    }
+
+    /**
+    * Position of second clip in the transition */
+    iPos2 = iDur - iPos1;
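+    /* Illustrative example: with a 2000 ms transition (iDur = 2000) and the edit time
+     * currently 500 ms before clip1's end time, iPos1 = 500 and iPos2 = 1500, i.e. the
+     * cross-fade is 75% of the way towards clip2. */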
+
+    /**
+    * At this point, iPos2 is the transition progress, in a 0 to iDur base.
+    * iPos1 is the transition progress, in an iDur to 0 base. */
+    switch( pC->pTransitionList[pC->uiCurrentClip].AudioTransitionType )
+    {
+        case M4VSS3GPP_kAudioTransitionType_CrossFade:
+
+            /**
+            * Original samples are signed 16 bits.
+            * We convert them to signed 32 bits and multiply them by iPos,
+            * so we must ensure that iPos is not higher than the 16-bit maximum.
+            * iPos max value is iDur, so we test iDur. */
+            while( iDur > PWR_FXP_FRACT_MAX )
+            {
+                iDur >>= 2; /**< dividing by 2 would be more logical (instead of 4),
+                            but we have enough dynamic range */
+                iPos1 >>= 2; /**< idem */
+                iPos2 >>= 2; /**< idem */
+            }
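+            /**
+            * Worked example for the mixing loop below (illustrative numbers only):
+            * with iDur=1000, iPos1=750 and iPos2=250, a clip1 sample of 2000 and a
+            * clip2 sample of -4000 mix to 2000*750/1000 + (-4000)*250/1000 = 500.
+            * A 2048-byte buffer thus yields 1024 such int16 mixes after the
+            * size-to-sample shift below. */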
+
+            /**
+            * From buffer size (bytes) to number of samples (int16): divide by two */
+            uiPCMsize >>= 1;
+
+            /**
+            * Loop on samples, apply the fade factor on each */
+            while( uiPCMsize-- > 0 ) /**< decrementing counter to optimize */
+            {
+                i32sample1 = *pPCMdata1; /**< Get clip1 sample */
+                i32sample1 *= iPos1;     /**< multiply by fade numerator */
+                i32sample1 /= iDur;      /**< divide by fade denominator */
+
+                i32sample2 = *pPCMdata2; /**< Get clip2 sample */
+                i32sample2 *= iPos2;     /**< multiply by fade numerator */
+                i32sample2 /= iDur;      /**< divide by fade denominator */
+
+                *pPCMdata1++ = (M4OSA_Int16)(i32sample1
+                    + i32sample2); /**< mix the two samples */
+                pPCMdata2++; /**< don't forget to increment the second buffer */
+            }
+            break;
+
+        case M4VSS3GPP_kAudioTransitionType_None:
+            /**
+            * This is a simple, non-optimized version of the None transition:
+            * we just copy the PCM frames */
+            if( iPos1 < (iDur >> 1) ) /**< second half of transition */
+            {
+                /**
+                * Copy the input PCM to the output buffer */
+                M4OSA_memcpy((M4OSA_MemAddr8)pPCMdata1,
+                    (M4OSA_MemAddr8)pPCMdata2, uiPCMsize);
+            }
+            /**
+            * the output must be put in the first buffer.
+            * For the first half of the non-transition it's already the case!
+            * So we have nothing to do here...
+            */
+
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioTransition: unknown transition type (0x%x),\
+                returning M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE",
+                pC->pTransitionList[pC->uiCurrentClip].AudioTransitionType);
+            return M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE;
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAudioTransition: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreateAudioEncoder()
+ * @brief    Reset the audio encoder (Create it if needed)
+ * @note
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intCreateAudioEncoder( M4VSS3GPP_EncodeWriteContext *pC_ewc,
+                                          M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
+                                          M4OSA_UInt32 uiAudioBitrate )
+{
+    M4OSA_ERR err;
+
+    /**
+    * If an encoder already exists, we destroy it */
+    if( M4OSA_NULL != pC_ewc->pAudioEncCtxt )
+    {
+        err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctClose(
+            pC_ewc->pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intResetAudioEncoder: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctCleanUp(
+            pC_ewc->pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intResetAudioEncoder:\
+                pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",    err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pC_ewc->pAudioEncCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Create a new encoder */
+    switch( pC_ewc->AudioStreamType )
+    {
+            //EVRC
+            //        case M4SYS_kEVRC:
+            //
+            //            err = M4VSS3GPP_setCurrentAudioEncoder(&pC->ShellAPI,
+            //                                                   pC_ewc->AudioStreamType);
+            //            M4ERR_CHECK_RETURN(err);
+            //
+            //            pC_ewc->AudioEncParams.Format = M4ENCODER_kEVRC;
+            //            pC_ewc->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+            //            pC_ewc->AudioEncParams.ChannelNum = M4ENCODER_kMono;
+            //            pC_ewc->AudioEncParams.Bitrate = M4VSS3GPP_EVRC_DEFAULT_BITRATE;
+            //            break;
+
+        case M4SYS_kAMR:
+
+            err = M4VSS3GPP_setCurrentAudioEncoder(pC_ShellAPI,
+                pC_ewc->AudioStreamType);
+            M4ERR_CHECK_RETURN(err);
+
+            pC_ewc->AudioEncParams.Format = M4ENCODER_kAMRNB;
+            pC_ewc->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+            pC_ewc->AudioEncParams.ChannelNum = M4ENCODER_kMono;
+            pC_ewc->AudioEncParams.Bitrate = M4VSS3GPP_AMR_DEFAULT_BITRATE;
+            pC_ewc->AudioEncParams.SpecifParam.AmrSID = M4ENCODER_kAmrNoSID;
+            break;
+
+        case M4SYS_kAAC:
+
+            err = M4VSS3GPP_setCurrentAudioEncoder(pC_ShellAPI,
+                pC_ewc->AudioStreamType);
+            M4ERR_CHECK_RETURN(err);
+
+            pC_ewc->AudioEncParams.Format = M4ENCODER_kAAC;
+
+            switch( pC_ewc->uiSamplingFrequency )
+            {
+                case 8000:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+                    break;
+
+                case 16000:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+                    break;
+
+                case 22050:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k22050Hz;
+                    break;
+
+                case 24000:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k24000Hz;
+                    break;
+
+                case 32000:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k32000Hz;
+                    break;
+
+                case 44100:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k44100Hz;
+                    break;
+
+                case 48000:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k48000Hz;
+                    break;
+
+                default:
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intCreateAudioEncoder: invalid input AAC sampling frequency\
+                        (%d Hz), returning M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED",
+                        pC_ewc->uiSamplingFrequency);
+                    return M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED;
+            }
+            pC_ewc->AudioEncParams.ChannelNum = (pC_ewc->uiNbChannels == 1)
+                ? M4ENCODER_kMono : M4ENCODER_kStereo;
+            pC_ewc->AudioEncParams.SpecifParam.AacParam.Regulation =
+                M4ENCODER_kAacRegulNone; //M4ENCODER_kAacBitReservoir
+            /* unused */
+            pC_ewc->AudioEncParams.SpecifParam.AacParam.bIS = M4OSA_FALSE;
+            pC_ewc->AudioEncParams.SpecifParam.AacParam.bMS = M4OSA_FALSE;
+            pC_ewc->AudioEncParams.SpecifParam.AacParam.bPNS = M4OSA_FALSE;
+            pC_ewc->AudioEncParams.SpecifParam.AacParam.bTNS = M4OSA_FALSE;
+            /* TODO change into highspeed asap */
+            pC_ewc->AudioEncParams.SpecifParam.AacParam.bHighSpeed =
+                M4OSA_FALSE;
+
+            /* Quantize the bitrate (round up to the next supported value) */
+            if( uiAudioBitrate <= 16000 )
+                pC_ewc->AudioEncParams.Bitrate = 16000;
+
+            else if( uiAudioBitrate <= 24000 )
+                pC_ewc->AudioEncParams.Bitrate = 24000;
+
+            else if( uiAudioBitrate <= 32000 )
+                pC_ewc->AudioEncParams.Bitrate = 32000;
+
+            else if( uiAudioBitrate <= 48000 )
+                pC_ewc->AudioEncParams.Bitrate = 48000;
+
+            else if( uiAudioBitrate <= 64000 )
+                pC_ewc->AudioEncParams.Bitrate = 64000;
+
+            else
+                pC_ewc->AudioEncParams.Bitrate = 96000;
+
+            /* Special requirement of our encoder */
+            if( ( pC_ewc->uiNbChannels == 2)
+                && (pC_ewc->AudioEncParams.Bitrate < 32000) )
+                pC_ewc->AudioEncParams.Bitrate = 32000;
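+            /* Example with illustrative numbers: a requested 20000 bit/s is raised to
+             the next supported value, 24000; for a stereo clip, the 32000 bit/s floor
+             just above then raises it again to 32000. */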
+
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intResetAudioEncoder: Undefined output audio format (%d),\
+                returning M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT",
+                pC_ewc->AudioStreamType);
+            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
+    }
+
+    /* Initialise the audio encoder */
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_intResetAudioEncoder:\
+        pAudioEncoderGlobalFcts->pFctInit called with userdata 0x%x",
+        pC_ShellAPI->pCurrentAudioEncoderUserData);
+    err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctInit(&pC_ewc->pAudioEncCtxt,
+        pC_ShellAPI->pCurrentAudioEncoderUserData);
+
+#else
+
+    err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctInit(&pC_ewc->pAudioEncCtxt,
+        M4OSA_NULL /* no HW encoder */);
+
+#endif
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intResetAudioEncoder: pAudioEncoderGlobalFcts->pFctInit returns 0x%x",
+            err);
+        return err;
+    }
+
+    /* Open the audio encoder */
+    err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctOpen(pC_ewc->pAudioEncCtxt,
+        &pC_ewc->AudioEncParams, &pC_ewc->pAudioEncDSI,
+        M4OSA_NULL /* no grabbing */);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intResetAudioEncoder: pAudioEncoderGlobalFcts->pFctOpen returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intResetAudioEncoder: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c b/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
new file mode 100755
index 0000000..270453f
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
@@ -0,0 +1,2554 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_EditVideo.c
+ * @brief    Video Studio Service 3GPP edit API implementation.
+ * @note
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our header */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+// StageFright encoders require resolutions that are multiples of 16
+#include "M4ENCODER_common.h"
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /**< OSAL memory management */
+#include "M4OSA_Debug.h"  /**< OSAL debug management */
+
+/**
+ * component includes */
+#include "M4VFL_transition.h" /**< video effects */
+
+/*for transition behaviour*/
+#include <math.h>
+
+/************************************************************************/
+/* Static local functions                                               */
+/************************************************************************/
+
+static M4OSA_ERR M4VSS3GPP_intCheckVideoMode(
+    M4VSS3GPP_InternalEditContext *pC );
+static M4OSA_Void
+M4VSS3GPP_intCheckVideoEffects( M4VSS3GPP_InternalEditContext *pC,
+                               M4OSA_UInt8 uiClipNumber );
+static M4OSA_ERR
+M4VSS3GPP_intApplyVideoEffect( M4VSS3GPP_InternalEditContext *pC,/*M4OSA_UInt8 uiClip1orClip2,*/
+                              M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut );
+static M4OSA_ERR
+M4VSS3GPP_intVideoTransition( M4VSS3GPP_InternalEditContext *pC,
+                             M4VIFI_ImagePlane *pPlaneOut );
+
+static M4OSA_Void
+M4VSS3GPP_intUpdateTimeInfo( M4VSS3GPP_InternalEditContext *pC,
+                            M4SYS_AccessUnit *pAU );
+static M4OSA_Void M4VSS3GPP_intSetH263TimeCounter( M4OSA_MemAddr8 pAuDataBuffer,
+                                                  M4OSA_UInt8 uiCts );
+static M4OSA_Void M4VSS3GPP_intSetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
+                                           M4OSA_UInt32 uiCtsSec );
+static M4OSA_Void M4VSS3GPP_intGetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
+                                           M4OSA_UInt32 *pCtsSec );
+static M4OSA_ERR M4VSS3GPP_intAllocateYUV420( M4VIFI_ImagePlane *pPlanes,
+                                             M4OSA_UInt32 uiWidth, M4OSA_UInt32 uiHeight );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepVideo()
+ * @brief    One step of video processing
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intEditStepVideo( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_Int32 iCts, iNextCts;
+    M4ENCODER_FrameMode FrameMode;
+    M4OSA_Bool bSkipFrame;
+    M4OSA_UInt16 offset;
+
+    /**
+    * Check if we reached end cut */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    if ( ((M4OSA_Int32)(pC->ewc.dInputVidCts) - pC->pC1->iVoffset) >= pC->pC1->iEndTime )
+    {
+        /* Re-adjust video to precise cut time */
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+
+        /**
+        * Video is done for this clip */
+        err = M4VSS3GPP_intReachedEndOfVideo(pC);
+
+        /* RC: to know when a file has been processed */
+        if (M4NO_ERROR != err && err != M4VSS3GPP_WAR_SWITCH_CLIP)
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intEditStepVideo: M4VSS3GPP_intReachedEndOfVideo returns 0x%x",
+                err);
+        }
+
+        return err;
+    }
+
+    /* Don't change the states if we are in decodeUpTo() */
+    if ( (M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC1->Vstatus)
+        && (( pC->pC2 == M4OSA_NULL)
+        || (M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC2->Vstatus)) )
+    {
+        /**
+        * Check Video Mode, depending on the current output CTS */
+        err = M4VSS3GPP_intCheckVideoMode(
+            pC); /**< This function changes the pC->Vstate variable! */
+
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intEditStepVideo: M4VSS3GPP_intCheckVideoMode returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+
+    switch( pC->Vstate )
+    {
+        /* _________________ */
+        /*|                 |*/
+        /*| READ_WRITE MODE |*/
+        /*|_________________|*/
+
+        case M4VSS3GPP_kEditVideoState_READ_WRITE:
+        case M4VSS3GPP_kEditVideoState_AFTER_CUT:
+            {
+                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo READ_WRITE");
+
+                bSkipFrame = M4OSA_FALSE;
+
+                /**
+                * If we were decoding the clip, we must jump to be sure
+                * to get to the right position. */
+                if( M4VSS3GPP_kClipStatus_READ != pC->pC1->Vstatus )
+                {
+                    /**
+                    * Jump to target video time (tc = to-T) */
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    iCts = (M4OSA_Int32)(pC->ewc.dInputVidCts) - pC->pC1->iVoffset;
+                    err = pC->pC1->ShellAPI.m_pReader->m_pFctJump(
+                        pC->pC1->pReaderContext,
+                        (M4_StreamHandler *)pC->pC1->pVideoStream, &iCts);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo:\
+                            READ_WRITE: m_pReader->m_pFctJump(V1) returns 0x%x!",
+                            err);
+                        return err;
+                    }
+
+                    err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                        pC->pC1->pReaderContext,
+                        (M4_StreamHandler *)pC->pC1->pVideoStream,
+                        &pC->pC1->VideoAU);
+
+                    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo:\
+                            READ_WRITE: m_pReader->m_pFctGetNextAu returns 0x%x!",
+                            err);
+                        return err;
+                    }
+
+                    M4OSA_TRACE2_3("A .... read  : cts  = %.0f + %ld [ 0x%x ]",
+                        pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
+                        pC->pC1->VideoAU.m_size);
+
+                    /* This frame has already been written in the BEGIN CUT step -> skip it */
+                    if( pC->pC1->VideoAU.m_CTS == iCts
+                        && pC->pC1->iVideoRenderCts >= iCts )
+                    {
+                        bSkipFrame = M4OSA_TRUE;
+                    }
+                }
+
+                /* This frame has already been written in the BEGIN CUT step -> skip it */
+                if( ( pC->Vstate == M4VSS3GPP_kEditVideoState_AFTER_CUT)
+                    && (pC->pC1->VideoAU.m_CTS
+                    + pC->pC1->iVoffset <= pC->ewc.WriterVideoAU.CTS) )
+                {
+                    bSkipFrame = M4OSA_TRUE;
+                }
+
+                /**
+                * Remember the clip reading state */
+                pC->pC1->Vstatus = M4VSS3GPP_kClipStatus_READ;
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                // Rounding is to compensate for reader imprecision (m_CTS is actually an integer)
+                iCts = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pC->pC1->iVoffset - 1;
+                iNextCts = iCts + ((M4OSA_Int32)pC->dOutputFrameDuration) + 1;
+                /* Avoid writing a last frame of duration 0 */
+                if( iNextCts > pC->pC1->iEndTime )
+                    iNextCts = pC->pC1->iEndTime;
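+                /* Example (illustrative numbers): with dInputVidCts=2000, iVoffset=0 and
+                 a 33 ms output frame duration, iCts=1999 and iNextCts=2033, so only AUs
+                 whose CTS falls in [1999, 2033) are written at this step. */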
+
+                /**
+                * If the AU is good to be written, write it, else just skip it */
+                if( ( M4OSA_FALSE == bSkipFrame)
+                    && (( pC->pC1->VideoAU.m_CTS >= iCts)
+                    && (pC->pC1->VideoAU.m_CTS < iNextCts)
+                    && (pC->pC1->VideoAU.m_size > 0)) )
+                {
+                    /**
+                    * Get the output AU to write into */
+                    err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                        pC->ewc.p3gpWriterContext,
+                        M4VSS3GPP_WRITER_VIDEO_STREAM_ID,
+                        &pC->ewc.WriterVideoAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
+                            pWriterDataFcts->pStartAU(Video) returns 0x%x!",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Copy the input AU to the output AU */
+                    pC->ewc.WriterVideoAU.attribute = pC->pC1->VideoAU.m_attribute;
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    pC->ewc.WriterVideoAU.CTS = (M4OSA_Time)pC->pC1->VideoAU.m_CTS +
+                        (M4OSA_Time)pC->pC1->iVoffset;
+                    pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
+                    offset = 0;
+                    /* for an H.264 stream, do not copy the first 4 bytes, as they are
+                     header indicators */
+                    if( pC->pC1->pVideoStream->m_basicProperties.m_streamType
+                        == M4DA_StreamTypeVideoMpeg4Avc )
+                        offset = 4;
+
+                    pC->ewc.WriterVideoAU.size = pC->pC1->VideoAU.m_size - offset;
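+                    /* Example: a 1204-byte AVC AU is thus written as 1200 bytes, the
+                     4 header bytes being skipped by both the size and the copy below. */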
+                    if( pC->ewc.WriterVideoAU.size > pC->ewc.uiVideoMaxAuSize )
+                    {
+                        M4OSA_TRACE1_2(
+                            "M4VSS3GPP_intEditStepVideo: READ_WRITE: AU size greater than\
+                             MaxAuSize (%d>%d)! returning M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE",
+                            pC->ewc.WriterVideoAU.size, pC->ewc.uiVideoMaxAuSize);
+                        return M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE;
+                    }
+
+                    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterVideoAU.dataAddress,
+                        (pC->pC1->VideoAU.m_dataAddress + offset),
+                        (pC->ewc.WriterVideoAU.size));
+
+                    /**
+                    * Update time info for the Counter Time System to be equal to the
+                    bit-stream time */
+                    M4VSS3GPP_intUpdateTimeInfo(pC, &pC->ewc.WriterVideoAU);
+                    M4OSA_TRACE2_2("B ---- write : cts  = %lu [ 0x%x ]",
+                        pC->ewc.WriterVideoAU.CTS, pC->ewc.WriterVideoAU.size);
+
+                    /**
+                    * Write the AU */
+                    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+                        pC->ewc.p3gpWriterContext,
+                        M4VSS3GPP_WRITER_VIDEO_STREAM_ID,
+                        &pC->ewc.WriterVideoAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
+                         file size is reached
+                        The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE
+                        is returned*/
+                        if( M4WAR_WRITER_STOP_REQ == err )
+                        {
+                            M4OSA_TRACE1_0(
+                                "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
+                            return M4VSS3GPP_WAR_EDITING_DONE;
+                        }
+                        else
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
+                                pWriterDataFcts->pProcessAU(Video) returns 0x%x!",
+                                err);
+                            return err;
+                        }
+                    }
+
+                    /**
+                    * Read next AU for next step */
+                    err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                        pC->pC1->pReaderContext,
+                        (M4_StreamHandler *)pC->pC1->pVideoStream,
+                        &pC->pC1->VideoAU);
+
+                    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
+                            m_pReaderDataIt->m_pFctGetNextAu returns 0x%x!",
+                            err);
+                        return err;
+                    }
+
+                    M4OSA_TRACE2_3("C .... read  : cts  = %.0f + %ld [ 0x%x ]",
+                        pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
+                        pC->pC1->VideoAU.m_size);
+                }
+                else
+                {
+                    /**
+                    * Decide whether to read the next AU or just to increment the time */
+                    if( ( pC->pC1->VideoAU.m_size == 0)
+                        || (pC->pC1->VideoAU.m_CTS >= iNextCts) )
+                    {
+                        /* Increment time by the encoding period (NO_MORE_AU or reader in advance) */
+                       // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                       pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
+
+                        /* Switch (from AFTER_CUT) to normal mode because time is
+                        no longer frozen */
+                        pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+                    }
+                    else
+                    {
+                        /* In other cases (reader late), just let the reader catch up
+                         with pC->ewc.dVTo */
+                        err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                            pC->pC1->pReaderContext,
+                            (M4_StreamHandler *)pC->pC1->pVideoStream,
+                            &pC->pC1->VideoAU);
+
+                        if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
+                                m_pReaderDataIt->m_pFctGetNextAu returns 0x%x!",
+                                err);
+                            return err;
+                        }
+
+                        M4OSA_TRACE2_3("D .... read  : cts  = %.0f + %ld [ 0x%x ]",
+                            pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
+                            pC->pC1->VideoAU.m_size);
+                    }
+                }
+            }
+            break;
+
+            /* ____________________ */
+            /*|                    |*/
+            /*| DECODE_ENCODE MODE |*/
+            /*|   BEGIN_CUT MODE   |*/
+            /*|____________________|*/
+
+        case M4VSS3GPP_kEditVideoState_DECODE_ENCODE:
+        case M4VSS3GPP_kEditVideoState_BEGIN_CUT:
+            {
+                M4OSA_TRACE3_0(
+                    "M4VSS3GPP_intEditStepVideo DECODE_ENCODE / BEGIN_CUT");
+
+                /**
+                * Decode the video up to the target time
+                (will jump to the previous RAP if needed) */
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC1, (M4OSA_Int32)pC->ewc.dInputVidCts);
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
+                        M4VSS3GPP_intDecodeVideoUpToCts returns err=0x%x",
+                        err);
+                    return err;
+                }
+
+                /* If the decoding is not completed, do one more step with time frozen */
+                if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus )
+                {
+                    return M4NO_ERROR;
+                }
+
+                /**
+                * Reset the video pre-processing error before calling the encoder */
+                pC->ewc.VppError = M4NO_ERROR;
+
+                M4OSA_TRACE2_0("E ++++ encode AU");
+
+                /**
+                * Encode the frame (rendering, filtering and writing will be done
+                 in encoder callbacks) */
+                if( pC->Vstate == M4VSS3GPP_kEditVideoState_BEGIN_CUT )
+                    FrameMode = M4ENCODER_kIFrame;
+                else
+                    FrameMode = M4ENCODER_kNormalFrame;
+
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctEncode(pC->ewc.pEncContext, M4OSA_NULL,
+                pC->ewc.dInputVidCts, FrameMode);
+                /**
+                * Check if we had a VPP error... */
+                if( M4NO_ERROR != pC->ewc.VppError )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
+                        pVideoEncoderGlobalFcts->pFctEncode, returning VppErr=0x%x",
+                        pC->ewc.VppError);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    if( M4WAR_VIDEORENDERER_NO_NEW_FRAME != pC->ewc.VppError )
+                    {
+#endif //M4VSS_SUPPORT_OMX_CODECS
+
+                        return pC->ewc.VppError;
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    }
+
+#endif                                   //M4VSS_SUPPORT_OMX_CODECS
+
+                }
+                else if( M4NO_ERROR != err ) /**< ...or an encoder error */
+                {
+                    if( ((M4OSA_UInt32)M4ERR_ALLOC) == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
+                            returning M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR");
+                        return M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR;
+                    }
+                    /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
+                    file size is reached
+                    The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE
+                    is returned*/
+                    else if( M4WAR_WRITER_STOP_REQ == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
+                        return M4VSS3GPP_WAR_EDITING_DONE;
+                    }
+                    else
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
+                            pVideoEncoderGlobalFcts->pFctEncode returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Increment time by the encoding period (for begin cut, do not increment, so as
+                not to lose P-frames) */
+                if( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == pC->Vstate )
+                {
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
+                }
+            }
+            break;
+
+            /* _________________ */
+            /*|                 |*/
+            /*| TRANSITION MODE |*/
+            /*|_________________|*/
+
+        case M4VSS3GPP_kEditVideoState_TRANSITION:
+            {
+                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo TRANSITION");
+
+                /* Don't decode more than needed */
+                if( !(( M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC1->Vstatus)
+                    && (M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC2->Vstatus)) )
+                {
+                    /**
+                    * Decode the clip1 video up to the target time
+                    (will jump to the previous RAP if needed) */
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC1,
+                         (M4OSA_Int32)pC->ewc.dInputVidCts);
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                            M4VSS3GPP_intDecodeVideoUpToCts(C1) returns err=0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /* If the decoding is not completed, do one more step with time frozen */
+                    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus )
+                    {
+                        return M4NO_ERROR;
+                    }
+                }
+
+                /* Don't decode more than needed */
+                if( !(( M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC2->Vstatus)
+                    && (M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus)) )
+                {
+                    /**
+                    * Decode the clip2 video up to the target time
+                        (will jump to the previous RAP if needed) */
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC2,
+                         (M4OSA_Int32)pC->ewc.dInputVidCts);
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                            M4VSS3GPP_intDecodeVideoUpToCts(C2) returns err=0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /* If the decoding is not completed, do one more step with time frozen */
+                    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC2->Vstatus )
+                    {
+                        return M4NO_ERROR;
+                    }
+                }
+
+                /**
+                * Reset the video pre-processing error before calling the encoder */
+                pC->ewc.VppError = M4NO_ERROR;
+
+                M4OSA_TRACE2_0("F **** blend AUs");
+
+                /**
+                * Encode the frame (rendering, filtering and writing will be done
+                in encoder callbacks) */
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctEncode(pC->ewc.pEncContext, M4OSA_NULL,
+                    pC->ewc.dInputVidCts, M4ENCODER_kNormalFrame);
+
+                /**
+                * If encode returns a process frame error, it is likely to be a VPP error */
+                if( M4NO_ERROR != pC->ewc.VppError )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                        pVideoEncoderGlobalFcts->pFctEncode, returning VppErr=0x%x",
+                        pC->ewc.VppError);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    if( M4WAR_VIDEORENDERER_NO_NEW_FRAME != pC->ewc.VppError )
+                    {
+
+#endif //M4VSS_SUPPORT_OMX_CODECS
+
+                        return pC->ewc.VppError;
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    }
+
+#endif //M4VSS_SUPPORT_OMX_CODECS
+
+                }
+                else if( M4NO_ERROR != err ) /**< ...or an encoder error */
+                {
+                    if( ((M4OSA_UInt32)M4ERR_ALLOC) == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                            returning M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR");
+                        return M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR;
+                    }
+
+                    /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
+                     file size is reached
+                    The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE is
+                     returned*/
+                    else if( M4WAR_WRITER_STOP_REQ == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
+                        return M4VSS3GPP_WAR_EDITING_DONE;
+                    }
+                    else
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                            pVideoEncoderGlobalFcts->pFctEncode returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Increment time by the encoding period */
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
+            }
+            break;
+
+            /* ____________ */
+            /*|            |*/
+            /*| ERROR CASE |*/
+            /*|____________|*/
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intEditStepVideo: invalid internal state (0x%x),\
+                returning M4VSS3GPP_ERR_INTERNAL_STATE",
+                pC->Vstate);
+            return M4VSS3GPP_ERR_INTERNAL_STATE;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCheckVideoMode()
+ * @brief    Check which video process mode we must use, depending on the output CTS.
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intCheckVideoMode(
+    M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    const M4OSA_Int32  t = (M4OSA_Int32)pC->ewc.dInputVidCts;
+    /**< Transition duration */
+    const M4OSA_Int32 TD = pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration;
+
+    M4OSA_Int32 iTmp;
+
+    const M4VSS3GPP_EditVideoState previousVstate = pC->Vstate;
+
+    /**
+    * Check if Clip1 is on its begin cut, or in an effect zone */
+    M4VSS3GPP_intCheckVideoEffects(pC, 1);
+
+    /**
+    * Check if we are in the transition with next clip */
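+    /* Example (illustrative numbers): for a clip with iEndTime=10000 ms and a 2000 ms
+     transition, the TRANSITION state is entered once the clip-relative time
+     (t - iVoffset) reaches 8000 ms. */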
+    if( ( TD > 0) && (( t - pC->pC1->iVoffset) >= (pC->pC1->iEndTime - TD)) )
+    {
+        /**
+        * We are in a transition */
+        pC->Vstate = M4VSS3GPP_kEditVideoState_TRANSITION;
+        pC->bTransitionEffect = M4OSA_TRUE;
+
+        /**
+        * Open second clip for transition, if not yet opened */
+        if( M4OSA_NULL == pC->pC2 )
+        {
+            err = M4VSS3GPP_intOpenClip(pC, &pC->pC2,
+                &pC->pClipList[pC->uiCurrentClip + 1]);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_editOpenClip returns 0x%x!",
+                    err);
+                return err;
+            }
+
+            /**
+            * Add current video output CTS to the clip offset
+            * (audio output CTS is not yet at the transition, so audio
+            *  offset can't be updated yet). */
+            // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            pC->pC2->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts;
+
+            /**
+            * 2005-03-24: BugFix for audio-video synchro:
+            * Update the transition duration according to the actual video transition beginning time.
+            * This avoids desynchronization when doing the audio transition. */
+           // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            iTmp = ((M4OSA_Int32)pC->ewc.dInputVidCts)\
+             - (pC->pC1->iEndTime - TD + pC->pC1->iVoffset);
+            if (iTmp < (M4OSA_Int32)pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration)
+            /**< Test in case of a very short transition */
+            {
+                pC->pTransitionList[pC->
+                    uiCurrentClip].uiTransitionDuration -= iTmp;
+
+                /**
+                * Don't forget to also correct the total duration used for the progress bar
+                * (it was computed with the original transition duration). */
+                pC->ewc.iOutputDuration += iTmp;
+            }
+            /**< No "else" here because it's hard to predict the effect of a 0-duration transition...*/
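+            /* Example (illustrative numbers): if the first encoded transition frame
+             falls 40 ms after the nominal transition start (iTmp=40), the remaining
+             transition duration is shortened by 40 ms and the total output duration
+             used for the progress bar is lengthened by the same 40 ms. */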
+        }
+
+        /**
+        * Check effects for clip2 */
+        M4VSS3GPP_intCheckVideoEffects(pC, 2);
+    }
+    else
+    {
+        /**
+        * We are not in a transition */
+        pC->bTransitionEffect = M4OSA_FALSE;
+
+        /* If there is an effect we go to decode/encode mode */
+        if ((pC->nbActiveEffects > 0) ||(pC->nbActiveEffects1 > 0))
+        {
+            pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE;
+        }
+        /* We do a begin cut, except if already done (time is not progressing because we want
+        to catch all P-frames after the cut) */
+        else if( M4OSA_TRUE == pC->bClip1AtBeginCut )
+        {
+            if( ( M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate)
+                || (M4VSS3GPP_kEditVideoState_AFTER_CUT == previousVstate) )
+                pC->Vstate = M4VSS3GPP_kEditVideoState_AFTER_CUT;
+            else
+                pC->Vstate = M4VSS3GPP_kEditVideoState_BEGIN_CUT;
+        }
+        /* Else we are in default copy/paste mode */
+        else
+        {
+            if( ( M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate)
+                || (M4VSS3GPP_kEditVideoState_AFTER_CUT == previousVstate) )
+            {
+                pC->Vstate = M4VSS3GPP_kEditVideoState_AFTER_CUT;
+            }
+            else if( pC->bIsMMS == M4OSA_TRUE )
+            {
+                M4OSA_UInt32 currentBitrate;
+                M4OSA_ERR err = M4NO_ERROR;
+
+                /* Do we need to re-encode the video to downgrade the bitrate or not? */
+                /* Let's compute the current bitrate of the edited clip */
+                err = pC->pC1->ShellAPI.m_pReader->m_pFctGetOption(
+                    pC->pC1->pReaderContext,
+                    M4READER_kOptionID_Bitrate, &currentBitrate);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intCheckVideoMode:\
+                        Error when getting next bitrate of edited clip: 0x%x",
+                        err);
+                    return err;
+                }
+
+                /* Remove audio bitrate */
+                currentBitrate -= 12200;
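+                /* Example (illustrative numbers): a 60200 bit/s clip minus the 12200 bit/s
+                 AMR audio leaves 48000 bit/s of video; with a uiMMSVideoBitrate of 64000
+                 the clip is copied as-is, whereas a 32000 target forces re-encoding. */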
+
+                /* Test if we go into copy/paste mode or into decode/encode mode */
+                if( currentBitrate > pC->uiMMSVideoBitrate )
+                {
+                    pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE;
+                }
+                else
+                {
+                    pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+                }
+            }
+            else
+            {
+                pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+            }
+        }
+    }
+
+    /**
+    * Check if we create an encoder */
+    if( ( ( M4VSS3GPP_kEditVideoState_READ_WRITE == previousVstate)
+        || (M4VSS3GPP_kEditVideoState_AFTER_CUT
+        == previousVstate)) /**< read mode */
+        && (( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == pC->Vstate)
+        || (M4VSS3GPP_kEditVideoState_BEGIN_CUT == pC->Vstate)
+        || (M4VSS3GPP_kEditVideoState_TRANSITION
+        == pC->Vstate)) /**< encode mode */
+        && pC->bIsMMS == M4OSA_FALSE )
+    {
+        /**
+        * Create the encoder */
+        err = M4VSS3GPP_intCreateVideoEncoder(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intCreateVideoEncoder returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+    else if( pC->bIsMMS == M4OSA_TRUE && pC->ewc.pEncContext == M4OSA_NULL )
+    {
+        /**
+        * Create the encoder */
+        err = M4VSS3GPP_intCreateVideoEncoder(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intCreateVideoEncoder returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * When we go from filtering to read/write, we must act like a begin cut,
+    * because the last filtered image may be different from the original image. */
+    else if( ( ( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == previousVstate)
+        || (M4VSS3GPP_kEditVideoState_TRANSITION
+        == previousVstate)) /**< encode mode */
+        && (M4VSS3GPP_kEditVideoState_READ_WRITE == pC->Vstate) /**< read mode */
+        )
+    {
+        pC->Vstate = M4VSS3GPP_kEditVideoState_BEGIN_CUT;
+    }
+
+    /**
+    * Check if we destroy an encoder */
+    else if( ( ( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == previousVstate)
+        || (M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate)
+        || (M4VSS3GPP_kEditVideoState_TRANSITION
+        == previousVstate)) /**< encode mode */
+        && (( M4VSS3GPP_kEditVideoState_READ_WRITE == pC->Vstate)
+        || (M4VSS3GPP_kEditVideoState_AFTER_CUT
+        == pC->Vstate)) /**< read mode */
+        && pC->bIsMMS == M4OSA_FALSE )
+    {
+        /**
+        * Destroy the previously created encoder */
+        err = M4VSS3GPP_intDestroyVideoEncoder(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intDestroyVideoEncoder returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intCheckVideoMode: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intStartAU()
+ * @brief    StartAU writer-like interface used for the VSS 3GPP only
+ * @note
+ * @param    pContext: (IN) It is the VSS 3GPP context in our case
+ * @param    streamID: (IN) Id of the stream to which the Access Unit is related.
+ * @param    pAU:      (IN/OUT) Access Unit to be prepared.
+ * @return    M4NO_ERROR: there is no error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intStartAU( M4WRITER_Context pContext,
+                               M4SYS_StreamID streamID, M4SYS_AccessUnit *pAU )
+{
+    M4OSA_ERR err;
+    M4OSA_UInt32 uiMaxAuSize;
+
+    /**
+    * Given context is actually the VSS3GPP context */
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+
+    /**
+    * Get the output AU to write into */
+    err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, pAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intStartAU: pWriterDataFcts->pStartAU(Video) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intStartAU: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intProcessAU()
+ * @brief    ProcessAU writer-like interface used for the VSS 3GPP only
+ * @note
+ * @param    pContext: (IN) It is the VSS 3GPP context in our case
+ * @param    streamID: (IN) Id of the stream to which the Access Unit is related.
+ * @param    pAU:      (IN/OUT) Access Unit to be written
+ * @return    M4NO_ERROR: there is no error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intProcessAU( M4WRITER_Context pContext,
+                                 M4SYS_StreamID streamID, M4SYS_AccessUnit *pAU )
+{
+    M4OSA_ERR err;
+
+    /**
+    * Given context is actually the VSS3GPP context */
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+
+    /**
+    * Fix the encoded AU time */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    pC->ewc.dOutputVidCts = pAU->CTS;
+    /**
+    * Update time info for the Counter Time System to be equal to the bit-stream time */
+    M4VSS3GPP_intUpdateTimeInfo(pC, pAU);
+
+    /**
+    * Write the AU */
+    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, pAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intProcessAU: pWriterDataFcts->pProcessAU(Video) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intProcessAU: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intVPP()
+ * @brief    We implement our own VideoPreProcessing function
+ * @note    It is called by the video encoder
+ * @param    pContext    (IN) VPP context, which actually is the VSS 3GPP context in our case
+ * @param    pPlaneIn    (IN)
+ * @param    pPlaneOut    (IN/OUT) Pointer to an array of 3 planes that will contain the output
+ *                                  YUV420 image
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
+                           M4VIFI_ImagePlane *pPlaneOut )
+{
+    M4OSA_ERR err;
+    M4_MediaTime t;
+    M4VIFI_ImagePlane *pTmp = M4OSA_NULL;
+    M4VIFI_ImagePlane pTemp1[3],pTemp2[3];
+    M4OSA_UInt32  i =0;
+    /**
+    * VPP context is actually the VSS3GPP context */
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    pTemp1[0].pac_data = pTemp2[0].pac_data = M4OSA_NULL;
+    /**
+    * Reset VPP error remembered in context */
+    pC->ewc.VppError = M4NO_ERROR;
+
+    /**
+    * At the end of the editing, we may be called when no more clip is loaded.
+    * (because to close the encoder properly it must be stepped once or twice...) */
+    if( M4OSA_NULL == pC->pC1 )
+    {
+        /**
+        * We must fill the input of the encoder with a dummy image, because
+        * encoding noise leads to a huge video AU, and thus a writer buffer overflow. */
+        M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[0].pac_data,
+            pPlaneOut[0].u_stride * pPlaneOut[0].u_height, 0);
+        M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[1].pac_data,
+            pPlaneOut[1].u_stride * pPlaneOut[1].u_height, 0);
+        M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[2].pac_data,
+            pPlaneOut[2].u_stride * pPlaneOut[2].u_height, 0);
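+        /* Example (illustrative numbers): for a QCIF output (176x144) this clears
+         176*144 = 25344 bytes of luma and 88*72 = 6336 bytes for each chroma plane,
+         assuming the strides equal the plane widths. */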
+
+        M4OSA_TRACE3_0("M4VSS3GPP_intVPP: returning M4NO_ERROR (abort)");
+        return M4NO_ERROR;
+    }
+
+    /**
+    **************** Transition case ****************/
+    if( M4OSA_TRUE == pC->bTransitionEffect )
+    {
+        if (M4OSA_NULL == pTemp1[0].pac_data)
+        {
+            err = M4VSS3GPP_intAllocateYUV420(pTemp1, pC->ewc.uiVideoWidth,
+                                              pC->ewc.uiVideoHeight);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(1) returns 0x%x, \
+                               returning M4NO_ERROR", err);
+                pC->ewc.VppError = err;
+                return M4NO_ERROR; /**< Return no error to the encoder core
+                                   (else it may leak in some situations...) */
+            }
+        }
+        if (M4OSA_NULL == pTemp2[0].pac_data)
+        {
+            err = M4VSS3GPP_intAllocateYUV420(pTemp2, pC->ewc.uiVideoWidth,
+                                              pC->ewc.uiVideoHeight);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(2) returns 0x%x, \
+                               returning M4NO_ERROR", err);
+                pC->ewc.VppError = err;
+                return M4NO_ERROR; /**< Return no error to the encoder core
+                                  (else it may leak in some situations...) */
+            }
+        }
+        /**
+        * We need two intermediate planes */
+        if( M4OSA_NULL == pC->yuv1[0].pac_data )
+        {
+            err = M4VSS3GPP_intAllocateYUV420(pC->yuv1, pC->ewc.uiVideoWidth,
+                pC->ewc.uiVideoHeight);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(3) returns 0x%x,\
+                    returning M4NO_ERROR",
+                    err);
+                pC->ewc.VppError = err;
+                return
+                    M4NO_ERROR; /**< Return no error to the encoder core
+                                (else it may leak in some situations...) */
+            }
+        }
+
+        if( M4OSA_NULL == pC->yuv2[0].pac_data )
+        {
+            err = M4VSS3GPP_intAllocateYUV420(pC->yuv2, pC->ewc.uiVideoWidth,
+                pC->ewc.uiVideoHeight);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(4) returns 0x%x,\
+                    returning M4NO_ERROR",
+                    err);
+                pC->ewc.VppError = err;
+                return
+                    M4NO_ERROR; /**< Return no error to the encoder core
+                                (else it may leak in some situations...) */
+            }
+        }
+
+        /**
+        * Allocate new temporary plane if needed */
+        if( M4OSA_NULL == pC->yuv3[0].pac_data )
+        {
+            err = M4VSS3GPP_intAllocateYUV420(pC->yuv3, pC->ewc.uiVideoWidth,
+                pC->ewc.uiVideoHeight);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(3) returns 0x%x,\
+                    returning M4NO_ERROR",
+                    err);
+                pC->ewc.VppError = err;
+                return
+                    M4NO_ERROR; /**< Return no error to the encoder core
+                                (else it may leak in some situations...) */
+            }
+        }
+
+        /**
+        * Compute the time in the clip1 base: t = to - Offset */
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+        t = pC->ewc.dInputVidCts - pC->pC1->iVoffset;
+
+        /**
+        * Render Clip1 */
+        if( pC->pC1->isRenderDup == M4OSA_FALSE )
+        {
+            if(pC->nbActiveEffects > 0)
+            {
+                err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC1->pViDecCtxt,
+                                                                      &t, pTemp1,
+                                                                      M4OSA_TRUE);
+                if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C1) returns 0x%x, \
+                                    returning M4NO_ERROR", err);
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR; /**< Return no error to the encoder core
+                                       (else it may leak in some situations...) */
+                }
+                pC->bIssecondClip = M4OSA_FALSE;
+                err = M4VSS3GPP_intApplyVideoEffect(pC, pTemp1 ,pC->yuv1 );
+                if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intApplyVideoEffect(1) returns 0x%x, \
+                                    returning M4NO_ERROR", err);
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR; /**< Return no error to the encoder core
+                                       (else it may leak in some situations...) */
+                }
+                pC->pC1->lastDecodedPlane = pTemp1;
+            }
+            else
+            {
+                err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC1->pViDecCtxt,
+                                                                      &t, pC->yuv1,
+                                                                      M4OSA_TRUE);
+                if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C1) returns 0x%x, \
+                                    returning M4NO_ERROR", err);
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR; /**< Return no error to the encoder core
+                                      (else it may leak in some situations...) */
+                }
+                pC->pC1->lastDecodedPlane = pC->yuv1;
+            }
+            pC->pC1->iVideoRenderCts = (M4OSA_Int32)t;
+        }
+        else
+        {
+            /* Copy last decoded plane to output plane */
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[0].pac_data,
+                (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[0].pac_data,
+                (pTmp[0].u_height * pTmp[0].u_width));
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[1].pac_data,
+                (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[1].pac_data,
+                (pTmp[1].u_height * pTmp[1].u_width));
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[2].pac_data,
+                (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[2].pac_data,
+                (pTmp[2].u_height * pTmp[2].u_width));
+            pC->pC1->lastDecodedPlane = pTmp;
+        }
+
+        /**
+        * Compute the time in the clip2 base: t = to - Offset */
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+        t = pC->ewc.dInputVidCts - pC->pC2->iVoffset;
+        /**
+        * Render Clip2 */
+        if( pC->pC2->isRenderDup == M4OSA_FALSE )
+        {
+            if(pC->nbActiveEffects1 > 0)
+            {
+                err = pC->pC2->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC2->pViDecCtxt,
+                                                                      &t, pTemp2,
+                                                                      M4OSA_TRUE);
+                if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C2) returns 0x%x, \
+                                   returning M4NO_ERROR", err);
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR; /**< Return no error to the encoder core
+                                       (else it may leak in some situations...) */
+                }
+
+                pC->bIssecondClip = M4OSA_TRUE;
+                err = M4VSS3GPP_intApplyVideoEffect(pC, pTemp2, pC->yuv2);
+                if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intApplyVideoEffect(2) returns 0x%x, \
+                                    returning M4NO_ERROR", err);
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR; /**< Return no error to the encoder core
+                                       (else it may leak in some situations...) */
+                }
+                pC->pC2->lastDecodedPlane = pTemp2;
+            }
+            else
+            {
+                err = pC->pC2->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC2->pViDecCtxt,
+                                                                      &t, pC->yuv2,
+                                                                      M4OSA_TRUE);
+                if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C2) returns 0x%x, \
+                                    returning M4NO_ERROR", err);
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR; /**< Return no error to the encoder core
+                                       (else it may leak in some situations...) */
+                }
+                pC->pC2->lastDecodedPlane = pC->yuv2;
+            }
+            pC->pC2->iVideoRenderCts = (M4OSA_Int32)t;
+        }
+        else
+        {
+            /* Copy last decoded plane to output plane */
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[0].pac_data,
+                (M4OSA_MemAddr8)pC->pC2->lastDecodedPlane[0].pac_data,
+                (pTmp[0].u_height * pTmp[0].u_width));
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[1].pac_data,
+                (M4OSA_MemAddr8)pC->pC2->lastDecodedPlane[1].pac_data,
+                (pTmp[1].u_height * pTmp[1].u_width));
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[2].pac_data,
+                (M4OSA_MemAddr8)pC->pC2->lastDecodedPlane[2].pac_data,
+                (pTmp[2].u_height * pTmp[2].u_width));
+            pC->pC2->lastDecodedPlane = pTmp;
+        }
+
+
+        pTmp = pPlaneOut;
+        err = M4VSS3GPP_intVideoTransition(pC, pTmp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intVPP: M4VSS3GPP_intVideoTransition returns 0x%x,\
+                returning M4NO_ERROR",
+                err);
+            pC->ewc.VppError = err;
+            return  M4NO_ERROR; /**< Return no error to the encoder core
+                                (else it may leak in some situations...) */
+        }
+        for (i=0; i < 3; i++)
+        {
+            if (pTemp2[i].pac_data != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pTemp2[i].pac_data);
+                pTemp2[i].pac_data = M4OSA_NULL;
+            }
+
+            if (pTemp1[i].pac_data != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pTemp1[i].pac_data);
+                pTemp1[i].pac_data = M4OSA_NULL;
+            }
+        }
+    }
+    /**
+    **************** No Transition case ****************/
+    else
+    {
+        /**
+        * Check if there is a filter */
+        if( pC->nbActiveEffects > 0 )
+        {
+            /**
+            * If we do modify the image, we need an intermediate image plane */
+            if( M4OSA_NULL == pC->yuv1[0].pac_data )
+            {
+                err =
+                    M4VSS3GPP_intAllocateYUV420(pC->yuv1, pC->ewc.uiVideoWidth,
+                    pC->ewc.uiVideoHeight);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420 returns 0x%x,\
+                        returning M4NO_ERROR",
+                        err);
+                    pC->ewc.VppError = err;
+                    return
+                        M4NO_ERROR; /**< Return no error to the encoder core
+                                    (else it may leak in some situations...) */
+                }
+            }
+            /**
+            * The image is rendered in the intermediate image plane */
+            pTmp = pC->yuv1;
+        }
+        else
+        {
+            /**
+            * No filter, the image is directly rendered in pPlaneOut */
+            pTmp = pPlaneOut;
+        }
+
+        /**
+        * Compute the time in the clip base: t = to - Offset */
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+        t = pC->ewc.dInputVidCts - pC->pC1->iVoffset;
+
+        if( pC->pC1->isRenderDup == M4OSA_FALSE )
+        {
+            err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(
+                pC->pC1->pViDecCtxt, &t, pTmp, M4OSA_TRUE);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender returns 0x%x,\
+                    returning M4NO_ERROR",
+                    err);
+                pC->ewc.VppError = err;
+                return
+                    M4NO_ERROR; /**< Return no error to the encoder core
+                                (else it may leak in some situations...) */
+            }
+            pC->pC1->lastDecodedPlane = pTmp;
+            pC->pC1->iVideoRenderCts = (M4OSA_Int32)t;
+        }
+        else
+        {
+            /* Copy last decoded plane to output plane */
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[0].pac_data,
+                (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[0].pac_data,
+                (pTmp[0].u_height * pTmp[0].u_width));
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[1].pac_data,
+                (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[1].pac_data,
+                (pTmp[1].u_height * pTmp[1].u_width));
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[2].pac_data,
+                (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[2].pac_data,
+                (pTmp[2].u_height * pTmp[2].u_width));
+            pC->pC1->lastDecodedPlane = pTmp;
+        }
+
+        M4OSA_TRACE3_1("M4VSS3GPP_intVPP: Rendered at CTS %.3f", t);
+
+        /**
+        * Apply the clip1 effect */
+        //        if (pC->iClip1ActiveEffect >= 0)
+        if( pC->nbActiveEffects > 0 )
+        {
+            err = M4VSS3GPP_intApplyVideoEffect(pC,/*1,*/ pC->yuv1, pPlaneOut);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVPP: M4VSS3GPP_intApplyVideoEffect(1) returns 0x%x,\
+                    returning M4NO_ERROR",
+                    err);
+                pC->ewc.VppError = err;
+                return
+                    M4NO_ERROR; /**< Return no error to the encoder core
+                                (else it may leak in some situations...) */
+            }
+        }
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intVPP: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intApplyVideoEffect()
+ * @brief    Apply the currently active video effect(s) from pPlaneIn to pPlaneOut
+ * @param    pC           (IN/OUT) Internal edit context
+ * @param    pPlaneIn     (IN)     Input raw YUV420 image
+ * @param    pPlaneOut    (IN/OUT) Output raw YUV420 image
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intApplyVideoEffect( M4VSS3GPP_InternalEditContext *pC,
+                               M4VIFI_ImagePlane *pPlaneIn,
+                               M4VIFI_ImagePlane *pPlaneOut )
+{
+    M4OSA_ERR err;
+
+    M4VSS3GPP_ClipContext *pClip;
+    M4VSS3GPP_EffectSettings *pFx;
+    M4VFL_CurtainParam curtainParams;
+    M4VSS3GPP_ExternalProgress extProgress;
+
+    M4OSA_Double VideoEffectTime;
+    M4OSA_Double PercentageDone;
+    M4OSA_Int32 tmp;
+
+    M4VIFI_ImagePlane *pPlaneTempIn;
+    M4VIFI_ImagePlane *pPlaneTempOut;
+    M4OSA_UInt8 i;
+    M4OSA_UInt8 NumActiveEffects =0;
+
+
+    pClip = pC->pC1;
+    if (pC->bIssecondClip == M4OSA_TRUE)
+    {
+        NumActiveEffects = pC->nbActiveEffects1;
+    }
+    else
+    {
+        NumActiveEffects = pC->nbActiveEffects;
+    }
+
+    /**
+    * Allocate the temporary plane if needed (only when more than one effect is chained) */
+    if (M4OSA_NULL == pC->yuv4[0].pac_data && NumActiveEffects  > 1)
+    {
+        err = M4VSS3GPP_intAllocateYUV420(pC->yuv4, pC->ewc.uiVideoWidth,
+            pC->ewc.uiVideoHeight);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intApplyVideoEffect: M4VSS3GPP_intAllocateYUV420(4) returns 0x%x,\
+                returning M4NO_ERROR",
+                err);
+            pC->ewc.VppError = err;
+            return
+                M4NO_ERROR; /**< Return no error to the encoder core
+                            (else it may leak in some situations...) */
+        }
+    }
+
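+    /**
+    * Ping-pong between the intermediate plane (yuv4) and pPlaneOut so that, whatever the
+    * number of chained effects, the last effect always writes into pPlaneOut: with an even
+    * count the chain starts writing into yuv4, with an odd count it starts in pPlaneOut. */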
+    if (NumActiveEffects  % 2 == 0)
+    {
+        pPlaneTempIn = pPlaneIn;
+        pPlaneTempOut = pC->yuv4;
+    }
+    else
+    {
+        pPlaneTempIn = pPlaneIn;
+        pPlaneTempOut = pPlaneOut;
+    }
+
+    for (i=0; i<NumActiveEffects; i++)
+    {
+        if (pC->bIssecondClip == M4OSA_TRUE)
+        {
+
+
+            pFx = &(pC->pEffectsList[pC->pActiveEffectsList1[i]]);
+            /* Compute how far from the beginning of the effect we are, in clip-base time. */
+            // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) +
+                              pC->pTransitionList[pC->uiCurrentClip].
+                              uiTransitionDuration - pFx->uiStartTime;
+        }
+        else
+        {
+            pFx = &(pC->pEffectsList[pC->pActiveEffectsList[i]]);
+            /* Compute how far from the beginning of the effect we are, in clip-base time. */
+            // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pFx->uiStartTime;
+        }
+
+
+
+        /* To calculate the %, subtract the timeIncrement because the effect should finish on the last frame */
+        /* which is presented from CTS = eof-timeIncrement till CTS = eof */
+        PercentageDone = VideoEffectTime
+            / ((M4OSA_Float)pFx->uiDuration/*- pC->dOutputFrameDuration*/);
+
+        if( PercentageDone < 0.0 )
+            PercentageDone = 0.0;
+
+        if( PercentageDone > 1.0 )
+            PercentageDone = 1.0;
+
+        switch( pFx->VideoEffectType )
+        {
+            case M4VSS3GPP_kVideoEffectType_FadeFromBlack:
+                /**
+                * Compute where we are in the effect (scale is 0->1024). */
+                tmp = (M4OSA_Int32)(PercentageDone * 1024);
+
+                /**
+                * Apply the darkening effect */
+                err =
+                    M4VFL_modifyLumaWithScale((M4ViComImagePlane *)pPlaneTempIn,
+                    (M4ViComImagePlane *)pPlaneTempOut, tmp, M4OSA_NULL);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intApplyVideoEffect:\
+                        M4VFL_modifyLumaWithScale returns error 0x%x,\
+                        returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR",
+                        err);
+                    return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
+                }
+                break;
+
+            case M4VSS3GPP_kVideoEffectType_CurtainOpening:
+                /**
+                * Compute where we are in the effect (scale is 0->height).
+                * It is done with floats because the scaled value can become very large
+                with long clips. */
+                curtainParams.nb_black_lines =
+                    (M4OSA_UInt16)(( 1.0 - PercentageDone)
+                    * pPlaneTempIn[0].u_height);
+                /**
+                * The curtain hangs from the top of the frame */
+                curtainParams.top_is_black = 1;
+
+                /**
+                * Apply the curtain effect */
+                err = M4VFL_applyCurtain((M4ViComImagePlane *)pPlaneTempIn,
+                    (M4ViComImagePlane *)pPlaneTempOut, &curtainParams,
+                    M4OSA_NULL);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intApplyVideoEffect: M4VFL_applyCurtain returns error 0x%x,\
+                        returning M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR",
+                        err);
+                    return M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR;
+                }
+                break;
+
+            case M4VSS3GPP_kVideoEffectType_FadeToBlack:
+                /**
+                * Compute where we are in the effect (scale is 0->1024) */
+                tmp = (M4OSA_Int32)(( 1.0 - PercentageDone) * 1024);
+
+                /**
+                * Apply the darkening effect */
+                err =
+                    M4VFL_modifyLumaWithScale((M4ViComImagePlane *)pPlaneTempIn,
+                    (M4ViComImagePlane *)pPlaneTempOut, tmp, M4OSA_NULL);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intApplyVideoEffect:\
+                        M4VFL_modifyLumaWithScale returns error 0x%x,\
+                        returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR",
+                        err);
+                    return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
+                }
+                break;
+
+            case M4VSS3GPP_kVideoEffectType_CurtainClosing:
+                /**
+                * Compute where we are in the effect (scale is 0->height) */
+                curtainParams.nb_black_lines =
+                    (M4OSA_UInt16)(PercentageDone * pPlaneTempIn[0].u_height);
+
+                /**
+                * The curtain hangs from the top of the frame */
+                curtainParams.top_is_black = 1;
+
+                /**
+                * Apply the curtain effect */
+                err = M4VFL_applyCurtain((M4ViComImagePlane *)pPlaneTempIn,
+                    (M4ViComImagePlane *)pPlaneTempOut, &curtainParams,
+                    M4OSA_NULL);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intApplyVideoEffect: M4VFL_applyCurtain returns error 0x%x,\
+                        returning M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR",
+                        err);
+                    return M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR;
+                }
+                break;
+
+            default:
+                if( pFx->VideoEffectType
+                    >= M4VSS3GPP_kVideoEffectType_External )
+                {
+                    M4OSA_UInt32 Cts = 0;
+                    M4OSA_Int32 nextEffectTime;
+
+                    /**
+                    * Compute where we are in the effect (scale is 0->1000) */
+                    tmp = (M4OSA_Int32)(PercentageDone * 1000);
+
+                    /**
+                    * Set the progress info provided to the external function */
+                    extProgress.uiProgress = (M4OSA_UInt32)tmp;
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts;
+                    extProgress.uiClipTime = extProgress.uiOutputTime - pClip->iVoffset;
+                    extProgress.bIsLast = M4OSA_FALSE;
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    nextEffectTime = (M4OSA_Int32)(pC->ewc.dInputVidCts \
+                        + pC->dOutputFrameDuration);
+                    if(nextEffectTime >= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration))
+                    {
+                        extProgress.bIsLast = M4OSA_TRUE;
+                    }
+
+                    err = pFx->ExtVideoEffectFct(pFx->pExtVideoEffectFctCtxt,
+                        pPlaneTempIn, pPlaneTempOut, &extProgress,
+                        pFx->VideoEffectType
+                        - M4VSS3GPP_kVideoEffectType_External);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intApplyVideoEffect: \
+                            External video effect function returns 0x%x!",
+                            err);
+                        return err;
+                    }
+                    break;
+                }
+                else
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intApplyVideoEffect: unknown effect type (0x%x),\
+                        returning M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE",
+                        pFx->VideoEffectType);
+                    return M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE;
+                }
+        }
+        /**
+        * Update pPlaneTempIn and pPlaneTempOut for the next effect in the chain */
+        if (((i % 2 == 0) && (NumActiveEffects  % 2 == 0))
+            || ((i % 2 != 0) && (NumActiveEffects % 2 != 0)))
+        {
+            pPlaneTempIn = pC->yuv4;
+            pPlaneTempOut = pPlaneOut;
+        }
+        else
+        {
+            pPlaneTempIn = pPlaneOut;
+            pPlaneTempOut = pC->yuv4;
+        }
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intApplyVideoEffect: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intVideoTransition()
+ * @brief    Apply video transition effect pC1+pC2->pPlaneOut
+ * @param   pC                (IN/OUT) Internal edit context
+ * @param    pPlaneOut        (IN/OUT) Output raw YUV420 image
+ * @return    M4NO_ERROR:                        No error
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intVideoTransition( M4VSS3GPP_InternalEditContext *pC,
+                             M4VIFI_ImagePlane *pPlaneOut )
+{
+    M4OSA_ERR err;
+    M4OSA_Int32 iProgress;
+    M4VSS3GPP_ExternalProgress extProgress;
+    M4VIFI_ImagePlane *pPlane;
+    M4OSA_Int32 i;
+    const M4OSA_Int32 iDur = (M4OSA_Int32)pC->
+        pTransitionList[pC->uiCurrentClip].uiTransitionDuration;
+
+    /**
+    * Compute how far from the end cut we are, in clip-base time.
+    * It is done with integers because the offset and begin cut have been rounded already. */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    iProgress = (M4OSA_Int32)((M4OSA_Double)pC->pC1->iEndTime) - pC->ewc.dInputVidCts +
+        ((M4OSA_Double)pC->pC1->iVoffset);
+    /**
+    * We must remove the duration of one frame, else we would almost never reach the end
+    * (it is essentially a fencepost issue). */
+    iProgress -= (M4OSA_Int32)pC->dOutputFrameDuration;
+
+    if( iProgress < 0 ) /**< Sanity checks */
+    {
+        iProgress = 0;
+    }
+
+    /**
+    * Compute where we are in the transition, on a base 1000 */
+    iProgress = ( ( iDur - iProgress) * 1000) / iDur;
+
+    /**
+    * Sanity checks */
+    if( iProgress < 0 )
+    {
+        iProgress = 0;
+    }
+    else if( iProgress > 1000 )
+    {
+        iProgress = 1000;
+    }
+
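+    /**
+    * Remap the linear progress (0..1000) according to the transition behaviour:
+    * quadratic curve for SpeedUp, square-root curve for SpeedDown, and piecewise
+    * combinations of the two for SlowMiddle and FastMiddle */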
+    switch( pC->pTransitionList[pC->uiCurrentClip].TransitionBehaviour )
+    {
+        case M4VSS3GPP_TransitionBehaviour_SpeedUp:
+            iProgress = ( iProgress * iProgress) / 1000;
+            break;
+
+        case M4VSS3GPP_TransitionBehaviour_Linear:
+            /*do nothing*/
+            break;
+
+        case M4VSS3GPP_TransitionBehaviour_SpeedDown:
+            iProgress = (M4OSA_Int32)(sqrt(iProgress * 1000));
+            break;
+
+        case M4VSS3GPP_TransitionBehaviour_SlowMiddle:
+            if( iProgress < 500 )
+            {
+                iProgress = (M4OSA_Int32)(sqrt(iProgress * 500));
+            }
+            else
+            {
+                iProgress =
+                    (M4OSA_Int32)(( ( ( iProgress - 500) * (iProgress - 500))
+                    / 500) + 500);
+            }
+            break;
+
+        case M4VSS3GPP_TransitionBehaviour_FastMiddle:
+            if( iProgress < 500 )
+            {
+                iProgress = (M4OSA_Int32)(( iProgress * iProgress) / 500);
+            }
+            else
+            {
+                iProgress = (M4OSA_Int32)(sqrt(( iProgress - 500) * 500) + 500);
+            }
+            break;
+
+        default:
+            /*do nothing*/
+            break;
+    }
+
+    switch( pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType )
+    {
+        case M4VSS3GPP_kVideoTransitionType_CrossFade:
+            /**
+            * Apply the transition effect */
+            err = M4VIFI_ImageBlendingonYUV420(M4OSA_NULL,
+                (M4ViComImagePlane *)pC->yuv1,
+                (M4ViComImagePlane *)pC->yuv2,
+                (M4ViComImagePlane *)pPlaneOut, iProgress);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVideoTransition:\
+                     M4VIFI_ImageBlendingonYUV420 returns error 0x%x,\
+                    returning M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR",
+                    err);
+                return M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR;
+            }
+            break;
+
+        case M4VSS3GPP_kVideoTransitionType_None:
+            /**
+            * This is a simple, non-optimized implementation of the None transition:
+            * we just copy the relevant input YUV frame */
+            if( iProgress < 500 ) /**< first half of transition */
+            {
+                pPlane = pC->yuv1;
+            }
+            else /**< second half of transition */
+            {
+                pPlane = pC->yuv2;
+            }
+            /**
+            * Copy the input YUV frames */
+            i = 3;
+
+            while( i-- > 0 )
+            {
+                M4OSA_memcpy((M4OSA_MemAddr8)pPlaneOut[i].pac_data,
+                 (M4OSA_MemAddr8)pPlane[i].pac_data,
+                    pPlaneOut[i].u_stride * pPlaneOut[i].u_height);
+            }
+            break;
+
+        default:
+            if( pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType
+                >= M4VSS3GPP_kVideoTransitionType_External )
+            {
+                /**
+                * Set the progress info provided to the external function */
+                extProgress.uiProgress = (M4OSA_UInt32)iProgress;
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts;
+                extProgress.uiClipTime = extProgress.uiOutputTime - pC->pC1->iVoffset;
+
+                err = pC->pTransitionList[pC->
+                    uiCurrentClip].ExtVideoTransitionFct(
+                    pC->pTransitionList[pC->
+                    uiCurrentClip].pExtVideoTransitionFctCtxt,
+                    pC->yuv1, pC->yuv2, pPlaneOut, &extProgress,
+                    pC->pTransitionList[pC->
+                    uiCurrentClip].VideoTransitionType
+                    - M4VSS3GPP_kVideoTransitionType_External);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intVideoTransition:\
+                        External video transition function returns 0x%x!",
+                        err);
+                    return err;
+                }
+                break;
+            }
+            else
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVideoTransition: unknown transition type (0x%x),\
+                    returning M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE",
+                    pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType);
+                return M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE;
+            }
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intVideoTransition: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intUpdateTimeInfo()
+ * @brief    Update the bit stream time info from the Counter Time System, so that it
+ *          stays consistent for players that rely on the bit stream time info
+ * @note    H263 uses an absolute time counter, unlike MPEG4 which uses Groups Of Vops
+ *          (GOV, see the standard)
+ * @param   pC      (IN/OUT) Internal edit context; holds the offset between system and
+ *                  video time and the GOV state of the current clip (MPEG4 only)
+ * @param   pAU     (IN/OUT) Video access unit whose time info is updated in place
+ * @return    nothing
+ ******************************************************************************
+ */
+static M4OSA_Void
+M4VSS3GPP_intUpdateTimeInfo( M4VSS3GPP_InternalEditContext *pC,
+                            M4SYS_AccessUnit *pAU )
+{
+    M4OSA_UInt8 uiTmp;
+    M4OSA_UInt32 uiCts = 0;
+    M4OSA_MemAddr8 pTmp;
+    M4OSA_UInt32 uiAdd;
+    M4OSA_UInt32 uiCurrGov;
+    M4OSA_Int8 iDiff;
+
+    M4VSS3GPP_ClipContext *pClipCtxt = pC->pC1;
+    M4OSA_Int32 *pOffset = &(pC->ewc.iMpeg4GovOffset);
+
+    /**
+    * Set H263 time counter from system time */
+    if( M4SYS_kH263 == pAU->stream->streamType )
+    {
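+        /* Convert the CTS (in ms) to a 29.97 fps frame index, rounded to the nearest
+           frame, then wrap it within the H263 temporal reference modulo */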
+        uiTmp = (M4OSA_UInt8)((M4OSA_UInt32)( ( pAU->CTS * 30) / 1001 + 0.5)
+            % M4VSS3GPP_EDIT_H263_MODULO_TIME);
+        M4VSS3GPP_intSetH263TimeCounter((M4OSA_MemAddr8)(pAU->dataAddress),
+            uiTmp);
+    }
+    /*
+    * Set the MPEG4 GOV time counter from the video and system time */
+    else if( M4SYS_kMPEG_4 == pAU->stream->streamType )
+    {
+        /*
+        * If this is a GOV header.
+        * Beware of little/big endian! */
+        /* correction: read four 8-bit blocks instead of one 32-bit block */
+        M4OSA_UInt8 *temp8 = (M4OSA_UInt8 *)(pAU->dataAddress);
+        M4OSA_UInt32 temp32 = 0;
+
+        temp32 = ( 0x000000ff & (M4OSA_UInt32)(*temp8))
+            + (0x0000ff00 & ((M4OSA_UInt32)(*(temp8 + 1))) << 8)
+            + (0x00ff0000 & ((M4OSA_UInt32)(*(temp8 + 2))) << 16)
+            + (0xff000000 & ((M4OSA_UInt32)(*(temp8 + 3))) << 24);
+
+        M4OSA_TRACE3_2("RC: Temp32: 0x%x, dataAddress: 0x%x\n", temp32,
+            *(pAU->dataAddress));
+
+        if( M4VSS3GPP_EDIT_GOV_HEADER == temp32 )
+        {
+            pTmp =
+                (M4OSA_MemAddr8)(pAU->dataAddress
+                + 1); /**< Jump to the time code (just after the 32 bits header) */
+            uiAdd = (M4OSA_UInt32)(pAU->CTS)+( *pOffset);
+
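+            /* The GOV time is re-stamped from the AU system time plus the accumulated
+               offset: the offset is initialised on the first GOV (INIT state), and is
+               only increased afterwards when the value has to be rounded up to stay in
+               line with the GOV progression of the original stream (UPDATE state) */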
+            switch( pClipCtxt->bMpeg4GovState )
+            {
+                case M4OSA_FALSE: /*< INIT */
+                    {
+                        /* video time = ceil (system time + offset) */
+                        uiCts = ( uiAdd + 999) / 1000;
+
+                        /* offset update */
+                        ( *pOffset) += (( uiCts * 1000) - uiAdd);
+
+                        /* Save values */
+                        pClipCtxt->uiMpeg4PrevGovValueSet = uiCts;
+
+                        /* State to 'first' */
+                        pClipCtxt->bMpeg4GovState = M4OSA_TRUE;
+                    }
+                    break;
+
+                case M4OSA_TRUE: /*< UPDATE */
+                    {
+                        /* Get current Gov value */
+                        M4VSS3GPP_intGetMPEG4Gov(pTmp, &uiCurrGov);
+
+                        /* video time = floor or ceil (system time + offset) */
+                        uiCts = (uiAdd / 1000);
+                        iDiff = (M4OSA_Int8)(uiCurrGov
+                            - pClipCtxt->uiMpeg4PrevGovValueGet - uiCts
+                            + pClipCtxt->uiMpeg4PrevGovValueSet);
+
+                        /* ceiling */
+                        if( iDiff > 0 )
+                        {
+                            uiCts += (M4OSA_UInt32)(iDiff);
+
+                            /* offset update */
+                            ( *pOffset) += (( uiCts * 1000) - uiAdd);
+                        }
+
+                        /* Save values */
+                        pClipCtxt->uiMpeg4PrevGovValueGet = uiCurrGov;
+                        pClipCtxt->uiMpeg4PrevGovValueSet = uiCts;
+                    }
+                    break;
+            }
+
+            M4VSS3GPP_intSetMPEG4Gov(pTmp, uiCts);
+        }
+    }
+    return;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intCheckVideoEffects()
+ * @brief    Check which video effect must be applied at the current time
+ ******************************************************************************
+ */
+static M4OSA_Void
+M4VSS3GPP_intCheckVideoEffects( M4VSS3GPP_InternalEditContext *pC,
+                               M4OSA_UInt8 uiClipNumber )
+{
+    M4OSA_UInt8 uiClipIndex;
+    M4OSA_UInt8 uiFxIndex, i;
+    M4VSS3GPP_ClipContext *pClip;
+    M4VSS3GPP_EffectSettings *pFx;
+    M4OSA_Int32 Off, BC, EC;
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    M4OSA_Int32 t = (M4OSA_Int32)pC->ewc.dInputVidCts;
+
+    uiClipIndex = pC->uiCurrentClip;
+    pClip = pC->pC1;
+    /**
+    * Shortcuts for code readability */
+    Off = pClip->iVoffset;
+    BC = pClip->iActualVideoBeginCut;
+    EC = pClip->iEndTime;
+
+    i = 0;
+
+    for ( uiFxIndex = 0; uiFxIndex < pC->nbEffects; uiFxIndex++ )
+    {
+        /** Shortcut; scan the list in reverse order because of the priority between effects (the last effect always takes precedence) */
+        pFx = &(pC->pEffectsList[pC->nbEffects - 1 - uiFxIndex]);
+
+        if( M4VSS3GPP_kVideoEffectType_None != pFx->VideoEffectType )
+        {
+            /**
+            * Check if there is actually a video effect */
+
+            if( uiClipNumber == 1 )
+            {
+                if( ( t >= (M4OSA_Int32)(pFx->uiStartTime) )                       /**< Are we after the start time of the effect? */
+                    && ( t < (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration) ) ) /**< Are we within the effect duration? */
+                {
+                    /**
+                    * Set the active effect(s) */
+                    pC->pActiveEffectsList[i] = pC->nbEffects - 1 - uiFxIndex;
+
+                    /**
+                    * Update the counter of active effects */
+                    i++;
+
+                    /**
+                    * The third effect has the highest priority, then the second one, then the
+                    * first one. Hence, as soon as we find an active effect, we could get out
+                    * of this loop */
+                }
+            }
+            else
+            {
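+                /* For the second clip, the effect window is evaluated at the output time
+                   shifted by the transition duration, so that it matches the time base
+                   used when the effect is applied in M4VSS3GPP_intApplyVideoEffect */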
+                if( ( t + pC->pTransitionList[uiClipIndex].uiTransitionDuration
+                        >= (M4OSA_Int32)(pFx->uiStartTime) )
+                    && ( t + pC->pTransitionList[uiClipIndex].uiTransitionDuration
+                        < (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration) ) ) /**< Are we within the effect duration? */
+                {
+                    /**
+                    * Set the active effect(s) */
+                    pC->pActiveEffectsList1[i] = pC->nbEffects - 1 - uiFxIndex;
+
+                    /**
+                    * Update the counter of active effects */
+                    i++;
+
+                    /**
+                    * The third effect has the highest priority, then the second one, then the
+                    * first one. Hence, as soon as we find an active effect, we could get out
+                    * of this loop */
+                }
+            }
+
+        }
+    }
+
+    if( 1 == uiClipNumber )
+    {
+        /**
+        * Save the number of active effects */
+        pC->nbActiveEffects = i;
+    }
+    else
+    {
+        pC->nbActiveEffects1 = i;
+    }
+
+    /**
+    * Convert the absolute time to clip-related time */
+    t -= Off;
+
+    /**
+    * Check if we are on the begin cut (for clip1 only) */
+    if( ( 0 != BC) && (t == BC) && (1 == uiClipNumber) )
+    {
+        pC->bClip1AtBeginCut = M4OSA_TRUE;
+    }
+    else
+    {
+        pC->bClip1AtBeginCut = M4OSA_FALSE;
+    }
+
+    return;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder()
+ * @brief    Creates the video encoder
+ * @note
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+    M4ENCODER_AdvancedParams EncParams;
+
+    /**
+    * Simulate a writer interface with our specific function */
+    pC->ewc.OurWriterDataInterface.pProcessAU =
+        M4VSS3GPP_intProcessAU; /**< This function is VSS 3GPP specific,
+                                but it follows the writer interface */
+    pC->ewc.OurWriterDataInterface.pStartAU =
+        M4VSS3GPP_intStartAU; /**< This function is VSS 3GPP specific,
+                              but it follows the writer interface */
+    pC->ewc.OurWriterDataInterface.pWriterContext =
+        (M4WRITER_Context)
+        pC; /**< We give the internal context as writer context */
+
+    /**
+    * Get the encoder interface, if not already done */
+    if( M4OSA_NULL == pC->ShellAPI.pVideoEncoderGlobalFcts )
+    {
+        err = M4VSS3GPP_setCurrentVideoEncoder(&pC->ShellAPI,
+            pC->ewc.VideoStreamType);
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreateVideoEncoder: setCurrentEncoder returns 0x%x",
+            err);
+        M4ERR_CHECK_RETURN(err);
+    }
+
+    /**
+    * Set encoder shell parameters according to VSS settings */
+
+    /* Common parameters */
+    EncParams.InputFormat = M4ENCODER_kIYUV420;
+    EncParams.FrameWidth = pC->ewc.uiVideoWidth;
+    EncParams.FrameHeight = pC->ewc.uiVideoHeight;
+    EncParams.uiTimeScale = pC->ewc.uiVideoTimeScale;
+
+    if( pC->bIsMMS == M4OSA_FALSE )
+    {
+        /* No strict regulation in video editor */
+        /* Because of the effects and transitions we should allow more flexibility */
+        /* It also prevents dropping important frames (which would hurt scheduling and
+        produce blocking artifacts) */
+        EncParams.bInternalRegulation = M4OSA_FALSE;
+        // Variable framerate is not supported by StageFright encoders
+        EncParams.FrameRate = M4ENCODER_k30_FPS;
+    }
+    else
+    {
+        /* In case of MMS mode, we need to enable bitrate regulation to be sure */
+        /* to reach the targeted output file size */
+        EncParams.bInternalRegulation = M4OSA_TRUE;
+        EncParams.FrameRate = pC->MMSvideoFramerate;
+    }
+
+    /**
+    * Other encoder settings (defaults) */
+    EncParams.uiHorizontalSearchRange = 0;     /* use default */
+    EncParams.uiVerticalSearchRange = 0;       /* use default */
+    EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
+    EncParams.uiIVopPeriod = 0;                /* use default */
+    EncParams.uiMotionEstimationTools = 0;     /* M4V_MOTION_EST_TOOLS_ALL */
+    EncParams.bAcPrediction = M4OSA_TRUE;      /* use AC prediction */
+    EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
+    EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */
+
+    switch ( pC->ewc.VideoStreamType )
+    {
+        case M4SYS_kH263:
+
+            EncParams.Format = M4ENCODER_kH263;
+
+            EncParams.uiStartingQuantizerValue = 10;
+            EncParams.uiRateFactor = 1; /* default */
+
+            EncParams.bErrorResilience = M4OSA_FALSE;
+            EncParams.bDataPartitioning = M4OSA_FALSE;
+            break;
+
+        case M4SYS_kMPEG_4:
+
+            EncParams.Format = M4ENCODER_kMPEG4;
+
+            EncParams.uiStartingQuantizerValue = 8;
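+            /* The rate factor is the number of timescale ticks per output frame,
+               rounded to the nearest integer (dOutputFrameDuration being in ms) */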
+            EncParams.uiRateFactor = (M4OSA_UInt8)(( pC->dOutputFrameDuration
+                * pC->ewc.uiVideoTimeScale) / 1000.0 + 0.5);
+
+            if( EncParams.uiRateFactor == 0 )
+                EncParams.uiRateFactor = 1; /* default */
+
+            if( M4OSA_FALSE == pC->ewc.bVideoDataPartitioning )
+            {
+                EncParams.bErrorResilience = M4OSA_FALSE;
+                EncParams.bDataPartitioning = M4OSA_FALSE;
+            }
+            else
+            {
+                EncParams.bErrorResilience = M4OSA_TRUE;
+                EncParams.bDataPartitioning = M4OSA_TRUE;
+            }
+            break;
+
+        case M4SYS_kH264:
+            M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: M4SYS_H264");
+
+            EncParams.Format = M4ENCODER_kH264;
+
+            EncParams.uiStartingQuantizerValue = 10;
+            EncParams.uiRateFactor = 1; /* default */
+
+            EncParams.bErrorResilience = M4OSA_FALSE;
+            EncParams.bDataPartitioning = M4OSA_FALSE;
+            //EncParams.FrameRate = M4VIDEOEDITING_k5_FPS;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreateVideoEncoder: Unknown videoStreamType 0x%x",
+                pC->ewc.VideoStreamType);
+            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
+    }
+
+    /* In case of EMP we overwrite certain parameters */
+    if( M4OSA_TRUE == pC->ewc.bActivateEmp )
+    {
+        EncParams.uiHorizontalSearchRange = 15;    /* set value */
+        EncParams.uiVerticalSearchRange = 15;      /* set value */
+        EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
+        EncParams.uiIVopPeriod = 15; /* one I frame every 15 frames */
+        EncParams.uiMotionEstimationTools = 1; /* M4V_MOTION_EST_TOOLS_NO_4MV */
+        EncParams.bAcPrediction = M4OSA_FALSE;     /* no AC prediction */
+        EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
+        EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */
+    }
+
+    if( pC->bIsMMS == M4OSA_FALSE )
+    {
+        /* Compute max bitrate depending on input files bitrates and transitions */
+        if( pC->Vstate == M4VSS3GPP_kEditVideoState_TRANSITION )
+        {
+            /* Max of the two blended files */
+            if( pC->pC1->pSettings->ClipProperties.uiVideoBitrate
+                > pC->pC2->pSettings->ClipProperties.uiVideoBitrate )
+                EncParams.Bitrate =
+                pC->pC1->pSettings->ClipProperties.uiVideoBitrate;
+            else
+                EncParams.Bitrate =
+                pC->pC2->pSettings->ClipProperties.uiVideoBitrate;
+        }
+        else
+        {
+            /* Same as input file */
+            EncParams.Bitrate =
+                pC->pC1->pSettings->ClipProperties.uiVideoBitrate;
+        }
+    }
+    else
+    {
+        EncParams.Bitrate = pC->uiMMSVideoBitrate; /* RC */
+        EncParams.uiTimeScale = 0; /* We let the encoder choose the timescale */
+    }
+
+    M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctInit");
+    /**
+    * Init the video encoder (advanced settings version of the encoder Open function) */
+    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctInit(&pC->ewc.pEncContext,
+        &pC->ewc.OurWriterDataInterface, M4VSS3GPP_intVPP, pC,
+        pC->ShellAPI.pCurrentVideoEncoderExternalAPI,
+        pC->ShellAPI.pCurrentVideoEncoderUserData);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctInit returns 0x%x",
+            err);
+        return err;
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
+    M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctOpen");
+
+    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctOpen(pC->ewc.pEncContext,
+        &pC->ewc.WriterVideoAU, &EncParams);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctOpen returns 0x%x",
+            err);
+        return err;
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
+    M4OSA_TRACE1_0(
+        "M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctStart");
+
+    if( M4OSA_NULL != pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart )
+    {
+        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart(
+            pC->ewc.pEncContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctStart returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderRunning;
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intCreateVideoEncoder: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder()
+ * @brief    Destroy the video encoder
+ * @note
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    if( M4OSA_NULL != pC->ewc.pEncContext )
+    {
+        if( M4VSS3GPP_kEncoderRunning == pC->ewc.encoderState )
+        {
+            if( pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
+            {
+                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop(
+                    pC->ewc.pEncContext);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intDestroyVideoEncoder:\
+                        pVideoEncoderGlobalFcts->pFctStop returns 0x%x",
+                        err);
+                    /* Well... how the heck do you handle a failed cleanup? */
+                }
+            }
+
+            pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
+        }
+
+        /* Has the encoder actually been opened? Don't close it if that's not the case. */
+        if( M4VSS3GPP_kEncoderStopped == pC->ewc.encoderState )
+        {
+            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctClose(
+                pC->ewc.pEncContext);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intDestroyVideoEncoder:\
+                    pVideoEncoderGlobalFcts->pFctClose returns 0x%x",
+                    err);
+                /* Well... how the heck do you handle a failed cleanup? */
+            }
+
+            pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
+        }
+
+        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctCleanup(
+            pC->ewc.pEncContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intDestroyVideoEncoder:\
+                pVideoEncoderGlobalFcts->pFctCleanup returns 0x%x!",
+                err);
+            /**< We do not return the error here because we still have stuff to free */
+        }
+
+        pC->ewc.encoderState = M4VSS3GPP_kNoEncoder;
+        /**
+        * Reset variable */
+        pC->ewc.pEncContext = M4OSA_NULL;
+    }
+
+    M4OSA_TRACE3_1("M4VSS3GPP_intDestroyVideoEncoder: returning 0x%x", err);
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intSetH263TimeCounter()
+ * @brief    Modify the time counter of the given H263 video AU
+ * @note
+ * @param    pAuDataBuffer    (IN/OUT) H263 Video AU to modify
+ * @param    uiCts            (IN)     New time counter value
+ * @return    nothing
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intSetH263TimeCounter( M4OSA_MemAddr8 pAuDataBuffer,
+                                                  M4OSA_UInt8 uiCts )
+{
+    /*
+    *  The H263 time counter is 8 bits located on the "x" below:
+    *
+    *   |--------|--------|--------|--------|
+    *    ???????? ???????? ??????xx xxxxxx??
+    */
+
+    /**
+    * Write the 2 bits on the third byte */
+    pAuDataBuffer[2] = ( pAuDataBuffer[2] & 0xFC) | (( uiCts >> 6) & 0x3);
+
+    /**
+    * Write the 6 bits on the fourth byte */
+    pAuDataBuffer[3] = ( ( uiCts << 2) & 0xFC) | (pAuDataBuffer[3] & 0x3);
+
+    return;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intSetMPEG4Gov()
+ * @brief    Modify the time info from Group Of VOP video AU
+ * @note
+ * @param    pAuDataBuffer    (IN)    MPEG4 Video AU to modify
+ * @param    uiCtsSec            (IN)     New GOV time info in second unit
+ * @return    nothing
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intSetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
+                                           M4OSA_UInt32 uiCtsSec )
+{
+    /*
+    *  The MPEG-4 time code length is 18 bits:
+    *
+    *     hh     mm    marker    ss
+    *    xxxxx|xxx xxx     1    xxxx xx ??????
+    *   |----- ---|---     -    ----|-- ------|
+    */
+    M4OSA_UInt8 uiHh;
+    M4OSA_UInt8 uiMm;
+    M4OSA_UInt8 uiSs;
+    M4OSA_UInt8 uiTmp;
+
+    /**
+    * Write the last 2 bits of ss */
+    uiSs = (M4OSA_UInt8)(uiCtsSec % 60); /**< modulo part */
+    pAuDataBuffer[2] = (( ( uiSs & 0x03) << 6) | (pAuDataBuffer[2] & 0x3F));
+
+    if( uiCtsSec < 60 )
+    {
+        /**
+        * Write the last 3 bits of mm (zero here), the marker bit (0x10) and the 4 MSBs of ss */
+        pAuDataBuffer[1] = (( 0x10) | (uiSs >> 2));
+
+        /**
+        * Write the 5 bits of hh and the 3 MSBs of mm (all zero here, since the time is below one minute) */
+        pAuDataBuffer[0] = 0;
+    }
+    else
+    {
+        /**
+        * Write the last 3 bits of mm, the marker bit (0x10) and the 4 MSBs of ss */
+        uiTmp = (M4OSA_UInt8)(uiCtsSec / 60); /**< integer part */
+        uiMm = (M4OSA_UInt8)(uiTmp % 60);
+        pAuDataBuffer[1] = (( uiMm << 5) | (0x10) | (uiSs >> 2));
+
+        if( uiTmp < 60 )
+        {
+            /**
+            * Write the 5 bits of hh and 3 of mm (out of 6) */
+            pAuDataBuffer[0] = ((uiMm >> 3));
+        }
+        else
+        {
+            /**
+            * Write the 5 bits of hh and 3 of mm (out of 6) */
+            uiHh = (M4OSA_UInt8)(uiTmp / 60);
+            pAuDataBuffer[0] = (( uiHh << 3) | (uiMm >> 3));
+        }
+    }
+    return;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intGetMPEG4Gov()
+ * @brief    Get the time info from Group Of VOP video AU
+ * @note
+ * @param    pAuDataBuffer    (IN)    MPEG4 Video AU to read
+ * @param    pCtsSec            (OUT)    Current GOV time info, in seconds
+ * @return    nothing
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intGetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
+                                           M4OSA_UInt32 *pCtsSec )
+{
+    /*
+    *  The MPEG-4 time code length is 18 bits:
+    *
+    *     hh     mm    marker    ss
+    *    xxxxx|xxx xxx     1    xxxx xx ??????
+    *   |----- ---|---     -    ----|-- ------|
+    */
+    M4OSA_UInt8 uiHh;
+    M4OSA_UInt8 uiMm;
+    M4OSA_UInt8 uiSs;
+    M4OSA_UInt8 uiTmp;
+    M4OSA_UInt32 uiCtsSec;
+
+    /**
+    * Read ss */
+    uiSs = (( pAuDataBuffer[2] & 0xC0) >> 6);
+    uiTmp = (( pAuDataBuffer[1] & 0x0F) << 2);
+    uiCtsSec = uiSs + uiTmp;
+
+    /**
+    * Read mm */
+    uiMm = (( pAuDataBuffer[1] & 0xE0) >> 5);
+    uiTmp = (( pAuDataBuffer[0] & 0x07) << 3);
+    uiMm = uiMm + uiTmp;
+    uiCtsSec = ( uiMm * 60) + uiCtsSec;
+
+    /**
+    * Read hh */
+    uiHh = (( pAuDataBuffer[0] & 0xF8) >> 3);
+
+    if( uiHh )
+    {
+        uiCtsSec = ( uiHh * 3600) + uiCtsSec;
+    }
+
+    /*
+    * in sec */
+    *pCtsSec = uiCtsSec;
+
+    return;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAllocateYUV420()
+ * @brief    Allocate the three YUV 4:2:0 planes
+ * @note
+ * @param    pPlanes    (IN/OUT) valid pointer to 3 M4VIFI_ImagePlane structures
+ * @param    uiWidth    (IN)     Image width
+ * @param    uiHeight   (IN)     Image height
+ * @return    M4NO_ERROR: No error
+ * @return    M4ERR_ALLOC: A memory allocation failed
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAllocateYUV420( M4VIFI_ImagePlane *pPlanes,
+                                             M4OSA_UInt32 uiWidth, M4OSA_UInt32 uiHeight )
+{
+
+    pPlanes[0].u_width = uiWidth;
+    pPlanes[0].u_height = uiHeight;
+    pPlanes[0].u_stride = uiWidth;
+    pPlanes[0].u_topleft = 0;
+    pPlanes[0].pac_data = (M4VIFI_UInt8 *)M4OSA_malloc(pPlanes[0].u_stride
+        * pPlanes[0].u_height, M4VSS3GPP, (M4OSA_Char *)"pPlanes[0].pac_data");
+
+    if( M4OSA_NULL == pPlanes[0].pac_data )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[0].pac_data,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
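+    /* The chroma (U and V) planes are half the luma size in each dimension (YUV 4:2:0) */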
+    pPlanes[1].u_width = pPlanes[0].u_width >> 1;
+    pPlanes[1].u_height = pPlanes[0].u_height >> 1;
+    pPlanes[1].u_stride = pPlanes[1].u_width;
+    pPlanes[1].u_topleft = 0;
+    pPlanes[1].pac_data = (M4VIFI_UInt8 *)M4OSA_malloc(pPlanes[1].u_stride
+        * pPlanes[1].u_height, M4VSS3GPP,(M4OSA_Char *) "pPlanes[1].pac_data");
+
+    if( M4OSA_NULL == pPlanes[1].pac_data )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[1].pac_data,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    pPlanes[2].u_width = pPlanes[1].u_width;
+    pPlanes[2].u_height = pPlanes[1].u_height;
+    pPlanes[2].u_stride = pPlanes[2].u_width;
+    pPlanes[2].u_topleft = 0;
+    pPlanes[2].pac_data = (M4VIFI_UInt8 *)M4OSA_malloc(pPlanes[2].u_stride
+        * pPlanes[2].u_height, M4VSS3GPP, (M4OSA_Char *)"pPlanes[2].pac_data");
+
+    if( M4OSA_NULL == pPlanes[2].pac_data )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[2].pac_data,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAllocateYUV420: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_MediaAndCodecSubscription.c b/libvideoeditor/vss/src/M4VSS3GPP_MediaAndCodecSubscription.c
new file mode 100755
index 0000000..f7226a3
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_MediaAndCodecSubscription.c
@@ -0,0 +1,457 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file    M4VSS3GPP_MediaAndCodecSubscription.c
+ * @brief    Media readers and codecs subscription
+ * @note    This file implements the subscription of the supported media
+ *            readers and decoders for the VSS. Support for each of them can
+ *            be activated or de-activated
+ *            using compilation flags set in the project settings.
+ *************************************************************************
+ */
+
+#include "NXPSW_CompilerSwitches.h"
+
+
+#include "M4OSA_Debug.h"
+#include "M4VSS3GPP_InternalTypes.h"                /**< Include for VSS specific types */
+#include "M4VSS3GPP_InternalFunctions.h"            /**< Registration module */
+
+/* _______________________ */
+/*|                       |*/
+/*|  reader subscription  |*/
+/*|_______________________|*/
+
+/* Reader registration : at least one reader must be defined */
+#ifndef M4VSS_SUPPORT_READER_3GP
+#ifndef M4VSS_SUPPORT_READER_AMR
+#ifndef M4VSS_SUPPORT_READER_MP3
+#ifndef M4VSS_SUPPORT_READER_PCM
+#ifndef M4VSS_SUPPORT_AUDEC_NULL
+#error "no reader registered"
+#endif /* M4VSS_SUPPORT_AUDEC_NULL */
+#endif /* M4VSS_SUPPORT_READER_PCM */
+#endif /* M4VSS_SUPPORT_READER_MP3 */
+#endif /* M4VSS_SUPPORT_READER_AMR */
+#endif /* M4VSS_SUPPORT_READER_3GP */
+
+/* There must be at least one MPEG4 decoder */
+#if !defined(M4VSS_SUPPORT_VIDEC_3GP) && !defined(M4VSS_ENABLE_EXTERNAL_DECODERS)
+#error "Wait, what?"
+/* "Hey, this is the VSS3GPP speaking. Pray tell, how the heck do you expect me to be able to do
+any editing without a built-in video decoder, nor the possibility to receive an external one?!
+Seriously, I'd love to know." */
+#endif
+
+/* Include files for each reader to subscribe */
+#ifdef M4VSS_SUPPORT_READER_3GP
+#include "VideoEditor3gpReader.h"
+#endif
+#ifdef M4VSS_SUPPORT_READER_AMR
+#include "M4READER_Amr.h"
+#endif
+#ifdef M4VSS_SUPPORT_READER_MP3
+#include "VideoEditorMp3Reader.h"
+#endif
+#ifdef M4VSS_SUPPORT_READER_PCM
+#include "M4READER_Pcm.h"
+#endif
+
+
+/* ______________________________ */
+/*|                              |*/
+/*|  audio decoder subscription  |*/
+/*|______________________________|*/
+
+#include "VideoEditorAudioDecoder.h"
+#include "VideoEditorVideoDecoder.h"
+#ifdef M4VSS_SUPPORT_AUDEC_NULL
+#include "M4AD_Null.h"
+#endif
+
+/* _______________________ */
+/*|                       |*/
+/*|  writer subscription  |*/
+/*|_______________________|*/
+
+/* Writer registration : at least one writer must be defined */
+//#ifndef M4VSS_SUPPORT_WRITER_AMR
+#ifndef M4VSS_SUPPORT_WRITER_3GPP
+#error "no writer registered"
+#endif /* M4VSS_SUPPORT_WRITER_3GPP */
+//#endif /* M4VSS_SUPPORT_WRITER_AMR */
+
+/* Include files for each writer to subscribe */
+//#ifdef M4VSS_SUPPORT_WRITER_AMR
+/*extern M4OSA_ERR M4WRITER_AMR_getInterfaces( M4WRITER_OutputFileType* Type,
+M4WRITER_GlobalInterface** SrcGlobalInterface,
+M4WRITER_DataInterface** SrcDataInterface);*/
+//#endif
+#ifdef M4VSS_SUPPORT_WRITER_3GPP
+extern M4OSA_ERR M4WRITER_3GP_getInterfaces( M4WRITER_OutputFileType* Type,
+                                            M4WRITER_GlobalInterface** SrcGlobalInterface,
+                                            M4WRITER_DataInterface** SrcDataInterface);
+#endif
+
+/* ______________________________ */
+/*|                              |*/
+/*|  video encoder subscription  |*/
+/*|______________________________|*/
+#include "VideoEditorAudioEncoder.h"
+#include "VideoEditorVideoEncoder.h"
+
+
+/* ______________________________ */
+/*|                              |*/
+/*|  audio encoder subscription  |*/
+/*|______________________________|*/
+
+
+#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer) if ((pointer) == M4OSA_NULL)\
+    return ((M4OSA_ERR)(retval));
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_subscribeMediaAndCodec()
+ * @brief    This function registers the reader, decoders, writers and encoders
+ *          in the VSS.
+ * @note
+ * @param    pContext:    (IN) Execution context.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER    pContext is NULL
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_subscribeMediaAndCodec(M4VSS3GPP_MediaAndCodecCtxt *pContext)
+{
+    M4OSA_ERR                   err = M4NO_ERROR;
+
+    M4READER_MediaType          readerMediaType;
+    M4READER_GlobalInterface*   pReaderGlobalInterface;
+    M4READER_DataInterface*     pReaderDataInterface;
+
+    M4WRITER_OutputFileType     writerMediaType;
+    M4WRITER_GlobalInterface*   pWriterGlobalInterface;
+    M4WRITER_DataInterface*     pWriterDataInterface;
+
+    M4AD_Type                   audioDecoderType;
+    M4ENCODER_AudioFormat       audioCodecType;
+    M4ENCODER_AudioGlobalInterface* pAudioCodecInterface;
+    M4AD_Interface*             pAudioDecoderInterface;
+
+    M4DECODER_VideoType         videoDecoderType;
+    M4ENCODER_Format            videoCodecType;
+    M4ENCODER_GlobalInterface*  pVideoCodecInterface;
+    M4DECODER_VideoInterface*   pVideoDecoderInterface;
+
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext);
+
+    /* _______________________ */
+    /*|                       |*/
+    /*|  reader subscription  |*/
+    /*|_______________________|*/
+
+    /* --- 3GP --- */
+
+#ifdef M4VSS_SUPPORT_READER_3GP
+    err = VideoEditor3gpReader_getInterface( &readerMediaType, &pReaderGlobalInterface,
+         &pReaderDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4READER_3GP interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
+        pReaderDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register 3GP reader");
+#endif /* M4VSS_SUPPORT_READER_3GP */
+
+    /* --- AMR --- */
+
+#ifdef M4VSS_SUPPORT_READER_AMR
+    err = M4READER_AMR_getInterfaces( &readerMediaType, &pReaderGlobalInterface,
+        &pReaderDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4READER_AMR interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
+        pReaderDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register AMR reader");
+#endif /* M4VSS_SUPPORT_READER_AMR */
+
+    /* --- MP3 --- */
+
+#ifdef M4VSS_SUPPORT_READER_MP3
+    err = VideoEditorMp3Reader_getInterface( &readerMediaType, &pReaderGlobalInterface,
+         &pReaderDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4READER_MP3 interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
+        pReaderDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register MP3 reader");
+#endif /* M4VSS_SUPPORT_READER_MP3 */
+
+    /* --- PCM --- */
+
+#ifdef M4VSS_SUPPORT_READER_PCM
+    err = M4READER_PCM_getInterfaces( &readerMediaType, &pReaderGlobalInterface,
+        &pReaderDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4READER_PCM interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
+        pReaderDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register PCM reader");
+#endif /* M4VSS_SUPPORT_READER_PCM */
+
+    /* ______________________________ */
+    /*|                              |*/
+    /*|  video decoder subscription  |*/
+    /*|______________________________|*/
+
+    /* --- MPEG4 & H263 --- */
+
+#ifdef M4VSS_SUPPORT_VIDEC_3GP
+    err = VideoEditorVideoDecoder_getInterface_MPEG4(&videoDecoderType, (M4OSA_Void *)&pVideoDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4DECODER_MPEG4 interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerVideoDecoder( pContext, videoDecoderType, pVideoDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register MPEG4 decoder");
+#endif /* M4VSS_SUPPORT_VIDEC_3GP */
+
+#ifdef M4VSS_SUPPORT_VIDEO_AVC
+    err = VideoEditorVideoDecoder_getInterface_H264(&videoDecoderType, (M4OSA_Void *)&pVideoDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4DECODER_H264 interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerVideoDecoder( pContext, videoDecoderType, pVideoDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register H264 decoder");
+#endif /* M4VSS_SUPPORT_VIDEO_AVC */
+
+    /* ______________________________ */
+    /*|                              |*/
+    /*|  audio decoder subscription  |*/
+    /*|______________________________|*/
+
+    /* --- AMRNB --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_AMRNB
+    err = VideoEditorAudioDecoder_getInterface_AMRNB(&audioDecoderType, &pAudioDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4 AMRNB interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register AMRNB decoder");
+#endif /* M4VSS_SUPPORT_AUDEC_AMRNB */
+
+    /* --- AAC --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_AAC
+    err = VideoEditorAudioDecoder_getInterface_AAC(&audioDecoderType, &pAudioDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4 AAC interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register AAC decoder");
+#endif /* M4VSS_SUPPORT_AUDEC_AAC */
+
+    /* --- MP3 --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_MP3
+    err = VideoEditorAudioDecoder_getInterface_MP3(&audioDecoderType, &pAudioDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4 MP3 interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register MP3 decoder");
+#endif  /* M4VSS_SUPPORT_AUDEC_MP3 */
+
+
+    /* --- NULL --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_NULL
+    err = M4AD_NULL_getInterface( &audioDecoderType, &pAudioDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4AD NULL Decoder interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register EVRC decoder");
+#endif  /* M4VSS_SUPPORT_AUDEC_NULL */
+
+    /* _______________________ */
+    /*|                       |*/
+    /*|  writer subscription  |*/
+    /*|_______________________|*/
+
+
+    /* --- 3GPP --- */
+
+#ifdef M4VSS_SUPPORT_WRITER_3GPP
+    /* retrieves the 3GPP writer media type and pointer to functions*/
+    err = M4WRITER_3GP_getInterfaces( &writerMediaType, &pWriterGlobalInterface,
+        &pWriterDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4WRITER_3GP interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerWriter( pContext, writerMediaType, pWriterGlobalInterface,
+        pWriterDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register 3GPP writer");
+#endif /* M4VSS_SUPPORT_WRITER_3GPP */
+
+    /* ______________________________ */
+    /*|                              |*/
+    /*|  video encoder subscription  |*/
+    /*|______________________________|*/
+
+    /* --- MPEG4 --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+    /* retrieves the MPEG4 encoder type and pointer to functions*/
+    err = VideoEditorVideoEncoder_getInterface_MPEG4(&videoCodecType, &pVideoCodecInterface,
+         M4ENCODER_OPEN_ADVANCED);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4MP4E_MPEG4 interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register video MPEG4 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
+
+    /* --- H263 --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+    /* retrieves the H263 encoder type and pointer to functions*/
+    err = VideoEditorVideoEncoder_getInterface_H263(&videoCodecType, &pVideoCodecInterface,
+         M4ENCODER_OPEN_ADVANCED);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4MP4E_H263 interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register video H263 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AVC
+    /* retrieves the H264 encoder type and pointer to functions*/
+    err = VideoEditorVideoEncoder_getInterface_H264(&videoCodecType, &pVideoCodecInterface,
+         M4ENCODER_OPEN_ADVANCED);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4VSS3GPP_subscribeMediaAndCodec: M4H264E interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register video H264 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AVC */
+
+    /* ______________________________ */
+    /*|                              |*/
+    /*|  audio encoder subscription  |*/
+    /*|______________________________|*/
+
+    /* --- AMR --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AMR
+    /* retrieves the AMR encoder type and pointer to functions*/
+    err = VideoEditorAudioEncoder_getInterface_AMRNB(&audioCodecType, &pAudioCodecInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4AMR interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioEncoder( pContext, audioCodecType, pAudioCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register audio AMR encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AMR */
+
+    /* --- AAC --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AAC
+    /* retrieves the AAC encoder type and pointer to functions*/
+    err = VideoEditorAudioEncoder_getInterface_AAC(&audioCodecType, &pAudioCodecInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4AAC interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioEncoder( pContext, audioCodecType, pAudioCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register audio AAC encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AAC */
+
+    /* --- EVRC --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_EVRC
+    /* retrieves the EVRC encoder type and pointer to functions*/
+    err = M4EVRC_getInterfaces( &audioCodecType, &pAudioCodecInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4EVRC interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioEncoder( pContext, audioCodecType, pAudioCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register audio EVRC encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_EVRC */
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+    pContext->bAllowFreeingOMXCodecInterface = M4OSA_TRUE;   /* when NXP SW codecs are registered,
+                                                               then allow unregistration*/
+#endif
+
+
+    return err;
+}
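+
+/* Illustrative usage (editor's sketch, not part of the shipped code): the subscription is
+   typically performed once, on the media/codec context embedded in the VSS edit context,
+   right after that context has been created. The structure member name below is an
+   assumption for illustration only.
+
+       M4VSS3GPP_MediaAndCodecCtxt *pCodecCtxt = &pEditCtxt->ShellAPI;  // assumed member
+       M4OSA_ERR err = M4VSS3GPP_subscribeMediaAndCodec(pCodecCtxt);
+       if (M4NO_ERROR != err)
+       {
+           // abort initialization: no reader/decoder/writer/encoder is usable
+       }
+*/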
+
diff --git a/libvideoeditor/vss/src/M4xVSS_API.c b/libvideoeditor/vss/src/M4xVSS_API.c
new file mode 100755
index 0000000..33c28b0
--- /dev/null
+++ b/libvideoeditor/vss/src/M4xVSS_API.c
@@ -0,0 +1,7004 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4xVSS_API.c
+ * @brief    API of eXtended Video Studio Service (Video Studio 2.1)
+ * @note
+ ******************************************************************************
+ */
+
+/**
+ * OSAL main types and errors ***/
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_FileReader.h"
+#include "M4OSA_FileWriter.h"
+#include "M4OSA_FileExtra.h"
+#include "M4OSA_CoreID.h"
+#include "M4OSA_CharStar.h"
+// StageFright encoders require resolutions that are multiples of 16
+#include "M4ENCODER_common.h"
+
+
+/**
+ * VSS 3GPP API definition */
+#include "M4VSS3GPP_ErrorCodes.h"
+
+/*************************
+Begin of xVSS API
+ **************************/
+
+#include "M4xVSS_API.h"
+#include "M4xVSS_Internal.h"
+
+/* RC: to delete unnecessary temp files on the fly */
+#include "M4VSS3GPP_InternalTypes.h"
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_Init(M4OSA_Context* pContext, M4xVSS_InitParams* pParams)
+ * @brief        This function initializes the xVSS
+ * @note        Initializes the xVSS edit operation (allocates an execution context).
+ *
+ * @param    pContext            (OUT) Pointer to the xVSS edit context to allocate
+ * @param    pParams                (IN) Parameters mandatory for xVSS
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4xVSS_Init( M4OSA_Context *pContext, M4xVSS_InitParams *pParams )
+{
+    M4xVSS_Context *xVSS_context;
+    M4OSA_UInt32 length = 0, i;
+
+    if( pParams == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("Parameter structure for M4xVSS_Init function is NULL");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pParams->pFileReadPtr == M4OSA_NULL
+        || pParams->pFileWritePtr == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0(
+            "pFileReadPtr or pFileWritePtr in M4xVSS_InitParams structure is NULL");
+        return M4ERR_PARAMETER;
+    }
+
+    xVSS_context = (M4xVSS_Context *)M4OSA_malloc(sizeof(M4xVSS_Context), M4VS,
+        (M4OSA_Char *)"Context of the xVSS layer");
+
+    if( xVSS_context == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+        return M4ERR_ALLOC;
+    }
+
+    /* Initialize file read/write functions pointers */
+    xVSS_context->pFileReadPtr = pParams->pFileReadPtr;
+    xVSS_context->pFileWritePtr = pParams->pFileWritePtr;
+
+    /*UTF Conversion support: copy conversion functions pointers and allocate the temporary
+     buffer*/
+    if( pParams->pConvFromUTF8Fct != M4OSA_NULL )
+    {
+        if( pParams->pConvToUTF8Fct != M4OSA_NULL )
+        {
+            xVSS_context->UTFConversionContext.pConvFromUTF8Fct =
+                pParams->pConvFromUTF8Fct;
+            xVSS_context->UTFConversionContext.pConvToUTF8Fct =
+                pParams->pConvToUTF8Fct;
+            xVSS_context->UTFConversionContext.m_TempOutConversionSize =
+                UTF_CONVERSION_BUFFER_SIZE;
+            xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
+                (M4OSA_Void *)M4OSA_malloc(UTF_CONVERSION_BUFFER_SIZE
+                * sizeof(M4OSA_UInt8),
+                M4VA, (M4OSA_Char *)"M4xVSS_Init: UTF conversion buffer");
+
+            if( M4OSA_NULL
+                == xVSS_context->UTFConversionContext.pTempOutConversionBuffer )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pTempPath);
+                xVSS_context->pTempPath = M4OSA_NULL;
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context);
+                xVSS_context = M4OSA_NULL;
+                return M4ERR_ALLOC;
+            }
+        }
+        else
+        {
+            M4OSA_TRACE1_0("M4xVSS_Init: one UTF conversion pointer is null and the other\
+                           is not null");
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pTempPath);
+            xVSS_context->pTempPath = M4OSA_NULL;
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context);
+            xVSS_context = M4OSA_NULL;
+            return M4ERR_PARAMETER;
+        }
+    }
+    else
+    {
+        xVSS_context->UTFConversionContext.pConvFromUTF8Fct = M4OSA_NULL;
+        xVSS_context->UTFConversionContext.pConvToUTF8Fct = M4OSA_NULL;
+        xVSS_context->UTFConversionContext.m_TempOutConversionSize = 0;
+        xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
+            M4OSA_NULL;
+    }
+
+    if( pParams->pTempPath != M4OSA_NULL )
+    {
+        /*No need to convert into UTF8 as all input of xVSS are in UTF8
+        (the conversion customer format into UTF8
+        is done in VA/VAL)*/
+        xVSS_context->pTempPath =
+            (M4OSA_Void *)M4OSA_malloc(M4OSA_chrLength(pParams->pTempPath) + 1,
+            M4VS, (M4OSA_Char *)"xVSS Path for temporary files");
+
+        if( xVSS_context->pTempPath == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(xVSS_context->pTempPath, pParams->pTempPath,
+            M4OSA_chrLength(pParams->pTempPath) + 1);
+        /* TODO: Check that no previous xVSS temporary files are present ? */
+    }
+    else
+    {
+        M4OSA_TRACE1_0("Path for temporary files is NULL");
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context);
+        xVSS_context = M4OSA_NULL;
+        return M4ERR_PARAMETER;
+    }
+
+    xVSS_context->pSettings =
+        (M4VSS3GPP_EditSettings *)M4OSA_malloc(sizeof(M4VSS3GPP_EditSettings),
+        M4VS, (M4OSA_Char *)"Copy of VSS structure");
+
+    if( xVSS_context->pSettings == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pTempPath);
+        xVSS_context->pTempPath = M4OSA_NULL;
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context);
+        xVSS_context = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+
+    /* Initialize pointers in pSettings */
+    xVSS_context->pSettings->pClipList = M4OSA_NULL;
+    xVSS_context->pSettings->pTransitionList = M4OSA_NULL;
+    xVSS_context->pSettings->Effects = M4OSA_NULL; /* RC */
+    xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
+
+    /* This is used to know if the user has added or removed some media files */
+    xVSS_context->previousClipNumber = 0;
+
+    /* "State machine" */
+    xVSS_context->editingStep = 0;
+    xVSS_context->analyseStep = 0;
+
+    xVSS_context->pcmPreviewFile = M4OSA_NULL;
+
+    /* Initialize Pto3GPP and MCS lists */
+    xVSS_context->pMCSparamsList = M4OSA_NULL;
+    xVSS_context->pPTo3GPPparamsList = M4OSA_NULL;
+    xVSS_context->pPTo3GPPcurrentParams = M4OSA_NULL;
+    xVSS_context->pMCScurrentParams = M4OSA_NULL;
+
+    xVSS_context->tempFileIndex = 0;
+
+    xVSS_context->targetedBitrate = 0;
+
+    xVSS_context->targetedTimescale = 0;
+
+    xVSS_context->pAudioMixContext = M4OSA_NULL;
+    xVSS_context->pAudioMixSettings = M4OSA_NULL;
+
+    /*FB: initialize to avoid crash when error during the editing*/
+    xVSS_context->pCurrentEditSettings = M4OSA_NULL;
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    for ( i = 0; i < M4VD_kVideoType_NB; i++ )
+    {
+        xVSS_context->registeredExternalDecs[i].pDecoderInterface = M4OSA_NULL;
+        xVSS_context->registeredExternalDecs[i].pUserData = M4OSA_NULL;
+        xVSS_context->registeredExternalDecs[i].registered = M4OSA_FALSE;
+    }
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    for ( i = 0; i < M4VE_kEncoderType_NB; i++ )
+    {
+        xVSS_context->registeredExternalEncs[i].pEncoderInterface = M4OSA_NULL;
+        xVSS_context->registeredExternalEncs[i].pUserData = M4OSA_NULL;
+        xVSS_context->registeredExternalEncs[i].registered = M4OSA_FALSE;
+    }
+
+    /* Initialize state if all initializations are corrects */
+    xVSS_context->m_state = M4xVSS_kStateInitialized;
+
+    /* initialize MCS context*/
+    xVSS_context->pMCS_Ctxt = M4OSA_NULL;
+
+    *pContext = xVSS_context;
+
+    return M4NO_ERROR;
+}
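+
+/* Illustrative usage (editor's sketch, not part of the shipped code): a caller fills the
+   init parameters with its OSAL file read/write function tables, optional UTF8 conversion
+   callbacks, and a writable temporary directory, then creates the context. Only the fields
+   actually read by M4xVSS_Init above are shown; how the file function tables are filled is
+   platform specific and assumed to be done elsewhere.
+
+       M4xVSS_InitParams initParams;
+       M4OSA_Context     xVSSCtxt = M4OSA_NULL;
+       M4OSA_ERR         err;
+
+       initParams.pFileReadPtr     = &myOsalFileReadFcts;   // assumption: filled elsewhere
+       initParams.pFileWritePtr    = &myOsalFileWriteFcts;  // assumption: filled elsewhere
+       initParams.pConvFromUTF8Fct = M4OSA_NULL;            // both NULL: no UTF8 conversion
+       initParams.pConvToUTF8Fct   = M4OSA_NULL;
+       initParams.pTempPath        = (M4OSA_Void *)"/sdcard/tmp/";  // example path
+
+       err = M4xVSS_Init(&xVSSCtxt, &initParams);
+       if (M4NO_ERROR != err)
+       {
+           // handle M4ERR_PARAMETER or M4ERR_ALLOC
+       }
+*/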
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_ReduceTranscode
+ * @brief        This function changes the given editing structure in order to
+ *                minimize the transcoding time.
+ * @note        The xVSS analyses this structure, and if needed, changes the
+ *                output parameters (Video codec, video size, audio codec,
+ *                audio nb of channels) to minimize the transcoding time.
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @param    pSettings            (IN) Edition settings (allocated by the user)
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_ReduceTranscode( M4OSA_Context pContext,
+                                 M4VSS3GPP_EditSettings *pSettings )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4VIDEOEDITING_ClipProperties fileProperties;
+    M4OSA_UInt8 i, j;
+    M4OSA_Bool bAudioTransition = M4OSA_FALSE;
+    M4OSA_Bool bIsBGMReplace = M4OSA_FALSE;
+    M4OSA_Bool bFound;
+    M4OSA_UInt32 videoConfig[9] =
+    {
+        0, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    /** Index <-> Video config **/
+    /* 0:        H263  SQCIF        */
+    /* 1:        H263  QCIF        */
+    /* 2:        H263  CIF        */
+    /* 3:        MPEG4 SQCIF        */
+    /* 4:        MPEG4 QQVGA        */
+    /* 5:        MPEG4 QCIF        */
+    /* 6:        MPEG4 QVGA        */
+    /* 7:        MPEG4 CIF        */
+    /* 8:        MPEG4 VGA        */
+    /****************************/
+    M4OSA_UInt32 audioConfig[3] =
+    {
+        0, 0, 0
+    };
+    /** Index <-> Audio config **/
+    /* 0:    AMR                    */
+    /* 1:    AAC    16kHz mono        */
+    /* 2:    AAC 16kHz stereo    */
+    /****************************/
+
+    /* Check state */
+    if( xVSS_context->m_state != M4xVSS_kStateInitialized \
+        && xVSS_context->m_state != M4xVSS_kStateOpened )
+    {
+        M4OSA_TRACE1_1(
+            "Bad state when calling M4xVSS_ReduceTranscode function! State is %d",
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    /* Check number of clips */
+    if( pSettings->uiClipNumber == 0 )
+    {
+        M4OSA_TRACE1_0("The number of input clip must be greater than 0 !");
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check if there is a background music, and if its audio will replace input clip audio */
+    if( pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+    {
+        if( pSettings->xVSS.pBGMtrack->uiAddVolume == 100 )
+        {
+            bIsBGMReplace = M4OSA_TRUE;
+        }
+    }
+
+    /* Parse all clips, and count occurrences of each combination */
+    for ( i = 0; i < pSettings->uiClipNumber; i++ )
+    {
+        /* We ignore JPG input files as they are always transcoded */
+        if( pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_3GPP )
+        {
+            /**
+            * UTF conversion: convert into the customer format*/
+            M4OSA_Void *pDecodedPath = pSettings->pClipList[i]->pFile;
+            M4OSA_UInt32 ConvertedSize = 0;
+
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                    (M4OSA_Void *)pSettings->pClipList[i]->pFile,
+                    (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer,
+                    &ConvertedSize);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1("M4xVSS_ReduceTranscode:\
+                                   M4xVSS_internalConvertFromUTF8 returns err: 0x%x", err);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+            /**
+            * End of the utf conversion, now use the converted path*/
+            err = M4xVSS_internalGetProperties(xVSS_context, pDecodedPath,
+                &fileProperties);
+
+            //err = M4xVSS_internalGetProperties(xVSS_context, pSettings->pClipList[i]->pFile,
+            //     &fileProperties);
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_sendCommand: M4xVSS_internalGetProperties returned 0x%x",
+                    err);
+                /* TODO: Translate error code of MCS to an xVSS error code ? */
+                return err;
+            }
+
+            /* Check best video settings */
+            if( fileProperties.uiVideoWidth == 128
+                && fileProperties.uiVideoHeight == 96 )
+            {
+                if( fileProperties.VideoStreamType == M4VIDEOEDITING_kH263 )
+                {
+                    videoConfig[0] += fileProperties.uiClipVideoDuration;
+                }
+                else if( ( fileProperties.VideoStreamType
+                    == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[3] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 160
+                && fileProperties.uiVideoHeight == 120 )
+            {
+                if( ( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[4] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 176
+                && fileProperties.uiVideoHeight == 144 )
+            {
+                if( fileProperties.VideoStreamType == M4VIDEOEDITING_kH263 )
+                {
+                    videoConfig[1] += fileProperties.uiClipVideoDuration;
+                }
+                else if( ( fileProperties.VideoStreamType
+                    == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[5] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 320
+                && fileProperties.uiVideoHeight == 240 )
+            {
+                if( ( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[6] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 352
+                && fileProperties.uiVideoHeight == 288 )
+            {
+                if( fileProperties.VideoStreamType == M4VIDEOEDITING_kH263 )
+                {
+                    videoConfig[2] += fileProperties.uiClipVideoDuration;
+                }
+                else if( ( fileProperties.VideoStreamType
+                    == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[7] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 640
+                && fileProperties.uiVideoHeight == 480 )
+            {
+                if( ( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[8] += fileProperties.uiClipVideoDuration;
+                }
+            }
+
+            /* If there is a BGM that replaces existing audio track, we do not care about
+            audio track as it will be replaced */
+            /* If not, we try to minimize audio reencoding */
+            if( bIsBGMReplace == M4OSA_FALSE )
+            {
+                if( fileProperties.AudioStreamType == M4VIDEOEDITING_kAAC )
+                {
+                    if( fileProperties.uiSamplingFrequency == 16000 && \
+                        fileProperties.uiNbChannels == 1 )
+                    {
+                        audioConfig[1] += fileProperties.uiClipAudioDuration;
+                    }
+                    else if( fileProperties.uiSamplingFrequency == 16000 && \
+                        fileProperties.uiNbChannels == 2 )
+                    {
+                        audioConfig[2] += fileProperties.uiClipAudioDuration;
+                    }
+                }
+                else if( fileProperties.AudioStreamType
+                    == M4VIDEOEDITING_kAMR_NB )
+                {
+                    audioConfig[0] += fileProperties.uiClipAudioDuration;
+                }
+            }
+        }
+    }
+
+    /* Find best output video format (the most frequently occurring combination) */
+    j = 0;
+    bFound = M4OSA_FALSE;
+
+    for ( i = 0; i < 9; i++ )
+    {
+        if( videoConfig[i] >= videoConfig[j] )
+        {
+            j = i;
+            bFound = M4OSA_TRUE;
+        }
+    }
+
+    if( bFound )
+    {
+        switch( j )
+        {
+            case 0:
+                pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kSQCIF;
+                break;
+
+            case 1:
+                pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQCIF;
+                break;
+
+            case 2:
+                pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kCIF;
+                break;
+
+            case 3:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kSQCIF;
+                break;
+
+            case 4:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQQVGA;
+                break;
+
+            case 5:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQCIF;
+                break;
+
+            case 6:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQVGA;
+                break;
+
+            case 7:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kCIF;
+                break;
+
+            case 8:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kVGA;
+                break;
+        }
+    }
+
+    /* Find best output audio format (the most frequently occurring combination) */
+    j = 0;
+    bFound = M4OSA_FALSE;
+
+    for ( i = 0; i < 3; i++ )
+    {
+        if( audioConfig[i] >= audioConfig[j] )
+        {
+            j = i;
+            bFound = M4OSA_TRUE;
+        }
+    }
+
+    if( bFound )
+    {
+        switch( j )
+        {
+            case 0:
+                pSettings->xVSS.outputAudioFormat = M4VIDEOEDITING_kAMR_NB;
+                pSettings->xVSS.bAudioMono = M4OSA_TRUE;
+                break;
+
+            case 1:
+                pSettings->xVSS.outputAudioFormat = M4VIDEOEDITING_kAAC;
+                pSettings->xVSS.bAudioMono = M4OSA_TRUE;
+                break;
+
+            case 2:
+                pSettings->xVSS.outputAudioFormat = M4VIDEOEDITING_kAAC;
+                pSettings->xVSS.bAudioMono = M4OSA_FALSE;
+                break;
+        }
+    }
+
+    return M4NO_ERROR;
+}
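+
+/* Worked example (editor's note): the choice above is driven by the cumulative video/audio
+   duration accumulated per configuration, not by the number of clips. With three 3GPP
+   inputs of QCIF/H263 (10 s), QCIF/H263 (8 s) and CIF/MPEG4 (25 s), videoConfig[1]
+   accumulates 18 s and videoConfig[7] accumulates 25 s, so the output size becomes CIF and
+   only the two H263 clips would need video transcoding. Note that for the MPEG4/H264 cases
+   the exact output format is taken from the VideoStreamType of the last clip analysed in
+   the loop above. */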
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_SendCommand(M4OSA_Context pContext,
+ *                                         M4VSS3GPP_EditSettings* pSettings)
+ * @brief        This function passes an editing structure to the xVSS
+ * @note        The xVSS analyses this structure and prepares the edit operation.
+ *                This function must be called after M4xVSS_Init, after
+ *                M4xVSS_CloseCommand, or after M4xVSS_PreviewStop.
+ *                After this function, the user must call M4xVSS_Step until
+ *                it returns an error other than M4NO_ERROR.
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @param    pSettings            (IN) Edition settings (allocated by the user)
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
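+/* Illustrative call sequence (editor's sketch, not part of the shipped code), following the
+   note above: after M4xVSS_Init, the application hands over its edit settings and then
+   drives the analysis by stepping until M4xVSS_Step stops returning M4NO_ERROR. The
+   progress argument of M4xVSS_Step is shown as an assumption.
+
+       M4OSA_ERR err = M4xVSS_SendCommand(xVSSCtxt, pEditSettings);
+       if (M4NO_ERROR == err)
+       {
+           M4OSA_UInt8 progress = 0;                 // assumption: Step reports progress
+           do
+           {
+               err = M4xVSS_Step(xVSSCtxt, &progress);
+           } while (M4NO_ERROR == err);
+           // err now holds the completion or error code of the analysis phase
+       }
+*/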
+M4OSA_ERR M4xVSS_SendCommand( M4OSA_Context pContext,
+                             M4VSS3GPP_EditSettings *pSettings )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_UInt8 i, j;
+    M4OSA_UInt8 nbOfSameClip = 0;
+    M4OSA_ERR err;
+    M4OSA_Bool isNewBGM = M4OSA_TRUE;
+    M4xVSS_Pto3GPP_params *pPto3GPP_last = M4OSA_NULL;
+    M4xVSS_MCS_params *pMCS_last = M4OSA_NULL;
+    M4OSA_UInt32 width, height, samplingFreq;
+    M4OSA_Bool bIsTranscoding = M4OSA_FALSE;
+    M4OSA_Int32 totalDuration;
+    M4OSA_UInt32 outputSamplingFrequency = 0;
+    M4OSA_UInt32 length = 0;
+    M4OSA_Int8 masterClip = -1;
+
+    i = 0;
+    /* Check state */
+    if( xVSS_context->m_state != M4xVSS_kStateInitialized \
+        && xVSS_context->m_state != M4xVSS_kStateOpened )
+    {
+        M4OSA_TRACE1_1(
+            "Bad state when calling M4xVSS_SendCommand function! State is %d",
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    /* State is back to initialized to allow call of cleanup function in case of error */
+    xVSS_context->m_state = M4xVSS_kStateInitialized;
+
+    /* Check if a previous sendCommand has been called */
+    if( xVSS_context->previousClipNumber != 0 )
+    {
+        M4OSA_UInt32 pCmpResult = 0;
+
+        /* Compare BGM input */
+        if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL \
+            && pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+        {
+            M4OSA_chrCompare(xVSS_context->pSettings->xVSS.pBGMtrack->pFile,
+                pSettings->xVSS.pBGMtrack->pFile, (M4OSA_Int32 *) &pCmpResult);
+
+            if( pCmpResult == 0 )
+            {
+                /* Check if audio output parameters have changed */
+                if( xVSS_context->pSettings->xVSS.outputAudioFormat ==
+                    pSettings->xVSS.outputAudioFormat
+                    && xVSS_context->pSettings->xVSS.bAudioMono
+                    == pSettings->xVSS.bAudioMono )
+                {
+                    /* It means that BGM is the same as before, so, no need to redecode it */
+                    M4OSA_TRACE2_0(
+                        "BGM is the same as before, nothing to decode");
+                    isNewBGM = M4OSA_FALSE;
+                }
+                else
+                {
+                    /* We need to unallocate PCM preview file path in internal context */
+                    if( xVSS_context->pcmPreviewFile != M4OSA_NULL )
+                    {
+                        M4OSA_free(
+                            (M4OSA_MemAddr32)xVSS_context->pcmPreviewFile);
+                        xVSS_context->pcmPreviewFile = M4OSA_NULL;
+                    }
+                }
+            }
+            else
+            {
+                /* We need to unallocate PCM preview file path in internal context */
+                if( xVSS_context->pcmPreviewFile != M4OSA_NULL )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pcmPreviewFile);
+                    xVSS_context->pcmPreviewFile = M4OSA_NULL;
+                }
+            }
+        }
+
+        /* Check if output settings have changed */
+        if( xVSS_context->pSettings->xVSS.outputVideoSize
+            != pSettings->xVSS.outputVideoSize
+            || xVSS_context->pSettings->xVSS.outputVideoFormat
+            != pSettings->xVSS.outputVideoFormat
+            || xVSS_context->pSettings->xVSS.outputAudioFormat
+            != pSettings->xVSS.outputAudioFormat
+            || xVSS_context->pSettings->xVSS.bAudioMono
+            != pSettings->xVSS.bAudioMono
+            || xVSS_context->pSettings->xVSS.outputAudioSamplFreq
+            != pSettings->xVSS.outputAudioSamplFreq )
+        {
+            /* If it is the case, we can't reuse already transcoded/converted files */
+            /* so, we delete these files and remove them from chained list */
+            if( xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+            {
+                M4xVSS_Pto3GPP_params *pParams =
+                    xVSS_context->pPTo3GPPparamsList;
+                M4xVSS_Pto3GPP_params *pParams_sauv;
+
+                while( pParams != M4OSA_NULL )
+                {
+                    if( pParams->pFileIn != M4OSA_NULL )
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams->pFileIn);
+                        pParams->pFileIn = M4OSA_NULL;
+                    }
+
+                    if( pParams->pFileOut != M4OSA_NULL )
+                    {
+                        /* Delete temporary file */
+                        M4OSA_fileExtraDelete(pParams->pFileOut);
+                        M4OSA_free((M4OSA_MemAddr32)pParams->pFileOut);
+                        pParams->pFileOut = M4OSA_NULL;
+                    }
+
+                    if( pParams->pFileTemp != M4OSA_NULL )
+                    {
+                        /* Delete temporary file */
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+                        M4OSA_fileExtraDelete(pParams->pFileTemp);
+                        M4OSA_free((M4OSA_MemAddr32)pParams->pFileTemp);
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+                        pParams->pFileTemp = M4OSA_NULL;
+                    }
+                    pParams_sauv = pParams;
+                    pParams = pParams->pNext;
+                    M4OSA_free((M4OSA_MemAddr32)pParams_sauv);
+                    pParams_sauv = M4OSA_NULL;
+                }
+                xVSS_context->pPTo3GPPparamsList = M4OSA_NULL;
+            }
+
+            if( xVSS_context->pMCSparamsList != M4OSA_NULL )
+            {
+                M4xVSS_MCS_params *pParams = xVSS_context->pMCSparamsList;
+                M4xVSS_MCS_params *pParams_sauv;
+                M4xVSS_MCS_params *pParams_bgm = M4OSA_NULL;
+
+                while( pParams != M4OSA_NULL )
+                {
+                    /* Here, we do not delete BGM */
+                    if( pParams->isBGM != M4OSA_TRUE )
+                    {
+                        if( pParams->pFileIn != M4OSA_NULL )
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)pParams->pFileIn);
+                            pParams->pFileIn = M4OSA_NULL;
+                        }
+
+                        if( pParams->pFileOut != M4OSA_NULL )
+                        {
+                            /* Delete temporary file */
+                            M4OSA_fileExtraDelete(pParams->pFileOut);
+                            M4OSA_free((M4OSA_MemAddr32)pParams->pFileOut);
+                            pParams->pFileOut = M4OSA_NULL;
+                        }
+
+                        if( pParams->pFileTemp != M4OSA_NULL )
+                        {
+                            /* Delete temporary file */
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+                            M4OSA_fileExtraDelete(pParams->pFileTemp);
+                            M4OSA_free((M4OSA_MemAddr32)pParams->pFileTemp);
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+                            pParams->pFileTemp = M4OSA_NULL;
+                        }
+                        pParams_sauv = pParams;
+                        pParams = pParams->pNext;
+                        M4OSA_free((M4OSA_MemAddr32)pParams_sauv);
+                        pParams_sauv = M4OSA_NULL;
+                    }
+                    else
+                    {
+                        pParams_bgm = pParams;
+                        pParams = pParams->pNext;
+                        /*PR P4ME00003182 initialize this pointer because the following params
+                        element will be deallocated*/
+                        if( pParams != M4OSA_NULL
+                            && pParams->isBGM != M4OSA_TRUE )
+                        {
+                            pParams_bgm->pNext = M4OSA_NULL;
+                        }
+                    }
+                }
+                xVSS_context->pMCSparamsList = pParams_bgm;
+            }
+            /* Maybe need to implement framerate changing */
+            //xVSS_context->pSettings->videoFrameRate;
+        }
+
+        /* Unallocate previous xVSS_context->pSettings structure */
+        M4xVSS_freeSettings(xVSS_context->pSettings);
+
+        /*Unallocate output file path*/
+        if( xVSS_context->pSettings->pOutputFile != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings->pOutputFile);
+            xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+        }
+        xVSS_context->pSettings->uiOutputPathSize = 0;
+        xVSS_context->pOutputFile = M4OSA_NULL;
+    }
+
+    /**********************************
+    Clips registering
+    **********************************/
+
+    /* Copy settings from user given structure to our "local" structure */
+    xVSS_context->pSettings->xVSS.outputVideoFormat =
+        pSettings->xVSS.outputVideoFormat;
+    xVSS_context->pSettings->xVSS.outputVideoSize =
+        pSettings->xVSS.outputVideoSize;
+    xVSS_context->pSettings->xVSS.outputAudioFormat =
+        pSettings->xVSS.outputAudioFormat;
+    xVSS_context->pSettings->xVSS.bAudioMono = pSettings->xVSS.bAudioMono;
+    xVSS_context->pSettings->xVSS.outputAudioSamplFreq =
+        pSettings->xVSS.outputAudioSamplFreq;
+    /*xVSS_context->pSettings->pOutputFile = pSettings->pOutputFile;*/
+    /*FB: VAL CR P4ME00003076
+    The output video and audio bitrate are given by the user in the edition settings structure*/
+    xVSS_context->pSettings->xVSS.outputVideoBitrate =
+        pSettings->xVSS.outputVideoBitrate;
+    xVSS_context->pSettings->xVSS.outputAudioBitrate =
+        pSettings->xVSS.outputAudioBitrate;
+    xVSS_context->pSettings->PTVolLevel = pSettings->PTVolLevel;
+
+    /*FB: bug fix if the output path is given in M4xVSS_sendCommand*/
+
+    if( pSettings->pOutputFile != M4OSA_NULL
+        && pSettings->uiOutputPathSize > 0 )
+    {
+        M4OSA_Void *pDecodedPath = pSettings->pOutputFile;
+        /*As all inputs of the xVSS are in UTF8, convert the output file path into the
+        customer format*/
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)pSettings->pOutputFile,
+                (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1("M4xVSS_SendCommand:\
+                               M4xVSS_internalConvertFromUTF8 returns err: 0x%x", err);
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            pSettings->uiOutputPathSize = length;
+        }
+
+        xVSS_context->pSettings->pOutputFile = (M4OSA_Void *)M4OSA_malloc \
+            (pSettings->uiOutputPathSize + 1, M4VS,
+            (M4OSA_Char *)"output file path");
+
+        if( xVSS_context->pSettings->pOutputFile == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->pOutputFile,
+            (M4OSA_MemAddr8)pDecodedPath, pSettings->uiOutputPathSize + 1);
+        xVSS_context->pSettings->uiOutputPathSize = pSettings->uiOutputPathSize;
+        xVSS_context->pOutputFile = xVSS_context->pSettings->pOutputFile;
+    }
+    else
+    {
+        xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+        xVSS_context->pSettings->uiOutputPathSize = 0;
+        xVSS_context->pOutputFile = M4OSA_NULL;
+    }
+    xVSS_context->pSettings->pTemporaryFile = pSettings->pTemporaryFile;
+    xVSS_context->pSettings->uiClipNumber = pSettings->uiClipNumber;
+    xVSS_context->pSettings->videoFrameRate = pSettings->videoFrameRate;
+    xVSS_context->pSettings->uiMasterClip =
+        0; /* With VSS 2.0, this new param is mandatory */
+    xVSS_context->pSettings->xVSS.pTextRenderingFct =
+        pSettings->xVSS.pTextRenderingFct; /* CR text handling */
+    xVSS_context->pSettings->xVSS.outputFileSize =
+        pSettings->xVSS.outputFileSize;
+
+    if( pSettings->xVSS.outputFileSize != 0 \
+        && pSettings->xVSS.outputAudioFormat != M4VIDEOEDITING_kAMR_NB )
+    {
+        M4OSA_TRACE1_0("M4xVSS_SendCommand: Impossible to limit file\
+                       size with other audio output than AAC");
+        return M4ERR_PARAMETER;
+    }
+    xVSS_context->nbStepTotal = 0;
+    xVSS_context->currentStep = 0;
+
+    if( xVSS_context->pSettings->xVSS.outputVideoFormat != M4VIDEOEDITING_kMPEG4
+        && xVSS_context->pSettings->xVSS.outputVideoFormat
+        != M4VIDEOEDITING_kH263
+        && xVSS_context->pSettings->xVSS.outputVideoFormat
+        != M4VIDEOEDITING_kMPEG4_EMP
+        && xVSS_context->pSettings->xVSS.outputVideoFormat
+        != M4VIDEOEDITING_kH264 )
+    {
+        xVSS_context->pSettings->xVSS.outputVideoFormat =
+            M4VIDEOEDITING_kNoneVideo;
+    }
+
+    /* Get output width/height */
+    switch( xVSS_context->pSettings->xVSS.outputVideoSize )
+    {
+        case M4VIDEOEDITING_kSQCIF:
+            width = 128;
+            height = 96;
+            break;
+
+        case M4VIDEOEDITING_kQQVGA:
+            width = 160;
+            height = 120;
+            break;
+
+        case M4VIDEOEDITING_kQCIF:
+            width = 176;
+            height = 144;
+            break;
+
+        case M4VIDEOEDITING_kQVGA:
+            width = 320;
+            height = 240;
+            break;
+
+        case M4VIDEOEDITING_kCIF:
+            width = 352;
+            height = 288;
+            break;
+
+        case M4VIDEOEDITING_kVGA:
+            width = 640;
+            height = 480;
+            break;
+            /* +PR LV5807 */
+        case M4VIDEOEDITING_kWVGA:
+            width = 800;
+            height = 480;
+            break;
+
+        case M4VIDEOEDITING_kNTSC:
+            width = 720;
+            height = 480;
+            break;
+            /* -PR LV5807 */
+            /* +CR Google */
+        case M4VIDEOEDITING_k640_360:
+            width = 640;
+            height = 360;
+            break;
+
+        case M4VIDEOEDITING_k854_480:
+            // StageFright encoders require resolutions that are multiples of 16
+            width = M4ENCODER_854_480_Width;
+            height = 480;
+            break;
+
+        case M4VIDEOEDITING_kHD1280:
+            width = 1280;
+            height = 720;
+            break;
+
+        case M4VIDEOEDITING_kHD1080:
+            // StageFright encoders require resolutions that are multiples of 16
+            width = M4ENCODER_HD1080_Width;
+            height = 720;
+            break;
+
+        case M4VIDEOEDITING_kHD960:
+            width = 960;
+            height = 720;
+            break;
+
+            /* -CR Google */
+        default: /* If output video size is not given, we take QCIF size */
+            width = 176;
+            height = 144;
+            xVSS_context->pSettings->xVSS.outputVideoSize =
+                M4VIDEOEDITING_kQCIF;
+            break;
+    }
+
+    /* Get output Sampling frequency */
+    switch( xVSS_context->pSettings->xVSS.outputAudioSamplFreq )
+    {
+        case M4VIDEOEDITING_k8000_ASF:
+            samplingFreq = 8000;
+            break;
+
+        case M4VIDEOEDITING_k16000_ASF:
+            samplingFreq = 16000;
+            break;
+
+        case M4VIDEOEDITING_k22050_ASF:
+            samplingFreq = 22050;
+            break;
+
+        case M4VIDEOEDITING_k24000_ASF:
+            samplingFreq = 24000;
+            break;
+
+        case M4VIDEOEDITING_k32000_ASF:
+            samplingFreq = 32000;
+            break;
+
+        case M4VIDEOEDITING_k44100_ASF:
+            samplingFreq = 44100;
+            break;
+
+        case M4VIDEOEDITING_k48000_ASF:
+            samplingFreq = 48000;
+            break;
+
+        case M4VIDEOEDITING_kDefault_ASF:
+        default:
+            if( xVSS_context->pSettings->xVSS.outputAudioFormat
+                == M4VIDEOEDITING_kAMR_NB )
+            {
+                samplingFreq = 8000;
+            }
+            else if( xVSS_context->pSettings->xVSS.outputAudioFormat
+                == M4VIDEOEDITING_kAAC )
+            {
+                samplingFreq = 16000;
+            }
+            else
+            {
+                samplingFreq = 0;
+            }
+            break;
+    }
+
+    /* Allocate clip/transitions if clip number is not null ... */
+    if( 0 < xVSS_context->pSettings->uiClipNumber )
+    {
+        if( xVSS_context->pSettings->pClipList != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pSettings->pClipList));
+            xVSS_context->pSettings->pClipList = M4OSA_NULL;
+        }
+
+        if( xVSS_context->pSettings->pTransitionList != M4OSA_NULL )
+        {
+            M4OSA_free(
+                (M4OSA_MemAddr32)(xVSS_context->pSettings->pTransitionList));
+            xVSS_context->pSettings->pTransitionList = M4OSA_NULL;
+        }
+
+        xVSS_context->pSettings->pClipList =
+            (M4VSS3GPP_ClipSettings ** )M4OSA_malloc \
+            (sizeof(M4VSS3GPP_ClipSettings *)*xVSS_context->pSettings->uiClipNumber,
+            M4VS, (M4OSA_Char *)"xVSS, copy of pClipList");
+
+        if( xVSS_context->pSettings->pClipList == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_ALLOC;
+        }
+        /* Set clip list to NULL */
+        M4OSA_memset((M4OSA_MemAddr8)xVSS_context->pSettings->pClipList,
+            sizeof(M4VSS3GPP_ClipSettings *)
+            *xVSS_context->pSettings->uiClipNumber, 0);
+
+        if( xVSS_context->pSettings->uiClipNumber > 1 )
+        {
+            xVSS_context->pSettings->pTransitionList =
+                (M4VSS3GPP_TransitionSettings ** ) \
+                M4OSA_malloc(sizeof(M4VSS3GPP_TransitionSettings *)                \
+                *(xVSS_context->pSettings->uiClipNumber - 1), M4VS, (M4OSA_Char *) \
+                "xVSS, copy of pTransitionList");
+
+            if( xVSS_context->pSettings->pTransitionList == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            /* Set transition list to NULL */
+            M4OSA_memset(
+                (M4OSA_MemAddr8)xVSS_context->pSettings->pTransitionList,
+                sizeof(M4VSS3GPP_TransitionSettings *)
+                *(xVSS_context->pSettings->uiClipNumber - 1), 0);
+        }
+        else
+        {
+            xVSS_context->pSettings->pTransitionList = M4OSA_NULL;
+        }
+    }
+    /* else, there is a problem in the input settings structure */
+    else
+    {
+        M4OSA_TRACE1_0("No clip in this settings list !!");
+        /*FB: to avoid leaks when there is an error in the send command*/
+        /* Free Send command */
+        M4xVSS_freeCommand(xVSS_context);
+        /**/
+        return M4ERR_PARAMETER;
+    }
+
+    /* RC Allocate effects settings */
+    xVSS_context->pSettings->nbEffects = pSettings->nbEffects;
+
+    if( 0 < xVSS_context->pSettings->nbEffects )
+    {
+        xVSS_context->pSettings->Effects =
+            (M4VSS3GPP_EffectSettings *)M4OSA_malloc \
+            (xVSS_context->pSettings->nbEffects * sizeof(M4VSS3GPP_EffectSettings),
+            M4VS, (M4OSA_Char *)"effects settings");
+
+        if( xVSS_context->pSettings->Effects == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_ALLOC;
+        }
+        /*FB bug fix 19.03.2008: these pointers were not initialized -> crash when free*/
+        for ( i = 0; i < xVSS_context->pSettings->nbEffects; i++ )
+        {
+            xVSS_context->pSettings->Effects[i].xVSS.pFramingFilePath =
+                M4OSA_NULL;
+            xVSS_context->pSettings->Effects[i].xVSS.pFramingBuffer =
+                M4OSA_NULL;
+            xVSS_context->pSettings->Effects[i].xVSS.pTextBuffer = M4OSA_NULL;
+        }
+        /**/
+    }
+
+    if( xVSS_context->targetedTimescale == 0 )
+    {
+        M4OSA_UInt32 pTargetedTimeScale = 0;
+
+        err = M4xVSS_internalGetTargetedTimeScale(xVSS_context, pSettings,
+            &pTargetedTimeScale);
+
+        if( M4NO_ERROR != err || pTargetedTimeScale == 0 )
+        {
+            M4OSA_TRACE1_1("M4xVSS_SendCommand: M4xVSS_internalGetTargetedTimeScale\
+                           returned 0x%x", err);
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return err;
+        }
+        xVSS_context->targetedTimescale = pTargetedTimeScale;
+    }
+
+    /* Initialize total duration variable */
+    totalDuration = 0;
+
+    /* Parse the list of clips given by the application and prepare the analysis */
+    for ( i = 0; i < xVSS_context->pSettings->uiClipNumber; i++ )
+    {
+        /* Allocate current clip */
+        xVSS_context->pSettings->pClipList[i] =
+            (M4VSS3GPP_ClipSettings *)M4OSA_malloc \
+            (sizeof(M4VSS3GPP_ClipSettings), M4VS, (M4OSA_Char *)"clip settings");
+
+        if( xVSS_context->pSettings->pClipList[i] == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_ALLOC;
+        }
+
+        /* Copy clip settings from given structure to our xVSS_context structure */
+        err =
+            M4xVSS_DuplicateClipSettings(xVSS_context->pSettings->pClipList[i],
+            pSettings->pClipList[i], M4OSA_TRUE);
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1(
+                "M4xVSS_SendCommand: M4xVSS_DuplicateClipSettings return error 0x%x",
+                err);
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return err;
+        }
+
+        /* Because there is 1 less transition than clip number */
+        if( i < xVSS_context->pSettings->uiClipNumber - 1 )
+        {
+            xVSS_context->pSettings->pTransitionList[i] =
+                (M4VSS3GPP_TransitionSettings
+                *)M4OSA_malloc(sizeof(M4VSS3GPP_TransitionSettings),
+                M4VS, (M4OSA_Char *)"transition settings");
+
+            if( xVSS_context->pSettings->pTransitionList[i] == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+
+            M4OSA_memcpy(
+                (M4OSA_MemAddr8)xVSS_context->pSettings->pTransitionList[i],
+                (M4OSA_MemAddr8)pSettings->pTransitionList[i],
+                sizeof(M4VSS3GPP_TransitionSettings));
+            /* Initialize external effect context to NULL, to know if input jpg has already been
+            decoded or not */
+            xVSS_context->pSettings->pTransitionList[i]->
+                pExtVideoTransitionFctCtxt = M4OSA_NULL;
+
+            switch( xVSS_context->pSettings->
+                pTransitionList[i]->VideoTransitionType )
+            {
+                    /* If transition type is alpha magic, we need to decode input file */
+                case M4xVSS_kVideoTransitionType_AlphaMagic:
+                    /* Allocate our alpha magic settings structure to have a copy of the
+                    provided one */
+                    xVSS_context->pSettings->pTransitionList[i]->      \
+                     xVSS.transitionSpecific.pAlphaMagicSettings =
+                        (M4xVSS_AlphaMagicSettings *)M4OSA_malloc \
+                        (sizeof(M4xVSS_AlphaMagicSettings), M4VS,
+                        (M4OSA_Char *)"Input Alpha magic settings structure");
+
+                    if( xVSS_context->pSettings->pTransitionList[i]-> \
+                        xVSS.transitionSpecific.pAlphaMagicSettings == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0(
+                            "Allocation error in M4xVSS_SendCommand");
+                        /*FB: to avoid leaks when there is an error in the send command*/
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        /**/
+                        return M4ERR_ALLOC;
+                    }
+                    /* Copy data from the provided alpha magic settings structure to our
+                    structure */
+                    M4OSA_memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->
+                        pTransitionList[i]-> \
+                        xVSS.transitionSpecific.pAlphaMagicSettings,
+                        (M4OSA_MemAddr8)pSettings->pTransitionList[i]-> \
+                        xVSS.transitionSpecific.pAlphaMagicSettings,
+                        sizeof(M4xVSS_AlphaMagicSettings));
+
+                    /* Allocate our alpha magic input filename */
+                    xVSS_context->pSettings->pTransitionList[i]-> \
+                        xVSS.transitionSpecific.pAlphaMagicSettings->
+                        pAlphaFilePath = M4OSA_malloc(
+                        (M4OSA_chrLength(pSettings->pTransitionList[i]-> \
+                        xVSS.transitionSpecific.pAlphaMagicSettings->pAlphaFilePath)
+                        + 1), M4VS, (M4OSA_Char *)"Alpha magic file path");
+
+                    if( xVSS_context->pSettings->pTransitionList[i]-> \
+                        xVSS.transitionSpecific.pAlphaMagicSettings->pAlphaFilePath
+                        == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0(
+                            "Allocation error in M4xVSS_SendCommand");
+                        /*FB: to avoid leaks when there is an error in the send command*/
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        /**/
+                        return M4ERR_ALLOC;
+                    }
+                    /* Copy the provided alpha magic file path into our settings */
+                    M4OSA_chrNCopy(
+                        xVSS_context->pSettings->pTransitionList[i]->xVSS.
+                        transitionSpecific.pAlphaMagicSettings->
+                        pAlphaFilePath,
+                        pSettings->pTransitionList[i]->xVSS.
+                        transitionSpecific.pAlphaMagicSettings->
+                        pAlphaFilePath, M4OSA_chrLength(
+                        pSettings->pTransitionList[i]->xVSS.
+                        transitionSpecific.pAlphaMagicSettings->
+                        pAlphaFilePath) + 1);
+
+                    /* Parse all previous transitions to know whether this input file has already been decoded */
+                    for ( j = 0; j < i; j++ )
+                    {
+                        if( xVSS_context->pSettings->
+                            pTransitionList[j]->VideoTransitionType
+                            == M4xVSS_kVideoTransitionType_AlphaMagic )
+                        {
+                            M4OSA_UInt32 pCmpResult = 0;
+                            M4OSA_chrCompare(xVSS_context->pSettings->
+                                pTransitionList[i]->xVSS.
+                                transitionSpecific.pAlphaMagicSettings->
+                                pAlphaFilePath, xVSS_context->pSettings->
+                                pTransitionList[j]->xVSS.
+                                transitionSpecific.
+                                pAlphaMagicSettings->pAlphaFilePath,
+                                (M4OSA_Int32 *) &pCmpResult);
+
+                            if( pCmpResult == 0 )
+                            {
+                                M4xVSS_internal_AlphaMagicSettings
+                                    *alphaSettings;
+
+                                alphaSettings =
+                                    (M4xVSS_internal_AlphaMagicSettings
+                                    *)M4OSA_malloc(
+                                    sizeof(
+                                    M4xVSS_internal_AlphaMagicSettings),
+                                    M4VS,
+                                    (M4OSA_Char
+                                    *)
+                                    "Alpha magic settings structure 1");
+
+                                if( alphaSettings == M4OSA_NULL )
+                                {
+                                    M4OSA_TRACE1_0(
+                                        "Allocation error in M4xVSS_SendCommand");
+                                    /*FB: to avoid leaks when there is an error in the send
+                                     command*/
+                                    /* Free Send command */
+                                    M4xVSS_freeCommand(xVSS_context);
+                                    /**/
+                                    return M4ERR_ALLOC;
+                                }
+                                alphaSettings->pPlane =
+                                    ((M4xVSS_internal_AlphaMagicSettings *)(
+                                    xVSS_context->pSettings->
+                                    pTransitionList[j]->
+                                    pExtVideoTransitionFctCtxt))->
+                                    pPlane;
+
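+                                /* blendingPercent in (0,100] is mapped onto the
+                                 0..255 threshold range: 100% gives a threshold
+                                 of 127, i.e. half of the full range */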
+                                if( xVSS_context->pSettings->
+                                    pTransitionList[i]->xVSS.transitionSpecific.
+                                    pAlphaMagicSettings->blendingPercent > 0
+                                    && xVSS_context->pSettings->
+                                    pTransitionList[i]->xVSS.
+                                    transitionSpecific.
+                                    pAlphaMagicSettings->blendingPercent
+                                    <= 100 )
+                                {
+                                    alphaSettings->blendingthreshold =
+                                        ( xVSS_context->pSettings->
+                                        pTransitionList[i]->xVSS.
+                                        transitionSpecific.
+                                        pAlphaMagicSettings->
+                                        blendingPercent) * 255 / 200;
+                                }
+                                else
+                                {
+                                    alphaSettings->blendingthreshold = 0;
+                                }
+                                alphaSettings->isreverse =
+                                    xVSS_context->pSettings->
+                                    pTransitionList[i]->xVSS.
+                                    transitionSpecific.
+                                    pAlphaMagicSettings->isreverse;
+                                /* It means that the input jpg file for alpha magic has already
+                                 been decoded -> no need to decode it again */
+                                if( alphaSettings->blendingthreshold == 0 )
+                                {
+                                    xVSS_context->pSettings->
+                                        pTransitionList[i]->
+                                        ExtVideoTransitionFct =
+                                        M4xVSS_AlphaMagic;
+                                }
+                                else
+                                {
+                                    xVSS_context->pSettings->
+                                        pTransitionList[i]->
+                                        ExtVideoTransitionFct =
+                                        M4xVSS_AlphaMagicBlending;
+                                }
+                                xVSS_context->pSettings->pTransitionList[i]->
+                                    pExtVideoTransitionFctCtxt = alphaSettings;
+                                break;
+                            }
+                        }
+                    }
+
+                    /* If the jpg has not been decoded yet ... */
+                    if( xVSS_context->pSettings->
+                        pTransitionList[i]->pExtVideoTransitionFctCtxt
+                        == M4OSA_NULL )
+                    {
+                        M4VIFI_ImagePlane *outputPlane;
+                        M4xVSS_internal_AlphaMagicSettings *alphaSettings;
+                        /*UTF conversion support*/
+                        M4OSA_Void *pDecodedPath = M4OSA_NULL;
+
+                        /*To support ARGB8888 : get the width and height */
+                        M4OSA_UInt32 width_ARGB888 =
+                            xVSS_context->pSettings->pTransitionList[i]->xVSS.
+                            transitionSpecific.pAlphaMagicSettings->width;
+                        M4OSA_UInt32 height_ARGB888 =
+                            xVSS_context->pSettings->pTransitionList[i]->xVSS.
+                            transitionSpecific.pAlphaMagicSettings->height;
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: alpha magic file width is %d",
+                            width_ARGB888);
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: alpha magic file height is %d",
+                            height_ARGB888);
+                        /* Allocate output plane */
+                        outputPlane = (M4VIFI_ImagePlane *)M4OSA_malloc(3
+                            * sizeof(M4VIFI_ImagePlane), M4VS, (M4OSA_Char
+                            *)
+                            "Output plane for Alpha magic transition");
+
+                        if( outputPlane == M4OSA_NULL )
+                        {
+                            M4OSA_TRACE1_0(
+                                "Allocation error in M4xVSS_SendCommand");
+                            /*FB: to avoid leaks when there is an error in the send command*/
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            /**/
+                            return M4ERR_ALLOC;
+                        }
+
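+                        /* Set up a single YUV420 planar buffer: (width*height*3)>>1
+                        bytes hold the full-size Y plane followed by the
+                        quarter-size U and V planes */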
+                        outputPlane[0].u_width = width;
+                        outputPlane[0].u_height = height;
+                        outputPlane[0].u_topleft = 0;
+                        outputPlane[0].u_stride = width;
+                        outputPlane[0].pac_data = (M4VIFI_UInt8
+                            *)M4OSA_malloc(( width * height * 3)
+                            >> 1,
+                            M4VS,
+                            (M4OSA_Char
+                            *)
+                            "Alloc for the Alpha magic pac_data output YUV");
+
+                        if( outputPlane[0].pac_data == M4OSA_NULL )
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)outputPlane);
+                            outputPlane = M4OSA_NULL;
+                            M4OSA_TRACE1_0(
+                                "Allocation error in M4xVSS_SendCommand");
+                            /*FB: to avoid leaks when there is an error in the send command*/
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            /**/
+                            return M4ERR_ALLOC;
+                        }
+                        outputPlane[1].u_width = width >> 1;
+                        outputPlane[1].u_height = height >> 1;
+                        outputPlane[1].u_topleft = 0;
+                        outputPlane[1].u_stride = width >> 1;
+                        outputPlane[1].pac_data = outputPlane[0].pac_data
+                            + outputPlane[0].u_width * outputPlane[0].u_height;
+                        outputPlane[2].u_width = width >> 1;
+                        outputPlane[2].u_height = height >> 1;
+                        outputPlane[2].u_topleft = 0;
+                        outputPlane[2].u_stride = width >> 1;
+                        outputPlane[2].pac_data = outputPlane[1].pac_data
+                            + outputPlane[1].u_width * outputPlane[1].u_height;
+
+                        pDecodedPath =
+                            xVSS_context->pSettings->pTransitionList[i]->xVSS.
+                            transitionSpecific.pAlphaMagicSettings->
+                            pAlphaFilePath;
+                        /**
+                        * UTF conversion: convert into the customer format, before being used*/
+                        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                            != M4OSA_NULL && xVSS_context->
+                            UTFConversionContext.
+                            pTempOutConversionBuffer != M4OSA_NULL )
+                        {
+                            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                                (M4OSA_Void *)xVSS_context->pSettings->
+                                pTransitionList[i]->xVSS.
+                                transitionSpecific.
+                                pAlphaMagicSettings->pAlphaFilePath,
+                                (M4OSA_Void *)xVSS_context->
+                                UTFConversionContext.
+                                pTempOutConversionBuffer, &length);
+
+                            if( err != M4NO_ERROR )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
+                                    err);
+                                /* Free Send command */
+                                M4xVSS_freeCommand(xVSS_context);
+                                return err;
+                            }
+                            pDecodedPath =
+                                xVSS_context->UTFConversionContext.
+                                pTempOutConversionBuffer;
+                        }
+                        /**
+                        End of the conversion, use the decoded path*/
+                        /*To support ARGB8888 : convert + resizing from ARGB8888 to yuv420 */
+
+                        err = M4xVSS_internalConvertAndResizeARGB8888toYUV420(
+                            pDecodedPath,
+                            xVSS_context->pFileReadPtr, outputPlane,
+                            width_ARGB888, height_ARGB888);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_free(
+                                (M4OSA_MemAddr32)outputPlane[0].pac_data);
+                            outputPlane[0].pac_data = M4OSA_NULL;
+                            M4OSA_free((M4OSA_MemAddr32)outputPlane);
+                            outputPlane = M4OSA_NULL;
+                            M4xVSS_freeCommand(xVSS_context);
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_SendCommand: error when decoding alpha magic JPEG: 0x%x",
+                                err);
+                            return err;
+                        }
+
+                        /* Allocate alpha settings structure */
+                        alphaSettings =
+                            (M4xVSS_internal_AlphaMagicSettings *)M4OSA_malloc(
+                            sizeof(M4xVSS_internal_AlphaMagicSettings),
+                            M4VS, (M4OSA_Char
+                            *)"Alpha magic settings structure 2");
+
+                        if( alphaSettings == M4OSA_NULL )
+                        {
+                            M4OSA_TRACE1_0(
+                                "Allocation error in M4xVSS_SendCommand");
+                            /*FB: to avoid leaks when there is an error in the send command*/
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            /**/
+                            return M4ERR_ALLOC;
+                        }
+                        alphaSettings->pPlane = outputPlane;
+
+                        if( xVSS_context->pSettings->pTransitionList[i]->xVSS.
+                            transitionSpecific.pAlphaMagicSettings->
+                            blendingPercent > 0 && xVSS_context->pSettings->
+                            pTransitionList[i]->xVSS.
+                            transitionSpecific.pAlphaMagicSettings->
+                            blendingPercent <= 100 )
+                        {
+                            alphaSettings->blendingthreshold =
+                                ( xVSS_context->pSettings->
+                                pTransitionList[i]->xVSS.
+                                transitionSpecific.pAlphaMagicSettings->
+                                blendingPercent) * 255 / 200;
+                        }
+                        else
+                        {
+                            alphaSettings->blendingthreshold = 0;
+                        }
+                        alphaSettings->isreverse =
+                            xVSS_context->pSettings->pTransitionList[i]->xVSS.
+                            transitionSpecific.pAlphaMagicSettings->
+                            isreverse;
+
+                        if( alphaSettings->blendingthreshold == 0 )
+                        {
+                            xVSS_context->pSettings->pTransitionList[i]->
+                                ExtVideoTransitionFct = M4xVSS_AlphaMagic;
+                        }
+                        else
+                        {
+                            xVSS_context->pSettings->pTransitionList[i]->
+                                ExtVideoTransitionFct =
+                                M4xVSS_AlphaMagicBlending;
+                        }
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            pExtVideoTransitionFctCtxt = alphaSettings;
+                    }
+
+                    break;
+
+                case M4xVSS_kVideoTransitionType_SlideTransition:
+                    {
+                        M4xVSS_internal_SlideTransitionSettings *slideSettings;
+                        slideSettings =
+                            (M4xVSS_internal_SlideTransitionSettings *)M4OSA_malloc(
+                            sizeof(M4xVSS_internal_SlideTransitionSettings),
+                            M4VS, (M4OSA_Char
+                            *)"Internal slide transition settings");
+
+                        if( M4OSA_NULL == slideSettings )
+                        {
+                            M4OSA_TRACE1_0(
+                                "Allocation error in M4xVSS_SendCommand");
+                            /*FB: to avoid leaks when there is an error in the send command*/
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            /**/
+                            return M4ERR_ALLOC;
+                        }
+                        /* Just copy the lone parameter from the input settings to the internal
+                         context. */
+
+                        slideSettings->direction =
+                            pSettings->pTransitionList[i]->xVSS.transitionSpecific.
+                            pSlideTransitionSettings->direction;
+
+                        /* No need to keep our copy of the settings. */
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            xVSS.transitionSpecific.pSlideTransitionSettings =
+                            M4OSA_NULL;
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            ExtVideoTransitionFct = &M4xVSS_SlideTransition;
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            pExtVideoTransitionFctCtxt = slideSettings;
+                    }
+                    break;
+
+                case M4xVSS_kVideoTransitionType_FadeBlack:
+                    {
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            ExtVideoTransitionFct = &M4xVSS_FadeBlackTransition;
+                    }
+                    break;
+
+                case M4xVSS_kVideoTransitionType_External:
+                    {
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            ExtVideoTransitionFct =
+                            pSettings->pTransitionList[i]->ExtVideoTransitionFct;
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            pExtVideoTransitionFctCtxt =
+                            pSettings->pTransitionList[i]->
+                            pExtVideoTransitionFctCtxt;
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            VideoTransitionType =
+                            M4VSS3GPP_kVideoTransitionType_External;
+                    }
+                    break;
+
+                default:
+                    break;
+                } // switch
+
+            /* Update totalDuration: subtract the transition duration, as the
+            transition overlaps the two adjacent clips */
+            totalDuration -= xVSS_context->pSettings->
+                pTransitionList[i]->uiTransitionDuration;
+        }
+
+        /************************
+        JPG input file type case
+        *************************/
+#if 0
+
+        if( xVSS_context->pSettings->pClipList[i]->FileType
+            == M4VIDEOEDITING_kFileType_JPG )
+        {
+            M4OSA_Char out_img[64];
+            M4OSA_Char out_img_tmp[64];
+            M4xVSS_Pto3GPP_params *pParams;
+            M4OSA_Context pJPEGFileIn;
+            /*UTF conversion support*/
+            M4OSA_Void *pDecodedPath = pSettings->pClipList[i]->pFile;
+
+            /* Parse Pto3GPP params chained list to know if input file has already been
+            converted */
+            if( xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+            {
+                M4OSA_UInt32 pCmpResult = 0;
+
+                pParams = xVSS_context->pPTo3GPPparamsList;
+                /* We parse all Pto3gpp Param chained list */
+                while( pParams != M4OSA_NULL )
+                {
+                    M4OSA_chrCompare(pSettings->pClipList[i]->pFile,
+                        pParams->pFileIn, (M4OSA_Int32 *) &pCmpResult);
+
+                    if( pCmpResult == 0
+                        && (pSettings->pClipList[i]->uiEndCutTime
+                        == pParams->duration
+                        || pSettings->pClipList[i]->xVSS.uiDuration
+                        == pParams->duration)
+                        && pSettings->pClipList[i]->xVSS.MediaRendering
+                        == pParams->MediaRendering )
+                    {
+                        /* Replace JPG filename with existing 3GP filename */
+                        goto replaceJPG_3GP;
+                    }
+                    /* We need to update this variable, in case some pictures have been added
+                     between two */
+                    /* calls to M4xVSS_sendCommand */
+                    pPto3GPP_last = pParams;
+                    pParams = pParams->pNext;
+                }
+            }
+
+            /* Construct output temporary 3GP filename */
+            err = M4OSA_chrSPrintf(out_img, 63, (M4OSA_Char *)"%simg%d.3gp",
+                xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return err;
+            }
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+            err = M4OSA_chrSPrintf(out_img_tmp, 63, "%simg%d.tmp",
+                xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return err;
+            }
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+            xVSS_context->tempFileIndex++;
+
+            /* Allocate last element Pto3GPP params structure */
+            pParams = (M4xVSS_Pto3GPP_params
+                *)M4OSA_malloc(sizeof(M4xVSS_Pto3GPP_params),
+                M4VS, (M4OSA_Char *)"Element of Pto3GPP Params");
+
+            if( pParams == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0(
+                    "M4xVSS_sendCommand: Problem when allocating one element Pto3GPP Params");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+
+            /* Initializes pfilexxx members of pParams to be able to free them correctly */
+            pParams->pFileIn = M4OSA_NULL;
+            pParams->pFileOut = M4OSA_NULL;
+            pParams->pFileTemp = M4OSA_NULL;
+            pParams->pNext = M4OSA_NULL;
+            pParams->MediaRendering = M4xVSS_kResizing;
+
+            if( xVSS_context->pPTo3GPPparamsList
+                == M4OSA_NULL ) /* Means it is the first element of the list */
+            {
+                /* Initialize the xVSS context with the first element of the list */
+                xVSS_context->pPTo3GPPparamsList = pParams;
+
+                /* Save this element in case of other file to convert */
+                pPto3GPP_last = pParams;
+            }
+            else
+            {
+                /* Update next pointer of the previous last element of the chain */
+                pPto3GPP_last->pNext = pParams;
+
+                /* Update save of last element of the chain */
+                pPto3GPP_last = pParams;
+            }
+
+            /* Fill the last M4xVSS_Pto3GPP_params element */
+            pParams->duration =
+                xVSS_context->pSettings->pClipList[i]->uiEndCutTime;
+            /* If duration is filled, let's use it instead of EndCutTime */
+            if( xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration != 0 )
+            {
+                pParams->duration =
+                    xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration;
+            }
+
+            pParams->InputFileType = M4VIDEOEDITING_kFileType_JPG;
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
+            length = M4OSA_chrLength(pDecodedPath);
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
+                    *)xVSS_context->pSettings->pClipList[i]->pFile,
+                    (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer,
+                    &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
+                        err);
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+
+            /**
+            * End of the UTF conversion, use the converted file path*/
+            pParams->pFileIn = (M4OSA_Void *)M4OSA_malloc(length + 1, M4VS,
+                (M4OSA_Char *)"Pto3GPP Params: file in");
+
+            if( pParams->pFileIn == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            M4OSA_memcpy(pParams->pFileIn, pDecodedPath,
+                (length + 1)); /* Copy input file path */
+
+            /* Check that JPG file is present on the FS (P4ME00002974) by just opening and
+            closing it */
+            err =
+                xVSS_context->pFileReadPtr->openRead(&pJPEGFileIn, pDecodedPath,
+                M4OSA_kFileRead);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_2("Can't open input jpg file %s, error: 0x%x\n",
+                    pDecodedPath, err);
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                return err;
+            }
+            err = xVSS_context->pFileReadPtr->closeRead(pJPEGFileIn);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_2("Can't close input jpg file %s, error: 0x%x\n",
+                    pDecodedPath, err);
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                return err;
+            }
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            pDecodedPath = out_img;
+            length = M4OSA_chrLength(pDecodedPath);
+
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                    (M4OSA_Void *)out_img, (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer, &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
+                        err);
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+
+            /**
+            * End of the UTF conversion, use the converted file path*/
+            pParams->pFileOut = (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                (M4OSA_Char *)"Pto3GPP Params: file out");
+
+            if( pParams->pFileOut == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            M4OSA_memcpy(pParams->pFileOut, pDecodedPath,
+                (length + 1)); /* Copy output file path */
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+
+            pDecodedPath = out_img_tmp;
+            length = M4OSA_chrLength(pDecodedPath);
+
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                    (M4OSA_Void *)out_img_tmp, (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer, &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                        err);
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+
+            /**
+            * End of the UTF conversion, use the converted file path*/
+            pParams->pFileTemp = (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                (M4OSA_Char *)"Pto3GPP Params: file temp");
+
+            if( pParams->pFileTemp == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            M4OSA_memcpy(pParams->pFileTemp, pDecodedPath,
+                (length + 1)); /* Copy temporary file path */
+
+#endif                         /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+            /* Fill PanAndZoom settings if needed */
+
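+            /* The range checks below treat the Pan & Zoom coordinates and sizes
+            as percentages (0..100) of the picture */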
+            if( M4OSA_TRUE
+                == xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom )
+            {
+                pParams->isPanZoom =
+                    xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom;
+                /* Check that Pan & Zoom parameters are correct */
+                if( xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa > 100
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa
+                    <= 0 || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftXa > 100
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftXa < 0
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftYa > 100
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftYa < 0
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb
+                    > 100
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb
+                    <= 0 || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftXb > 100
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftXb < 0
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftYb > 100
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftYb < 0 )
+                {
+                    M4OSA_TRACE1_0("M4xVSS_SendCommand: invalid Pan & Zoom parameters");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return M4ERR_PARAMETER;
+                }
+
+                pParams->PanZoomXa =
+                    xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa;
+                pParams->PanZoomTopleftXa =
+                    xVSS_context->pSettings->
+                    pClipList[i]->xVSS.PanZoomTopleftXa;
+                pParams->PanZoomTopleftYa =
+                    xVSS_context->pSettings->
+                    pClipList[i]->xVSS.PanZoomTopleftYa;
+                pParams->PanZoomXb =
+                    xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb;
+                pParams->PanZoomTopleftXb =
+                    xVSS_context->pSettings->
+                    pClipList[i]->xVSS.PanZoomTopleftXb;
+                pParams->PanZoomTopleftYb =
+                    xVSS_context->pSettings->
+                    pClipList[i]->xVSS.PanZoomTopleftYb;
+            }
+            else
+            {
+                pParams->isPanZoom = M4OSA_FALSE;
+            }
+            /*+ PR No: blrnxpsw#223*/
+            /*Initializing the Video Frame Rate as it may not be initialized*/
+            /*Related change is @ M4xVSS_Internal.c @ line 1518 in
+            M4xVSS_internalStartConvertPictureTo3gp*/
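+            /* Note: the switch below stores the inter-frame duration
+            (1000/fps, apparently in milliseconds) rather than the fps value
+            itself: 30 fps -> 33, 15 fps -> 66, 5 fps -> 200 */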
+            switch( xVSS_context->pSettings->videoFrameRate )
+            {
+                case M4VIDEOEDITING_k30_FPS:
+                    pParams->framerate = 33;
+                    break;
+
+                case M4VIDEOEDITING_k25_FPS:
+                    pParams->framerate = 40;
+                    break;
+
+                case M4VIDEOEDITING_k20_FPS:
+                    pParams->framerate = 50;
+                    break;
+
+                case M4VIDEOEDITING_k15_FPS:
+                    pParams->framerate = 66;
+                    break;
+
+                case M4VIDEOEDITING_k12_5_FPS:
+                    pParams->framerate = 80;
+                    break;
+
+                case M4VIDEOEDITING_k10_FPS:
+                    pParams->framerate = 100;
+                    break;
+
+                case M4VIDEOEDITING_k7_5_FPS:
+                    pParams->framerate = 133;
+                    break;
+
+                case M4VIDEOEDITING_k5_FPS:
+                    pParams->framerate = 200;
+                    break;
+
+                default:
+                    /*Making Default Frame Rate @ 15 FPS*/
+                    pParams->framerate = 66;
+                    break;
+            }
+            /*-PR No: blrnxpsw#223*/
+            if( xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering
+                == M4xVSS_kCropping
+                || xVSS_context->pSettings->pClipList[i]->xVSS.
+                MediaRendering == M4xVSS_kBlackBorders
+                || xVSS_context->pSettings->pClipList[i]->xVSS.
+                MediaRendering == M4xVSS_kResizing )
+            {
+                pParams->MediaRendering =
+                    xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering;
+            }
+
+            pParams->pNext = M4OSA_NULL;
+            pParams->isCreated = M4OSA_FALSE;
+            xVSS_context->nbStepTotal++;
+
+replaceJPG_3GP:
+            /* Update total duration */
+            totalDuration += pParams->duration;
+
+            /* Replacing in VSS structure the JPG file by the 3gp file */
+            xVSS_context->pSettings->pClipList[i]->FileType =
+                M4VIDEOEDITING_kFileType_3GPP;
+
+            if( xVSS_context->pSettings->pClipList[i]->pFile != M4OSA_NULL )
+            {
+                M4OSA_free(xVSS_context->pSettings->pClipList[i]->pFile);
+                xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_NULL;
+            }
+
+            /**
+            * UTF conversion: convert into UTF8, before being used*/
+            pDecodedPath = pParams->pFileOut;
+
+            if( xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
+                && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertToUTF8(xVSS_context,
+                    (M4OSA_Void *)pParams->pFileOut,
+                    (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer,
+                    &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err: 0x%x",
+                        err);
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+            else
+            {
+                length = M4OSA_chrLength(pDecodedPath);
+            }
+            /**
+            * End of the UTF conversion, use the converted file path*/
+            xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_malloc((length
+                + 1), M4VS, (M4OSA_Char *)"xVSS file path of jpg to 3gp");
+
+            if( xVSS_context->pSettings->pClipList[i]->pFile == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            M4OSA_memcpy(xVSS_context->pSettings->pClipList[i]->pFile,
+                pDecodedPath, (length + 1));
+            /*FB: add file path size because of UTF16 conversion*/
+            xVSS_context->pSettings->pClipList[i]->filePathSize = length+1;
+        }
+#endif
+
+        if( xVSS_context->pSettings->pClipList[i]->FileType
+            == M4VIDEOEDITING_kFileType_ARGB8888 )
+        {
+            M4OSA_Char out_img[64];
+            M4OSA_Char out_img_tmp[64];
+            M4xVSS_Pto3GPP_params *pParams = M4OSA_NULL;
+            M4OSA_Context pARGBFileIn;
+            /*UTF conversion support*/
+            M4OSA_Void *pDecodedPath = pSettings->pClipList[i]->pFile;
+
+            /* Parse Pto3GPP params chained list to know if input file has already been
+            converted */
+            if( xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+            {
+                M4OSA_UInt32 pCmpResult = 0;
+
+                pParams = xVSS_context->pPTo3GPPparamsList;
+                /* We parse all Pto3gpp Param chained list */
+                while( pParams != M4OSA_NULL )
+                {
+                    M4OSA_chrCompare(pSettings->pClipList[i]->pFile,
+                        pParams->pFileIn, (M4OSA_Int32 *)&pCmpResult);
+
+                    if( pCmpResult == 0
+                        && (pSettings->pClipList[i]->uiEndCutTime
+                        == pParams->duration
+                        || pSettings->pClipList[i]->xVSS.uiDuration
+                        == pParams->duration)
+                        && pSettings->pClipList[i]->xVSS.MediaRendering
+                        == pParams->MediaRendering )
+                    {
+                        /* Replace ARGB filename with existing 3GP filename */
+                        goto replaceARGB_3GP;
+                    }
+                    /* We need to update this variable, in case some pictures have been
+                     added between two */
+                    /* calls to M4xVSS_sendCommand */
+                    pPto3GPP_last = pParams;
+                    pParams = pParams->pNext;
+                }
+            }
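+            /* No already-converted picture matched: create a new Pto3GPP
+            entry and a temporary 3GP output filename below */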
+
+            /* Construct output temporary 3GP filename */
+            err = M4OSA_chrSPrintf(out_img, 63, (M4OSA_Char *)"%simg%d.3gp",
+                xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return err;
+            }
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+            err = M4OSA_chrSPrintf(out_img_tmp, 63, "%simg%d.tmp",
+                xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return err;
+            }
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+            xVSS_context->tempFileIndex++;
+
+            /* Allocate last element Pto3GPP params structure */
+            pParams = (M4xVSS_Pto3GPP_params
+                *)M4OSA_malloc(sizeof(M4xVSS_Pto3GPP_params),
+                M4VS, (M4OSA_Char *)"Element of Pto3GPP Params");
+
+            if( pParams == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0(
+                    "M4xVSS_sendCommand: Problem when allocating one element Pto3GPP Params");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+
+            /* Initializes pfilexxx members of pParams to be able to free them correctly */
+            pParams->pFileIn = M4OSA_NULL;
+            pParams->pFileOut = M4OSA_NULL;
+            pParams->pFileTemp = M4OSA_NULL;
+            pParams->pNext = M4OSA_NULL;
+            pParams->MediaRendering = M4xVSS_kResizing;
+
+            /* To support ARGB8888: get the width and height */
+            pParams->height = pSettings->pClipList[
+                i]->ClipProperties.uiStillPicHeight; //ARGB_Height;
+            pParams->width = pSettings->pClipList[
+                i]->ClipProperties.uiStillPicWidth; //ARGB_Width;
+            M4OSA_TRACE1_1("CLIP M4xVSS_SendCommand height is %d", pParams->height);
+            M4OSA_TRACE1_1("CLIP M4xVSS_SendCommand width is %d", pParams->width);
+
+                    if( xVSS_context->pPTo3GPPparamsList
+                        == M4OSA_NULL ) /* Means it is the first element of the list */
+                    {
+                        /* Initialize the xVSS context with the first element of the list */
+                        xVSS_context->pPTo3GPPparamsList = pParams;
+
+                        /* Save this element in case of other file to convert */
+                        pPto3GPP_last = pParams;
+                    }
+                    else
+                    {
+                        /* Update next pointer of the previous last element of the chain */
+                        pPto3GPP_last->pNext = pParams;
+
+                        /* Update save of last element of the chain */
+                        pPto3GPP_last = pParams;
+                    }
+
+                    /* Fill the last M4xVSS_Pto3GPP_params element */
+                    pParams->duration =
+                        xVSS_context->pSettings->pClipList[i]->uiEndCutTime;
+                    /* If duration is filled, let's use it instead of EndCutTime */
+                    if( xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration != 0 )
+                    {
+                        pParams->duration =
+                            xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration;
+                    }
+
+                    pParams->InputFileType = M4VIDEOEDITING_kFileType_ARGB8888;
+
+                    /**
+                    * UTF conversion: convert into the customer format, before being used*/
+                    pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
+                    length = M4OSA_chrLength(pDecodedPath);
+
+                    /**
+                    * UTF conversion: convert into the customer format, before being used*/
+                    if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                        != M4OSA_NULL && xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer
+                        != M4OSA_NULL )
+                    {
+                        err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
+                            *)xVSS_context->pSettings->pClipList[i]->pFile,
+                            (M4OSA_Void *)xVSS_context->
+                            UTFConversionContext.pTempOutConversionBuffer,
+                            &length);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
+                                err);
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            return err;
+                        }
+                        pDecodedPath =
+                            xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+                    }
+
+                    /**
+                    * End of the UTF conversion, use the converted file path*/
+                    pParams->pFileIn = (M4OSA_Void *)M4OSA_malloc(length + 1, M4VS,
+                        (M4OSA_Char *)"Pto3GPP Params: file in");
+
+                    if( pParams->pFileIn == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                        /*FB: to avoid leaks when there is an error in the send command*/
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        /**/
+                        return M4ERR_ALLOC;
+                    }
+                    M4OSA_memcpy(pParams->pFileIn, pDecodedPath,
+                        (length + 1)); /* Copy input file path */
+
+                    /* Check that the ARGB file is present on the FS (P4ME00002974) by just
+                     opening and closing it */
+                    err =
+                        xVSS_context->pFileReadPtr->openRead(&pARGBFileIn, pDecodedPath,
+                        M4OSA_kFileRead);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_2("Can't open input picture file %s, error: 0x%x\n",
+                            pDecodedPath, err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                    err = xVSS_context->pFileReadPtr->closeRead(pARGBFileIn);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_2("Can't close input picture file %s, error: 0x%x\n",
+                            pDecodedPath, err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+
+                    /**
+                    * UTF conversion: convert into the customer format, before being used*/
+                    pDecodedPath = out_img;
+                    length = M4OSA_chrLength(pDecodedPath);
+
+                    if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                        != M4OSA_NULL && xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer
+                        != M4OSA_NULL )
+                    {
+                        err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                            (M4OSA_Void *)out_img, (M4OSA_Void *)xVSS_context->
+                            UTFConversionContext.pTempOutConversionBuffer, &length);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
+                                err);
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            return err;
+                        }
+                        pDecodedPath =
+                            xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+                    }
+
+                    /**
+                    * End of the UTF conversion, use the converted file path*/
+                    pParams->pFileOut = (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                        (M4OSA_Char *)"Pto3GPP Params: file out");
+
+                    if( pParams->pFileOut == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                        /*FB: to avoid leaks when there is an error in the send command*/
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        /**/
+                        return M4ERR_ALLOC;
+                    }
+                    M4OSA_memcpy(pParams->pFileOut, pDecodedPath,
+                        (length + 1)); /* Copy output file path */
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+                    /**
+                    * UTF conversion: convert into the customer format, before being used*/
+
+                    pDecodedPath = out_img_tmp;
+                    length = M4OSA_chrLength(pDecodedPath);
+
+                    if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                        != M4OSA_NULL && xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer
+                        != M4OSA_NULL )
+                    {
+                        err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                            (M4OSA_Void *)out_img_tmp, (M4OSA_Void *)xVSS_context->
+                            UTFConversionContext.pTempOutConversionBuffer, &length);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8\
+                                 returns err: 0x%x",
+                                err);
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            return err;
+                        }
+                        pDecodedPath =
+                            xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+                    }
+
+                    /**
+                    * End of the UTF conversion, use the converted file path*/
+                    pParams->pFileTemp = (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                        (M4OSA_Char *)"Pto3GPP Params: file temp");
+
+                    if( pParams->pFileTemp == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                        /*FB: to avoid leaks when there is an error in the send command*/
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        /**/
+                        return M4ERR_ALLOC;
+                    }
+                    M4OSA_memcpy(pParams->pFileTemp, pDecodedPath,
+                        (length + 1)); /* Copy temporary file path */
+
+#endif                         /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+                    /* Fill PanAndZoom settings if needed */
+
+                    if( M4OSA_TRUE
+                        == xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom )
+                    {
+                        pParams->isPanZoom =
+                            xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom;
+                        /* Check that the Pan & Zoom parameters are correct */
+                        if( xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa > 100
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa
+                            <= 0 || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftXa > 100
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftXa < 0
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftYa > 100
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftYa < 0
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb
+                            > 100
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb
+                            <= 0 || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftXb > 100
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftXb < 0
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftYb > 100
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftYb < 0 )
+                        {
+                            M4OSA_TRACE1_0("M4xVSS_SendCommand: invalid Pan & Zoom parameters");
+                            M4xVSS_freeCommand(xVSS_context);
+                            return M4ERR_PARAMETER;
+                        }
+
+                        pParams->PanZoomXa =
+                            xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa;
+                        pParams->PanZoomTopleftXa =
+                            xVSS_context->pSettings->
+                            pClipList[i]->xVSS.PanZoomTopleftXa;
+                        pParams->PanZoomTopleftYa =
+                            xVSS_context->pSettings->
+                            pClipList[i]->xVSS.PanZoomTopleftYa;
+                        pParams->PanZoomXb =
+                            xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb;
+                        pParams->PanZoomTopleftXb =
+                            xVSS_context->pSettings->
+                            pClipList[i]->xVSS.PanZoomTopleftXb;
+                        pParams->PanZoomTopleftYb =
+                            xVSS_context->pSettings->
+                            pClipList[i]->xVSS.PanZoomTopleftYb;
+                    }
+                    else
+                    {
+                        pParams->isPanZoom = M4OSA_FALSE;
+                    }
+                    /*+ PR No: blrnxpsw#223*/
+                    /*Initializing the video frame rate as it may not be initialized*/
+                    /*Other changes are made in M4xVSS_Internal.c, around line 1518, in
+                    M4xVSS_internalStartConvertPictureTo3gp*/
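+                    /* Note: pParams->framerate holds the inter-frame interval in milliseconds
+                    (roughly 1000/fps), not the frame rate value itself */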
+                    switch( xVSS_context->pSettings->videoFrameRate )
+                    {
+                        case M4VIDEOEDITING_k30_FPS:
+                            pParams->framerate = 33;
+                            break;
+
+                        case M4VIDEOEDITING_k25_FPS:
+                            pParams->framerate = 40;
+                            break;
+
+                        case M4VIDEOEDITING_k20_FPS:
+                            pParams->framerate = 50;
+                            break;
+
+                        case M4VIDEOEDITING_k15_FPS:
+                            pParams->framerate = 66;
+                            break;
+
+                        case M4VIDEOEDITING_k12_5_FPS:
+                            pParams->framerate = 80;
+                            break;
+
+                        case M4VIDEOEDITING_k10_FPS:
+                            pParams->framerate = 100;
+                            break;
+
+                        case M4VIDEOEDITING_k7_5_FPS:
+                            pParams->framerate = 133;
+                            break;
+
+                        case M4VIDEOEDITING_k5_FPS:
+                            pParams->framerate = 200;
+                            break;
+
+                        default:
+                            /*Making Default Frame Rate @ 15 FPS*/
+                            pParams->framerate = 66;
+                            break;
+                    }
+                    /*-PR No: blrnxpsw#223*/
+                    if( xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering
+                        == M4xVSS_kCropping
+                        || xVSS_context->pSettings->pClipList[i]->xVSS.
+                        MediaRendering == M4xVSS_kBlackBorders
+                        || xVSS_context->pSettings->pClipList[i]->xVSS.
+                        MediaRendering == M4xVSS_kResizing )
+                    {
+                        pParams->MediaRendering =
+                            xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering;
+                    }
+
+                    pParams->pNext = M4OSA_NULL;
+                    pParams->isCreated = M4OSA_FALSE;
+                    xVSS_context->nbStepTotal++;
+
+replaceARGB_3GP:
+                    /* Update total duration */
+                    totalDuration += pParams->duration;
+
+                    /* Replacing in VSS structure the JPG file by the 3gp file */
+                    xVSS_context->pSettings->pClipList[i]->FileType =
+                        M4VIDEOEDITING_kFileType_3GPP;
+
+                    if( xVSS_context->pSettings->pClipList[i]->pFile != M4OSA_NULL )
+                    {
+                        M4OSA_free(xVSS_context->pSettings->pClipList[i]->pFile);
+                        xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_NULL;
+                    }
+
+                    /**
+                    * UTF conversion: convert into UTF8, before being used*/
+                    pDecodedPath = pParams->pFileOut;
+
+                    if( xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
+                        && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+                        != M4OSA_NULL )
+                    {
+                        err = M4xVSS_internalConvertToUTF8(xVSS_context,
+                            (M4OSA_Void *)pParams->pFileOut,
+                            (M4OSA_Void *)xVSS_context->
+                            UTFConversionContext.pTempOutConversionBuffer,
+                            &length);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err: \
+                                0x%x",err);
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            return err;
+                        }
+                        pDecodedPath =
+                            xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+                    }
+                    else
+                    {
+                        length = M4OSA_chrLength(pDecodedPath);
+                    }
+                    /**
+                    * End of the UTF conversion, use the converted file path*/
+                    xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_malloc((length
+                        + 1), M4VS, (M4OSA_Char *)"xVSS file path of ARGB to 3gp");
+
+                    if( xVSS_context->pSettings->pClipList[i]->pFile == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                        /*FB: to avoid leaks when there is an error in the send command*/
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        /**/
+                        return M4ERR_ALLOC;
+                    }
+                    M4OSA_memcpy(xVSS_context->pSettings->pClipList[i]->pFile,
+                        pDecodedPath, (length + 1));
+                    /*FB: add file path size because of UTF16 conversion*/
+                    xVSS_context->pSettings->pClipList[i]->filePathSize = length+1;
+        }
+        /************************
+        3GP input file type case
+        *************************/
+        else if( xVSS_context->pSettings->pClipList[i]->FileType
+            == M4VIDEOEDITING_kFileType_3GPP
+            || xVSS_context->pSettings->pClipList[i]->FileType
+            == M4VIDEOEDITING_kFileType_MP4 )
+        {
+            /*UTF conversion support*/
+            M4OSA_Void *pDecodedPath = M4OSA_NULL;
+
+            /* Need to call MCS in case 3GP video/audio types are not compatible
+            (H263/MPEG4 or AMRNB/AAC) */
+            /* => Need to fill MCS_Params structure with the right parameters ! */
+            /* Need also to parse MCS params struct to check if file has already been transcoded */
+
+            M4VIDEOEDITING_ClipProperties fileProperties;
+            M4xVSS_MCS_params *pParams;
+            M4OSA_Bool audioIsDifferent = M4OSA_FALSE;
+            M4OSA_Bool videoIsDifferent = M4OSA_FALSE;
+            M4OSA_Bool bAudioMono;
+#ifdef TIMESCALE_BUG
+
+            M4OSA_Bool timescaleDifferent = M4OSA_FALSE;
+
+#endif
+
+            /* Initialize file properties structure */
+
+            M4OSA_memset((M4OSA_MemAddr8) &fileProperties,
+                sizeof(M4VIDEOEDITING_ClipProperties), 0);
+
+            //fileProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
+
+            /* Prevent bad initialization of the percentage cut times */
+            if( xVSS_context->pSettings->pClipList[i]->xVSS.uiEndCutPercent
+                            > 100 || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            uiBeginCutPercent > 100 )
+            {
+                /* These percentage cut times have probably not been initialized */
+                /* Let's not use them by setting them to 0 */
+                xVSS_context->pSettings->pClipList[i]->xVSS.uiEndCutPercent = 0;
+                xVSS_context->pSettings->pClipList[i]->xVSS.uiBeginCutPercent =
+                    0;
+            }
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
+
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
+                    *)xVSS_context->pSettings->pClipList[i]->pFile,
+                    (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer,
+                    &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                        err);
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+            /**
+            * End of the UTF conversion, use the converted file path*/
+            err = M4xVSS_internalGetProperties(xVSS_context, pDecodedPath,
+                &fileProperties);
+
+            if( err != M4NO_ERROR )
+            {
+                M4xVSS_freeCommand(xVSS_context);
+                M4OSA_TRACE1_1(
+                    "M4xVSS_sendCommand: M4xVSS_internalGetProperties returned 0x%x",
+                    err);
+                /* TODO: Translate error code of MCS to an xVSS error code */
+                return err;
+            }
+
+            /* Parse MCS params chained list to know if input file has already been converted */
+            if( xVSS_context->pMCSparamsList != M4OSA_NULL )
+            {
+                M4OSA_UInt32 pCmpResult = 0;
+
+                pParams = xVSS_context->pMCSparamsList;
+                /* We parse all MCS Param chained list */
+                while( pParams != M4OSA_NULL )
+                {
+
+                    /**
+                    * UTF conversion: convert into UTF8, before being used*/
+                    pDecodedPath = pParams->pFileIn;
+
+                    if( xVSS_context->UTFConversionContext.pConvToUTF8Fct
+                        != M4OSA_NULL && xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer
+                        != M4OSA_NULL )
+                    {
+                        err = M4xVSS_internalConvertToUTF8(xVSS_context,
+                            (M4OSA_Void *)pParams->pFileIn,
+                            (M4OSA_Void *)xVSS_context->
+                            UTFConversionContext.
+                            pTempOutConversionBuffer, &length);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err:\
+                                 0x%x", err);
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            return err;
+                        }
+                        pDecodedPath = xVSS_context->
+                            UTFConversionContext.pTempOutConversionBuffer;
+                    }
+
+                    /**
+                    * End of the UTF conversion, use the converted file path*/
+                    M4OSA_chrCompare(pSettings->pClipList[i]->pFile,
+                        pDecodedPath, (M4OSA_Int32 *) &pCmpResult);
+
+                    /* If input filenames are the same, and if this is not a BGM, we can reuse
+                    the transcoded file */
+                    if( pCmpResult == 0 && pParams->isBGM == M4OSA_FALSE
+                        && pParams->BeginCutTime
+                        == pSettings->pClipList[i]->uiBeginCutTime
+                        && (pParams->EndCutTime
+                        == pSettings->pClipList[i]->uiEndCutTime
+                        || pParams->EndCutTime
+                        == pSettings->pClipList[i]->uiBeginCutTime
+                        + pSettings->pClipList[i]->xVSS.uiDuration)
+                        && pSettings->pClipList[i]->xVSS.MediaRendering
+                        == pParams->MediaRendering )
+                    {
+                        if( pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+                        {
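+                            /* With a BGM track, the transcoded file can be reused only if the
+                            BGM fully replaces the audio (volume 100), the audio already matches
+                            the requested output format, or the clip has no audio */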
+                            if( pSettings->xVSS.pBGMtrack->uiAddVolume == 100
+                                || (pParams->OutputAudioFormat
+                                == M4VIDEOEDITING_kNullAudio
+                                && fileProperties.AudioStreamType
+                                == pSettings->xVSS.outputAudioFormat)
+                                || pParams->OutputAudioFormat
+                                == pSettings->xVSS.outputAudioFormat
+                                || fileProperties.AudioStreamType
+                                == M4VIDEOEDITING_kNoneAudio )
+                            {
+                                /* Replace 3GP filename with transcoded 3GP filename */
+                                goto replace3GP_3GP;
+                            }
+                        }
+                        else if( ( pParams->OutputAudioFormat
+                            == M4VIDEOEDITING_kNullAudio
+                            && fileProperties.AudioStreamType
+                            == pSettings->xVSS.outputAudioFormat)
+                            || pParams->OutputAudioFormat
+                            == pSettings->xVSS.outputAudioFormat
+                            || fileProperties.AudioStreamType
+                            == M4VIDEOEDITING_kNoneAudio )
+                        {
+                            /* Replace 3GP filename with transcoded 3GP filename */
+                            goto replace3GP_3GP;
+                        }
+                    }
+
+                    /* We need to update this variable, in case some 3GP files have been added
+                    between two calls to M4xVSS_sendCommand */
+                    pMCS_last = pParams;
+                    pParams = pParams->pNext;
+                }
+            }
+
+            /* If we have percentage information let's use it... */
+            if( xVSS_context->pSettings->pClipList[i]->xVSS.uiEndCutPercent != 0
+                || xVSS_context->pSettings->pClipList[i]->xVSS.uiBeginCutPercent
+                != 0 )
+            {
+                /* If the percentage information is inconsistent and the duration field is not filled */
+                if( ( xVSS_context->pSettings->pClipList[i]->xVSS.
+                    uiEndCutPercent
+                    <= xVSS_context->pSettings->pClipList[i]->xVSS.
+                    uiBeginCutPercent)
+                    && xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration
+                    == 0 )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4xVSS_sendCommand: Bad percentage for begin and end cut time !");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return M4ERR_PARAMETER;
+                }
+
+                /* We transform the percentage into absolute time */
+                xVSS_context->pSettings->pClipList[i]->uiBeginCutTime
+                    = (M4OSA_UInt32)(
+                    xVSS_context->pSettings->pClipList[i]->xVSS.
+                    uiBeginCutPercent
+                    * fileProperties.uiClipDuration / 100);
+                xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+                    = (M4OSA_UInt32)(
+                    xVSS_context->pSettings->pClipList[i]->xVSS.
+                    uiEndCutPercent
+                    * fileProperties.uiClipDuration / 100);
+            }
+            /* ...Otherwise, we use absolute time. */
+            else
+            {
+                /* If endCutTime == 0, the whole file is taken. Set it to the file
+                duration for an accurate preview. */
+                if( xVSS_context->pSettings->pClipList[i]->uiEndCutTime == 0
+                    || xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+                    > fileProperties.uiClipDuration )
+                {
+                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime =
+                        fileProperties.uiClipDuration;
+                }
+            }
+
+            /* If the duration field is filled, it takes priority over EndCutTime,
+             so let's use it */
+            if( xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration != 0 )
+            {
+                xVSS_context->pSettings->pClipList[i]->uiEndCutTime =
+                    xVSS_context->pSettings->pClipList[i]->uiBeginCutTime
+                    +xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration;
+
+                if( xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+                    > fileProperties.uiClipDuration )
+                {
+                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime =
+                        fileProperties.uiClipDuration;
+                }
+            }
+
+            /* If the output video format is not set, return a parameter error
+            (it is no longer derived from the first 3GP video) */
+            if( xVSS_context->pSettings->xVSS.outputVideoFormat
+                == M4VIDEOEDITING_kNoneVideo )
+            {
+                //xVSS_context->pSettings->xVSS.outputVideoFormat = fileProperties.VideoStreamType;
+                //M4OSA_TRACE2_1("Output video format is not set, set it to current clip: %d",
+                // xVSS_context->pSettings->xVSS.outputVideoFormat);
+                M4OSA_TRACE1_0(
+                    "Output video format is not set, an error parameter is returned.");
+                M4xVSS_freeCommand(xVSS_context);
+                return M4ERR_PARAMETER;
+            }
+
+            if( xVSS_context->pSettings->xVSS.outputAudioFormat
+                == M4VIDEOEDITING_kNoneAudio )
+            {
+                //xVSS_context->pSettings->xVSS.outputAudioFormat = fileProperties.AudioStreamType;
+                M4OSA_TRACE2_1(
+                    "Output audio format is not set -> remove audio track of clip: %d",
+                    i);
+            }
+
+#ifdef TIMESCALE_BUG
+            /* Check timescale */
+
+            if( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4 //&&
+                /* !!!!!!!!!!!! Add condition to update timescale !!!!!!!!!!!!!!!!!!!!!!!!! */ )
+            {
+                timescaleDifferent = M4OSA_TRUE;
+            }
+
+#endif
+            /* If the output video format/size is not the same as provided video,
+            let's transcode it */
+
+            if( fileProperties.VideoStreamType
+                != xVSS_context->pSettings->xVSS.outputVideoFormat
+                || fileProperties.uiVideoWidth != width
+                || fileProperties.uiVideoHeight != height
+                || (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4
+                && fileProperties.uiVideoTimeScale
+                != xVSS_context->targetedTimescale) )
+            {
+                videoIsDifferent = M4OSA_TRUE;
+            }
+            /* Temporary workaround for an H.264 compressed-domain issue: force video transcoding
+            for all clips */
+            videoIsDifferent = M4OSA_TRUE;
+
+            if( fileProperties.uiNbChannels == 1 )
+            {
+                bAudioMono = M4OSA_TRUE;
+            }
+            else
+            {
+                bAudioMono = M4OSA_FALSE;
+            }
+
+            if( fileProperties.AudioStreamType
+                != xVSS_context->pSettings->xVSS.outputAudioFormat
+                || (fileProperties.AudioStreamType == M4VIDEOEDITING_kAAC
+                && (fileProperties.uiSamplingFrequency != samplingFreq
+                || bAudioMono
+                != xVSS_context->pSettings->xVSS.bAudioMono)) )
+            {
+                audioIsDifferent = M4OSA_TRUE;
+                /* If we want to replace audio, there is no need to transcode audio */
+                if( pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+                {
+                    /* Temporary fix: PT volume not heard in the second clip */
+                    if( /*(pSettings->xVSS.pBGMtrack->uiAddVolume == 100
+                        && xVSS_context->pSettings->xVSS.outputFileSize == 0)
+                        ||*/
+                        fileProperties.AudioStreamType
+                        == M4VIDEOEDITING_kNoneAudio ) /*11/12/2008 CR 3283 VAL: for the MMS
+                        use case, transcode everything except media without audio*/
+                    {
+                        audioIsDifferent = M4OSA_FALSE;
+                    }
+                }
+                else if( fileProperties.AudioStreamType
+                    == M4VIDEOEDITING_kNoneAudio )
+                {
+                    audioIsDifferent = M4OSA_FALSE;
+                }
+            }
+
+            if( videoIsDifferent == M4OSA_TRUE || audioIsDifferent == M4OSA_TRUE
+#ifdef TIMESCALE_BUG
+
+                || timescaleDifferent == M4OSA_TRUE
+
+#endif
+
+                )
+            {
+                M4OSA_Char out_3gp[64];
+                M4OSA_Char out_3gp_tmp[64];
+
+                /* Construct output temporary 3GP filename */
+                err = M4OSA_chrSPrintf(out_3gp, 63, (M4OSA_Char *)"%svid%d.3gp",
+                    xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+                    return err;
+                }
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+                err = M4OSA_chrSPrintf(out_3gp_tmp, 63, (M4OSA_Char *)"%svid%d.tmp",
+                    xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+                    return err;
+                }
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+                xVSS_context->tempFileIndex++;
+
+                pParams =
+                    (M4xVSS_MCS_params *)M4OSA_malloc(sizeof(M4xVSS_MCS_params),
+                    M4VS, (M4OSA_Char *)"Element of MCS Params (for 3GP)");
+
+                if( pParams == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4xVSS_sendCommand: Problem when allocating one element MCS Params");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                pParams->MediaRendering = M4xVSS_kResizing;
+
+                if( xVSS_context->pMCSparamsList
+                    == M4OSA_NULL ) /* Means it is the first element of the list */
+                {
+                    /* Initialize the xVSS context with the first element of the list */
+                    xVSS_context->pMCSparamsList = pParams;
+                }
+                else
+                {
+                    /* Update next pointer of the previous last element of the chain */
+                    pMCS_last->pNext = pParams;
+                }
+
+                /* Save this element in case of other file to convert */
+                pMCS_last = pParams;
+
+                /* Fill the last M4xVSS_MCS_params element */
+                pParams->InputFileType = M4VIDEOEDITING_kFileType_3GPP;
+                pParams->OutputFileType = M4VIDEOEDITING_kFileType_3GPP;
+
+#ifdef TIMESCALE_BUG
+                /* Check if timescale only needs to be modified */
+
+                if( timescaleDifferent == M4OSA_TRUE
+                    && videoIsDifferent == M4OSA_FALSE )
+                {
+                    pParams->OutputVideoTimescale = 30;
+                    pParams->OutputVideoFormat = M4VIDEOEDITING_kNullVideo;
+                    pParams->OutputVideoFrameRate =
+                        M4VIDEOEDITING_k15_FPS; /* Must be set, otherwise,
+                                                    MCS returns an error ... */
+                }
+                else
+                {
+                    pParams->OutputVideoTimescale = 0;
+                }
+
+#endif
+
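+                /* The targeted timescale always takes precedence; this overrides any value
+                set in the TIMESCALE_BUG block above */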
+                pParams->OutputVideoTimescale = xVSS_context->targetedTimescale;
+
+                /* We do not need to reencode video if its parameters do not differ */
+                /* from output settings parameters */
+                if( videoIsDifferent == M4OSA_TRUE )
+                {
+                    pParams->OutputVideoFormat =
+                        xVSS_context->pSettings->xVSS.outputVideoFormat;
+                    pParams->OutputVideoFrameRate =
+                        xVSS_context->pSettings->videoFrameRate;
+                    pParams->OutputVideoFrameSize =
+                        xVSS_context->pSettings->xVSS.outputVideoSize;
+
+                    /*FB: VAL CR P4ME00003076
+                    The output video bitrate is now directly given by the user in the edition
+                    settings structure. If the bitrate given by the user is irrelevant
+                    (i.e. outside the MCS minimum and maximum video bitrates),
+                    the output video bitrate is hardcoded according to the output video size*/
+                    if( xVSS_context->pSettings->xVSS.outputVideoBitrate
+                        >= M4VIDEOEDITING_k16_KBPS
+                        && xVSS_context->pSettings->xVSS.outputVideoBitrate
+                        <= M4VIDEOEDITING_k8_MBPS ) /*+ New Encoder bitrates */
+                    {
+                        pParams->OutputVideoBitrate =
+                            xVSS_context->pSettings->xVSS.outputVideoBitrate;
+                    }
+                    else
+                    {
+                        switch( xVSS_context->pSettings->xVSS.outputVideoSize )
+                        {
+                            case M4VIDEOEDITING_kSQCIF:
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k48_KBPS;
+                                break;
+
+                            case M4VIDEOEDITING_kQQVGA:
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k64_KBPS;
+                                break;
+
+                            case M4VIDEOEDITING_kQCIF:
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k128_KBPS;
+                                break;
+
+                            case M4VIDEOEDITING_kQVGA:
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k384_KBPS;
+                                break;
+
+                            case M4VIDEOEDITING_kCIF:
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k384_KBPS;
+                                break;
+
+                            case M4VIDEOEDITING_kVGA:
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k512_KBPS;
+                                break;
+
+                            default: /* Should not happen !! */
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k64_KBPS;
+                                break;
+                        }
+                    }
+                }
+                else
+                {
+                    pParams->OutputVideoFormat = M4VIDEOEDITING_kNullVideo;
+                    pParams->OutputVideoFrameRate =
+                        M4VIDEOEDITING_k15_FPS; /* Must be set, otherwise, MCS returns an error */
+                }
+
+                if( audioIsDifferent == M4OSA_TRUE )
+                {
+                    pParams->OutputAudioFormat =
+                        xVSS_context->pSettings->xVSS.outputAudioFormat;
+
+                    switch( xVSS_context->pSettings->xVSS.outputAudioFormat )
+                    {
+                        case M4VIDEOEDITING_kNoneAudio:
+                            break;
+
+                        case M4VIDEOEDITING_kAMR_NB:
+                            pParams->OutputAudioBitrate =
+                                M4VIDEOEDITING_k12_2_KBPS;
+                            pParams->bAudioMono = M4OSA_TRUE;
+                            pParams->OutputAudioSamplingFrequency =
+                                M4VIDEOEDITING_kDefault_ASF;
+                            break;
+
+                        case M4VIDEOEDITING_kAAC:
+                            {
+                                /*FB: VAL CR P4ME00003076
+                                The output audio bitrate in the AAC case is now directly given by
+                                the user in the edition settings structure.
+                                If the bitrate given by the user is irrelevant or undefined
+                                (i.e. outside the MCS minimum and maximum audio bitrates),
+                                the output audio bitrate is hard coded according to the output
+                                audio sampling frequency*/
+
+                                /*Check if the audio bitrate is correctly defined*/
+
+                                /*Mono
+                                MCS values for AAC Mono are min: 16kbps and max: 192 kbps*/
+                                if( xVSS_context->pSettings->xVSS.outputAudioBitrate
+                                    >= M4VIDEOEDITING_k16_KBPS
+                                    && xVSS_context->pSettings->
+                                    xVSS.outputAudioBitrate
+                                    <= M4VIDEOEDITING_k192_KBPS
+                                    && xVSS_context->pSettings->xVSS.bAudioMono
+                                    == M4OSA_TRUE )
+                                {
+                                    pParams->OutputAudioBitrate =
+                                        xVSS_context->pSettings->
+                                        xVSS.outputAudioBitrate;
+                                }
+                                /*Stereo
+                                MCS values for AAC Stereo are min: 32 kbps and max: 192 kbps*/
+                                else if( xVSS_context->pSettings->
+                                    xVSS.outputAudioBitrate
+                                    >= M4VIDEOEDITING_k32_KBPS
+                                    && xVSS_context->pSettings->
+                                    xVSS.outputAudioBitrate
+                                    <= M4VIDEOEDITING_k192_KBPS
+                                    && xVSS_context->pSettings->xVSS.bAudioMono
+                                    == M4OSA_FALSE )
+                                {
+                                    pParams->OutputAudioBitrate =
+                                        xVSS_context->pSettings->
+                                        xVSS.outputAudioBitrate;
+                                }
+
+                                /*The audio bitrate is hard coded according to the output audio
+                                 sampling frequency*/
+                                else
+                                {
+                                    switch( xVSS_context->pSettings->
+                                        xVSS.outputAudioSamplFreq )
+                                    {
+                                        case M4VIDEOEDITING_k16000_ASF:
+                                            pParams->OutputAudioBitrate =
+                                                M4VIDEOEDITING_k24_KBPS;
+                                            break;
+
+                                        case M4VIDEOEDITING_k22050_ASF:
+                                        case M4VIDEOEDITING_k24000_ASF:
+                                            pParams->OutputAudioBitrate =
+                                                M4VIDEOEDITING_k32_KBPS;
+                                            break;
+
+                                        case M4VIDEOEDITING_k32000_ASF:
+                                            pParams->OutputAudioBitrate =
+                                                M4VIDEOEDITING_k48_KBPS;
+                                            break;
+
+                                        case M4VIDEOEDITING_k44100_ASF:
+                                        case M4VIDEOEDITING_k48000_ASF:
+                                            pParams->OutputAudioBitrate =
+                                                M4VIDEOEDITING_k64_KBPS;
+                                            break;
+
+                                        default:
+                                            pParams->OutputAudioBitrate =
+                                                M4VIDEOEDITING_k64_KBPS;
+                                            break;
+                                    }
+
+                                    if( xVSS_context->pSettings->xVSS.bAudioMono
+                                        == M4OSA_FALSE )
+                                    {
+                                        /* Output bitrate has to be doubled for stereo */
+                                        pParams->OutputAudioBitrate +=
+                                            pParams->OutputAudioBitrate;
+                                    }
+                                }
+
+                                pParams->bAudioMono =
+                                    xVSS_context->pSettings->xVSS.bAudioMono;
+
+                                if( xVSS_context->pSettings->
+                                    xVSS.outputAudioSamplFreq
+                                    == M4VIDEOEDITING_k8000_ASF )
+                                {
+                                    /* Prevent a disallowed sampling frequency by falling back
+                                    to the default */
+                                    pParams->OutputAudioSamplingFrequency =
+                                        M4VIDEOEDITING_kDefault_ASF;
+                                }
+                                else
+                                {
+                                    pParams->OutputAudioSamplingFrequency =
+                                        xVSS_context->pSettings->
+                                        xVSS.outputAudioSamplFreq;
+                                }
+                                break;
+                            }
+
+                        default: /* Should not happen !! */
+                            pParams->OutputAudioFormat = M4VIDEOEDITING_kAMR_NB;
+                            pParams->OutputAudioBitrate =
+                                M4VIDEOEDITING_k12_2_KBPS;
+                            pParams->bAudioMono = M4OSA_TRUE;
+                            pParams->OutputAudioSamplingFrequency =
+                                M4VIDEOEDITING_kDefault_ASF;
+                            break;
+                        }
+                }
+                else
+                {
+                    pParams->OutputAudioFormat = M4VIDEOEDITING_kNullAudio;
+                }
+
+                /**
+                * UTF conversion: convert into the customer format, before being used*/
+                pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
+                length = M4OSA_chrLength(pDecodedPath);
+
+                if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                    != M4OSA_NULL && xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer
+                    != M4OSA_NULL )
+                {
+                    err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                        (M4OSA_Void *)xVSS_context->pSettings->
+                        pClipList[i]->pFile,
+                        (M4OSA_Void *)xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer,
+                        &length);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                            err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                    pDecodedPath = xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer;
+                }
+
+                /**
+                * End of the UTF conversion, use the converted file path*/
+                pParams->pFileIn =
+                    (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                    (M4OSA_Char *)"MCS 3GP Params: file in");
+
+                if( pParams->pFileIn == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                M4OSA_memcpy(pParams->pFileIn, pDecodedPath,
+                    (length + 1)); /* Copy input file path */
+
+                /**
+                * UTF conversion: convert into the customer format, before being used*/
+                pDecodedPath = out_3gp;
+                length = M4OSA_chrLength(pDecodedPath);
+
+                if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                    != M4OSA_NULL && xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer
+                    != M4OSA_NULL )
+                {
+                    err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                        (M4OSA_Void *)out_3gp, (M4OSA_Void *)xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer,
+                        &length);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                            err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                    pDecodedPath = xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer;
+                }
+
+                /**
+                * End of the UTF conversion, use the converted file path*/
+                pParams->pFileOut =
+                    (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                    (M4OSA_Char *)"MCS 3GP Params: file out");
+
+                if( pParams->pFileOut == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                M4OSA_memcpy(pParams->pFileOut, pDecodedPath,
+                    (length + 1)); /* Copy output file path */
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+                /**
+                * UTF conversion: convert into the customer format, before being used*/
+
+                pDecodedPath = out_3gp_tmp;
+                length = M4OSA_chrLength(pDecodedPath);
+
+                if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                    != M4OSA_NULL && xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer
+                    != M4OSA_NULL )
+                {
+                    err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                        (M4OSA_Void *)out_3gp_tmp,
+                        (M4OSA_Void *)xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer,
+                        &length);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                            err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                    pDecodedPath = xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer;
+                }
+
+                /**
+                * End of the UTF conversion, use the converted file path*/
+                pParams->pFileTemp =
+                    (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                    (M4OSA_Char *)"MCS 3GP Params: file temp");
+
+                if( pParams->pFileTemp == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                M4OSA_memcpy(pParams->pFileTemp, pDecodedPath,
+                    (length + 1)); /* Copy temporary file path */
+
+#else
+
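+                /* No temporary moov file is needed when the reserved disk space
+                mechanism is disabled */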
+                pParams->pFileTemp = M4OSA_NULL;
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+                /*FB 2008/10/20 keep media aspect ratio, add media rendering parameter*/
+
+                if( xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering
+                    == M4xVSS_kCropping
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    MediaRendering == M4xVSS_kBlackBorders
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    MediaRendering == M4xVSS_kResizing )
+                {
+                    pParams->MediaRendering =
+                        xVSS_context->pSettings->pClipList[i]->xVSS.
+                        MediaRendering;
+                }
+
+                /*FB: transcoding per parts*/
+                pParams->BeginCutTime =
+                    xVSS_context->pSettings->pClipList[i]->uiBeginCutTime;
+                pParams->EndCutTime =
+                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime;
+
+                pParams->pNext = M4OSA_NULL;
+                pParams->isBGM = M4OSA_FALSE;
+                pParams->isCreated = M4OSA_FALSE;
+                xVSS_context->nbStepTotal++;
+                bIsTranscoding = M4OSA_TRUE;
+
+replace3GP_3GP:
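+                /* This label is also reached via goto when an already-transcoded 3GP file
+                can be reused (see the MCS params list check above) */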
+                /* Update total duration */
+                totalDuration +=
+                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+                    - xVSS_context->pSettings->pClipList[i]->uiBeginCutTime;
+
+                /*the cuts are already done by the MCS, so reset beginCutTime
+                and endCutTime to 0 to keep the entire transcoded video*/
+                xVSS_context->pSettings->pClipList[i]->uiBeginCutTime = 0;
+                xVSS_context->pSettings->pClipList[i]->uiEndCutTime = 0;
+
+                /* Replacing in VSS structure the original 3GP file by the transcoded 3GP file */
+                xVSS_context->pSettings->pClipList[i]->FileType =
+                    M4VIDEOEDITING_kFileType_3GPP;
+
+                if( xVSS_context->pSettings->pClipList[i]->pFile != M4OSA_NULL )
+                {
+                    M4OSA_free(xVSS_context->pSettings->pClipList[i]->pFile);
+                    xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_NULL;
+                }
+
+                /**
+                * UTF conversion: convert into the customer format, before being used*/
+                pDecodedPath = pParams->pFileOut;
+
+                if( xVSS_context->UTFConversionContext.pConvToUTF8Fct
+                    != M4OSA_NULL && xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer
+                    != M4OSA_NULL )
+                {
+                    err = M4xVSS_internalConvertToUTF8(xVSS_context,
+                        (M4OSA_Void *)pParams->pFileOut,
+                        (M4OSA_Void *)xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer,
+                        &length);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err: 0x%x",
+                            err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                    pDecodedPath = xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer;
+                }
+                else
+                {
+                    length = M4OSA_chrLength(pDecodedPath);
+                }
+                /**
+                * End of the UTF conversion, use the converted file path*/
+                xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_malloc(
+                    (length + 1),
+                    M4VS, (M4OSA_Char *)"xVSS file path of 3gp to 3gp");
+
+                if( xVSS_context->pSettings->pClipList[i]->pFile == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                M4OSA_memcpy(xVSS_context->pSettings->pClipList[i]->pFile,
+                    pDecodedPath, (length + 1));
+                /*FB: add file path size because of UTF 16 conversion*/
+                xVSS_context->pSettings->pClipList[i]->filePathSize = length+1;
+
+                /* We define master clip as first 3GP input clip */
+                /*if(xVSS_context->pSettings->uiMasterClip == 0 && fileProperties.
+                AudioStreamType != M4VIDEOEDITING_kNoneAudio)
+                {
+                xVSS_context->pSettings->uiMasterClip = i;
+                }*/
+            }
+            else
+            {
+                /* Update total duration */
+                totalDuration +=
+                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+                    - xVSS_context->pSettings->pClipList[i]->uiBeginCutTime;
+            }
+            /* We define master clip as first 3GP input clip */
+            if( masterClip == -1
+                && fileProperties.AudioStreamType != M4VIDEOEDITING_kNoneAudio )
+            {
+                masterClip = i;
+                xVSS_context->pSettings->uiMasterClip = i;
+            }
+#if 0 /* Changed to be able to mix with video only files */
+
+            if( xVSS_context->pSettings->uiMasterClip == 0
+                && fileProperties.AudioStreamType != M4VIDEOEDITING_kNoneAudio )
+            {
+                xVSS_context->pSettings->uiMasterClip = i;
+            }
+
+#endif
+
+        }
+        /**************************
+        Other input file type case
+        ***************************/
+        else
+        {
+            M4OSA_TRACE1_0("Bad file type as input clip");
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_PARAMETER;
+        }
+    }
+
+    /*********************************************************
+    * Parse all effects to make some adjustment for framing, *
+    * text and to transform relative time into absolute time *
+    **********************************************************/
+    for ( j = 0; j < xVSS_context->pSettings->nbEffects; j++ )
+    {
+        /* Copy effect to "local" structure */
+        M4OSA_memcpy((M4OSA_MemAddr8) &(xVSS_context->pSettings->Effects[j]),
+            (M4OSA_MemAddr8) &(pSettings->Effects[j]),
+            sizeof(M4VSS3GPP_EffectSettings));
+
+        /* Guard against badly initialized effect percentage times */
+        if( xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent > 100
+            || xVSS_context->pSettings->Effects[j].xVSS.uiStartPercent > 100 )
+        {
+            /* These percentage times have probably not been initialized, */
+            /* so ignore them by resetting them to 0 */
+            xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent = 0;
+            xVSS_context->pSettings->Effects[j].xVSS.uiStartPercent = 0;
+        }
+
+        /* If we have percentage information let's use it... Otherwise, we use absolute time. */
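+        /* e.g. with totalDuration = 10000, uiStartPercent = 25 and
+        uiDurationPercent = 50, the effect starts at 2500 and lasts 5000
+        (same time unit as the clip cut times) */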
+        if( xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent != 0 )
+        {
+            xVSS_context->pSettings->
+                Effects[j].uiStartTime = (M4OSA_UInt32)(totalDuration
+                * xVSS_context->pSettings->Effects[j].xVSS.uiStartPercent
+                / 100);
+            /* The effect duration is also expressed as a percentage of the
+            total duration */
+            xVSS_context->pSettings->
+                Effects[j].uiDuration = (M4OSA_UInt32)(totalDuration
+                * xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent
+                / 100);
+        }
+
+        /* If there is a framing effect, we need to allocate framing effect structure */
+        if( xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Framing )
+        {
+#ifdef DECODE_GIF_ON_SAVING
+
+            M4xVSS_FramingContext *framingCtx;
+            /*UTF conversion support*/
+            M4OSA_Void *pDecodedPath = M4OSA_NULL;
+
+#else
+
+            M4xVSS_FramingStruct *framingCtx;
+
+#endif /*DECODE_GIF_ON_SAVING*/
+
+            M4OSA_Char *pExt2 = M4OSA_NULL;
+            M4VIFI_ImagePlane *pPlane =
+                xVSS_context->pSettings->Effects[j].xVSS.pFramingBuffer;
+            M4OSA_Int32 result1, result2;
+
+            /* Copy framing file path */
+            if( pSettings->Effects[j].xVSS.pFramingFilePath != M4OSA_NULL )
+            {
+                xVSS_context->pSettings->
+                    Effects[j].xVSS.pFramingFilePath = M4OSA_malloc(
+                    M4OSA_chrLength(pSettings->Effects[j].xVSS.pFramingFilePath)
+                    + 1, M4VS, (M4OSA_Char *)"Local Framing file path");
+
+                if( xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath
+                    == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                M4OSA_memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->
+                    Effects[j].xVSS.pFramingFilePath,
+                    (M4OSA_MemAddr8)pSettings->
+                    Effects[j].xVSS.pFramingFilePath, M4OSA_chrLength(
+                    pSettings->Effects[j].xVSS.pFramingFilePath) + 1);
+
+                pExt2 =
+                    xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
+            }
+
+#ifdef DECODE_GIF_ON_SAVING
+
+            framingCtx = (M4xVSS_FramingContext
+                *)M4OSA_malloc(sizeof(M4xVSS_FramingContext),
+                M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+            if( framingCtx == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            framingCtx->aFramingCtx = M4OSA_NULL;
+            framingCtx->aFramingCtx_last = M4OSA_NULL;
+            framingCtx->pSPSContext = M4OSA_NULL;
+            framingCtx->outputVideoSize =
+                xVSS_context->pSettings->xVSS.outputVideoSize;
+            framingCtx->topleft_x =
+                xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+            framingCtx->topleft_y =
+                xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+            framingCtx->bEffectResize =
+                xVSS_context->pSettings->Effects[j].xVSS.bResize;
+            framingCtx->pEffectFilePath =
+                xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
+            framingCtx->pFileReadPtr = xVSS_context->pFileReadPtr;
+            framingCtx->pFileWritePtr = xVSS_context->pFileWritePtr;
+            framingCtx->effectDuration =
+                xVSS_context->pSettings->Effects[j].uiDuration;
+            framingCtx->b_IsFileGif = M4OSA_FALSE;
+            framingCtx->alphaBlendingStruct = M4OSA_NULL;
+            framingCtx->b_animated = M4OSA_FALSE;
+
+            /* The output frame-rate ratio for the effect is stored in the
+            uiFiftiesOutFrameRate parameter of the extended xVSS effects structure */
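+            /* e.g. a uiFiftiesOutFrameRate value of 12500 yields a
+            frameDurationRatio of 12.5 */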
+            if( xVSS_context->pSettings->Effects[j].xVSS.uiFiftiesOutFrameRate
+                != 0 )
+            {
+                framingCtx->frameDurationRatio =
+                    (M4OSA_Float)(( xVSS_context->pSettings->
+                    Effects[j].xVSS.uiFiftiesOutFrameRate) / 1000.0);
+            }
+            else
+            {
+                framingCtx->frameDurationRatio = 1.0;
+            }
+
+            /*Alpha blending*/
+            /*Check that the alpha blending parameters are correct*/
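+            /*All of these parameters are expected in the 0..100 range: out-of-range
+            fade times fall back to 0 (no fade) and out-of-range start/middle/end
+            values fall back to 100*/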
+            if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime < 0
+                || pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 100 )
+            {
+                pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime = 0;
+            }
+
+            if( pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime < 0
+                || pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime > 100 )
+            {
+                pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime = 0;
+            }
+
+            if( pSettings->Effects[j].xVSS.uialphaBlendingEnd < 0
+                || pSettings->Effects[j].xVSS.uialphaBlendingEnd > 100 )
+            {
+                pSettings->Effects[j].xVSS.uialphaBlendingEnd = 100;
+            }
+
+            if( pSettings->Effects[j].xVSS.uialphaBlendingMiddle < 0
+                || pSettings->Effects[j].xVSS.uialphaBlendingMiddle > 100 )
+            {
+                pSettings->Effects[j].xVSS.uialphaBlendingMiddle = 100;
+            }
+
+            if( pSettings->Effects[j].xVSS.uialphaBlendingStart < 0
+                || pSettings->Effects[j].xVSS.uialphaBlendingStart > 100 )
+            {
+                pSettings->Effects[j].xVSS.uialphaBlendingStart = 100;
+            }
+
+            if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 0
+                || pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime > 0 )
+            {
+                /*Allocate the alpha blending structure*/
+                framingCtx->alphaBlendingStruct =
+                    (M4xVSS_internalEffectsAlphaBlending *)M4OSA_malloc(
+                    sizeof(M4xVSS_internalEffectsAlphaBlending),
+                    M4VS, (M4OSA_Char *)"alpha blending structure");
+
+                if( framingCtx->alphaBlendingStruct == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return M4ERR_ALLOC;
+                }
+                /*Fill the alpha blending structure*/
+                framingCtx->alphaBlendingStruct->m_fadeInTime =
+                    pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime;
+                framingCtx->alphaBlendingStruct->m_fadeOutTime =
+                    pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime;
+                framingCtx->alphaBlendingStruct->m_end =
+                    pSettings->Effects[j].xVSS.uialphaBlendingEnd;
+                framingCtx->alphaBlendingStruct->m_middle =
+                    pSettings->Effects[j].xVSS.uialphaBlendingMiddle;
+                framingCtx->alphaBlendingStruct->m_start =
+                    pSettings->Effects[j].xVSS.uialphaBlendingStart;
+
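+                /* The fade-in and fade-out must not overlap: if their sum exceeds
+                100%, the fade-out is shortened so that fadeIn + fadeOut == 100 */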
+                if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime
+                    + pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
+                        > 100 )
+                {
+                    framingCtx->alphaBlendingStruct->m_fadeOutTime =
+                        100 - framingCtx->alphaBlendingStruct->m_fadeInTime;
+                }
+            }
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            pDecodedPath =
+                xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
+            length = M4OSA_chrLength(pDecodedPath);
+
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                    (M4OSA_Void *)xVSS_context->pSettings->
+                    Effects[j].xVSS.pFramingFilePath,
+                    (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer,
+                    &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                        err);
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+
+            /**
+            * End of the UTF conversion, use the converted file path*/
+            framingCtx->pEffectFilePath = M4OSA_malloc(length + 1, M4VS,
+                (M4OSA_Char *)"Local Framing file path");
+
+            if( framingCtx->pEffectFilePath == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            M4OSA_memcpy((M4OSA_MemAddr8)framingCtx->pEffectFilePath,
+                (M4OSA_MemAddr8)pDecodedPath, length + 1);
+
+            /* Save framing structure associated with corresponding effect */
+            xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                framingCtx;
+
+#else
+
+            framingCtx = (M4xVSS_FramingStruct
+                *)M4OSA_malloc(sizeof(M4xVSS_FramingStruct),
+                M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+            if( framingCtx == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+
+            framingCtx->topleft_x =
+                xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+            framingCtx->topleft_y =
+                xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+
+            /* BugFix 1.2.0: Leak when decoding error */
+            framingCtx->FramingRgb = M4OSA_NULL;
+            framingCtx->FramingYuv = M4OSA_NULL;
+            framingCtx->pNext = framingCtx;
+            /* Save framing structure associated with corresponding effect */
+            xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                framingCtx;
+
+#endif /*DECODE_GIF_ON_SAVING*/
+
+            if( pExt2 != M4OSA_NULL )
+            {
+                /* Decode the image associated to the effect, and fill framing structure */
+                pExt2 += (M4OSA_chrLength(pExt2) - 4);
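+                /* pExt2 now points to the last four characters of the path, i.e. the
+                expected ".rgb" extension */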
+
+                M4OSA_chrCompare(pExt2,(M4OSA_Char *)".rgb", &result1);
+                M4OSA_chrCompare(pExt2,(M4OSA_Char *)".RGB", &result2);
+
+                if( 0 == result1 || 0 == result2 )
+                {
+#ifdef DECODE_GIF_ON_SAVING
+
+                    framingCtx->aFramingCtx =
+                        (M4xVSS_FramingStruct
+                        *)M4OSA_malloc(sizeof(M4xVSS_FramingStruct),
+                        M4VS,
+                        (M4OSA_Char
+                        *)
+                        "M4xVSS_internalDecodeGIF: Context of the framing effect");
+
+                    if( framingCtx->aFramingCtx == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0(
+                            "Allocation error in M4xVSS_SendCommand");
+                        /* TODO: Translate error code of SPS to an xVSS error code */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return M4ERR_ALLOC;
+                    }
+                    framingCtx->aFramingCtx->pCurrent =
+                        M4OSA_NULL; /* Only used by the first element of the chain */
+                    framingCtx->aFramingCtx->previousClipTime = -1;
+                    framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
+                    framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
+                    framingCtx->aFramingCtx->topleft_x =
+                        xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+                    framingCtx->aFramingCtx->topleft_y =
+                        xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+                    /*To support ARGB8888 : get the width and height */
+
+                    framingCtx->aFramingCtx->width =
+                        xVSS_context->pSettings->Effects[j].xVSS.width;
+                    framingCtx->aFramingCtx->height =
+                        xVSS_context->pSettings->Effects[j].xVSS.height;
+                    M4OSA_TRACE1_1("FRAMING BEFORE M4xVSS_SendCommand  %d",
+                        framingCtx->aFramingCtx->width);
+                    M4OSA_TRACE1_1("FRAMING BEFORE M4xVSS_SendCommand  %d",
+                        framingCtx->aFramingCtx->height);
+
+#endif
+
+                    err = M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(
+                        xVSS_context,
+                        &(xVSS_context->pSettings->Effects[j]),
+                        framingCtx->aFramingCtx,xVSS_context->pSettings->xVSS.outputVideoSize);
+                    M4OSA_TRACE1_1("FRAMING AFTER M4xVSS_SendCommand  %d",
+                        framingCtx->aFramingCtx->width);
+                    M4OSA_TRACE1_1("FRAMING AFTER M4xVSS_SendCommand  %d",
+                        framingCtx->aFramingCtx->height);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: M4xVSS_internalConvertARGB888toYUV420_FrammingEffect returned 0x%x",
+                            err);
+                        /* TODO: Translate error code of SPS to an xVSS error code */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                }
+                else
+                {
+                    M4OSA_TRACE1_0(
+                        "M4xVSS_SendCommand: Unsupported still picture format for the framing file (expected a .rgb extension)");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_PARAMETER;
+                }
+            }
+            else if( pPlane != M4OSA_NULL )
+            {
+#ifdef DECODE_GIF_ON_SAVING
+
+                framingCtx->aFramingCtx = (M4xVSS_FramingStruct
+                    *)M4OSA_malloc(sizeof(M4xVSS_FramingStruct),
+                    M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+                if( framingCtx->aFramingCtx == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+
+                framingCtx->aFramingCtx->topleft_x =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+                framingCtx->aFramingCtx->topleft_y =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+
+                /* BugFix 1.2.0: Leak when decoding error */
+                framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
+                framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
+                framingCtx->aFramingCtx->pNext = framingCtx->aFramingCtx;
+                framingCtx->aFramingCtx->pCurrent = framingCtx->aFramingCtx;
+                framingCtx->aFramingCtx->duration = 0;
+                framingCtx->aFramingCtx->previousClipTime = -1;
+                framingCtx->aFramingCtx->FramingRgb =
+                    xVSS_context->pSettings->Effects[j].xVSS.pFramingBuffer;
+                /* Force input RGB buffer to even size to avoid errors in YUV conversion */
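+                /* (masking with ~1 clears the least significant bit, e.g. 177 -> 176) */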
+                framingCtx->aFramingCtx->FramingRgb->u_width =
+                    framingCtx->aFramingCtx->FramingRgb->u_width & ~1;
+                framingCtx->aFramingCtx->FramingRgb->u_height =
+                    framingCtx->aFramingCtx->FramingRgb->u_height & ~1;
+                /* Input RGB plane is provided, let's convert it to YUV420, and update framing
+                structure  */
+                err = M4xVSS_internalConvertRGBtoYUV(framingCtx->aFramingCtx);
+
+#else
+
+                framingCtx->FramingRgb =
+                    xVSS_context->pSettings->Effects[j].xVSS.pFramingBuffer;
+                /* Force input RGB buffer to even size to avoid errors in YUV conversion */
+                framingCtx->FramingRgb.u_width =
+                    framingCtx->FramingRgb.u_width & ~1;
+                framingCtx->FramingRgb.u_height =
+                    framingCtx->FramingRgb.u_height & ~1;
+                /* Input RGB plane is provided, let's convert it to YUV420, and update framing
+                 structure  */
+                err = M4xVSS_internalConvertRGBtoYUV(framingCtx);
+
+#endif
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_sendCommand: error when converting RGB to YUV: 0x%x",
+                        err);
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return err;
+                }
+            }
+            else
+            {
+                M4OSA_TRACE1_0(
+                    "M4xVSS_sendCommand: No input image/plane provided for framing effect.");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_PARAMETER;
+            }
+        }
+        /* CR: Add text handling with external text interface */
+        /* If effect type is text, we call external text function to get RGB 565 buffer */
+        if( xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Text )
+        {
+            /* Call the font engine function pointer to get RGB565 buffer */
+            /* We transform text effect into framing effect from buffer */
+            if( xVSS_context->pSettings->xVSS.pTextRenderingFct != M4OSA_NULL )
+            {
+                /*FB: add UTF conversion for text buffer*/
+                M4OSA_Void *pDecodedPath = M4OSA_NULL;
+#ifdef DECODE_GIF_ON_SAVING
+
+                M4xVSS_FramingContext *framingCtx;
+
+#else
+
+                M4xVSS_FramingStruct *framingCtx;
+
+#endif /*DECODE_GIF_ON_SAVING*/
+
+#ifdef DECODE_GIF_ON_SAVING
+
+                framingCtx = (M4xVSS_FramingContext
+                    *)M4OSA_malloc(sizeof(M4xVSS_FramingContext),
+                    M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+                if( framingCtx == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                framingCtx->aFramingCtx = M4OSA_NULL;
+                framingCtx->aFramingCtx_last = M4OSA_NULL;
+                framingCtx->pSPSContext = M4OSA_NULL;
+                framingCtx->outputVideoSize =
+                    xVSS_context->pSettings->xVSS.outputVideoSize;
+                framingCtx->topleft_x =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+                framingCtx->topleft_y =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+                framingCtx->bEffectResize =
+                    xVSS_context->pSettings->Effects[j].xVSS.bResize;
+                framingCtx->pEffectFilePath =
+                    xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
+                framingCtx->pFileReadPtr = xVSS_context->pFileReadPtr;
+                framingCtx->pFileWritePtr = xVSS_context->pFileWritePtr;
+                framingCtx->effectDuration =
+                    xVSS_context->pSettings->Effects[j].uiDuration;
+                framingCtx->b_IsFileGif = M4OSA_FALSE;
+                framingCtx->b_animated = M4OSA_FALSE;
+                framingCtx->alphaBlendingStruct = M4OSA_NULL;
+
+                /* Save framing structure associated with corresponding effect */
+                xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                    framingCtx;
+
+                framingCtx->aFramingCtx = (M4xVSS_FramingStruct
+                    *)M4OSA_malloc(sizeof(M4xVSS_FramingStruct),
+                    M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+                if( framingCtx->aFramingCtx == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+
+                framingCtx->aFramingCtx->topleft_x =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+                framingCtx->aFramingCtx->topleft_y =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+
+                /* BugFix 1.2.0: Leak when decoding error */
+                framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
+                framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
+                framingCtx->aFramingCtx->pNext = framingCtx->aFramingCtx;
+                framingCtx->aFramingCtx->pCurrent = framingCtx->aFramingCtx;
+                framingCtx->aFramingCtx->duration = 0;
+                framingCtx->aFramingCtx->previousClipTime = -1;
+
+                /*Alpha blending*/
+                /*Check that the alpha blending parameters are correct*/
+                if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime < 0
+                    || pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime
+                > 100 )
+                {
+                    pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime = 0;
+                }
+
+                if( pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime < 0
+                    || pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
+                > 100 )
+                {
+                    pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime = 0;
+                }
+
+                if( pSettings->Effects[j].xVSS.uialphaBlendingEnd < 0
+                    || pSettings->Effects[j].xVSS.uialphaBlendingEnd > 100 )
+                {
+                    pSettings->Effects[j].xVSS.uialphaBlendingEnd = 100;
+                }
+
+                if( pSettings->Effects[j].xVSS.uialphaBlendingMiddle < 0
+                    || pSettings->Effects[j].xVSS.uialphaBlendingMiddle > 100 )
+                {
+                    pSettings->Effects[j].xVSS.uialphaBlendingMiddle = 100;
+                }
+
+                if( pSettings->Effects[j].xVSS.uialphaBlendingStart < 0
+                    || pSettings->Effects[j].xVSS.uialphaBlendingStart > 100 )
+                {
+                    pSettings->Effects[j].xVSS.uialphaBlendingStart = 100;
+                }
+
+                if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 0
+                    || pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
+                    > 0 )
+                {
+                    /*Allocate the alpha blending structure*/
+                    framingCtx->alphaBlendingStruct =
+                        (M4xVSS_internalEffectsAlphaBlending *)M4OSA_malloc(
+                        sizeof(M4xVSS_internalEffectsAlphaBlending),
+                        M4VS, (M4OSA_Char *)"alpha blending structure");
+
+                    if( framingCtx->alphaBlendingStruct == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0(
+                            "Allocation error in M4xVSS_SendCommand");
+                        M4xVSS_freeCommand(xVSS_context);
+                        return M4ERR_ALLOC;
+                    }
+                    /*Fill the alpha blending structure*/
+                    framingCtx->alphaBlendingStruct->m_fadeInTime =
+                        pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime;
+                    framingCtx->alphaBlendingStruct->m_fadeOutTime =
+                        pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime;
+                    framingCtx->alphaBlendingStruct->m_end =
+                        pSettings->Effects[j].xVSS.uialphaBlendingEnd;
+                    framingCtx->alphaBlendingStruct->m_middle =
+                        pSettings->Effects[j].xVSS.uialphaBlendingMiddle;
+                    framingCtx->alphaBlendingStruct->m_start =
+                        pSettings->Effects[j].xVSS.uialphaBlendingStart;
+
+                    if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime
+                        + pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
+                            > 100 )
+                    {
+                        framingCtx->alphaBlendingStruct->m_fadeOutTime =
+                            100 - framingCtx->alphaBlendingStruct->m_fadeInTime;
+                    }
+                }
+#else
+
+                framingCtx = (M4xVSS_FramingStruct
+                    *)M4OSA_malloc(sizeof(M4xVSS_FramingStruct),
+                    M4VS, (M4OSA_Char
+                    *)"Context of the framing effect (for text)");
+
+                if( framingCtx == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+
+                framingCtx->topleft_x =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+                framingCtx->topleft_y =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+                framingCtx->FramingRgb = M4OSA_NULL;
+
+                /* BugFix 1.2.0: Leak when decoding error */
+                framingCtx->FramingYuv = M4OSA_NULL;
+                framingCtx->pNext = framingCtx;
+
+#endif
+                /* Save framing structure associated with corresponding effect */
+
+                xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                    framingCtx;
+
+                /* FB: changes for Video Artist: memcopy pTextBuffer so that it can be changed
+                after a complete analysis*/
+                if( pSettings->Effects[j].xVSS.pTextBuffer == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("M4xVSS_SendCommand: pTextBuffer is null");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return M4ERR_PARAMETER;
+                }
+
+                /*Convert text buffer into customer format before being used*/
+                /**
+                * UTF conversion: convert into the customer format, before being used*/
+                pDecodedPath = pSettings->Effects[j].xVSS.pTextBuffer;
+                xVSS_context->pSettings->Effects[j].xVSS.textBufferSize =
+                    pSettings->Effects[j].xVSS.textBufferSize;
+
+                if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                    != M4OSA_NULL && xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer
+                    != M4OSA_NULL )
+                {
+                    err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                        (M4OSA_Void *)pSettings->
+                        Effects[j].xVSS.pTextBuffer,
+                        (M4OSA_Void *)xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer,
+                        &length);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                            err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                    pDecodedPath = xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer;
+                    xVSS_context->pSettings->Effects[j].xVSS.textBufferSize =
+                        length;
+                }
+                /**
+                * End of the UTF conversion, use the converted file path*/
+
+                xVSS_context->pSettings->
+                    Effects[j].xVSS.pTextBuffer = M4OSA_malloc(
+                    xVSS_context->pSettings->Effects[j].xVSS.textBufferSize + 1,
+                    M4VS, (M4OSA_Char *)"Local text buffer effect");
+
+                //xVSS_context->pSettings->Effects[j].xVSS.pTextBuffer =
+                // M4OSA_malloc(M4OSA_chrLength(pSettings->Effects[j].xVSS.pTextBuffer)+1,
+                // M4VS, "Local text buffer effect");
+                if( xVSS_context->pSettings->Effects[j].xVSS.pTextBuffer
+                    == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+
+                if( pSettings->Effects[j].xVSS.pTextBuffer != M4OSA_NULL )
+                {
+                    //M4OSA_memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->Effects[j]
+                    //.xVSS.pTextBuffer, (M4OSA_MemAddr8)pSettings->Effects[j].xVSS.pTextBuffer,
+                    // M4OSA_chrLength(pSettings->Effects[j].xVSS.pTextBuffer)+1);
+                    M4OSA_memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->
+                        Effects[j].xVSS.pTextBuffer,
+                        (M4OSA_MemAddr8)pDecodedPath, xVSS_context->pSettings->
+                        Effects[j].xVSS.textBufferSize + 1);
+                }
+
+                /*Allocate the text RGB buffer*/
+                framingCtx->aFramingCtx->FramingRgb =
+                    (M4VIFI_ImagePlane *)M4OSA_malloc(sizeof(M4VIFI_ImagePlane),
+                    M4VS,
+                    (M4OSA_Char *)"RGB structure for the text effect");
+
+                if( framingCtx->aFramingCtx->FramingRgb == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+
+                if( xVSS_context->pSettings->Effects[j].xVSS.uiTextBufferWidth
+                    == 0 || xVSS_context->pSettings->
+                    Effects[j].xVSS.uiTextBufferHeight == 0 )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4xVSS_SendCommand: text plane width and height are not defined");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_PARAMETER;
+                }
+                /* Allocate input RGB text buffer and force it to even size to avoid errors in
+                 YUV conversion */
+                framingCtx->aFramingCtx->FramingRgb->u_width =
+                    xVSS_context->pSettings->
+                    Effects[j].xVSS.uiTextBufferWidth & ~1;
+                framingCtx->aFramingCtx->FramingRgb->u_height =
+                    xVSS_context->pSettings->
+                    Effects[j].xVSS.uiTextBufferHeight & ~1;
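+                /* The text plane returned by the font engine is RGB565 (2 bytes per
+                pixel), hence a stride of twice the width */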
+                framingCtx->aFramingCtx->FramingRgb->u_stride =
+                    2 * framingCtx->aFramingCtx->FramingRgb->u_width;
+                framingCtx->aFramingCtx->FramingRgb->u_topleft = 0;
+                framingCtx->aFramingCtx->FramingRgb->pac_data =
+                    (M4VIFI_UInt8 *)M4OSA_malloc(
+                    framingCtx->aFramingCtx->FramingRgb->u_height
+                    * framingCtx->aFramingCtx->FramingRgb->u_stride,
+                    M4VS, (M4OSA_Char *)"Text RGB plane->pac_data");
+
+                if( framingCtx->aFramingCtx->FramingRgb->pac_data
+                    == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+
+#ifdef DECODE_GIF_ON_SAVING
+                /**/
+                /* Call text rendering function */
+
+                err = xVSS_context->pSettings->xVSS.pTextRenderingFct(
+                    xVSS_context->pSettings->Effects[j].xVSS.pRenderingData,
+                    xVSS_context->pSettings->
+                    Effects[j].xVSS.pTextBuffer,
+                    xVSS_context->pSettings->
+                    Effects[j].xVSS.textBufferSize,
+                    &(framingCtx->aFramingCtx->FramingRgb));
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_0("Text rendering external function failed\n");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+
+                /* Check that RGB buffer is set */
+                if( framingCtx->aFramingCtx->FramingRgb == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0(
+                        "Text rendering function did not set RGB buffer correctly !");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return M4ERR_PARAMETER;
+                }
+
+                /* Convert RGB plane to YUV420 and update framing structure */
+                err = M4xVSS_internalConvertRGBtoYUV(framingCtx->aFramingCtx);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_sendCommand: error when converting RGB to YUV: 0x%x",
+                        err);
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+
+#else
+                /**/
+                /* Call text rendering function */
+
+                err = xVSS_context->pSettings->xVSS.pTextRenderingFct(
+                    xVSS_context->pSettings->Effects[j].xVSS.pRenderingData,
+                    xVSS_context->pSettings->
+                    Effects[j].xVSS.pTextBuffer,
+                    xVSS_context->pSettings->
+                    Effects[j].xVSS.textBufferSize,
+                    &(framingCtx->FramingRgb));
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_0("Text rendering external function failed\n");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+
+                /* Check that RGB buffer is set */
+                if( framingCtx->FramingRgb == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0(
+                        "Text rendering function did not set RGB buffer correctly !");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return M4ERR_PARAMETER;
+                }
+
+                /* Convert RGB plane to YUV420 and update framing structure */
+                err = M4xVSS_internalConvertRGBtoYUV(framingCtx);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_sendCommand: error when converting RGB to YUV: 0x%x",
+                        err);
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+
+#endif /*DECODE_GIF_ON_SAVING*/
+
+                /* Change internally effect type from "text" to framing */
+
+                xVSS_context->pSettings->Effects[j].VideoEffectType =
+                    M4xVSS_kVideoEffectType_Framing;
+                xVSS_context->pSettings->Effects[j].xVSS.bResize = M4OSA_FALSE;
+            }
+            else
+            {
+                M4OSA_TRACE1_0(
+                    "M4xVSS_sendCommand: No text rendering function set !!");
+                M4xVSS_freeCommand(xVSS_context);
+                return M4ERR_PARAMETER;
+            }
+        }
+
+        /* Allocate the structure to store the data needed by the Fifties effect */
+        else if( xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Fifties )
+        {
+            M4xVSS_FiftiesStruct *fiftiesCtx;
+
+            /* Check the expected frame rate for the fifties effect (must be above 0) */
+            if( 0 == xVSS_context->pSettings->
+                Effects[j].xVSS.uiFiftiesOutFrameRate )
+            {
+                M4OSA_TRACE1_0(
+                    "The frame rate for the fifties effect must be greater than 0 !");
+                M4xVSS_freeCommand(xVSS_context);
+                return M4ERR_PARAMETER;
+            }
+
+            fiftiesCtx = (M4xVSS_FiftiesStruct
+                *)M4OSA_malloc(sizeof(M4xVSS_FiftiesStruct),
+                M4VS, (M4OSA_Char *)"Context of the fifties effect");
+
+            if( fiftiesCtx == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                return M4ERR_ALLOC;
+            }
+
+            fiftiesCtx->previousClipTime = -1;
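+            /* Per-frame duration computed as 1000 / output frame rate (integer
+            division; assumes uiFiftiesOutFrameRate is expressed in frames per second,
+            giving a duration in ms) */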
+            fiftiesCtx->fiftiesEffectDuration = 1000 / xVSS_context->pSettings->
+                Effects[j].xVSS.uiFiftiesOutFrameRate;
+            fiftiesCtx->shiftRandomValue = 0;
+            fiftiesCtx->stripeRandomValue = 0;
+
+            /* Save the structure associated with corresponding effect */
+            xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                fiftiesCtx;
+        }
+
+        /* Allocate the structure to store the data needed by the Color effect */
+        else if( xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_ColorRGB16
+            || xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_BlackAndWhite
+            || xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Pink
+            || xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Green
+            || xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Sepia
+            || xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Negative
+            || xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Gradient )
+        {
+            M4xVSS_ColorStruct *ColorCtx;
+
+            ColorCtx =
+                (M4xVSS_ColorStruct *)M4OSA_malloc(sizeof(M4xVSS_ColorStruct),
+                M4VS, (M4OSA_Char *)"Context of the color effect");
+
+            if( ColorCtx == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                return M4ERR_ALLOC;
+            }
+
+            ColorCtx->colorEffectType =
+                xVSS_context->pSettings->Effects[j].VideoEffectType;
+
+            if( xVSS_context->pSettings->Effects[j].VideoEffectType
+                == M4xVSS_kVideoEffectType_ColorRGB16
+                || xVSS_context->pSettings->Effects[j].VideoEffectType
+                == M4xVSS_kVideoEffectType_Gradient )
+            {
+                ColorCtx->rgb16ColorData =
+                    xVSS_context->pSettings->Effects[j].xVSS.uiRgb16InputColor;
+            }
+            else
+            {
+                ColorCtx->rgb16ColorData = 0;
+            }
+
+            /* Save the structure associated with corresponding effect */
+            xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                ColorCtx;
+        }
+    }
+
+    /**********************************
+    Background music registering
+    **********************************/
+    if( pSettings->xVSS.pBGMtrack != M4OSA_NULL && isNewBGM == M4OSA_TRUE )
+    {
+#ifdef PREVIEW_ENABLED
+
+        M4xVSS_MCS_params *pParams;
+        M4OSA_Char *out_pcm;
+        /*UTF conversion support*/
+        M4OSA_Void *pDecodedPath = M4OSA_NULL;
+
+#endif
+
+        /* We save output file pointer, because we will need to use it when saving audio mixed
+         file (last save step) */
+
+        xVSS_context->pOutputFile = xVSS_context->pSettings->pOutputFile;
+        xVSS_context->pTemporaryFile = xVSS_context->pSettings->pTemporaryFile;
+
+        /* If a previous BGM has already been registered, delete it */
+        /* A check could be added here to detect whether the same BGM is already registered */
+        if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+        {
+            if( xVSS_context->pSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL )
+            {
+                M4OSA_free(
+                    (M4OSA_MemAddr32)xVSS_context->pSettings->xVSS.pBGMtrack->
+                    pFile);
+                xVSS_context->pSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+            }
+            M4OSA_free(
+                (M4OSA_MemAddr32)xVSS_context->pSettings->xVSS.pBGMtrack);
+            xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
+        }
+
+        /* Allocate BGM */
+        xVSS_context->pSettings->xVSS.pBGMtrack =
+            (M4xVSS_BGMSettings *)M4OSA_malloc(sizeof(M4xVSS_BGMSettings), M4VS,
+            (M4OSA_Char *)"xVSS_context->pSettings->xVSS.pBGMtrack");
+
+        if( xVSS_context->pSettings->xVSS.pBGMtrack == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            return M4ERR_ALLOC;
+        }
+
+        /* Copy input structure to our structure */
+        M4OSA_memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->xVSS.pBGMtrack,
+            (M4OSA_MemAddr8)pSettings->xVSS.pBGMtrack,
+            sizeof(M4xVSS_BGMSettings));
+        /* Allocate file name, and copy file name buffer to our structure */
+        xVSS_context->pSettings->xVSS.pBGMtrack->pFile =
+            M4OSA_malloc((M4OSA_chrLength(pSettings->xVSS.pBGMtrack->pFile)
+            + 1), M4VS, (M4OSA_Char *)"xVSS BGM file path");
+
+        if( xVSS_context->pSettings->xVSS.pBGMtrack->pFile == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(xVSS_context->pSettings->xVSS.pBGMtrack->pFile,
+            pSettings->xVSS.pBGMtrack->pFile,
+            M4OSA_chrLength(pSettings->xVSS.pBGMtrack->pFile) + 1);
+
+#ifdef PREVIEW_ENABLED
+        /* Decode BGM track to pcm output file */
+
+        pParams =
+            (M4xVSS_MCS_params *)M4OSA_malloc(sizeof(M4xVSS_MCS_params), M4VS,
+            (M4OSA_Char *)"Element of MCS Params (for BGM)");
+
+        if( pParams == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0(
+                "M4xVSS_sendCommand: Problem when allocating one element MCS Params");
+            return M4ERR_ALLOC;
+        }
+
+        /* Initialize the pointers in case of problem (PR 2273) */
+        pParams->pFileIn = M4OSA_NULL;
+        pParams->pFileOut = M4OSA_NULL;
+        pParams->pFileTemp = M4OSA_NULL;
+        pParams->pNext = M4OSA_NULL;
+        pParams->BeginCutTime = 0;
+        pParams->EndCutTime = 0;
+
+        if( xVSS_context->pMCSparamsList
+            == M4OSA_NULL ) /* Means it is the first element of the list */
+        {
+            /* Initialize the xVSS context with the first element of the list */
+            xVSS_context->pMCSparamsList = pParams;
+
+#if 0 /* Not necessary, BGM is the last element of transcoding */
+            /* Save this element in case of other file to convert (can't happen, BGM ...) */
+
+            pMCS_last = pParams;
+
+#endif
+
+        }
+        else
+        {
+            M4xVSS_MCS_params *pParams_temp = xVSS_context->pMCSparamsList;
+            M4xVSS_MCS_params *pParams_prev = M4OSA_NULL;
+
+            /* Parse MCS params chained list to find and delete BGM element */
+            while( pParams_temp != M4OSA_NULL )
+            {
+                if( pParams_temp->isBGM == M4OSA_TRUE )
+                {
+                    /* Remove this element */
+                    if( pParams_temp->pFileIn != M4OSA_NULL )
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp->pFileIn);
+                        pParams_temp->pFileIn = M4OSA_NULL;
+                    }
+
+                    if( pParams_temp->pFileOut != M4OSA_NULL )
+                    {
+                        /* Remove PCM temporary file */
+                        M4OSA_fileExtraDelete(pParams_temp->pFileOut);
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp->pFileOut);
+                        pParams_temp->pFileOut = M4OSA_NULL;
+                    }
+                    /* Link the previous element to the next one, i.e. remove the BGM
+                    element from the chained list */
+                    if( pParams_prev != M4OSA_NULL )
+                    {
+                        pParams_prev->pNext = pParams_temp->pNext;
+                    }
+                    /* If the current pointer is the first of the chained list and its
+                    next pointer is NULL, the list contained only one element */
+                    /* => reset the context variable to NULL so the first element of the
+                    chained list can be reassigned */
+                    if( pParams_temp == xVSS_context->pMCSparamsList
+                        && pParams_temp->pNext == M4OSA_NULL )
+                    {
+                        xVSS_context->pMCSparamsList = M4OSA_NULL;
+                    }
+                    /* In that case, the BGM pointer is the first one, but there are
+                    other elements after it */
+                    /* So, we need to change first chained list element */
+                    else if( pParams_temp->pNext != M4OSA_NULL
+                        && pParams_prev == M4OSA_NULL )
+                    {
+                        xVSS_context->pMCSparamsList = pParams_temp->pNext;
+                    }
+
+                    if( pParams_temp->pNext != M4OSA_NULL )
+                    {
+                        pParams_prev = pParams_temp->pNext;
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp);
+                        pParams_temp = M4OSA_NULL;
+                        pParams_temp = pParams_prev;
+                    }
+                    else
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp);
+                        pParams_temp = M4OSA_NULL;
+                    }
+                }
+                else
+                {
+                    pParams_prev = pParams_temp;
+                    pParams_temp = pParams_temp->pNext;
+                }
+            }
+            /* We need to initialize the last element of the chained list to be able to add new
+             BGM element */
+            pMCS_last = pParams_prev;
+
+            if( xVSS_context->pMCSparamsList == M4OSA_NULL )
+            {
+                /* In that case, it means that there was only one element in the chained list */
+                /* So, we need to save the new params*/
+                xVSS_context->pMCSparamsList = pParams;
+            }
+            else
+            {
+                /* Update next pointer of the previous last element of the chain */
+                pMCS_last->pNext = pParams;
+            }
+
+#if 0 /* Not necessary, BGM is the last element of transcoding */
+            /* Update save of last element of the chain (not necessary, BGM ...) */
+
+            pMCS_last = pParams;
+
+#endif
+
+        }
+
+        /* Fill the last M4xVSS_MCS_params element */
+        pParams->InputFileType =
+            xVSS_context->pSettings->xVSS.pBGMtrack->FileType;
+        pParams->OutputFileType = M4VIDEOEDITING_kFileType_PCM;
+        pParams->OutputVideoFormat = M4VIDEOEDITING_kNoneVideo;
+        pParams->OutputVideoFrameSize = M4VIDEOEDITING_kQCIF;
+        pParams->OutputVideoFrameRate = M4VIDEOEDITING_k15_FPS;
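+        /* The video frame size and frame rate above are placeholders: OutputVideoFormat
+        is kNoneVideo, so this MCS job produces an audio-only PCM file */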
+
+        if( xVSS_context->pSettings->xVSS.outputAudioFormat
+            == M4VIDEOEDITING_kAAC )
+        {
+            pParams->OutputAudioFormat = M4VIDEOEDITING_kAAC;
+            pParams->OutputAudioSamplingFrequency = M4VIDEOEDITING_kDefault_ASF;
+
+            /*FB: VAL CR P4ME00003076
+            The output audio bitrate in the AAC case is now directly given by the user*/
+            /*Check if the audio bitrate is correctly defined*/
+            /*Mono
+            MCS values for AAC Mono are min: 16kbps and max: 192 kbps*/
+            if( xVSS_context->pSettings->xVSS.outputAudioBitrate
+                >= M4VIDEOEDITING_k16_KBPS
+                && xVSS_context->pSettings->xVSS.outputAudioBitrate
+                <= M4VIDEOEDITING_k192_KBPS
+                && xVSS_context->pSettings->xVSS.bAudioMono == M4OSA_TRUE )
+            {
+                pParams->OutputAudioBitrate =
+                    xVSS_context->pSettings->xVSS.outputAudioBitrate;
+            }
+            /*Stereo
+            MCS values for AAC Stereo are min: 32 kbps and max: 192 kbps*/
+            else if( xVSS_context->pSettings->xVSS.outputAudioBitrate
+                >= M4VIDEOEDITING_k32_KBPS
+                && xVSS_context->pSettings->xVSS.outputAudioBitrate
+                <= M4VIDEOEDITING_k192_KBPS
+                && xVSS_context->pSettings->xVSS.bAudioMono == M4OSA_FALSE )
+            {
+                pParams->OutputAudioBitrate =
+                    xVSS_context->pSettings->xVSS.outputAudioBitrate;
+            }
+            else
+            {
+                pParams->OutputAudioBitrate = M4VIDEOEDITING_k32_KBPS;
+            }
+            pParams->bAudioMono = xVSS_context->pSettings->xVSS.bAudioMono;
+        }
+        else
+        {
+            pParams->OutputAudioFormat = M4VIDEOEDITING_kAMR_NB;
+            pParams->OutputAudioSamplingFrequency = M4VIDEOEDITING_kDefault_ASF;
+            pParams->OutputAudioBitrate = M4VIDEOEDITING_k12_2_KBPS;
+            pParams->bAudioMono = M4OSA_TRUE;
+        }
+        pParams->OutputVideoBitrate = M4VIDEOEDITING_kUndefinedBitrate;
+
+        /* Prepare output filename */
+        /* 21 is the size of "preview_16000_2.pcm" + \0 */
+        out_pcm =
+            (M4OSA_Char *)M4OSA_malloc(M4OSA_chrLength(xVSS_context->pTempPath)
+            + 21, M4VS, (M4OSA_Char *)"Temp char* for pcmPreviewFile");
+
+        if( out_pcm == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+            return M4ERR_ALLOC;
+        }
+
+        /* Copy temporary path to final preview path string */
+        M4OSA_chrNCopy(out_pcm, xVSS_context->pTempPath,
+            M4OSA_chrLength(xVSS_context->pTempPath) + 1);
+
+        /* Depending on the output sampling frequency and the number of channels, we
+        construct the preview output filename */
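+        /* The name encodes the PCM format as preview_<samplingFrequency>_<channelCount>.pcm,
+        e.g. preview_16000_2.pcm for 16 kHz stereo */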
+        if( xVSS_context->pSettings->xVSS.outputAudioFormat
+            == M4VIDEOEDITING_kAAC )
+        {
+            /* Construct output temporary PCM filename */
+            if( xVSS_context->pSettings->xVSS.bAudioMono == M4OSA_TRUE )
+            {
+                M4OSA_chrNCat(out_pcm, (M4OSA_Char *)"preview_16000_1.pcm\0",
+                    20);
+            }
+            else
+            {
+                M4OSA_chrNCat(out_pcm, (M4OSA_Char *)"preview_16000_2.pcm\0",
+                    20);
+            }
+        }
+        else if( xVSS_context->pSettings->xVSS.outputAudioFormat
+            == M4VIDEOEDITING_kAMR_NB )
+        {
+            /* Construct output temporary PCM filename */
+            M4OSA_chrNCat(out_pcm, (M4OSA_Char *)"preview_08000_1.pcm\0", 20);
+        }
+        else
+        {
+            if( out_pcm != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)out_pcm);
+                out_pcm = M4OSA_NULL;
+            }
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Bad audio output format \n");
+            return M4ERR_PARAMETER;
+        }
+
+        xVSS_context->pcmPreviewFile = out_pcm;
+
+        /**
+        * UTF conversion: convert into the customer format, before being used*/
+        pDecodedPath = out_pcm;
+        length = M4OSA_chrLength(pDecodedPath);
+
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)out_pcm, (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                    err);
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+        }
+
+        /**
+        * End of the UTF conversion, use the converted file path*/
+        xVSS_context->pcmPreviewFile =
+            (M4OSA_Void *)M4OSA_malloc(length + 1, M4VS,
+            (M4OSA_Char *)"pcmPreviewFile");
+
+        if( xVSS_context->pcmPreviewFile == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            M4OSA_free((M4OSA_MemAddr32)out_pcm);
+            out_pcm = M4OSA_NULL;
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(xVSS_context->pcmPreviewFile, pDecodedPath, length + 1);
+
+        /* Free temporary output filename */
+        if( out_pcm != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)out_pcm);
+            out_pcm = M4OSA_NULL;
+        }
+
+        pParams->pFileOut = M4OSA_malloc((length + 1), M4VS,
+            (M4OSA_Char *)"MCS BGM Params: file out");
+
+        if( pParams->pFileOut == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            return M4ERR_ALLOC;
+        }
+        pParams->pFileTemp = M4OSA_NULL;
+
+        M4OSA_memcpy(pParams->pFileOut, xVSS_context->pcmPreviewFile,
+            (length + 1)); /* Copy output file path */
+
+#if 0
+
+        xVSS_context->pcmPreviewFile =
+            (M4OSA_Char *)M4OSA_malloc(M4OSA_chrLength(out_pcm) + 1, M4VS,
+            "pcmPreviewFile");
+
+        if( xVSS_context->pcmPreviewFile == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+            M4OSA_free((M4OSA_MemAddr32)out_pcm);
+            out_pcm = M4OSA_NULL;
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_ALLOC;
+        }
+        M4OSA_chrNCopy(xVSS_context->pcmPreviewFile, out_pcm,
+            M4OSA_chrLength(out_pcm) + 1);
+
+        /* Free temporary output filename */
+        if( out_pcm != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)out_pcm);
+            out_pcm = M4OSA_NULL;
+        }
+
+#endif
+
+        /**
+        * UTF conversion: convert into the customer format, before being used*/
+
+        pDecodedPath = xVSS_context->pSettings->xVSS.pBGMtrack->pFile;
+        length = M4OSA_chrLength(pDecodedPath);
+
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)xVSS_context->pSettings->xVSS.pBGMtrack->
+                pFile, (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                    err);
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+        }
+
+        /**
+        * End of the UTF conversion, use the converted file path*/
+        pParams->pFileIn = (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+            (M4OSA_Char *)"MCS BGM Params: file in");
+
+        if( pParams->pFileIn == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(pParams->pFileIn, pDecodedPath,
+            (length + 1)); /* Copy input file path */
+
+        pParams->isBGM = M4OSA_TRUE;
+        pParams->isCreated = M4OSA_FALSE;
+        xVSS_context->nbStepTotal++;
+        bIsTranscoding = M4OSA_TRUE;
+#endif /* PREVIEW_ENABLED */
+
+    }
+    else if( pSettings->xVSS.pBGMtrack != M4OSA_NULL
+        && isNewBGM == M4OSA_FALSE )
+    {
+#ifdef PREVIEW_ENABLED
+        /* BGM is the same as before, no need to re-decode the audio */
+        /* The MCS params chained list must be updated to signal the M4xVSS_step function
+        to skip BGM decoding */
+
+        M4xVSS_MCS_params *pParams_temp = xVSS_context->pMCSparamsList;
+        M4xVSS_MCS_params *pParams_prev = M4OSA_NULL;
+
+#endif /* PREVIEW_ENABLED */
+        /* Save the output file pointer, because it will be needed when saving the audio
+         mixed file (last save step) */
+
+        xVSS_context->pOutputFile = xVSS_context->pSettings->pOutputFile;
+        xVSS_context->pTemporaryFile = xVSS_context->pSettings->pTemporaryFile;
+
+        /* Re-write BGM settings in case they have changed between two sendCommand calls */
+        xVSS_context->pSettings->xVSS.pBGMtrack->uiAddCts =
+            pSettings->xVSS.pBGMtrack->uiAddCts;
+        xVSS_context->pSettings->xVSS.pBGMtrack->uiAddVolume =
+            pSettings->xVSS.pBGMtrack->uiAddVolume;
+        xVSS_context->pSettings->xVSS.pBGMtrack->uiBeginLoop =
+            pSettings->xVSS.pBGMtrack->uiBeginLoop;
+        xVSS_context->pSettings->xVSS.pBGMtrack->uiEndLoop =
+            pSettings->xVSS.pBGMtrack->uiEndLoop;
+
+#ifdef PREVIEW_ENABLED
+        /* Parse the MCS params chained list to find the BGM element and mark it as created */
+
+        while( pParams_temp != M4OSA_NULL )
+        {
+            if( pParams_temp->isBGM == M4OSA_TRUE )
+            {
+                pParams_temp->isCreated = M4OSA_TRUE;
+                break;
+            }
+            pParams_prev = pParams_temp;
+            pParams_temp = pParams_temp->pNext;
+        }
+
+#endif /* PREVIEW_ENABLED */
+
+        M4OSA_TRACE2_0("M4xVSS_SendCommand has been recalled, BGM is the same");
+    }
+    else
+    {
+        M4OSA_TRACE1_0("No BGM in this xVSS command");
+
+        if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+        {
+#ifdef PREVIEW_ENABLED
+            /* The previous BGM element must be removed from the MCS params chained list */
+
+            M4xVSS_MCS_params *pParams_temp = xVSS_context->pMCSparamsList;
+            M4xVSS_MCS_params *pParams_prev = M4OSA_NULL;
+
+            /* Parse MCS params chained list to find and delete BGM element */
+            while( pParams_temp != M4OSA_NULL )
+            {
+                if( pParams_temp->isBGM == M4OSA_TRUE )
+                {
+                    /* Remove this element */
+                    if( pParams_temp->pFileIn != M4OSA_NULL )
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp->pFileIn);
+                        pParams_temp->pFileIn = M4OSA_NULL;
+                    }
+
+                    if( pParams_temp->pFileOut != M4OSA_NULL )
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp->pFileOut);
+                        pParams_temp->pFileOut = M4OSA_NULL;
+                    }
+                    /* Chain previous element with next element */
+                    if( pParams_prev != M4OSA_NULL )
+                    {
+                        pParams_prev->pNext = pParams_temp->pNext;
+                    }
+                    /* If the current pointer is the first of the chained list and its next
+                     pointer is NULL */
+                    /* it means that there was only one element in the list */
+                    /* => set the context variable to NULL */
+                    if( pParams_temp == xVSS_context->pMCSparamsList
+                        && pParams_temp->pNext == M4OSA_NULL )
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp);
+                        xVSS_context->pMCSparamsList = M4OSA_NULL;
+                    }
+                    /* In this case, the BGM element is the first one, but there are other
+                     elements after it */
+                    /* So the head of the chained list must be updated */
+                    else if( pParams_temp->pNext != M4OSA_NULL )
+                    {
+                        xVSS_context->pMCSparamsList = pParams_temp->pNext;
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp);
+                        pParams_temp = M4OSA_NULL;
+                    }
+                    /* In all other cases, nothing else to do except freeing the chained
+                    list element */
+                    else
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp);
+                        pParams_temp = M4OSA_NULL;
+                    }
+                    break;
+                }
+                pParams_prev = pParams_temp;
+                pParams_temp = pParams_temp->pNext;
+            }
+
+#endif /* PREVIEW_ENABLED */
+            /* Here, we free all BGM components and set xVSS_context->pSettings->
+            xVSS.pBGMtrack to NULL */
+
+            if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+            {
+                if( xVSS_context->pSettings->xVSS.pBGMtrack->pFile
+                    != M4OSA_NULL )
+                {
+                    M4OSA_free(xVSS_context->pSettings->xVSS.pBGMtrack->pFile);
+                    xVSS_context->pSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+                }
+                M4OSA_free(
+                    (M4OSA_MemAddr32)xVSS_context->pSettings->xVSS.pBGMtrack);
+                xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
+            }
+        }
+    }
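+    /* Summary of the BGM handling above: a new BGM track adds an MCS decoding entry to the
+    params chained list (transcoding is then necessary), an unchanged BGM track only marks the
+    existing entry as created so that its decoding is skipped, and a removed BGM track frees
+    the corresponding MCS entry and the pBGMtrack settings */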
+
+    /* Default behaviour: if no audio/video output format is set, H263/AMR is used */
+#if 0
+
+    if( xVSS_context->pSettings->xVSS.outputVideoFormat
+        == M4VIDEOEDITING_kNoneVideo )
+    {
+        xVSS_context->pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
+    }
+
+    if( xVSS_context->pSettings->xVSS.outputAudioFormat
+        == M4VIDEOEDITING_kNoneAudio )
+    {
+        xVSS_context->pSettings->xVSS.outputAudioFormat =
+            M4VIDEOEDITING_kAMR_NB;
+    }
+
+#endif
+    /* Changed to be able to mix with video-only files -> in case no master clip is found
+    (i.e. only JPEG input or video-only input) */
+    /* and if there is a BGM, we force the added volume to 100 (i.e. replace audio) */
+
+    if( masterClip == -1
+        && xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+    {
+        /* In that case, it means that no input 3GP file has a video track.
+        Therefore, if mixing is requested, it will fail. Thus, we force audio replacement. */
+        xVSS_context->pSettings->xVSS.pBGMtrack->uiAddVolume = 100;
+    }
+
+    /* Save clip number to know if a M4xVSS_sendCommand has already been called */
+    xVSS_context->previousClipNumber = xVSS_context->pSettings->uiClipNumber;
+
+    /* Change state */
+    xVSS_context->m_state = M4xVSS_kStateAnalyzing;
+
+    /* In the MMS use case, the max video bitrate is computed here */
+    /* If the bitrate is too low, a specific warning is returned */
+    if( xVSS_context->pSettings->xVSS.outputFileSize != 0 && totalDuration > 0 )
+    {
+        M4OSA_UInt32 targetedBitrate = 0;
+        M4VIDEOEDITING_ClipProperties fileProperties;
+        M4OSA_Double ratio;
+
+        if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+        {
+            if( xVSS_context->pSettings->xVSS.pBGMtrack->uiAddVolume
+                == 100 ) /* We are in "replace audio" mode, need to check the file type */
+            {
+                if( xVSS_context->pSettings->xVSS.pBGMtrack->FileType
+                    == M4VIDEOEDITING_kFileType_3GPP )
+                {
+                    M4OSA_Void *pDecodedPath;
+                    /**
+                    * UTF conversion: convert into the customer format, before being used*/
+                    pDecodedPath =
+                        xVSS_context->pSettings->xVSS.pBGMtrack->pFile;
+                    length = M4OSA_chrLength(pDecodedPath);
+
+                    if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                        != M4OSA_NULL && xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer
+                        != M4OSA_NULL )
+                    {
+                        err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                            (M4OSA_Void *)xVSS_context->pSettings->
+                            xVSS.pBGMtrack->pFile,
+                            (M4OSA_Void *)xVSS_context->
+                            UTFConversionContext.
+                            pTempOutConversionBuffer, &length);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_SendCommand: \
+                                M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                                err);
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            return err;
+                        }
+                        pDecodedPath = xVSS_context->
+                            UTFConversionContext.pTempOutConversionBuffer;
+                    }
+
+                    /**
+                    * End of the UTF conversion, use the converted file path*/
+                    err =
+                        M4xVSS_internalGetProperties(xVSS_context, pDecodedPath,
+                        &fileProperties);
+
+                    /* Get the properties of the BGM track */
+                    /*err = M4xVSS_internalGetProperties(xVSS_context, xVSS_context->pSettings->
+                    xVSS.pBGMtrack->pFile, &fileProperties);*/
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_sendCommand: M4xVSS_internalGetProperties returned an error:\
+                             0x%x", err);
+                        return err;
+                    }
+
+                    if( fileProperties.AudioStreamType
+                        != M4VIDEOEDITING_kAMR_NB )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4xVSS_sendCommand: Impossible to use MMS mode with BGM != AMR-NB");
+                        return M4ERR_PARAMETER;
+                    }
+                }
+                else if( xVSS_context->pSettings->xVSS.pBGMtrack->FileType
+                    != M4VIDEOEDITING_kFileType_AMR
+                    && xVSS_context->pSettings->xVSS.pBGMtrack->FileType
+                    != M4VIDEOEDITING_kFileType_MP3 )
+                {
+                    M4OSA_TRACE1_0("M4xVSS_sendCommand: Bad input BGM file");
+                    return M4ERR_PARAMETER;
+                }
+            }
+        }
+
+        /* Compute the targeted bitrate, keeping a margin for the moov box (0.84 factor) */
+        if( totalDuration > 1000 )
+        {
+            targetedBitrate =
+                (M4OSA_UInt32)(( xVSS_context->pSettings->xVSS.outputFileSize
+                * 8 * 0.84) / (totalDuration / 1000));
+        }
+        else
+        {
+            targetedBitrate = 0;
+        }
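+        /* Illustrative example (hypothetical values): for outputFileSize = 300000 bytes and
+        totalDuration = 20000 ms, targetedBitrate = (300000 * 8 * 0.84) / 20 = 100800 bps;
+        removing the 12200 bps AMR audio bitrate below leaves about 88600 bps for video */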
+
+        /* Remove audio bitrate */
+        if( targetedBitrate >= 12200 )
+        {
+            targetedBitrate -= 12200; /* Only AMR is supported in MMS case */
+        }
+        else
+        {
+            targetedBitrate = 0;
+        }
+
+        /* Compute a "complexity" indicator depending on the number of sequences and the total duration */
+        /* The higher the number of sequences, the more I-frames there are */
+        /* In that case, the target bitrate must be reduced */
+        ratio =
+            (M4OSA_Double)((M4OSA_Double)(xVSS_context->pSettings->uiClipNumber
+            * 100000) / (M4OSA_Double)(totalDuration));
+        M4OSA_TRACE2_3(
+            "Ratio clip_nb/duration = %f\nTargeted bitrate = %d\nTotal duration: %d",
+            (M4OSA_Double)((M4OSA_Double)(xVSS_context->pSettings->uiClipNumber
+            * 100000) / (M4OSA_Double)(totalDuration)),
+            targetedBitrate, totalDuration);
+
+        if( ratio > 50 && ratio <= 75 )
+        {
+            /* It means that there is a potential risk of having a higher file size
+            than specified */
+            targetedBitrate -= (M4OSA_UInt32)(targetedBitrate * 0.1);
+            M4OSA_TRACE2_2(
+                "New bitrate1 !!\nRatio clip_nb/duration = %f\nTargeted bitrate = %d",
+                ratio, targetedBitrate);
+        }
+        else if( ratio > 75 )
+        {
+            targetedBitrate -= (M4OSA_UInt32)(targetedBitrate * 0.15);
+            M4OSA_TRACE2_2(
+                "New bitrate2 !!\nRatio clip_nb/duration = %f\nTargeted bitrate = %d",
+                ratio, targetedBitrate);
+        }
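+        /* Illustrative example (hypothetical values): 15 clips over a 20000 ms timeline give
+        ratio = 15 * 100000 / 20000 = 75, so the targeted bitrate is lowered by 10 percent;
+        20 clips over the same duration give ratio = 100, i.e. a 15 percent reduction */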
+
+        /*CR 3283 MMS use case for VAL:
+        Decrease the output file size to keep a margin of 5%
+        The writer will stop when the targeted output file size is reached*/
+        xVSS_context->pSettings->xVSS.outputFileSize -=
+            (M4OSA_UInt32)(xVSS_context->pSettings->xVSS.outputFileSize * 0.05);
+
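+        /* The switch below enforces a minimum video bitrate per output size (32 kbps for
+        SQCIF and QQVGA, 48 kbps for QCIF, 64 kbps for QVGA, 128 kbps for CIF, 192 kbps for
+        VGA); when the computed target is below this floor, the floor is kept in
+        xVSS_context->targetedBitrate and M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED is returned */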
+        switch( xVSS_context->pSettings->xVSS.outputVideoSize )
+        {
+            case M4VIDEOEDITING_kSQCIF:
+                if( targetedBitrate < 32000 )
+                {
+                    xVSS_context->targetedBitrate = 32000;
+                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+                }
+                break;
+
+            case M4VIDEOEDITING_kQQVGA:
+                if( targetedBitrate < 32000 )              /*48000)*/
+                {
+                    xVSS_context->targetedBitrate = 32000; /*48000;*/
+                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+                }
+                break;
+
+            case M4VIDEOEDITING_kQCIF:
+                if( targetedBitrate < 48000 )              /*64000)*/
+                {
+                    xVSS_context->targetedBitrate = 48000; /*64000;*/
+                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+                }
+                break;
+
+            case M4VIDEOEDITING_kQVGA:
+                if( targetedBitrate < 64000 )              /*128000)*/
+                {
+                    xVSS_context->targetedBitrate = 64000; /*128000;*/
+                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+                }
+                break;
+
+            case M4VIDEOEDITING_kCIF:
+                if( targetedBitrate < 128000 )
+                {
+                    xVSS_context->targetedBitrate = 128000;
+                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+                }
+                break;
+
+            case M4VIDEOEDITING_kVGA:
+                if( targetedBitrate < 192000 )
+                {
+                    xVSS_context->targetedBitrate = 192000;
+                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+                }
+                break;
+
+            default:
+                /* Cannot happen */
+                M4OSA_TRACE1_0(
+                    "M4xVSS_sendCommand: Error in output fileSize !");
+                return M4ERR_PARAMETER;
+                break;
+        }
+        xVSS_context->targetedBitrate = (M4OSA_UInt32)targetedBitrate;
+    }
+
+    if( bIsTranscoding )
+    {
+        return M4VSS3GPP_WAR_TRANSCODING_NECESSARY;
+    }
+    else
+    {
+        return M4NO_ERROR;
+    }
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_SaveStart(M4OSA_Context pContext, M4OSA_Void* pFilePath,
+ *                                         M4OSA_UInt32 filePathSize)
+ * @brief        This function prepares the save
+ * @note        The xVSS creates the final edited 3GP file
+ *                This function must be called once M4xVSS_Step has returned
+ *                M4VSS3GPP_WAR_ANALYZING_DONE
+ *                After this function, the user must call M4xVSS_Step until
+ *                it returns an error other than M4NO_ERROR.
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @param    pFilePath            (IN) Output file path if the user wants to provide a
+ *                                different one, else can be NULL (allocated by the user)
+ * @param    filePathSize        (IN) Length of pFilePath
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_SaveStart( M4OSA_Context pContext, M4OSA_Void *pFilePath,
+                           M4OSA_UInt32 filePathSize )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err;
+
+    /*Add for UTF conversion: copy the pSettings structure into a new pCurrentEditSettings*/
+    M4VSS3GPP_EditSettings *pEditSavingSettings = M4OSA_NULL;
+    M4OSA_UInt8 i, j;
+    M4OSA_UInt32 offset = 0;
+    M4OSA_UInt8 nbEffects = 0;
+    /*only for UTF conversion support*/
+    M4OSA_Void *pDecodedPath = M4OSA_NULL;
+    M4OSA_UInt32 length = 0;
+    /**/
+
+    /* Check state */
+    if( xVSS_context->m_state != M4xVSS_kStateOpened )
+    {
+        M4OSA_TRACE1_1(
+            "Bad state when calling M4xVSS_SaveStart function! State is %d",
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    /* RC: temporary handling of a changed output file path */
+    /* TO BE CHANGED CLEANLY WITH A MALLOC/MEMCPY !!!! */
+    if( pFilePath != M4OSA_NULL )
+    {
+        if( xVSS_context->pSettings->pOutputFile != M4OSA_NULL )
+        {
+            /*it means that pOutputFile has been allocated in M4xVSS_sendCommand()*/
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings->pOutputFile);
+            xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+            xVSS_context->pSettings->uiOutputPathSize = 0;
+        }
+
+        pDecodedPath = pFilePath;
+        /*As all inputs of the xVSS are in UTF8, convert the output file path into the customer
+         format*/
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)pFilePath, (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                    err);
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            filePathSize = length;
+        }
+
+        xVSS_context->pOutputFile =
+            (M4OSA_Void *)M4OSA_malloc(filePathSize + 1, M4VS,
+            (M4OSA_Char *)"M4xVSS_SaveStart: output file");
+
+        if( xVSS_context->pOutputFile == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(xVSS_context->pOutputFile, pDecodedPath, filePathSize + 1);
+        xVSS_context->pOutputFile[filePathSize] = '\0';
+        xVSS_context->pSettings->pOutputFile = xVSS_context->pOutputFile;
+        xVSS_context->pSettings->uiOutputPathSize = filePathSize;
+    }
+
+    /**
+    ***/
+
+    /*FB: Add for UTF conversion: copy the pSettings structure into a new pCurrentEditSettings*/
+    /*It is the same principle as in the PreviewStart()*/
+    pEditSavingSettings =
+        (M4VSS3GPP_EditSettings *)M4OSA_malloc(sizeof(M4VSS3GPP_EditSettings),
+        M4VS, (M4OSA_Char *)"Saving, copy of VSS structure");
+
+    if( pEditSavingSettings == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+        if( xVSS_context->pOutputFile != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+            xVSS_context->pOutputFile = M4OSA_NULL;
+        }
+        return M4ERR_ALLOC;
+    }
+
+    /* Copy settings from input structure */
+    M4OSA_memcpy((M4OSA_MemAddr8) &(pEditSavingSettings->xVSS),
+        (M4OSA_MemAddr8) &(xVSS_context->pSettings->xVSS),
+        sizeof(M4xVSS_EditSettings));
+
+    /* Initialize pEditSavingSettings structure */
+    pEditSavingSettings->xVSS.pBGMtrack = M4OSA_NULL;
+
+    pEditSavingSettings->videoFrameRate =
+        xVSS_context->pSettings->videoFrameRate;
+    pEditSavingSettings->uiClipNumber = xVSS_context->pSettings->uiClipNumber;
+    pEditSavingSettings->uiMasterClip =
+        xVSS_context->pSettings->uiMasterClip; /* VSS2.0 mandatory parameter */
+
+    /* Allocate savingSettings.pClipList/pTransitions structure */
+    pEditSavingSettings->pClipList = (M4VSS3GPP_ClipSettings *
+        * )M4OSA_malloc(sizeof(M4VSS3GPP_ClipSettings *)
+        *pEditSavingSettings->uiClipNumber,
+        M4VS, (M4OSA_Char *)"xVSS, saving , copy of pClipList");
+
+    if( pEditSavingSettings->pClipList == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+        if( xVSS_context->pOutputFile != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+            xVSS_context->pOutputFile = M4OSA_NULL;
+        }
+        return M4ERR_ALLOC;
+    }
+
+    if( pEditSavingSettings->uiClipNumber > 1 )
+    {
+        pEditSavingSettings->pTransitionList = (M4VSS3GPP_TransitionSettings *
+            * )M4OSA_malloc(sizeof(M4VSS3GPP_TransitionSettings *)
+            *(pEditSavingSettings->uiClipNumber - 1),
+            M4VS, (M4OSA_Char *)"xVSS, saving, copy of pTransitionList");
+
+        if( pEditSavingSettings->pTransitionList == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+    }
+    else
+    {
+        pEditSavingSettings->pTransitionList = M4OSA_NULL;
+    }
+
+    for ( i = 0; i < pEditSavingSettings->uiClipNumber; i++ )
+    {
+        pEditSavingSettings->pClipList[i] = (M4VSS3GPP_ClipSettings
+            *)M4OSA_malloc(sizeof(M4VSS3GPP_ClipSettings),
+            M4VS, (M4OSA_Char *)"saving clip settings");
+
+        if( pEditSavingSettings->pClipList[i] == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+
+        if( i < pEditSavingSettings->uiClipNumber
+            - 1 ) /* Because there is 1 less transition than clip number */
+        {
+            pEditSavingSettings->pTransitionList[i] =
+                (M4VSS3GPP_TransitionSettings
+                *)M4OSA_malloc(sizeof(M4VSS3GPP_TransitionSettings),
+                M4VS, (M4OSA_Char *)"saving transition settings");
+
+            if( pEditSavingSettings->pTransitionList[i] == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+                if( xVSS_context->pOutputFile != M4OSA_NULL )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                    xVSS_context->pOutputFile = M4OSA_NULL;
+                }
+                return M4ERR_ALLOC;
+            }
+        }
+    }
+
+    for ( i = 0; i < xVSS_context->pSettings->uiClipNumber; i++ )
+    {
+        // Add MP4 file support
+
+        if( ( xVSS_context->pSettings->pClipList[i]->FileType
+            == M4VIDEOEDITING_kFileType_3GPP)
+            || (xVSS_context->pSettings->pClipList[i]->FileType
+            == M4VIDEOEDITING_kFileType_MP4) )
+
+        {
+            /* Copy data from given structure to our saving structure */
+            M4xVSS_DuplicateClipSettings(pEditSavingSettings->pClipList[i],
+                xVSS_context->pSettings->pClipList[i],
+                M4OSA_FALSE /* remove effects */);
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            pDecodedPath = pEditSavingSettings->pClipList[i]->pFile;
+            length = M4OSA_chrLength(pDecodedPath);
+
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err =
+                    M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
+                    *)pEditSavingSettings->pClipList[i]->pFile,
+                    (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer,
+                    &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                        err);
+
+                    if( xVSS_context->pOutputFile != M4OSA_NULL )
+                    {
+                        M4OSA_free(
+                            (M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                        xVSS_context->pOutputFile = M4OSA_NULL;
+                    }
+                    return err;
+                }
+                pDecodedPath = xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer;
+
+                /**
+                * End of the UTF conversion, use the converted file path*/
+                M4OSA_free((M4OSA_MemAddr32)
+                    pEditSavingSettings->pClipList[i]->pFile);
+                pEditSavingSettings->pClipList[i]->pFile = (M4OSA_Void
+                    *)M4OSA_malloc((length + 1),
+                    M4VS, (M4OSA_Char *)"saving transition settings");
+
+                if( pEditSavingSettings->pClipList[i]->pFile == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+                    if( xVSS_context->pOutputFile != M4OSA_NULL )
+                    {
+                        M4OSA_free(
+                            (M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                        xVSS_context->pOutputFile = M4OSA_NULL;
+                    }
+                    return M4ERR_ALLOC;
+                }
+                M4OSA_memcpy(pEditSavingSettings->pClipList[i]->pFile,
+                    pDecodedPath, length + 1);
+            }
+            /*FB: add file path size because of UTF 16 conversion*/
+            pEditSavingSettings->pClipList[i]->filePathSize = length+1;
+
+            if( i
+                < xVSS_context->pSettings->uiClipNumber
+                - 1 ) /* Because there is 1 less transition than clip number */
+            {
+                M4OSA_memcpy(
+                    (M4OSA_MemAddr8)pEditSavingSettings->pTransitionList[i],
+                    (M4OSA_MemAddr8)xVSS_context->pSettings->
+                    pTransitionList[i],
+                    sizeof(M4VSS3GPP_TransitionSettings));
+            }
+        }
+        else
+        {
+            M4OSA_TRACE1_0(
+                "M4xVSS_SaveStart: Error when parsing xVSS_context->pSettings->pClipList[i]:\
+                 Bad file type");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_PARAMETER;
+        }
+    }
+
+    /* Count the number of video effects, used to know how much memory needs to be allocated */
+    /* FB 2008/10/15: removed : not compatible with M4VSS3GPP_kVideoEffectType_None
+    for(j=0;j<xVSS_context->pSettings->nbEffects;j++)
+    {
+    if(xVSS_context->pSettings->Effects[j].VideoEffectType != M4VSS3GPP_kVideoEffectType_None)
+    {
+    nbEffects++;
+    }
+    }*/
+    nbEffects = xVSS_context->pSettings->nbEffects;
+
+    /* Allocate effects saving structure with correct number of effects */
+    if( nbEffects != 0 )
+    {
+        pEditSavingSettings->Effects =
+            (M4VSS3GPP_EffectSettings *)M4OSA_malloc(nbEffects
+            * sizeof(M4VSS3GPP_EffectSettings), M4VS, (M4OSA_Char
+            *)"Saving settings, effects table of structure settings");
+
+        if( pEditSavingSettings->Effects == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+
+        /* Just copy the effect structures into the saving structure, as effect times are now */
+        /* relative to the output clip time*/
+        M4OSA_memcpy((M4OSA_MemAddr8)pEditSavingSettings->Effects,
+            (M4OSA_MemAddr8)xVSS_context->pSettings->Effects,
+            nbEffects * sizeof(M4VSS3GPP_EffectSettings));
+    }
+    else
+    {
+        pEditSavingSettings->Effects = M4OSA_NULL;
+        pEditSavingSettings->nbEffects = 0;
+    }
+    pEditSavingSettings->nbEffects = nbEffects;
+
+    if( pFilePath != M4OSA_NULL )
+    {
+        pEditSavingSettings->pOutputFile = pFilePath;
+    }
+
+    /* Save the pointer to the saving edit settings, to be used in the step function */
+    xVSS_context->pCurrentEditSettings = pEditSavingSettings;
+
+    /* Change the output file name to a temporary output file name, because the final file
+     will be generated by the audio mixer */
+    if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+    {
+
+        M4OSA_Char out_3gp[64];
+        M4OSA_Char out_3gp_tmp[64];
+
+        /**/
+        pEditSavingSettings->xVSS.pBGMtrack =
+            (M4xVSS_BGMSettings *)M4OSA_malloc(sizeof(M4xVSS_BGMSettings), M4VS,
+            (M4OSA_Char
+            *)"Saving settings, effects table of structure settings");
+
+        if( pEditSavingSettings->xVSS.pBGMtrack == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+
+        /* Copy the BGM settings structure into the saving structure; the pFile pointer is */
+        /* re-allocated and copied just below */
+        M4OSA_memcpy((M4OSA_MemAddr8)pEditSavingSettings->xVSS.pBGMtrack,
+            (M4OSA_MemAddr8)xVSS_context->pSettings->xVSS.pBGMtrack,
+            sizeof(M4xVSS_BGMSettings));
+
+        /* Allocate file name, and copy file name buffer to our structure */
+        pEditSavingSettings->xVSS.pBGMtrack->pFile = M4OSA_malloc(
+            (M4OSA_chrLength(xVSS_context->pSettings->xVSS.pBGMtrack->pFile)
+            + 1),
+            M4VS, (M4OSA_Char *)"Saving struct xVSS BGM file path");
+
+        if( pEditSavingSettings->xVSS.pBGMtrack->pFile == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(pEditSavingSettings->xVSS.pBGMtrack->pFile,
+            xVSS_context->pSettings->xVSS.pBGMtrack->pFile,
+            M4OSA_chrLength(xVSS_context->pSettings->xVSS.pBGMtrack->pFile)
+            + 1);
+
+        /*Copy BGM track file path*/
+
+        /**
+        * UTF conversion*/
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)pEditSavingSettings->xVSS.pBGMtrack->pFile,
+                (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                    err);
+
+                if( xVSS_context->pOutputFile != M4OSA_NULL )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                    xVSS_context->pOutputFile = M4OSA_NULL;
+                }
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+
+            M4OSA_free(
+                (M4OSA_MemAddr32)pEditSavingSettings->xVSS.pBGMtrack->pFile);
+            pEditSavingSettings->xVSS.pBGMtrack->pFile =
+                (M4OSA_Void *)M4OSA_malloc(length + 1, M4VS, (M4OSA_Char
+                *)"M4xVSS_SaveStart: Temp filename in case of BGM");
+
+            if( pEditSavingSettings->xVSS.pBGMtrack->pFile == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+                if( xVSS_context->pOutputFile != M4OSA_NULL )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                    xVSS_context->pOutputFile = M4OSA_NULL;
+                }
+                return M4ERR_ALLOC;
+            }
+            M4OSA_memcpy(pEditSavingSettings->xVSS.pBGMtrack->pFile,
+                pDecodedPath, length + 1);
+        }
+
+        /**/
+
+        M4OSA_chrNCopy(out_3gp, xVSS_context->pTempPath, 64);
+        M4OSA_chrNCopy(out_3gp_tmp, xVSS_context->pTempPath, 64);
+
+        /* Construct output temporary 3GP filename */
+        M4OSA_chrNCat(out_3gp, (M4OSA_Char *)"savetemp.3gp\0", 13);
+        M4OSA_chrNCat(out_3gp_tmp, (M4OSA_Char *)"savetemp.tmp\0", 13);
+
+        /**
+        * UTF conversion: convert into the customer format, before being used*/
+        pDecodedPath = out_3gp;
+        length = M4OSA_chrLength(pDecodedPath);
+
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)out_3gp, (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                    err);
+
+                if( xVSS_context->pOutputFile != M4OSA_NULL )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                    xVSS_context->pOutputFile = M4OSA_NULL;
+                }
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+        }
+
+        /**
+        * End of the UTF conversion, use the converted file path*/
+        xVSS_context->pCurrentEditSettings->pOutputFile =
+            (M4OSA_Void *)M4OSA_malloc(length + 1, M4VS,
+            (M4OSA_Char *)"M4xVSS_SaveStart: Temp filename in case of BGM");
+
+        if( xVSS_context->pCurrentEditSettings->pOutputFile == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(xVSS_context->pCurrentEditSettings->pOutputFile,
+            pDecodedPath, length + 1);
+        xVSS_context->pCurrentEditSettings->uiOutputPathSize = length + 1;
+
+        /**
+        * UTF conversion: convert into the customer format, before being used*/
+        pDecodedPath = out_3gp_tmp;
+        length = M4OSA_chrLength(pDecodedPath);
+
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)out_3gp_tmp, (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                    err);
+
+                if( xVSS_context->pOutputFile != M4OSA_NULL )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                    xVSS_context->pOutputFile = M4OSA_NULL;
+                }
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+        }
+
+        /**
+        * End of the UTF conversion, use the converted file path*/
+        xVSS_context->pCurrentEditSettings->pTemporaryFile =
+            (M4OSA_Void *)M4OSA_malloc(length + 1, M4VS,
+            (M4OSA_Char *)"M4xVSS_SaveStart: Temporary file");
+
+        if( xVSS_context->pCurrentEditSettings->pTemporaryFile == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(xVSS_context->pCurrentEditSettings->pTemporaryFile,
+            pDecodedPath, length + 1);
+
+        /* Set the number of steps for progress monitoring to 2, because audio mixing is needed */
+        xVSS_context->nbStepTotal = 2;
+    }
+    else
+    {
+        xVSS_context->pCurrentEditSettings->pOutputFile =
+            xVSS_context->pOutputFile;
+        xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
+
+        /* Set the number of steps for progress monitoring to 1, because no audio mixing is needed */
+        xVSS_context->nbStepTotal = 1;
+    }
+
+    /**
+    ***/
+
+    err = M4xVSS_internalGenerateEditedFile(xVSS_context);
+
+    if( err != M4NO_ERROR )
+    {
+        M4OSA_TRACE1_1(
+            "M4xVSS_SaveStart: M4xVSS_internalGenerateEditedFile returned an error: 0x%x",
+            err);
+
+        /**/
+        if( xVSS_context->pCurrentEditSettings->pOutputFile != M4OSA_NULL
+            && xVSS_context->pSettings->xVSS.pBGMtrack == M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCurrentEditSettings->
+                pOutputFile);
+            xVSS_context->pCurrentEditSettings->pOutputFile = M4OSA_NULL;
+            xVSS_context->pOutputFile = M4OSA_NULL;
+        }
+
+        if( xVSS_context->pCurrentEditSettings->pTemporaryFile != M4OSA_NULL
+            && xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCurrentEditSettings->
+                pTemporaryFile);
+            xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
+        }
+
+        if( xVSS_context->pOutputFile != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+            xVSS_context->pOutputFile = M4OSA_NULL;
+        }
+        /* TODO: Translate error code of VSS to an xVSS error code */
+        return err;
+    }
+
+    /* Reinitialize current step number for progression monitoring */
+    xVSS_context->currentStep = 0;
+
+    /* Change xVSS state */
+    xVSS_context->m_state = M4xVSS_kStateSaving;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_SaveStop(M4OSA_Context pContext)
+ * @brief        This function frees the saving resources and changes the xVSS
+ *                internal state.
+ * @note        This function must be called once M4xVSS_Step has returned
+ *                M4VSS3GPP_WAR_SAVING_DONE
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_SaveStop( M4OSA_Context pContext )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check state */
+    if( xVSS_context->m_state != M4xVSS_kStateSaving )
+    {
+        M4OSA_TRACE1_1(
+            "Bad state when calling M4xVSS_SaveStop function! State is %d",
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    /* Free saving structures */
+    M4xVSS_internalFreeSaving(xVSS_context);
+
+    if( xVSS_context->pOutputFile != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+        xVSS_context->pOutputFile = M4OSA_NULL;
+    }
+
+    /* Change xVSS state */
+    xVSS_context->m_state = M4xVSS_kStateSaved;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_Step(M4OSA_Context pContext, M4OSA_UInt8 *pProgress)
+ * @brief        This function executes different tasks, depending on the xVSS
+ *                internal state.
+ * @note        This function:
+ *                    - analyses the editing structure if called after M4xVSS_SendCommand
+ *                    - generates the preview file if called after M4xVSS_PreviewStart
+ *                    - generates the final edited file if called after M4xVSS_SaveStart
+ *
+ * @param    pContext                        (IN) Pointer on the xVSS edit context
+ * @param    pProgress                        (IN/OUT) Pointer on an integer giving a
+ *                                            progress indication (between 0-100)
+ * @return    M4NO_ERROR:                        No error, the user must call M4xVSS_Step again
+ * @return    M4ERR_PARAMETER:                At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:                    This function cannot be called at this time
+ * @return    M4VSS3GPP_WAR_PREVIEW_READY:    Preview file is generated
+ * @return    M4VSS3GPP_WAR_SAVING_DONE:        Final edited file is generated
+ * @return    M4VSS3GPP_WAR_ANALYZING_DONE:    Analysis is done
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_Step( M4OSA_Context pContext, M4OSA_UInt8 *pProgress )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4VSS3GPP_EditContext pVssCtxt = xVSS_context->pCurrentEditContext;
+    M4VSS3GPP_AudioMixingContext pAudioMixingCtxt =
+        xVSS_context->pAudioMixContext;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt8 uiProgress = 0;
+
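+    /* Overview of the state machine below: in the Saving state, the editing micro-step runs
+    M4VSS3GPP_editStep and, once editing is done and a BGM track is present, switches to the
+    audio-mixing micro-step (M4VSS3GPP_audioMixingStep); in the Analyzing state, the analysis
+    micro-steps (starting with the Pto3GPP analysis) process the inputs */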
+    switch( xVSS_context->m_state )
+    {
+        case M4xVSS_kStateSaving:
+        //case M4xVSS_kStateGeneratingPreview:
+            {
+                if( xVSS_context->editingStep
+                    == M4xVSS_kMicroStateEditing ) /* VSS -> creating effects, transitions ... */
+                {
+                    /* RC: to delete unnecessary temp files on the fly */
+                    M4VSS3GPP_InternalEditContext *pVSSContext =
+                        (M4VSS3GPP_InternalEditContext *)pVssCtxt;
+
+                    err = M4VSS3GPP_editStep(pVssCtxt, &uiProgress);
+
+                    if( ( err != M4NO_ERROR) && (err != M4VSS3GPP_WAR_EDITING_DONE)
+                        && (err != M4VSS3GPP_WAR_SWITCH_CLIP) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_Step: M4VSS3GPP_editStep returned 0x%x\n", err);
+                        M4VSS3GPP_editCleanUp(pVssCtxt);
+                        /* TODO ? : Translate error code of VSS to an xVSS error code ? */
+                        xVSS_context->pCurrentEditContext = M4OSA_NULL;
+                        return err;
+                    }
+
+                    /* RC: to delete unnecessary temp files on the fly */
+                    if( err == M4VSS3GPP_WAR_SWITCH_CLIP )
+                    {
+#ifndef DO_NOT_REMOVE_TEMP_FILES
+                        /* It means we can delete the temporary file */
+                        /* First step: check the temp file is not used somewhere else later */
+
+                        M4OSA_UInt32 i;
+                        M4OSA_Int32 cmpResult = -1;
+
+                        for ( i = pVSSContext->uiCurrentClip;
+                            i < pVSSContext->uiClipNumber; i++ )
+                        {
+                            if( pVSSContext->pClipList[pVSSContext->uiCurrentClip
+                                - 1].filePathSize
+                                == pVSSContext->pClipList[i].filePathSize )
+                            {
+                                cmpResult = M4OSA_memcmp(pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].pFile, pVSSContext->pClipList[i].pFile,
+                                    pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].filePathSize);
+
+                                if( cmpResult == 0 )
+                                {
+                                    /* It means we found a corresponding file, we do not delete
+                                    this temporary file */
+                                    break;
+                                }
+                            }
+                        }
+
+                        if( cmpResult != 0 )
+                        {
+                            M4OSA_UInt32 ConvertedSize = 0;
+                            M4OSA_Char *toto;
+                            M4OSA_Char *pTmpStr;
+
+                            /* Convert result in UTF8 to check if we can delete it or not */
+                            if( xVSS_context->UTFConversionContext.pConvToUTF8Fct
+                                != M4OSA_NULL && xVSS_context->
+                                UTFConversionContext.
+                                pTempOutConversionBuffer != M4OSA_NULL )
+                            {
+                                M4xVSS_internalConvertToUTF8(xVSS_context,
+                                    (M4OSA_Void *)pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].pFile, (M4OSA_Void *)xVSS_context->
+                                    UTFConversionContext.
+                                    pTempOutConversionBuffer, &ConvertedSize);
+                                err = M4OSA_chrFindPattern(xVSS_context->
+                                    UTFConversionContext.
+                                    pTempOutConversionBuffer,
+                                    xVSS_context->pTempPath, &toto);
+                                pTmpStr =
+                                    xVSS_context->UTFConversionContext.
+                                    pTempOutConversionBuffer;
+                            }
+                            else
+                            {
+                                err = M4OSA_chrFindPattern(pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].pFile, xVSS_context->pTempPath, &toto);
+                                pTmpStr = pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].pFile;
+                            }
+
+                            if( err == M4NO_ERROR )
+                            {
+                                /* As temporary files can be imgXXX.3gp or vidXXX.3gp */
+                                pTmpStr +=
+                                    (M4OSA_chrLength(pTmpStr)
+                                    - 10); /* Because temporary file names are at most
+                                    10 bytes long */
+                                err = M4OSA_chrFindPattern(pTmpStr,
+                                    (M4OSA_Char *)"img", &toto);
+
+                                if( err != M4NO_ERROR )
+                                {
+                                    err = M4OSA_chrFindPattern(pTmpStr,
+                                        (M4OSA_Char *)"vid", &toto);
+                                }
+
+                                if( err
+                                    == M4NO_ERROR ) /* It means the file is a temporary file, we
+                                    can delete it */
+                                {
+                                    M4OSA_fileExtraDelete(pVSSContext->
+                                        pClipList[pVSSContext->uiCurrentClip
+                                        - 1].pFile);
+                                }
+                            }
+                        }
+
+#endif /* DO_NOT_REMOVE_TEMP_FILES*/
+                        /* */
+
+                        err = M4NO_ERROR;
+                    }
+
+                    if( err == M4VSS3GPP_WAR_EDITING_DONE )
+                    {
+                        xVSS_context->currentStep++;
+                        /* P4ME00003276: When a step is complete, increment currentStep and reset
+                        uiProgress, otherwise the reported progress would be wrong */
+                        uiProgress = 0;
+                        err = M4xVSS_internalCloseEditedFile(xVSS_context);
+                        /* Fix for  blrnxpsw#234---> */
+                        if( err != M4NO_ERROR )
+                        {
+                            if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                            {
+                                err = M4xVSSERR_NO_MORE_SPACE;
+                            }
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_internalCloseEditedFile returned an error: 0x%x",
+                                err);
+                            return err;
+                        }
+                        /*<---- Fix for  blrnxpsw#234 */
+                        if( xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack
+                            != M4OSA_NULL )
+                        {
+                            xVSS_context->editingStep =
+                                M4xVSS_kMicroStateAudioMixing;
+                            /* Open Audio mixing component */
+                            err = M4xVSS_internalGenerateAudioMixFile(xVSS_context);
+
+                            if( err != M4NO_ERROR )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4xVSS_internalGenerateAudioMixFile returned an error: 0x%x",
+                                    err);
+                                /* TODO ? : Translate error code of VSS to an xVSS error code */
+                                return err;
+                            }
+                            err = M4NO_ERROR;
+                            goto end_step;
+                        }
+                        else
+                        {
+
+                            err = M4VSS3GPP_WAR_SAVING_DONE;
+                            goto end_step;
+
+                        }
+                    }
+                }
+                else if( xVSS_context->editingStep
+                    == M4xVSS_kMicroStateAudioMixing ) /* Audio mixing: mix/replace audio track
+                    with given BGM */
+                {
+                    err = M4VSS3GPP_audioMixingStep(pAudioMixingCtxt, &uiProgress);
+
+                    if( ( err != M4NO_ERROR)
+                        && (err != M4VSS3GPP_WAR_END_OF_AUDIO_MIXING) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_audioMixingMain: M4VSS3GPP_audioMixingStep returned 0x%x\n",
+                            err);
+                        /* TODO ? : Translate error code of VSS to an xVSS error code */
+                        return err;
+                    }
+
+                    if( err == M4VSS3GPP_WAR_END_OF_AUDIO_MIXING )
+                    {
+                        xVSS_context->currentStep++;
+                        /* P4ME00003276: when a step is complete, increment currentStep and reset
+                        uiProgress, otherwise the reported progress would be wrong */
+                        uiProgress = 0;
+                        err = M4xVSS_internalCloseAudioMixedFile(xVSS_context);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_internalCloseAudioMixedFile returned an error: 0x%x",
+                                err);
+                            /* TODO ? : Translate error code of VSS to an xVSS error code */
+                            return err;
+                        }
+
+                        err = M4VSS3GPP_WAR_SAVING_DONE;
+                        goto end_step;
+
+                    }
+                }
+                else
+                {
+                    M4OSA_TRACE1_0("Bad state in step function !");
+                    return M4ERR_STATE;
+                }
+            }
+            break;
+
+        case M4xVSS_kStateAnalyzing:
+            {
+                if( xVSS_context->analyseStep
+                    == M4xVSS_kMicroStateAnalysePto3GPP ) /* Pto3GPP, analysing input parameters */
+                {
+                    if( xVSS_context->pPTo3GPPcurrentParams == M4OSA_NULL
+                        && xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+                    {
+                        xVSS_context->pPTo3GPPcurrentParams =
+                            xVSS_context->
+                            pPTo3GPPparamsList; /* Current Pto3GPP Parameter is the first element
+                            of the list */
+                    }
+                    else if( xVSS_context->pPTo3GPPcurrentParams != M4OSA_NULL
+                        && xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+                    {
+                        xVSS_context->pPTo3GPPcurrentParams =
+                            xVSS_context->pPTo3GPPcurrentParams->
+                            pNext; /* Current Pto3GPP Parameter is the next element of the list */
+
+                        if( xVSS_context->pPTo3GPPcurrentParams
+                            == M4OSA_NULL ) /* It means there is no next image to convert */
+                        {
+                            /* We step to MCS phase */
+                            xVSS_context->analyseStep =
+                                M4xVSS_kMicroStateAnalyzeMCS;
+                            err = M4NO_ERROR;
+                            goto end_step;
+                        }
+                    }
+                    else if( xVSS_context->pPTo3GPPparamsList == M4OSA_NULL )
+                    {
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateAnalyzeMCS; /* Change Analyzing micro state to
+                             MCS phase */
+                        err = M4NO_ERROR;
+                        goto end_step;
+                    }
+
+                    /* Check if this file has to be converted or not */
+                    /* If not, we just return M4NO_ERROR, and go to next file */
+                    if( xVSS_context->pPTo3GPPcurrentParams->isCreated
+                        == M4OSA_FALSE )
+                    {
+                        /* Opening Pto3GPP */
+                        err = M4xVSS_internalStartConvertPictureTo3gp(xVSS_context);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_Step: M4xVSS_internalStartConvertPictureTo3gp \
+                            returned error: 0x%x",
+                                err);
+                            /* TODO ? : Translate error code of VSS to an xVSS error code */
+                            return err;
+                        }
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateConvertPto3GPP;
+                    }
+                }
+                else if( xVSS_context->analyseStep
+                    == M4xVSS_kMicroStateConvertPto3GPP ) /* Pto3GPP, converting */
+                {
+                    err = M4PTO3GPP_Step(xVSS_context->pM4PTO3GPP_Ctxt);
+
+                    if( ( err != M4NO_ERROR) && (err
+                        != ((M4OSA_UInt32)M4PTO3GPP_WAR_END_OF_PROCESSING)) )
+                    {
+                        /* TO BE CHECKED NO LEAKS  !!!!! */
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_Step: M4PTO3GPP_Step returned 0x%x\n", err);
+                        /* TODO ? : Translate error code of VSS to an xVSS error code */
+                        return err;
+                    }
+                    else if( err
+                        == ((M4OSA_UInt32)M4PTO3GPP_WAR_END_OF_PROCESSING) )
+                    {
+                        xVSS_context->currentStep++;
+                        /* P4ME00003276: when a step is complete, increment currentStep and reset
+                         uiProgress, otherwise the reported progress would be wrong */
+                        uiProgress = 0;
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateAnalysePto3GPP; /* We go back to analyze parameters
+                            to see if there is a next file to convert */
+                        /* RC !!!!!!!! */
+                        xVSS_context->pPTo3GPPcurrentParams->isCreated =
+                            M4OSA_TRUE; /* To avoid reconverting it if another SendCommand is
+                            called */
+                        err = M4xVSS_internalStopConvertPictureTo3gp(xVSS_context);
+                        /*SS:blrnxpsw#  234 */
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_Step:\
+                                           M4xVSS_internalStopConvertPictureTo3gp returned 0x%x",
+                                            err);
+                            /* TODO ? : Translate error code of VSS to an xVSS error code */
+                            return err;
+                        }
+                    }
+                }
+                else if( xVSS_context->analyseStep
+                    ==
+                    M4xVSS_kMicroStateAnalyzeMCS ) /* MCS: analyzing input parameters */
+                {
+                    if( xVSS_context->pMCScurrentParams == M4OSA_NULL \
+                        && xVSS_context->pMCSparamsList != M4OSA_NULL )
+                    {
+                        xVSS_context->pMCScurrentParams = xVSS_context->
+                            pMCSparamsList; /* Current MCS Parameter is the first
+                                            element of the list */
+                    }
+                    else if( xVSS_context->pMCScurrentParams != M4OSA_NULL \
+                        && xVSS_context->pMCSparamsList != M4OSA_NULL )
+                    {
+                        xVSS_context->pMCScurrentParams =
+                            xVSS_context->pMCScurrentParams->
+                            pNext; /* Current MCS Parameter
+                                   is the next element of the list */
+
+                        if( xVSS_context->pMCScurrentParams == M4OSA_NULL )
+                            /* It means there is no next image to convert */
+                        {
+                            xVSS_context->analyseStep =
+                                M4xVSS_kMicroStateAnalysePto3GPP; /* Reinit Analyzing micro state */
+                            xVSS_context->m_state =
+                                M4xVSS_kStateOpened; /* Change xVSS state */
+                            err = M4VSS3GPP_WAR_ANALYZING_DONE;
+                            goto end_step; /* End of Analysis */
+                        }
+                    }
+                    else if( xVSS_context->pMCSparamsList == M4OSA_NULL )
+                    {
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateAnalysePto3GPP; /* Reinit Analyzing micro state */
+                        xVSS_context->m_state =
+                            M4xVSS_kStateOpened; /* Change xVSS state */
+                        err = M4VSS3GPP_WAR_ANALYZING_DONE;
+                        goto end_step;                        /* End of Analysis */
+                    }
+
+                    /* Check if this file has to be transcoded or not */
+                    /* If not, we just return M4NO_ERROR, and go to next file */
+                    if( xVSS_context->pMCScurrentParams->isCreated == M4OSA_FALSE )
+                    {
+                        /* Opening MCS */
+                        err = M4xVSS_internalStartTranscoding(xVSS_context);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_Step: M4xVSS_internalStartTranscoding returned\
+                                 error: 0x%x", err);
+                            /* TODO ? : Translate error code of MCS to an xVSS error code ? */
+                            return err;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_Step: M4xVSS_internalStartTranscoding returned\
+                                success; MCS context: 0x%x",
+                                 xVSS_context->pMCS_Ctxt);
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateTranscodeMCS;
+                    }
+                }
+                else if( xVSS_context->analyseStep
+                    == M4xVSS_kMicroStateTranscodeMCS )
+                    /* MCS: transcoding file */
+                {
+                    err = M4MCS_step(xVSS_context->pMCS_Ctxt, &uiProgress);
+                    /*SS:blrnxpsw#  234 */
+                    if( err == ((M4OSA_UInt32)M4MCS_ERR_NOMORE_SPACE) )
+                    {
+                        err = M4xVSSERR_NO_MORE_SPACE;
+                    }
+
+                    if( ( err != M4NO_ERROR)
+                        && (err != M4MCS_WAR_TRANSCODING_DONE) )
+                    {
+                        /* TO BE CHECKED NO LEAKS  !!!!! */
+                        M4OSA_TRACE1_1("M4xVSS_Step: M4MCS_step returned 0x%x\n",
+                            err);
+                        /* TODO ? : Translate error code of MCS to an xVSS error code ? */
+                        return err;
+                    }
+                    else if( err == M4MCS_WAR_TRANSCODING_DONE )
+                    {
+                        xVSS_context->currentStep++;
+                        /* P4ME00003276: when a step is complete, increment currentStep and reset
+                        uiProgress, otherwise the reported progress would be wrong */
+                        uiProgress = 0;
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateAnalyzeMCS; /* We go back to
+                                                          analyze parameters to see if there is
+                                                           a next file to transcode */
+                        /* RC !!!!!!!!!*/
+                        xVSS_context->pMCScurrentParams->isCreated =
+                            M4OSA_TRUE; /* To avoid
+                                        reconverting it if another SendCommand is called */
+                        err = M4xVSS_internalStopTranscoding(xVSS_context);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_Step:\
+                                           M4xVSS_internalStopTranscoding returned 0x%x", err);
+                            /* TODO ? : Translate error code of MCS to an xVSS error code ? */
+                            return err;
+                        }
+                    }
+                }
+                else
+                {
+                    M4OSA_TRACE1_0("Bad micro state in analyzing state");
+                    return M4ERR_STATE;
+                }
+            }
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "Bad state when calling M4xVSS_Step function! State is %d",
+                xVSS_context->m_state);
+            return M4ERR_STATE;
+    }
+
+end_step:
+    /* Compute progression */
+    if( xVSS_context->nbStepTotal != 0 )
+    {
+        *pProgress = (M4OSA_UInt8)(( ( xVSS_context->currentStep * 100) \
+            / (xVSS_context->nbStepTotal))
+            + (uiProgress / (xVSS_context->nbStepTotal)));
+
+        if( *pProgress > 100 )
+        {
+            *pProgress = 100;
+        }
+    }
+    else
+    {
+        *pProgress = 100;
+    }
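+    /* Worked example (illustrative): with nbStepTotal = 4, currentStep = 2 and a
+       micro-step progress uiProgress = 60, the reported value is
+       (2 * 100) / 4 + 60 / 4 = 50 + 15 = 65 (integer divisions). */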
+
+    return err;
+}
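+
+/* Illustrative caller-side sketch (kept out of the build with #if 0): how M4xVSS_Step
+   is typically driven. pContext and the prior M4xVSS_SendCommand / save or analysis
+   setup are assumed; error handling is omitted for brevity. */
+#if 0
+    {
+        M4OSA_UInt8 progress = 0;
+        M4OSA_ERR   result   = M4NO_ERROR;
+
+        /* Step until the state machine reports completion of the current operation */
+        do
+        {
+            result = M4xVSS_Step(pContext, &progress);
+        } while( result == M4NO_ERROR );
+
+        /* result is then M4VSS3GPP_WAR_SAVING_DONE (saving state) or
+           M4VSS3GPP_WAR_ANALYZING_DONE (analyzing state); any other value is an error */
+    }
+#endif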
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_CloseCommand(M4OSA_Context pContext)
+ * @brief        This function deletes the current editing profile, deallocates
+ *                resources and changes the xVSS internal state.
+ * @note        After this function, the user can call a new M4xVSS_SendCommand
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_CloseCommand( M4OSA_Context pContext )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check state */
+    /* Depending on the state, different things have to be done */
+    switch( xVSS_context->m_state )
+    {
+        case M4xVSS_kStateOpened:
+            /* Nothing to do here */
+            err = M4xVSS_internalFreeSaving(xVSS_context);
+            break;
+
+        case M4xVSS_kStateSaving:
+            {
+                if( xVSS_context->editingStep == M4xVSS_kMicroStateEditing )
+                {
+                    err = M4xVSS_internalCloseEditedFile(xVSS_context);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        /* Fix for blrnxpsw#234---->*/
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_CloseCommand:\
+                                       M4xVSS_internalCloseEditedFile returned an error: 0x%x",
+                                        err);
+                        /* we retain the error here and return it at the end of the
+                        function to avoid a memory leak */
+                        //return err;
+                    }
+                }
+                else if( xVSS_context->editingStep
+                    == M4xVSS_kMicroStateAudioMixing )
+                {
+                    err = M4xVSS_internalCloseAudioMixedFile(xVSS_context);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        /* Fix for blrnxpsw#234---->*/
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_CloseCommand: \
+                                M4xVSS_internalCloseAudioMixedFile returned an error: 0x%x", err);
+                        /* we retain the error here and return it at the end of
+                        the function to avoid a memory leak */
+                        //return err;
+                        /* <----Fix for blrnxpsw#234*/
+                    }
+                }
+                err = M4xVSS_internalFreeSaving(xVSS_context);
+                /* We free this pointer only if a BGM track is present, because in that case
+                this pointer belongs to us */
+                if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL ) {
+                    /*if(M4OSA_NULL != xVSS_context->pSettings->pOutputFile)
+                    {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings->pOutputFile);
+                    xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+                    }*/
+                    /*if(M4OSA_NULL != xVSS_context->pSettings->pTemporaryFile)
+                    {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings->pTemporaryFile);
+                    xVSS_context->pSettings->pTemporaryFile = M4OSA_NULL;
+                    }*/
+                }
+            }
+            break;
+
+        case M4xVSS_kStateSaved:
+            break;
+
+        case M4xVSS_kStateAnalyzing:
+            {
+                if( xVSS_context->analyseStep == M4xVSS_kMicroStateConvertPto3GPP )
+                {
+                    /* Free Pto3GPP module */
+                    err = M4xVSS_internalStopConvertPictureTo3gp(xVSS_context);
+                    /* Fix for blrnxpsw#234---->*/
+                    if( err != M4NO_ERROR )
+                    {
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_Step: \
+                                       M4xVSS_internalStopConvertPictureTo3gp returned 0x%x", err);
+                        /* we retain the error here and return it at the end of the
+                        function to avoid a memory leak */
+                        //return err;
+                    }
+                    /* <-----Fix for blrnxpsw#234>*/
+                }
+                else if( xVSS_context->analyseStep
+                    == M4xVSS_kMicroStateTranscodeMCS )
+                {
+                    /* Free MCS module */
+                    err = M4MCS_abort(xVSS_context->pMCS_Ctxt);
+                    /* Fix for blrnxpsw#234---->*/
+                    if( err != M4NO_ERROR )
+                    {
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_Step: M4MCS_abort returned 0x%x",
+                            err);
+                        /* we retain the error here and return it at the end of the
+                        function to avoid a memory leak */
+                        //return err;
+                    }
+                    /* <---Fix for blrnxpsw#234*/
+                }
+            }
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "Bad state when calling M4xVSS_CloseCommand function! State is %d",
+                xVSS_context->m_state);
+            return M4ERR_STATE;
+    }
+
+    /* Free Send command */
+    M4xVSS_freeCommand(xVSS_context);
+
+    xVSS_context->m_state = M4xVSS_kStateInitialized; /* Change xVSS state */
+
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_CleanUp(M4OSA_Context pContext)
+ * @brief        This function deletes all xVSS resources
+ * @note        This function must be called after M4xVSS_CloseCommand.
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_CleanUp( M4OSA_Context pContext )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_TRACE3_0("M4xVSS_CleanUp:entering");
+
+    /* Check state */
+    if( xVSS_context->m_state != M4xVSS_kStateInitialized )
+    {
+        M4OSA_TRACE1_1(\
+            "Bad state when calling M4xVSS_CleanUp function! State is %d",\
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    /**
+    * UTF conversion: free temporary buffer*/
+    if( xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+        != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->
+            UTFConversionContext.pTempOutConversionBuffer);
+        xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
+            M4OSA_NULL;
+    }
+
+    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pTempPath);
+    xVSS_context->pTempPath = M4OSA_NULL;
+
+    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings);
+    xVSS_context->pSettings = M4OSA_NULL;
+
+    M4OSA_free((M4OSA_MemAddr32)xVSS_context);
+    xVSS_context = M4OSA_NULL;
+    M4OSA_TRACE3_0("M4xVSS_CleanUp:leaving ");
+
+    return M4NO_ERROR;
+}
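+
+/* Illustrative teardown sketch (kept out of the build with #if 0): per the notes above,
+   M4xVSS_CloseCommand() must be called before M4xVSS_CleanUp(). pContext and the error
+   handling are assumed/omitted for brevity. */
+#if 0
+    err = M4xVSS_CloseCommand(pContext);   /* free the current editing profile */
+    err = M4xVSS_CleanUp(pContext);        /* free the xVSS context itself */
+    pContext = M4OSA_NULL;                 /* the context memory has been released */
+#endif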
+
+M4OSA_ERR M4xVSS_RegisterExternalVideoDecoder( M4OSA_Context pContext,
+                                              M4VD_VideoType decoderType,
+                                              M4VD_Interface *pDecoderInterface,
+                                              M4OSA_Void *pUserData )
+{
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    /* Here the situation is a bit special: we need to record the registrations that are made,
+    so that we can replay them for each clip we create. */
+
+    if( decoderType >= M4VD_kVideoType_NB )
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    xVSS_context->registeredExternalDecs[decoderType].pDecoderInterface =
+        pDecoderInterface;
+    xVSS_context->registeredExternalDecs[decoderType].pUserData = pUserData;
+    xVSS_context->registeredExternalDecs[decoderType].registered = M4OSA_TRUE;
+
+    /* Notice it overwrites any HW decoder that may already have been registered for this type;
+    this is normal.*/
+
+    return M4NO_ERROR;
+
+#else
+
+    return M4ERR_NOT_IMPLEMENTED;
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+}
+
+M4OSA_ERR M4xVSS_RegisterExternalVideoEncoder( M4OSA_Context pContext,
+                                              M4VE_EncoderType encoderType,
+                                              M4VE_Interface *pEncoderInterface,
+                                              M4OSA_Void *pUserData )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    /* Here the situation is a bit special: we need to record the registrations that are made,
+    so that we can replay them for each clip we create. */
+
+    if( encoderType >= M4VE_kEncoderType_NB )
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    xVSS_context->registeredExternalEncs[encoderType].pEncoderInterface =
+        pEncoderInterface;
+    xVSS_context->registeredExternalEncs[encoderType].pUserData = pUserData;
+    xVSS_context->registeredExternalEncs[encoderType].registered = M4OSA_TRUE;
+
+    /* Notice it overwrites any HW encoder that may already have been registered for this type;
+    this is normal.*/
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_GetVersion(M4_VersionInfo *pVersion)
+ * @brief        This function gets the version of Video Studio 2.1
+ *
+ * @param    pVersion            (IN) Pointer on the version info struct
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_GetVersion( M4_VersionInfo *pVersion )
+{
+    /* Just used for a grep in code */
+    /* CHANGE_VERSION_HERE */
+    static const M4OSA_Char cVersion[26] = "NXPSW_VideoStudio21_1_3_0";
+
+    if( M4OSA_NULL == pVersion )
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    pVersion->m_major = M4_xVSS_MAJOR;
+    pVersion->m_minor = M4_xVSS_MINOR;
+    pVersion->m_revision = M4_xVSS_REVISION;
+    pVersion->m_structSize = sizeof(M4_VersionInfo);
+
+    return M4NO_ERROR;
+}
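+
+/* Illustrative usage sketch (kept out of the build with #if 0): querying the xVSS
+   version; the local variable name is arbitrary. */
+#if 0
+    {
+        M4_VersionInfo versionInfo;
+
+        if( M4NO_ERROR == M4xVSS_GetVersion(&versionInfo) )
+        {
+            /* versionInfo.m_major, m_minor and m_revision now hold the component version */
+        }
+    }
+#endif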
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_CreateClipSettings()
+ * @brief    Allows filling a clip settings structure with default values
+ *
+ * @note    WARNING: pClipSettings->Effects[ ] will be allocated in this function.
+ *                   pClipSettings->pFile      will be allocated in this function.
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param   pFile               (IN) Clip file name
+ * @param   filePathSize        (IN) Size of the clip path (needed for the UTF16 conversion)
+ * @param    nbEffects           (IN) Nb of effect settings to allocate
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_CreateClipSettings( M4VSS3GPP_ClipSettings *pClipSettings,
+                                    M4OSA_Void *pFile, M4OSA_UInt32 filePathSize,
+                                     M4OSA_UInt8 nbEffects )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE3_1("M4xVSS_CreateClipSettings called with pClipSettings=0x%p",
+        pClipSettings);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+        "M4xVSS_CreateClipSettings: pClipSettings is NULL");
+
+    /* Create inherited VSS3GPP stuff */
+    /*err = M4VSS3GPP_editCreateClipSettings(pClipSettings, pFile,nbEffects);*/
+    /*FB: add clip path size (needed for UTF 16 conversion)*/
+    err = M4VSS3GPP_editCreateClipSettings(pClipSettings, pFile, filePathSize,
+        nbEffects);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1("M4xVSS_CreateClipSettings :\
+                       ERROR in M4VSS3GPP_editCreateClipSettings = 0x%x", err);
+        return err;
+    }
+
+    /* Set the clip settings to default */
+    pClipSettings->xVSS.uiBeginCutPercent = 0;
+    pClipSettings->xVSS.uiEndCutPercent = 0;
+    pClipSettings->xVSS.uiDuration = 0;
+    pClipSettings->xVSS.isPanZoom = M4OSA_FALSE;
+    pClipSettings->xVSS.PanZoomTopleftXa = 0;
+    pClipSettings->xVSS.PanZoomTopleftYa = 0;
+    pClipSettings->xVSS.PanZoomTopleftXb = 0;
+    pClipSettings->xVSS.PanZoomTopleftYb = 0;
+    pClipSettings->xVSS.PanZoomXa = 0;
+    pClipSettings->xVSS.PanZoomXb = 0;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4xVSS_CreateClipSettings(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
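+
+/* Illustrative usage sketch (kept out of the build with #if 0): filling a clip settings
+   structure with defaults and releasing it afterwards. The clip path, whether the path
+   size includes the terminating NULL, and the absence of effects are assumptions made
+   for the example only. */
+#if 0
+    {
+        M4VSS3GPP_ClipSettings clipSettings;
+        M4OSA_Char *pClipFile = (M4OSA_Char *)"/sdcard/clip001.3gp";   /* hypothetical path */
+
+        err = M4xVSS_CreateClipSettings(&clipSettings, pClipFile,
+            M4OSA_chrLength(pClipFile) + 1, 0 /* no effect settings */);
+
+        if( M4NO_ERROR == err )
+        {
+            /* ... fill the xVSS specific fields, use the clip in an edit description ... */
+            M4xVSS_FreeClipSettings(&clipSettings);
+        }
+    }
+#endif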
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_DuplicateClipSettings()
+ * @brief    Duplicates a clip settings structure, performing allocations if required
+ *
+ * @param    pClipSettingsDest    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param    pClipSettingsOrig    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param   bCopyEffects        (IN) Flag to know if we have to duplicate effects
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_DuplicateClipSettings( M4VSS3GPP_ClipSettings
+                                       *pClipSettingsDest,
+                                       M4VSS3GPP_ClipSettings *pClipSettingsOrig,
+                                        M4OSA_Bool bCopyEffects )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE3_2(
+        "M4xVSS_DuplicateClipSettings called with dest=0x%p src=0x%p",
+        pClipSettingsDest, pClipSettingsOrig);
+
+    /* Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsDest), M4ERR_PARAMETER,
+        "M4xVSS_DuplicateClipSettings: pClipSettingsDest is NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsOrig), M4ERR_PARAMETER,
+        "M4xVSS_DuplicateClipSettings: pClipSettingsOrig is NULL");
+
+    /* Call inherited VSS3GPP duplication */
+    err = M4VSS3GPP_editDuplicateClipSettings(pClipSettingsDest,
+        pClipSettingsOrig, bCopyEffects);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1("M4xVSS_DuplicateClipSettings :\
+                       ERROR in M4VSS3GPP_editDuplicateClipSettings = 0x%x", err);
+        return err;
+    }
+
+    /* Return with no error */
+    M4OSA_TRACE3_0("M4xVSS_DuplicateClipSettings(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
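+
+/* Illustrative sketch (kept out of the build with #if 0): duplicating existing clip
+   settings, including their effects; clipSettingsSrc is assumed to be a settings
+   structure previously filled by M4xVSS_CreateClipSettings. */
+#if 0
+    {
+        M4VSS3GPP_ClipSettings clipSettingsCopy;
+
+        err = M4xVSS_DuplicateClipSettings(&clipSettingsCopy, &clipSettingsSrc,
+            M4OSA_TRUE /* duplicate the effects as well */);
+    }
+#endif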
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_FreeClipSettings()
+ * @brief    Free the pointers allocated in the ClipSetting structure (pFile, Effects, ...).
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_FreeClipSettings( M4VSS3GPP_ClipSettings *pClipSettings )
+{
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+        "M4xVSS_FreeClipSettings: pClipSettings is NULL");
+
+    /* Free inherited VSS3GPP stuff */
+    M4VSS3GPP_editFreeClipSettings(pClipSettings);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_getMCSContext(M4OSA_Context pContext, M4OSA_Context* mcsContext)
+ * @brief        This function returns the MCS context within the xVSS internal context
+ * @note        This function must be called only after VSS state has moved to analyzing state or
+ * beyond
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @param    mcsContext        (OUT) Pointer to pointer of mcs context to return
+ * @return    M4NO_ERROR:        No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_getMCSContext( M4OSA_Context pContext,
+                               M4OSA_Context *mcsContext )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4xVSS_getMCSContext: pContext is NULL");
+
+    if( xVSS_context->m_state == M4xVSS_kStateInitialized )
+    {
+        M4OSA_TRACE1_1("M4xVSS_getMCSContext: Bad state! State is %d",\
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    *mcsContext = xVSS_context->pMCS_Ctxt;
+
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_getVSS3GPPContext(M4OSA_Context pContext,
+ *                                                   M4OSA_Context* mcsContext)
+ * @brief        This function returns the VSS3GPP context within the xVSS internal context
+ * @note        This function must be called only after VSS state has moved to Generating preview
+ *              or beyond
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @param    vss3gppContext        (OUT) Pointer to pointer of vss3gpp context to return
+ * @return    M4NO_ERROR:        No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_getVSS3GPPContext( M4OSA_Context pContext,
+                                   M4OSA_Context *vss3gppContext )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4xVSS_getVSS3GPPContext: pContext is NULL");
+
+    if( xVSS_context->m_state < M4xVSS_kStateSaving )
+    {
+        M4OSA_TRACE1_1("M4xVSS_getVSS3GPPContext: Bad state! State is %d",\
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    *vss3gppContext = xVSS_context->pCurrentEditContext;
+
+    return err;
+}
diff --git a/libvideoeditor/vss/src/M4xVSS_internal.c b/libvideoeditor/vss/src/M4xVSS_internal.c
new file mode 100755
index 0000000..62107aa
--- /dev/null
+++ b/libvideoeditor/vss/src/M4xVSS_internal.c
@@ -0,0 +1,5047 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4xVSS_internal.c
+ * @brief    Internal functions of extended Video Studio Service (Video Studio 2.1)
+ * @note
+ ******************************************************************************
+ */
+#include "M4OSA_Debug.h"
+#include "M4OSA_CharStar.h"
+#include "M4OSA_FileExtra.h"
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+#include "M4xVSS_API.h"
+#include "M4xVSS_Internal.h"
+
+/*for rgb16 color effect*/
+#include "M4VIFI_Defines.h"
+#include "M4VIFI_Clip.h"
+
+/**
+ * component includes */
+#include "M4VFL_transition.h"            /**< video effects */
+
+/* Internal header file of VSS is included because of MMS use case */
+#include "M4VSS3GPP_InternalTypes.h"
+
+/*Exif header files to add image rendering support (cropping, black borders)*/
+#include "M4EXIFC_CommonAPI.h"
+// StageFright encoders require the resolution to be a multiple of 16
+#include "M4ENCODER_common.h"
+
+#define TRANSPARENT_COLOR 0x7E0
+
+/* Prototype of M4VIFI_xVSS_RGB565toYUV420 function (avoid green effect of transparency color) */
+M4VIFI_UInt8 M4VIFI_xVSS_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+                                        M4VIFI_ImagePlane *pPlaneOut);
+
+
+/*special MCS function used only in VideoArtist and VideoStudio to open the media in the normal
+ mode. That way the media duration is accurate*/
+extern M4OSA_ERR M4MCS_open_normalMode(M4MCS_Context pContext, M4OSA_Void* pFileIn,
+                                         M4VIDEOEDITING_FileType InputFileType,
+                                         M4OSA_Void* pFileOut, M4OSA_Void* pTempFile);
+
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalStartTranscoding(M4OSA_Context pContext)
+ * @brief        This function initializes MCS (3GP transcoder) with the given
+ *                parameters
+ * @note        The transcoding parameters are given by the internal xVSS context.
+ *                This context contains a pointer on the current element of the
+ *                chained list of MCS parameters.
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalStartTranscoding(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+    M4MCS_Context mcs_context;
+    M4MCS_OutputParams Params;
+    M4MCS_EncodingParams Rates;
+    M4OSA_UInt32 i;
+
+    err = M4MCS_init(&mcs_context, xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_init: 0x%x", err);
+        return err;
+    }
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+    /* replay recorded external decoder registrations on the MCS */
+    for (i=0; i<M4VD_kVideoType_NB; i++)
+    {
+        if (xVSS_context->registeredExternalDecs[i].registered)
+        {
+            err = M4MCS_registerExternalVideoDecoder(mcs_context, i,
+                    xVSS_context->registeredExternalDecs[i].pDecoderInterface,
+                    xVSS_context->registeredExternalDecs[i].pUserData);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalStartTranscoding:\
+                     M4MCS_registerExternalVideoDecoder() returns 0x%x!", err);
+                M4MCS_abort(mcs_context);
+                return err;
+            }
+        }
+    }
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    /* replay recorded external encoder registrations on the MCS */
+    for (i=0; i<M4VE_kEncoderType_NB; i++)
+    {
+        if (xVSS_context->registeredExternalEncs[i].registered)
+        {
+            err = M4MCS_registerExternalVideoEncoder(mcs_context, i,
+                    xVSS_context->registeredExternalEncs[i].pEncoderInterface,
+                    xVSS_context->registeredExternalEncs[i].pUserData);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalStartTranscoding:\
+                     M4MCS_registerExternalVideoEncoder() returns 0x%x!", err);
+                M4MCS_abort(mcs_context);
+                return err;
+            }
+        }
+    }
+
+    err = M4MCS_open(mcs_context, xVSS_context->pMCScurrentParams->pFileIn,
+         xVSS_context->pMCScurrentParams->InputFileType,
+             xVSS_context->pMCScurrentParams->pFileOut,
+             xVSS_context->pMCScurrentParams->pFileTemp);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_open: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    /**
+     * Fill MCS parameters with the parameters contained in the current element of the
+       MCS parameters chained list */
+    Params.OutputFileType = xVSS_context->pMCScurrentParams->OutputFileType;
+    Params.OutputVideoFormat = xVSS_context->pMCScurrentParams->OutputVideoFormat;
+    Params.OutputVideoFrameSize = xVSS_context->pMCScurrentParams->OutputVideoFrameSize;
+    Params.OutputVideoFrameRate = xVSS_context->pMCScurrentParams->OutputVideoFrameRate;
+    Params.OutputAudioFormat = xVSS_context->pMCScurrentParams->OutputAudioFormat;
+    Params.OutputAudioSamplingFrequency =
+         xVSS_context->pMCScurrentParams->OutputAudioSamplingFrequency;
+    Params.bAudioMono = xVSS_context->pMCScurrentParams->bAudioMono;
+    Params.pOutputPCMfile = M4OSA_NULL;
+    /*FB 2008/10/20: add media rendering parameter to keep aspect ratio*/
+    switch(xVSS_context->pMCScurrentParams->MediaRendering)
+    {
+    case M4xVSS_kResizing:
+        Params.MediaRendering = M4MCS_kResizing;
+        break;
+    case M4xVSS_kCropping:
+        Params.MediaRendering = M4MCS_kCropping;
+        break;
+    case M4xVSS_kBlackBorders:
+        Params.MediaRendering = M4MCS_kBlackBorders;
+        break;
+    default:
+        break;
+    }
+    /**/
+#ifdef TIMESCALE_BUG
+    Params.OutputVideoTimescale = xVSS_context->pMCScurrentParams->OutputVideoTimescale;
+#endif
+    // new params after integrating MCS 2.0
+    // Set the number of audio effects; 0 for now.
+    Params.nbEffects = 0;
+
+    // Set the audio effect; null for now.
+    Params.pEffects = NULL;
+
+    // Keep the EXIF data of the input file.
+    Params.bDiscardExif = M4OSA_FALSE;
+
+    // Do not adjust the picture orientation.
+    Params.bAdjustOrientation = M4OSA_FALSE;
+    // new params after integrating MCS 2.0
+
+    /**
+     * Set output parameters */
+    err = M4MCS_setOutputParams(mcs_context, &Params);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_setOutputParams: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    Rates.OutputVideoBitrate = xVSS_context->pMCScurrentParams->OutputVideoBitrate;
+    Rates.OutputAudioBitrate = xVSS_context->pMCScurrentParams->OutputAudioBitrate;
+    Rates.BeginCutTime = 0;
+    Rates.EndCutTime = 0;
+    Rates.OutputFileSize = 0;
+
+    /*FB: transcoding per parts*/
+    Rates.BeginCutTime = xVSS_context->pMCScurrentParams->BeginCutTime;
+    Rates.EndCutTime = xVSS_context->pMCScurrentParams->EndCutTime;
+    Rates.OutputVideoTimescale = xVSS_context->pMCScurrentParams->OutputVideoTimescale;
+
+    err = M4MCS_setEncodingParams(mcs_context, &Rates);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_setEncodingParams: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    err = M4MCS_checkParamsAndStart(mcs_context);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_checkParamsAndStart: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    /**
+     * Save MCS context to be able to call MCS step function in M4xVSS_step function */
+    xVSS_context->pMCS_Ctxt = mcs_context;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalStopTranscoding(M4OSA_Context pContext)
+ * @brief        This function cleans up MCS (3GP transcoder)
+ * @note
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalStopTranscoding(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+
+    err = M4MCS_close(xVSS_context->pMCS_Ctxt);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalStopTranscoding: Error in M4MCS_close: 0x%x", err);
+        M4MCS_abort(xVSS_context->pMCS_Ctxt);
+        return err;
+    }
+
+    /**
+     * Free this MCS instance */
+    err = M4MCS_cleanUp(xVSS_context->pMCS_Ctxt);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalStopTranscoding: Error in M4MCS_cleanUp: 0x%x", err);
+        return err;
+    }
+
+    xVSS_context->pMCS_Ctxt = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
+ *                                             M4OSA_FileReadPointer* pFileReadPtr,
+ *                                                M4VIFI_ImagePlane* pImagePlanes,
+ *                                                 M4OSA_UInt32 width,
+ *                                                M4OSA_UInt32 height);
+ * @brief    Converts and resizes an ARGB8888 image to YUV420
+ * @note
+ * @param    pFileIn            (IN) The Image input file
+ * @param    pFileReadPtr    (IN) Pointer on filesystem functions
+ * @param    pImagePlanes    (IN/OUT) Pointer on YUV420 output planes allocated by the user;
+ *                            the ARGB8888 image will be converted and resized to the output
+ *                            YUV420 plane size
+ * @param    width           (IN) width of the ARGB8888 image
+ * @param    height          (IN) height of the ARGB8888 image
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_ALLOC: memory error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4xVSS_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
+                                                          M4OSA_FileReadPointer* pFileReadPtr,
+                                                          M4VIFI_ImagePlane* pImagePlanes,
+                                                          M4OSA_UInt32 width,M4OSA_UInt32 height)
+{
+    M4OSA_Context pARGBIn;
+    M4VIFI_ImagePlane rgbPlane1 ,rgbPlane2;
+    M4OSA_UInt32 frameSize_argb=(width * height * 4);
+    M4OSA_UInt32 frameSize = (width * height * 3); //Size of RGB888 data.
+    M4OSA_UInt32 i = 0,j= 0;
+    M4OSA_ERR err=M4NO_ERROR;
+
+
+    M4OSA_UInt8 *pTmpData = (M4OSA_UInt8*) M4OSA_malloc(frameSize_argb,
+         M4VS, (M4OSA_Char*)"Image argb data");
+    M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Entering :");
+    if(pTmpData == M4OSA_NULL) {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :\
+            Failed to allocate memory for Image clip");
+        return M4ERR_ALLOC;
+    }
+
+    M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :width and height %d %d",
+        width ,height);
+    /* Open the ARGB8888 input file */
+    err = pFileReadPtr->openRead(&pARGBIn, pFileIn, M4OSA_kFileRead);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :\
+            Can't open input ARGB8888 file %s, error: 0x%x\n",pFileIn, err);
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        goto cleanup;
+    }
+
+    err = pFileReadPtr->readData(pARGBIn,(M4OSA_MemAddr8)pTmpData, &frameSize_argb);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Can't read ARGB8888\
+             file %s, error: 0x%x\n",pFileIn, err);
+        pFileReadPtr->closeRead(pARGBIn);
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        goto cleanup;
+    }
+
+    err = pFileReadPtr->closeRead(pARGBIn);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Can't close ARGB8888 \
+             file %s, error: 0x%x\n",pFileIn, err);
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        goto cleanup;
+    }
+
+    rgbPlane1.pac_data = (M4VIFI_UInt8*)M4OSA_malloc(frameSize, M4VS,
+         (M4OSA_Char*)"Image clip RGB888 data");
+    if(rgbPlane1.pac_data == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 \
+            Failed to allocate memory for Image clip");
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        return M4ERR_ALLOC;
+    }
+
+        rgbPlane1.u_height = height;
+        rgbPlane1.u_width = width;
+        rgbPlane1.u_stride = width*3;
+        rgbPlane1.u_topleft = 0;
+
+
+    /** Remove the alpha channel */
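+    /* Each ARGB8888 pixel is 4 bytes; the first byte of each pixel (i % 4 == 0) is the
+       alpha value and is dropped, the remaining R, G and B bytes are packed into the
+       RGB888 plane. */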
+    for (i=0, j = 0; i < frameSize_argb; i++) {
+        if ((i % 4) == 0) continue;
+        rgbPlane1.pac_data[j] = pTmpData[i];
+        j++;
+    }
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+
+    /* To Check if resizing is required with color conversion */
+    if(width != pImagePlanes->u_width || height != pImagePlanes->u_height)
+    {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Resizing :");
+        frameSize =  ( pImagePlanes->u_width * pImagePlanes->u_height * 3);
+        rgbPlane2.pac_data = (M4VIFI_UInt8*)M4OSA_malloc(frameSize, M4VS,
+             (M4OSA_Char*)"Image clip RGB888 data");
+        if(rgbPlane2.pac_data == M4OSA_NULL)
+        {
+            M4OSA_TRACE1_0("Failed to allocate memory for Image clip");
+            /* pTmpData has already been freed above; free the RGB888 buffer instead */
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane1.pac_data);
+            return M4ERR_ALLOC;
+        }
+            rgbPlane2.u_height =  pImagePlanes->u_height;
+            rgbPlane2.u_width = pImagePlanes->u_width;
+            rgbPlane2.u_stride = pImagePlanes->u_width*3;
+            rgbPlane2.u_topleft = 0;
+
+        /* Resizing RGB888 to RGB888 */
+        err = M4VIFI_ResizeBilinearRGB888toRGB888(M4OSA_NULL, &rgbPlane1, &rgbPlane2);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("error when converting from Resize RGB888 to RGB888: 0x%x\n", err);
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane2.pac_data);
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane1.pac_data);
+            return err;
+        }
+        /*Converting Resized RGB888 to YUV420 */
+        err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane2, pImagePlanes);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("error when converting from RGB888 to YUV: 0x%x\n", err);
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane2.pac_data);
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane1.pac_data);
+            return err;
+        }
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane2.pac_data);
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane1.pac_data);
+
+            M4OSA_TRACE1_0("RGB to YUV done");
+
+
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 NO  Resizing :");
+        err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane1, pImagePlanes);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("error when converting from RGB to YUV: 0x%x\n", err);
+        }
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane1.pac_data);
+
+            M4OSA_TRACE1_0("RGB to YUV done");
+    }
+cleanup:
+    M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 leaving :");
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_internalConvertARGB8888toYUV420(M4OSA_Void* pFileIn,
+ *                                             M4OSA_FileReadPointer* pFileReadPtr,
+ *                                                M4VIFI_ImagePlane** pImagePlanes,
+ *                                                 M4OSA_UInt32 width,
+ *                                                M4OSA_UInt32 height);
+ * @brief    Converts an ARGB8888 image to YUV420
+ * @note
+ * @param    pFileIn            (IN) The Image input file
+ * @param    pFileReadPtr    (IN) Pointer on filesystem functions
+ * @param    pImagePlanes    (OUT) Address at which the pointer to the YUV420 planes
+ *                            allocated by this function is returned; the ARGB8888 image
+ *                            is converted into these planes without resizing
+ * @param    width           (IN) width of the ARGB8888 image
+ * @param    height          (IN) height of the ARGB8888 image
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_ALLOC: memory error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4xVSS_internalConvertARGB8888toYUV420(M4OSA_Void* pFileIn,
+                                                 M4OSA_FileReadPointer* pFileReadPtr,
+                                                 M4VIFI_ImagePlane** pImagePlanes,
+                                                 M4OSA_UInt32 width,M4OSA_UInt32 height)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4VIFI_ImagePlane *yuvPlane = M4OSA_NULL;
+
+    yuvPlane = (M4VIFI_ImagePlane*)M4OSA_malloc(3*sizeof(M4VIFI_ImagePlane),
+                M4VS, (M4OSA_Char*)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
+    if(yuvPlane == M4OSA_NULL) {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB8888toYUV420 :\
+            Failed to allocate memory for Image clip");
+        return M4ERR_ALLOC;
+    }
+    yuvPlane[0].u_height = height;
+    yuvPlane[0].u_width = width;
+    yuvPlane[0].u_stride = width;
+    yuvPlane[0].u_topleft = 0;
+    yuvPlane[0].pac_data = (M4VIFI_UInt8*)M4OSA_malloc(yuvPlane[0].u_height \
+        * yuvPlane[0].u_width * 1.5, M4VS, (M4OSA_Char*)"imageClip YUV data");
+
+    yuvPlane[1].u_height = yuvPlane[0].u_height >>1;
+    yuvPlane[1].u_width = yuvPlane[0].u_width >> 1;
+    yuvPlane[1].u_stride = yuvPlane[1].u_width;
+    yuvPlane[1].u_topleft = 0;
+    yuvPlane[1].pac_data = (M4VIFI_UInt8*)(yuvPlane[0].pac_data + yuvPlane[0].u_height \
+        * yuvPlane[0].u_width);
+
+    yuvPlane[2].u_height = yuvPlane[0].u_height >>1;
+    yuvPlane[2].u_width = yuvPlane[0].u_width >> 1;
+    yuvPlane[2].u_stride = yuvPlane[2].u_width;
+    yuvPlane[2].u_topleft = 0;
+    yuvPlane[2].pac_data = (M4VIFI_UInt8*)(yuvPlane[1].pac_data + yuvPlane[1].u_height \
+        * yuvPlane[1].u_width);
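+
+    /* Memory layout note: the three planes share the single buffer allocated for
+       yuvPlane[0]: the Y plane (width * height bytes) is followed by the U plane and
+       then the V plane ((width/2) * (height/2) bytes each), i.e. width * height * 1.5
+       bytes in total. */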
+    err = M4xVSS_internalConvertAndResizeARGB8888toYUV420( pFileIn,pFileReadPtr,
+                                                          yuvPlane, width, height);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalConvertAndResizeARGB8888toYUV420 return error: 0x%x\n", err);
+        M4OSA_free((M4OSA_MemAddr32)yuvPlane);
+        return err;
+    }
+
+        *pImagePlanes = yuvPlane;
+
+    M4OSA_TRACE1_0("M4xVSS_internalConvertARGB8888toYUV420 :Leaving");
+    return err;
+
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_PictureCallbackFct (M4OSA_Void* pPictureCtxt,
+ *                                        M4VIFI_ImagePlane* pImagePlanes,
+ *                                        M4OSA_Double* pPictureDuration);
+ * @brief    It feeds the PTO3GPP with YUV420 pictures.
+ * @note    This function is given to the PTO3GPP in the M4PTO3GPP_Params structure
+ * @param    pContext    (IN) The integrator own context
+ * @param    pImagePlanes(IN/OUT) Pointer to an array of three valid image planes
+ * @param    pPictureDuration(OUT) Duration of the returned picture
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4PTO3GPP_WAR_LAST_PICTURE: The returned image is the last one
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_PictureCallbackFct(M4OSA_Void* pPictureCtxt, M4VIFI_ImagePlane* pImagePlanes,
+                                     M4OSA_Double* pPictureDuration)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt8    last_frame_flag = 0;
+    M4xVSS_PictureCallbackCtxt* pC = (M4xVSS_PictureCallbackCtxt*) (pPictureCtxt);
+
+    /*Used for pan&zoom*/
+    M4OSA_UInt8 tempPanzoomXa = 0;
+    M4OSA_UInt8 tempPanzoomXb = 0;
+    M4AIR_Params Params;
+    /**/
+
+    /*Used for cropping and black borders*/
+    M4OSA_Context    pPictureContext = M4OSA_NULL;
+    M4OSA_FilePosition    pictureSize = 0 ;
+    M4OSA_UInt8*    pictureBuffer = M4OSA_NULL;
+    //M4EXIFC_Context pExifContext = M4OSA_NULL;
+    M4EXIFC_BasicTags pBasicTags;
+    M4VIFI_ImagePlane pImagePlanes1 = pImagePlanes[0];
+    M4VIFI_ImagePlane pImagePlanes2 = pImagePlanes[1];
+    M4VIFI_ImagePlane pImagePlanes3 = pImagePlanes[2];
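+    /* Keep copies of the output plane descriptors: the black-border code below modifies
+       u_topleft/u_width/u_height in place, and the originals are restored after M4AIR_get()
+       before returning the planes to the PTO3GPP */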
+    /**/
+
+    /**
+     * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pPictureCtxt),        M4ERR_PARAMETER,
+         "M4xVSS_PictureCallbackFct: pPictureCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pImagePlanes),        M4ERR_PARAMETER,
+         "M4xVSS_PictureCallbackFct: pImagePlanes is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pPictureDuration), M4ERR_PARAMETER,
+         "M4xVSS_PictureCallbackFct: pPictureDuration is M4OSA_NULL");
+    M4OSA_TRACE1_0("M4xVSS_PictureCallbackFct :Entering");
+    /*PR P4ME00003181 In case the image number is 0, pan&zoom cannot be used*/
+    if(M4OSA_TRUE == pC->m_pPto3GPPparams->isPanZoom && pC->m_NbImage == 0)
+    {
+        pC->m_pPto3GPPparams->isPanZoom = M4OSA_FALSE;
+    }
+
+    /*If no cropping/black borders or pan&zoom, just decode and resize the picture*/
+    if(pC->m_mediaRendering == M4xVSS_kResizing && M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
+    {
+        /**
+         * Convert and resize input ARGB8888 file to YUV420 */
+        /*To support ARGB8888 : */
+        M4OSA_TRACE1_2("M4xVSS_PictureCallbackFct 1: width and height %d %d",
+            pC->m_pPto3GPPparams->width,pC->m_pPto3GPPparams->height);
+        err = M4xVSS_internalConvertAndResizeARGB8888toYUV420(pC->m_FileIn,
+             pC->m_pFileReadPtr, pImagePlanes,pC->m_pPto3GPPparams->width,
+                pC->m_pPto3GPPparams->height);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when decoding JPEG: 0x%x\n", err);
+            return err;
+        }
+    }
+    /*In case of cropping, black borders or pan&zoom, call the EXIF reader and the AIR*/
+    else
+    {
+        /**
+         * Computes ratios */
+        if(pC->m_pDecodedPlane == M4OSA_NULL)
+        {
+            /**
+             * Convert input ARGB8888 file to YUV420 */
+             M4OSA_TRACE1_2("M4xVSS_PictureCallbackFct 2: width and height %d %d",
+                pC->m_pPto3GPPparams->width,pC->m_pPto3GPPparams->height);
+            err = M4xVSS_internalConvertARGB8888toYUV420(pC->m_FileIn, pC->m_pFileReadPtr,
+                &(pC->m_pDecodedPlane),pC->m_pPto3GPPparams->width,pC->m_pPto3GPPparams->height);
+            if(err != M4NO_ERROR)
+            {
+                M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when decoding JPEG: 0x%x\n", err);
+                if(pC->m_pDecodedPlane != M4OSA_NULL)
+                {
+                    /* YUV420 planar is returned but allocation is made only once
+                        (contiguous planes in memory) */
+                    if(pC->m_pDecodedPlane->pac_data != M4OSA_NULL)
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane->pac_data);
+                    }
+                    M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane);
+                    pC->m_pDecodedPlane = M4OSA_NULL;
+                }
+                return err;
+            }
+        }
+
+        /*Initialize AIR Params*/
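+        /* AIR crops the m_inputSize window located at m_inputCoord in the decoded plane and
+           rescales it to m_outputSize; stripe mode is disabled, so each output frame is
+           produced by a single M4AIR_get() call */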
+        Params.m_inputCoord.m_x = 0;
+        Params.m_inputCoord.m_y = 0;
+        Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height;
+        Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width;
+        Params.m_outputSize.m_width = pImagePlanes->u_width;
+        Params.m_outputSize.m_height = pImagePlanes->u_height;
+        Params.m_bOutputStripe = M4OSA_FALSE;
+        Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
+
+        /*Initialize Exif params structure*/
+        pBasicTags.orientation = M4COMMON_kOrientationUnknown;
+
+        /**
+        Pan&zoom params*/
+        if(M4OSA_TRUE == pC->m_pPto3GPPparams->isPanZoom)
+        {
+            /*Save ratio values, they can be reused if the new ratios are 0*/
+            tempPanzoomXa = (M4OSA_UInt8)pC->m_pPto3GPPparams->PanZoomXa;
+            tempPanzoomXb = (M4OSA_UInt8)pC->m_pPto3GPPparams->PanZoomXb;
+#if 0
+            /**
+             * Check size of output JPEG is compatible with pan & zoom parameters
+               First, check final (b) parameters */
+            if(pC->m_pPto3GPPparams->PanZoomXb + pC->m_pPto3GPPparams->PanZoomTopleftXb > 100 )
+            {
+                M4OSA_TRACE1_1("WARNING : Bad final Pan & Zoom settings !!!\
+                    New final Zoom ratio is: %d", (100 - pC->m_pPto3GPPparams->PanZoomTopleftXb));
+                /* We do not change the topleft parameter as it may correspond to a precise area
+                of the picture -> only the zoom ratio is modified */
+                pC->m_pPto3GPPparams->PanZoomXb = 100 - pC->m_pPto3GPPparams->PanZoomTopleftXb;
+            }
+
+            if(pC->m_pPto3GPPparams->PanZoomXb + pC->m_pPto3GPPparams->PanZoomTopleftYb > 100 )
+            {
+                M4OSA_TRACE1_1("WARNING : Bad final Pan & Zoom settings \
+                    !!! New final Zoom ratio is: %d",
+                    (100 - pC->m_pPto3GPPparams->PanZoomTopleftYb));
+                /* We do not change the topleft parameter as it may correspond to a
+                precise area of the picture -> only the zoom ratio is modified */
+                pC->m_pPto3GPPparams->PanZoomXb = 100 - pC->m_pPto3GPPparams->PanZoomTopleftYb;
+            }
+
+            /**
+             * Then, check initial (a) parameters */
+            if(pC->m_pPto3GPPparams->PanZoomXa + pC->m_pPto3GPPparams->PanZoomTopleftXa > 100 )
+            {
+                M4OSA_TRACE1_1("WARNING : Bad initial Pan & Zoom settings !!! \
+                    New initial Zoom ratio is: %d",(100 - pC->m_pPto3GPPparams->PanZoomTopleftXa));
+                /* We do not change the topleft parameter as it may correspond to a precise
+                area of the picture-> only the zoom ratio is modified */
+                pC->m_pPto3GPPparams->PanZoomXa = 100 - pC->m_pPto3GPPparams->PanZoomTopleftXa;
+            }
+
+            if(pC->m_pPto3GPPparams->PanZoomXa + pC->m_pPto3GPPparams->PanZoomTopleftYa > 100 )
+            {
+                M4OSA_TRACE1_1("WARNING : Bad initial Pan & Zoom settings !!! New initial\
+                     Zoom ratio is: %d", (100 - pC->m_pPto3GPPparams->PanZoomTopleftYa));
+                /* We do not change the topleft parameter as it may correspond to a precise
+                area of the picture-> only the zoom ratio is modified */
+                pC->m_pPto3GPPparams->PanZoomXa = 100 - pC->m_pPto3GPPparams->PanZoomTopleftYa;
+            }
+#endif
+            /*Check that the ratio is not 0*/
+            /*Check (a) parameters*/
+            if(pC->m_pPto3GPPparams->PanZoomXa == 0)
+            {
+                M4OSA_UInt8 maxRatio = 0;
+                if(pC->m_pPto3GPPparams->PanZoomTopleftXa >=
+                     pC->m_pPto3GPPparams->PanZoomTopleftYa)
+                {
+                    /*The ratio is 0, that means the area of the picture defined with (a)
+                    parameters is bigger than the image size*/
+                    if(pC->m_pPto3GPPparams->PanZoomTopleftXa + tempPanzoomXa > 100)
+                    {
+                        /*The oversize is maxRatio*/
+                        maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftXa + tempPanzoomXa - 100;
+                    }
+                }
+                else
+                {
+                    /*The ratio is 0, that means the area of the picture defined with (a)
+                     parameters is bigger than the image size*/
+                    if(pC->m_pPto3GPPparams->PanZoomTopleftYa + tempPanzoomXa > 100)
+                    {
+                        /*The oversize is maxRatio*/
+                        maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftYa + tempPanzoomXa - 100;
+                    }
+                }
+                /*Modify the (a) parameters:*/
+                if(pC->m_pPto3GPPparams->PanZoomTopleftXa >= maxRatio)
+                {
+                    /*The (a) topleft parameters can be moved to keep the same area size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftXa -= maxRatio;
+                }
+                else
+                {
+                    /*Move the (a) topleft parameter to 0 but the ratio will be also further
+                    modified to match the image size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftXa = 0;
+                }
+                if(pC->m_pPto3GPPparams->PanZoomTopleftYa >= maxRatio)
+                {
+                    /*The (a) topleft parameters can be moved to keep the same area size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftYa -= maxRatio;
+                }
+                else
+                {
+                    /*Move the (a) topleft parameter to 0 but the ratio will be also further
+                     modified to match the image size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftYa = 0;
+                }
+                /*The new ratio is the original one*/
+                pC->m_pPto3GPPparams->PanZoomXa = tempPanzoomXa;
+                if(pC->m_pPto3GPPparams->PanZoomXa + pC->m_pPto3GPPparams->PanZoomTopleftXa > 100)
+                {
+                    /*Change the ratio if the area of the picture defined with (a) parameters is
+                    bigger than the image size*/
+                    pC->m_pPto3GPPparams->PanZoomXa = 100 - pC->m_pPto3GPPparams->PanZoomTopleftXa;
+                }
+                if(pC->m_pPto3GPPparams->PanZoomXa + pC->m_pPto3GPPparams->PanZoomTopleftYa > 100)
+                {
+                    /*Change the ratio if the area of the picture defined with (a) parameters is
+                    bigger than the image size*/
+                    pC->m_pPto3GPPparams->PanZoomXa = 100 - pC->m_pPto3GPPparams->PanZoomTopleftYa;
+                }
+            }
+            /*Check (b) parameters*/
+            if(pC->m_pPto3GPPparams->PanZoomXb == 0)
+            {
+                M4OSA_UInt8 maxRatio = 0;
+                if(pC->m_pPto3GPPparams->PanZoomTopleftXb >=
+                     pC->m_pPto3GPPparams->PanZoomTopleftYb)
+                {
+                    /*The ratio is 0, that means the area of the picture defined with (b)
+                     parameters is bigger than the image size*/
+                    if(pC->m_pPto3GPPparams->PanZoomTopleftXb + tempPanzoomXb > 100)
+                    {
+                        /*The oversize is maxRatio*/
+                        maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftXb + tempPanzoomXb - 100;
+                    }
+                }
+                else
+                {
+                    /*The ratio is 0, that means the area of the picture defined with (b)
+                     parameters is bigger than the image size*/
+                    if(pC->m_pPto3GPPparams->PanZoomTopleftYb + tempPanzoomXb > 100)
+                    {
+                        /*The oversize is maxRatio*/
+                        maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftYb + tempPanzoomXb - 100;
+                    }
+                }
+                /*Modify the (b) parameters:*/
+                if(pC->m_pPto3GPPparams->PanZoomTopleftXb >= maxRatio)
+                {
+                    /*The (b) topleft parameters can be moved to keep the same area size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftXb -= maxRatio;
+                }
+                else
+                {
+                    /*Move the (b) topleft parameter to 0 but the ratio will be also further
+                     modified to match the image size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftXb = 0;
+                }
+                if(pC->m_pPto3GPPparams->PanZoomTopleftYb >= maxRatio)
+                {
+                    /*The (b) topleft parameters can be moved to keep the same area size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftYb -= maxRatio;
+                }
+                else
+                {
+                    /*Move the (b) topleft parameter to 0 but the ratio will be also further
+                    modified to match the image size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftYb = 0;
+                }
+                /*The new ratio is the original one*/
+                pC->m_pPto3GPPparams->PanZoomXb = tempPanzoomXb;
+                if(pC->m_pPto3GPPparams->PanZoomXb + pC->m_pPto3GPPparams->PanZoomTopleftXb > 100)
+                {
+                    /*Change the ratio if the area of the picture defined with (b) parameters is
+                    bigger than the image size*/
+                    pC->m_pPto3GPPparams->PanZoomXb = 100 - pC->m_pPto3GPPparams->PanZoomTopleftXb;
+                }
+                if(pC->m_pPto3GPPparams->PanZoomXb + pC->m_pPto3GPPparams->PanZoomTopleftYb > 100)
+                {
+                    /*Change the ratio if the area of the picture defined with (b) parameters is
+                    bigger than the image size*/
+                    pC->m_pPto3GPPparams->PanZoomXb = 100 - pC->m_pPto3GPPparams->PanZoomTopleftYb;
+                }
+            }
+
+            /**
+             * Computes AIR parameters */
+/*        Params.m_inputCoord.m_x = (M4OSA_UInt32)(pC->m_pDecodedPlane->u_width *
+            (pC->m_pPto3GPPparams->PanZoomTopleftXa +
+            (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomTopleftXb \
+                - pC->m_pPto3GPPparams->PanZoomTopleftXa) *
+            pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
+        Params.m_inputCoord.m_y = (M4OSA_UInt32)(pC->m_pDecodedPlane->u_height *
+            (pC->m_pPto3GPPparams->PanZoomTopleftYa +
+            (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomTopleftYb\
+                 - pC->m_pPto3GPPparams->PanZoomTopleftYa) *
+            pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
+
+        Params.m_inputSize.m_width = (M4OSA_UInt32)(pC->m_pDecodedPlane->u_width *
+            (pC->m_pPto3GPPparams->PanZoomXa +
+            (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomXb - pC->m_pPto3GPPparams->PanZoomXa) *
+            pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
+
+        Params.m_inputSize.m_height =  (M4OSA_UInt32)(pC->m_pDecodedPlane->u_height *
+            (pC->m_pPto3GPPparams->PanZoomXa +
+            (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomXb - pC->m_pPto3GPPparams->PanZoomXa) *
+            pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
+ */
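+            /* Linear interpolation of the pan & zoom window between the start (a) and the
+               end (b) settings: positions and sizes are percentages of the decoded picture,
+               advanced by m_ImageCounter / m_NbImage, with +0.5 for rounding */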
+            Params.m_inputCoord.m_x = (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_width *
+                (pC->m_pPto3GPPparams->PanZoomTopleftXa +
+                (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomTopleftXb\
+                     - pC->m_pPto3GPPparams->PanZoomTopleftXa) *
+                pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100) + 0.5);
+            Params.m_inputCoord.m_y =
+                 (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_height *
+                (pC->m_pPto3GPPparams->PanZoomTopleftYa +
+                (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomTopleftYb\
+                     - pC->m_pPto3GPPparams->PanZoomTopleftYa) *
+                pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100) + 0.5);
+
+            Params.m_inputSize.m_width =
+                 (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_width *
+                (pC->m_pPto3GPPparams->PanZoomXa +
+                (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomXb\
+                     - pC->m_pPto3GPPparams->PanZoomXa) *
+                pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100) + 0.5);
+
+            Params.m_inputSize.m_height =
+                 (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_height *
+                (pC->m_pPto3GPPparams->PanZoomXa +
+                (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomXb \
+                    - pC->m_pPto3GPPparams->PanZoomXa) *
+                pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100) + 0.5);
+
+
+            if((Params.m_inputSize.m_width + Params.m_inputCoord.m_x)\
+                 > pC->m_pDecodedPlane->u_width)
+            {
+                Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width \
+                    - Params.m_inputCoord.m_x;
+            }
+
+            if((Params.m_inputSize.m_height + Params.m_inputCoord.m_y)\
+                 > pC->m_pDecodedPlane->u_height)
+            {
+                Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height\
+                     - Params.m_inputCoord.m_y;
+            }
+
+
+
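+            /* Force even input dimensions (clear the least significant bit), as required by
+               YUV420 chroma subsampling */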
+            Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
+            Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
+        }
+
+
+
+    /**
+        Picture rendering: Black borders*/
+
+        if(pC->m_mediaRendering == M4xVSS_kBlackBorders)
+        {
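+            /* Pre-fill the whole output planes with the border colour so that the areas the
+               resized picture does not cover remain black */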
+            M4OSA_memset((M4OSA_MemAddr8)pImagePlanes[0].pac_data,
+                (pImagePlanes[0].u_height*pImagePlanes[0].u_stride),Y_PLANE_BORDER_VALUE);
+            M4OSA_memset((M4OSA_MemAddr8)pImagePlanes[1].pac_data,
+                (pImagePlanes[1].u_height*pImagePlanes[1].u_stride),U_PLANE_BORDER_VALUE);
+            M4OSA_memset((M4OSA_MemAddr8)pImagePlanes[2].pac_data,
+                (pImagePlanes[2].u_height*pImagePlanes[2].u_stride),V_PLANE_BORDER_VALUE);
+
+            /**
+            First without pan&zoom*/
+            if(M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
+            {
+                switch(pBasicTags.orientation)
+                {
+                default:
+                case M4COMMON_kOrientationUnknown:
+                    Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
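+                    /* fall through: unknown orientation is handled as top-left */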
+                case M4COMMON_kOrientationTopLeft:
+                case M4COMMON_kOrientationTopRight:
+                case M4COMMON_kOrientationBottomRight:
+                case M4COMMON_kOrientationBottomLeft:
+                    if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_height * pImagePlanes->u_width)\
+                         /pC->m_pDecodedPlane->u_width) <= pImagePlanes->u_height)
+                         //Params.m_inputSize.m_height < Params.m_inputSize.m_width)
+                    {
+                        /*it is height so black borders will be on the top and on the bottom side*/
+                        Params.m_outputSize.m_width = pImagePlanes->u_width;
+                        Params.m_outputSize.m_height =
+                             (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height \
+                                * pImagePlanes->u_width) /pC->m_pDecodedPlane->u_width);
+                        /*number of lines at the top*/
+                        pImagePlanes[0].u_topleft =
+                            (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
+                                -Params.m_outputSize.m_height)>>1))*pImagePlanes[0].u_stride;
+                        pImagePlanes[0].u_height = Params.m_outputSize.m_height;
+                        pImagePlanes[1].u_topleft =
+                             (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
+                                -(Params.m_outputSize.m_height>>1)))>>1)*pImagePlanes[1].u_stride;
+                        pImagePlanes[1].u_height = Params.m_outputSize.m_height>>1;
+                        pImagePlanes[2].u_topleft =
+                             (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
+                                -(Params.m_outputSize.m_height>>1)))>>1)*pImagePlanes[2].u_stride;
+                        pImagePlanes[2].u_height = Params.m_outputSize.m_height>>1;
+                    }
+                    else
+                    {
+                        /*it is width so black borders will be on the left and right side*/
+                        Params.m_outputSize.m_height = pImagePlanes->u_height;
+                        Params.m_outputSize.m_width =
+                             (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width \
+                                * pImagePlanes->u_height) /pC->m_pDecodedPlane->u_height);
+
+                        pImagePlanes[0].u_topleft =
+                            (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
+                                -Params.m_outputSize.m_width)>>1));
+                        pImagePlanes[0].u_width = Params.m_outputSize.m_width;
+                        pImagePlanes[1].u_topleft =
+                             (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
+                                -(Params.m_outputSize.m_width>>1)))>>1);
+                        pImagePlanes[1].u_width = Params.m_outputSize.m_width>>1;
+                        pImagePlanes[2].u_topleft =
+                             (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
+                                -(Params.m_outputSize.m_width>>1)))>>1);
+                        pImagePlanes[2].u_width = Params.m_outputSize.m_width>>1;
+                    }
+                    break;
+                case M4COMMON_kOrientationLeftTop:
+                case M4COMMON_kOrientationLeftBottom:
+                case M4COMMON_kOrientationRightTop:
+                case M4COMMON_kOrientationRightBottom:
+                        if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_width * pImagePlanes->u_width)\
+                             /pC->m_pDecodedPlane->u_height) < pImagePlanes->u_height)
+                             //Params.m_inputSize.m_height > Params.m_inputSize.m_width)
+                        {
+                            /*it is height so black borders will be on the top and on
+                             the bottom side*/
+                            Params.m_outputSize.m_height = pImagePlanes->u_width;
+                            Params.m_outputSize.m_width =
+                                 (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width \
+                                    * pImagePlanes->u_width) /pC->m_pDecodedPlane->u_height);
+                            /*number of lines at the top*/
+                            pImagePlanes[0].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
+                                    -Params.m_outputSize.m_width))>>1)*pImagePlanes[0].u_stride)+1;
+                            pImagePlanes[0].u_height = Params.m_outputSize.m_width;
+                            pImagePlanes[1].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
+                                    -(Params.m_outputSize.m_width>>1)))>>1)\
+                                        *pImagePlanes[1].u_stride)+1;
+                            pImagePlanes[1].u_height = Params.m_outputSize.m_width>>1;
+                            pImagePlanes[2].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
+                                    -(Params.m_outputSize.m_width>>1)))>>1)\
+                                        *pImagePlanes[2].u_stride)+1;
+                            pImagePlanes[2].u_height = Params.m_outputSize.m_width>>1;
+                        }
+                        else
+                        {
+                            /*it is width so black borders will be on the left and right side*/
+                            Params.m_outputSize.m_width = pImagePlanes->u_height;
+                            Params.m_outputSize.m_height =
+                                 (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height\
+                                     * pImagePlanes->u_height) /pC->m_pDecodedPlane->u_width);
+
+                            pImagePlanes[0].u_topleft =
+                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
+                                    -Params.m_outputSize.m_height))>>1))+1;
+                            pImagePlanes[0].u_width = Params.m_outputSize.m_height;
+                            pImagePlanes[1].u_topleft =
+                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
+                                    -(Params.m_outputSize.m_height>>1)))>>1))+1;
+                            pImagePlanes[1].u_width = Params.m_outputSize.m_height>>1;
+                            pImagePlanes[2].u_topleft =
+                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
+                                    -(Params.m_outputSize.m_height>>1)))>>1))+1;
+                            pImagePlanes[2].u_width = Params.m_outputSize.m_height>>1;
+                        }
+                    break;
+                }
+            }
+
+            /**
+            Secondly with pan&zoom*/
+            else
+            {
+                switch(pBasicTags.orientation)
+                {
+                default:
+                case M4COMMON_kOrientationUnknown:
+                    Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
+                case M4COMMON_kOrientationTopLeft:
+                case M4COMMON_kOrientationTopRight:
+                case M4COMMON_kOrientationBottomRight:
+                case M4COMMON_kOrientationBottomLeft:
+                    /*NO ROTATION*/
+                    if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_height * pImagePlanes->u_width)\
+                         /pC->m_pDecodedPlane->u_width) <= pImagePlanes->u_height)
+                            //Params.m_inputSize.m_height < Params.m_inputSize.m_width)
+                    {
+                        /*Black borders will be on the top and bottom of the output video*/
+                        /*Maximum output height if the input image aspect ratio is kept and if
+                        the output width is the screen width*/
+                        M4OSA_UInt32 tempOutputSizeHeight =
+                            (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height\
+                                 * pImagePlanes->u_width) /pC->m_pDecodedPlane->u_width);
+                        M4OSA_UInt32 tempInputSizeHeightMax = 0;
+                        M4OSA_UInt32 tempFinalInputHeight = 0;
+                        /*The output width is the screen width*/
+                        Params.m_outputSize.m_width = pImagePlanes->u_width;
+                        tempOutputSizeHeight = (tempOutputSizeHeight>>1)<<1;
+
+                        /*Maximum input height according to the maximum output height
+                        (proportional to the maximum output height)*/
+                        tempInputSizeHeightMax = (pImagePlanes->u_height\
+                            *Params.m_inputSize.m_height)/tempOutputSizeHeight;
+                        tempInputSizeHeightMax = (tempInputSizeHeightMax>>1)<<1;
+
+                        /*Check if the maximum possible input height is contained into the
+                        input image height*/
+                        if(tempInputSizeHeightMax <= pC->m_pDecodedPlane->u_height)
+                        {
+                            /*The maximum possible input height is contained in the input
+                            image height,
+                            that means no black borders, the input pan zoom area will be extended
+                            so that the input AIR height will be the maximum possible*/
+                            if(((tempInputSizeHeightMax - Params.m_inputSize.m_height)>>1)\
+                                 <= Params.m_inputCoord.m_y
+                                && ((tempInputSizeHeightMax - Params.m_inputSize.m_height)>>1)\
+                                     <= pC->m_pDecodedPlane->u_height -(Params.m_inputCoord.m_y\
+                                         + Params.m_inputSize.m_height))
+                            {
+                                /*The input pan zoom area can be extended symmetrically on the
+                                top and bottom side*/
+                                Params.m_inputCoord.m_y -= ((tempInputSizeHeightMax \
+                                    - Params.m_inputSize.m_height)>>1);
+                            }
+                            else if(Params.m_inputCoord.m_y < pC->m_pDecodedPlane->u_height\
+                                -(Params.m_inputCoord.m_y + Params.m_inputSize.m_height))
+                            {
+                                /*There is not enough place above the input pan zoom area to
+                                extend it symmetrically,
+                                so extend it to the maximum on the top*/
+                                Params.m_inputCoord.m_y = 0;
+                            }
+                            else
+                            {
+                                /*There is not enough place below the input pan zoom area to
+                                extend it symmetrically,
+                                so extend it to the maximum on the bottom*/
+                                Params.m_inputCoord.m_y = pC->m_pDecodedPlane->u_height \
+                                    - tempInputSizeHeightMax;
+                            }
+                            /*The input height of the AIR is the maximum possible height*/
+                            Params.m_inputSize.m_height = tempInputSizeHeightMax;
+                        }
+                        else
+                        {
+                            /*The maximum possible input height is greater than the input
+                            image height,
+                            that means black borders are necessary to keep aspect ratio
+                            The input height of the AIR is all the input image height*/
+                            Params.m_outputSize.m_height =
+                                (tempOutputSizeHeight*pC->m_pDecodedPlane->u_height)\
+                                    /Params.m_inputSize.m_height;
+                            Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
+                            Params.m_inputCoord.m_y = 0;
+                            Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height;
+                            pImagePlanes[0].u_topleft =
+                                 (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
+                                    -Params.m_outputSize.m_height)>>1))*pImagePlanes[0].u_stride;
+                            pImagePlanes[0].u_height = Params.m_outputSize.m_height;
+                            pImagePlanes[1].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
+                                    -(Params.m_outputSize.m_height>>1)))>>1)\
+                                        *pImagePlanes[1].u_stride);
+                            pImagePlanes[1].u_height = Params.m_outputSize.m_height>>1;
+                            pImagePlanes[2].u_topleft =
+                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
+                                    -(Params.m_outputSize.m_height>>1)))>>1)\
+                                        *pImagePlanes[2].u_stride);
+                            pImagePlanes[2].u_height = Params.m_outputSize.m_height>>1;
+                        }
+                    }
+                    else
+                    {
+                        /*Black borders will be on the left and right side of the output video*/
+                        /*Maximum output width if the input image aspect ratio is kept and if the
+                         output height is the screen height*/
+                        M4OSA_UInt32 tempOutputSizeWidth =
+                             (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width \
+                                * pImagePlanes->u_height) /pC->m_pDecodedPlane->u_height);
+                        M4OSA_UInt32 tempInputSizeWidthMax = 0;
+                        M4OSA_UInt32 tempFinalInputWidth = 0;
+                        /*The output height is the screen height*/
+                        Params.m_outputSize.m_height = pImagePlanes->u_height;
+                        tempOutputSizeWidth = (tempOutputSizeWidth>>1)<<1;
+
+                        /*Maximum input width according to the maximum output width
+                        (proportional to the maximum output width)*/
+                        tempInputSizeWidthMax =
+                             (pImagePlanes->u_width*Params.m_inputSize.m_width)\
+                                /tempOutputSizeWidth;
+                        tempInputSizeWidthMax = (tempInputSizeWidthMax>>1)<<1;
+
+                        /*Check if the maximum possible input width is contained into the input
+                         image width*/
+                        if(tempInputSizeWidthMax <= pC->m_pDecodedPlane->u_width)
+                        {
+                            /*The maximum possible input width is contained in the input
+                            image width,
+                            that means no black borders, the input pan zoom area will be extended
+                            so that the input AIR width will be the maximum possible*/
+                            if(((tempInputSizeWidthMax - Params.m_inputSize.m_width)>>1) \
+                                <= Params.m_inputCoord.m_x
+                                && ((tempInputSizeWidthMax - Params.m_inputSize.m_width)>>1)\
+                                     <= pC->m_pDecodedPlane->u_width -(Params.m_inputCoord.m_x \
+                                        + Params.m_inputSize.m_width))
+                            {
+                                /*The input pan zoom area can be extended symmetrically on the
+                                     right and left side*/
+                                Params.m_inputCoord.m_x -= ((tempInputSizeWidthMax\
+                                     - Params.m_inputSize.m_width)>>1);
+                            }
+                            else if(Params.m_inputCoord.m_x < pC->m_pDecodedPlane->u_width\
+                                -(Params.m_inputCoord.m_x + Params.m_inputSize.m_width))
+                            {
+                                /*There is not enough room on the left of the input pan zoom
+                                    area to extend it symmetrically,
+                                so extend it to the maximum on the left*/
+                                Params.m_inputCoord.m_x = 0;
+                            }
+                            else
+                            {
+                                /*There is not enough room on the right of the input pan zoom
+                                    area to extend it symmetrically,
+                                so extend it to the maximum on the right*/
+                                Params.m_inputCoord.m_x = pC->m_pDecodedPlane->u_width \
+                                    - tempInputSizeWidthMax;
+                            }
+                            /*The input width of the AIR is the maximum possible width*/
+                            Params.m_inputSize.m_width = tempInputSizeWidthMax;
+                        }
+                        else
+                        {
+                            /*The maximum possible input width is greater than the input
+                            image width,
+                            that means black borders are necessary to keep aspect ratio
+                            The input width of the AIR is all the input image width*/
+                            Params.m_outputSize.m_width =\
+                                 (tempOutputSizeWidth*pC->m_pDecodedPlane->u_width)\
+                                    /Params.m_inputSize.m_width;
+                            Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
+                            Params.m_inputCoord.m_x = 0;
+                            Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width;
+                            pImagePlanes[0].u_topleft =
+                                 (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
+                                    -Params.m_outputSize.m_width)>>1));
+                            pImagePlanes[0].u_width = Params.m_outputSize.m_width;
+                            pImagePlanes[1].u_topleft =
+                                 (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
+                                    -(Params.m_outputSize.m_width>>1)))>>1);
+                            pImagePlanes[1].u_width = Params.m_outputSize.m_width>>1;
+                            pImagePlanes[2].u_topleft =
+                                 (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
+                                    -(Params.m_outputSize.m_width>>1)))>>1);
+                            pImagePlanes[2].u_width = Params.m_outputSize.m_width>>1;
+                        }
+                    }
+                    break;
+                case M4COMMON_kOrientationLeftTop:
+                case M4COMMON_kOrientationLeftBottom:
+                case M4COMMON_kOrientationRightTop:
+                case M4COMMON_kOrientationRightBottom:
+                    /*ROTATION*/
+                    if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_width * pImagePlanes->u_width)\
+                         /pC->m_pDecodedPlane->u_height) < pImagePlanes->u_height)
+                         //Params.m_inputSize.m_height > Params.m_inputSize.m_width)
+                    {
+                        /*Black borders will be on the left and right side of the output video*/
+                        /*Maximum output height if the input image aspect ratio is kept and if
+                        the output height is the screen width*/
+                        M4OSA_UInt32 tempOutputSizeHeight =
+                        (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width * pImagePlanes->u_width)\
+                             /pC->m_pDecodedPlane->u_height);
+                        M4OSA_UInt32 tempInputSizeHeightMax = 0;
+                        M4OSA_UInt32 tempFinalInputHeight = 0;
+                        /*The output width is the screen height*/
+                        Params.m_outputSize.m_height = pImagePlanes->u_width;
+                        Params.m_outputSize.m_width= pImagePlanes->u_height;
+                        tempOutputSizeHeight = (tempOutputSizeHeight>>1)<<1;
+
+                        /*Maximum input height according to the maximum output height
+                             (proportional to the maximum output height)*/
+                        tempInputSizeHeightMax =
+                            (pImagePlanes->u_height*Params.m_inputSize.m_width)\
+                                /tempOutputSizeHeight;
+                        tempInputSizeHeightMax = (tempInputSizeHeightMax>>1)<<1;
+
+                        /*Check if the maximum possible input height is contained into the
+                             input image width (rotation included)*/
+                        if(tempInputSizeHeightMax <= pC->m_pDecodedPlane->u_width)
+                        {
+                            /*The maximum possible input height is contained in the input
+                            image width (rotation included),
+                            that means no black borders, the input pan zoom area will be extended
+                            so that the input AIR width will be the maximum possible*/
+                            if(((tempInputSizeHeightMax - Params.m_inputSize.m_width)>>1) \
+                                <= Params.m_inputCoord.m_x
+                                && ((tempInputSizeHeightMax - Params.m_inputSize.m_width)>>1)\
+                                     <= pC->m_pDecodedPlane->u_width -(Params.m_inputCoord.m_x \
+                                        + Params.m_inputSize.m_width))
+                            {
+                                /*The input pan zoom area can be extended symmetrically on the
+                                 right and left side*/
+                                Params.m_inputCoord.m_x -= ((tempInputSizeHeightMax \
+                                    - Params.m_inputSize.m_width)>>1);
+                            }
+                            else if(Params.m_inputCoord.m_x < pC->m_pDecodedPlane->u_width\
+                                -(Params.m_inputCoord.m_x + Params.m_inputSize.m_width))
+                            {
+                                /*There is not enough place on the left of the input pan
+                                zoom area to extend it symmetrically,
+                                so extend it to the maximum on the left*/
+                                Params.m_inputCoord.m_x = 0;
+                            }
+                            else
+                            {
+                                /*There is not enough place on the right of the input pan zoom
+                                 area to extend it symmetrically,
+                                so extend it to the maximum on the right*/
+                                Params.m_inputCoord.m_x =
+                                     pC->m_pDecodedPlane->u_width - tempInputSizeHeightMax;
+                            }
+                            /*The input width of the AIR is the maximum possible width*/
+                            Params.m_inputSize.m_width = tempInputSizeHeightMax;
+                        }
+                        else
+                        {
+                            /*The maximum possible input height is greater than the input
+                            image width (rotation included),
+                            that means black borders are necessary to keep aspect ratio
+                            The input width of the AIR is all the input image width*/
+                            Params.m_outputSize.m_width =
+                            (tempOutputSizeHeight*pC->m_pDecodedPlane->u_width)\
+                                /Params.m_inputSize.m_width;
+                            Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
+                            Params.m_inputCoord.m_x = 0;
+                            Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width;
+                            pImagePlanes[0].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
+                                    -Params.m_outputSize.m_width))>>1)*pImagePlanes[0].u_stride)+1;
+                            pImagePlanes[0].u_height = Params.m_outputSize.m_width;
+                            pImagePlanes[1].u_topleft =
+                            ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
+                                -(Params.m_outputSize.m_width>>1)))>>1)\
+                                    *pImagePlanes[1].u_stride)+1;
+                            pImagePlanes[1].u_height = Params.m_outputSize.m_width>>1;
+                            pImagePlanes[2].u_topleft =
+                            ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
+                                -(Params.m_outputSize.m_width>>1)))>>1)\
+                                    *pImagePlanes[2].u_stride)+1;
+                            pImagePlanes[2].u_height = Params.m_outputSize.m_width>>1;
+                        }
+                    }
+                    else
+                    {
+                        /*Black borders will be on the top and bottom of the output video*/
+                        /*Maximum output width if the input image aspect ratio is kept and if
+                         the output width is the screen height*/
+                        M4OSA_UInt32 tempOutputSizeWidth =
+                        (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height * pImagePlanes->u_height)\
+                             /pC->m_pDecodedPlane->u_width);
+                        M4OSA_UInt32 tempInputSizeWidthMax = 0;
+                        M4OSA_UInt32 tempFinalInputWidth = 0, tempFinalOutputWidth = 0;
+                        /*The output height is the screen width*/
+                        Params.m_outputSize.m_width = pImagePlanes->u_height;
+                        Params.m_outputSize.m_height= pImagePlanes->u_width;
+                        tempOutputSizeWidth = (tempOutputSizeWidth>>1)<<1;
+
+                        /*Maximum input width according to the maximum output width
+                         (proportional to the maximum output width)*/
+                        tempInputSizeWidthMax =
+                        (pImagePlanes->u_width*Params.m_inputSize.m_height)/tempOutputSizeWidth;
+                        tempInputSizeWidthMax = (tempInputSizeWidthMax>>1)<<1;
+
+                        /*Check if the maximum possible input width is contained into the input
+                         image height (rotation included)*/
+                        if(tempInputSizeWidthMax <= pC->m_pDecodedPlane->u_height)
+                        {
+                            /*The maximum possible input width is contained in the input
+                             image height (rotation included),
+                            that means no black borders, the input pan zoom area will be extended
+                            so that the input AIR height will be the maximum possible*/
+                            if(((tempInputSizeWidthMax - Params.m_inputSize.m_height)>>1) \
+                                <= Params.m_inputCoord.m_y
+                                && ((tempInputSizeWidthMax - Params.m_inputSize.m_height)>>1)\
+                                     <= pC->m_pDecodedPlane->u_height -(Params.m_inputCoord.m_y \
+                                        + Params.m_inputSize.m_height))
+                            {
+                                /*The input pan zoom area can be extended symmetrically on
+                                the right and left side*/
+                                Params.m_inputCoord.m_y -= ((tempInputSizeWidthMax \
+                                    - Params.m_inputSize.m_height)>>1);
+                            }
+                            else if(Params.m_inputCoord.m_y < pC->m_pDecodedPlane->u_height\
+                                -(Params.m_inputCoord.m_y + Params.m_inputSize.m_height))
+                            {
+                                /*There is not enough place on the top of the input pan zoom
+                                area to extend it symmetrically,
+                                so extend it to the maximum on the top*/
+                                Params.m_inputCoord.m_y = 0;
+                            }
+                            else
+                            {
+                                /*There is not enough place on the bottom of the input pan zoom
+                                 area to extend it symmetrically,
+                                so extend it to the maximum on the bottom*/
+                                Params.m_inputCoord.m_y = pC->m_pDecodedPlane->u_height\
+                                     - tempInputSizeWidthMax;
+                            }
+                            /*The input height of the AIR is the maximum possible height*/
+                            Params.m_inputSize.m_height = tempInputSizeWidthMax;
+                        }
+                        else
+                        {
+                            /*The maximum possible input width is greater than the input
+                             image height (rotation included),
+                            that means black borders are necessary to keep aspect ratio
+                            The input height of the AIR is all the input image height*/
+                            Params.m_outputSize.m_height =
+                                (tempOutputSizeWidth*pC->m_pDecodedPlane->u_height)\
+                                    /Params.m_inputSize.m_height;
+                            Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
+                            Params.m_inputCoord.m_y = 0;
+                            Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height;
+                            pImagePlanes[0].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
+                                    -Params.m_outputSize.m_height))>>1))+1;
+                            pImagePlanes[0].u_width = Params.m_outputSize.m_height;
+                            pImagePlanes[1].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
+                                    -(Params.m_outputSize.m_height>>1)))>>1))+1;
+                            pImagePlanes[1].u_width = Params.m_outputSize.m_height>>1;
+                            pImagePlanes[2].u_topleft =
+                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
+                                    -(Params.m_outputSize.m_height>>1)))>>1))+1;
+                            pImagePlanes[2].u_width = Params.m_outputSize.m_height>>1;
+                        }
+                    }
+                    break;
+                }
+            }
+
+            /*Width and height have to be even*/
+            Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
+            Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
+            Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
+            Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
+            pImagePlanes[0].u_width = (pImagePlanes[0].u_width>>1)<<1;
+            pImagePlanes[1].u_width = (pImagePlanes[1].u_width>>1)<<1;
+            pImagePlanes[2].u_width = (pImagePlanes[2].u_width>>1)<<1;
+            pImagePlanes[0].u_height = (pImagePlanes[0].u_height>>1)<<1;
+            pImagePlanes[1].u_height = (pImagePlanes[1].u_height>>1)<<1;
+            pImagePlanes[2].u_height = (pImagePlanes[2].u_height>>1)<<1;
+
+            /*Check that values are coherent*/
+            if(Params.m_inputSize.m_height == Params.m_outputSize.m_height)
+            {
+                Params.m_inputSize.m_width = Params.m_outputSize.m_width;
+            }
+            else if(Params.m_inputSize.m_width == Params.m_outputSize.m_width)
+            {
+                Params.m_inputSize.m_height = Params.m_outputSize.m_height;
+            }
+        }
+
+        /**
+        Picture rendering: Resizing and Cropping*/
+        if(pC->m_mediaRendering != M4xVSS_kBlackBorders)
+        {
+            switch(pBasicTags.orientation)
+            {
+            default:
+            case M4COMMON_kOrientationUnknown:
+                Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
+            case M4COMMON_kOrientationTopLeft:
+            case M4COMMON_kOrientationTopRight:
+            case M4COMMON_kOrientationBottomRight:
+            case M4COMMON_kOrientationBottomLeft:
+                Params.m_outputSize.m_height = pImagePlanes->u_height;
+                Params.m_outputSize.m_width = pImagePlanes->u_width;
+                break;
+            case M4COMMON_kOrientationLeftTop:
+            case M4COMMON_kOrientationLeftBottom:
+            case M4COMMON_kOrientationRightTop:
+            case M4COMMON_kOrientationRightBottom:
+                Params.m_outputSize.m_height = pImagePlanes->u_width;
+                Params.m_outputSize.m_width = pImagePlanes->u_height;
+                break;
+            }
+        }
+
+        /**
+        Picture rendering: Cropping*/
+        if(pC->m_mediaRendering == M4xVSS_kCropping)
+        {
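+            /* Cropping: keep the output size and shrink the input window so that its aspect
+               ratio matches the output; the crop window is centred, or re-centred inside the
+               current pan & zoom window when pan & zoom is active */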
+            if((Params.m_outputSize.m_height * Params.m_inputSize.m_width)\
+                 /Params.m_outputSize.m_width<Params.m_inputSize.m_height)
+            {
+                M4OSA_UInt32 tempHeight = Params.m_inputSize.m_height;
+                /*height will be cropped*/
+                Params.m_inputSize.m_height =  (M4OSA_UInt32)((Params.m_outputSize.m_height \
+                    * Params.m_inputSize.m_width) /Params.m_outputSize.m_width);
+                Params.m_inputSize.m_height =  (Params.m_inputSize.m_height>>1)<<1;
+                if(M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
+                {
+                    Params.m_inputCoord.m_y = (M4OSA_Int32)((M4OSA_Int32)\
+                        ((pC->m_pDecodedPlane->u_height - Params.m_inputSize.m_height))>>1);
+                }
+                else
+                {
+                    Params.m_inputCoord.m_y += (M4OSA_Int32)((M4OSA_Int32)\
+                        ((tempHeight - Params.m_inputSize.m_height))>>1);
+                }
+            }
+            else
+            {
+                M4OSA_UInt32 tempWidth= Params.m_inputSize.m_width;
+                /*width will be cropped*/
+                Params.m_inputSize.m_width =  (M4OSA_UInt32)((Params.m_outputSize.m_width \
+                    * Params.m_inputSize.m_height) /Params.m_outputSize.m_height);
+                Params.m_inputSize.m_width =  (Params.m_inputSize.m_width>>1)<<1;
+                if(M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
+                {
+                    Params.m_inputCoord.m_x = (M4OSA_Int32)((M4OSA_Int32)\
+                        ((pC->m_pDecodedPlane->u_width - Params.m_inputSize.m_width))>>1);
+                }
+                else
+                {
+                    Params.m_inputCoord.m_x += (M4OSA_Int32)\
+                        (((M4OSA_Int32)(tempWidth - Params.m_inputSize.m_width))>>1);
+                }
+            }
+        }
+
+
+
+        /**
+         * Call AIR functions */
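+        /* The AIR context is created once, on the first picture, and reconfigured for every
+           picture since the input window changes from frame to frame when pan & zoom is
+           enabled */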
+        if(M4OSA_NULL == pC->m_air_context)
+        {
+            err = M4AIR_create(&pC->m_air_context, M4AIR_kYUV420P);
+            if(err != M4NO_ERROR)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane[0].pac_data);
+                M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane);
+                pC->m_pDecodedPlane = M4OSA_NULL;
+                M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
+                     Error when initializing AIR: 0x%x", err);
+                return err;
+            }
+        }
+
+        err = M4AIR_configure(pC->m_air_context, &Params);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
+                 Error when configuring AIR: 0x%x", err);
+            M4AIR_cleanUp(pC->m_air_context);
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane[0].pac_data);
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane);
+            pC->m_pDecodedPlane = M4OSA_NULL;
+            return err;
+        }
+
+        err = M4AIR_get(pC->m_air_context, pC->m_pDecodedPlane, pImagePlanes);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when getting AIR plane: 0x%x", err);
+            M4AIR_cleanUp(pC->m_air_context);
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane[0].pac_data);
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane);
+            pC->m_pDecodedPlane = M4OSA_NULL;
+            return err;
+        }
+        pImagePlanes[0] = pImagePlanes1;
+        pImagePlanes[1] = pImagePlanes2;
+        pImagePlanes[2] = pImagePlanes3;
+    }
+
+
+    /**
+     * Increment the image counter */
+    pC->m_ImageCounter++;
+
+    /**
+     * Check end of sequence */
+    last_frame_flag    = (pC->m_ImageCounter >= pC->m_NbImage);
+
+    /**
+     * Keep the picture duration */
+    *pPictureDuration = pC->m_timeDuration;
+
+    if (1 == last_frame_flag)
+    {
+        if(M4OSA_NULL != pC->m_air_context)
+        {
+            err = M4AIR_cleanUp(pC->m_air_context);
+            if(err != M4NO_ERROR)
+            {
+                M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when cleaning AIR: 0x%x", err);
+                return err;
+            }
+        }
+        if(M4OSA_NULL != pC->m_pDecodedPlane)
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane[0].pac_data);
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane);
+            pC->m_pDecodedPlane = M4OSA_NULL;
+        }
+        return M4PTO3GPP_WAR_LAST_PICTURE;
+    }
+
+    M4OSA_TRACE1_0("M4xVSS_PictureCallbackFct: Leaving ");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_internalStartConvertPictureTo3gp(M4OSA_Context pContext)
+ * @brief    This function initializes Pto3GPP with the given parameters
+ * @note    The "Pictures to 3GPP" parameters are given by the internal xVSS
+ *            context. This context contains a pointer on the current element
+ *            of the chained list of Pto3GPP parameters.
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4PTO3GPP_WAR_LAST_PICTURE: The returned image is the last one
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalStartConvertPictureTo3gp(M4OSA_Context pContext)
+{
+    /************************************************************************/
+    /* Definitions to generate dummy AMR file used to add AMR silence in files generated
+     by Pto3GPP */
+    #define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE     13
+    /* This constant is defined in M4VSS3GPP_InternalConfig.h */
+    extern const M4OSA_UInt8\
+         M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE];
+
+    /* AMR file header written at the start of the dummy AMR silence file */
+    #define M4VSS3GPP_AMR_HEADER_SIZE 6
+    const M4OSA_UInt8 M4VSS3GPP_AMR_HEADER[M4VSS3GPP_AMR_HEADER_SIZE] =
+    { 0x23, 0x21, 0x41, 0x4d, 0x52, 0x0a };
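+    /* These 6 bytes are the ASCII file magic "#!AMR\n" expected at the start of an AMR file */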
+    /************************************************************************/
+
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+    M4PTO3GPP_Context pM4PTO3GPP_Ctxt = M4OSA_NULL;
+    M4PTO3GPP_Params Params;
+     M4xVSS_PictureCallbackCtxt*    pCallBackCtxt;
+    M4OSA_Bool cmpResult=M4OSA_FALSE;
+    M4OSA_Context pDummyAMRFile;
+    M4OSA_Char out_amr[64];
+    /*UTF conversion support*/
+    M4OSA_Char* pDecodedPath = M4OSA_NULL;
+    M4OSA_UInt32 i;
+
+    /**
+     * Create a M4PTO3GPP instance */
+    err = M4PTO3GPP_Init( &pM4PTO3GPP_Ctxt, xVSS_context->pFileReadPtr,
+         xVSS_context->pFileWritePtr);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Init returned %ld\n",err);
+        return err;
+    }
+
+    /* replay recorded external encoder registrations on the PTO3GPP */
+    for (i=0; i<M4VE_kEncoderType_NB; i++)
+    {
+        if (xVSS_context->registeredExternalEncs[i].registered)
+        {
+            err = M4PTO3GPP_RegisterExternalVideoEncoder(pM4PTO3GPP_Ctxt, i,
+                    xVSS_context->registeredExternalEncs[i].pEncoderInterface,
+                    xVSS_context->registeredExternalEncs[i].pUserData);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile:\
+                     M4PTO3GPP_registerExternalVideoEncoder() returns 0x%x!", err);
+                M4PTO3GPP_CleanUp(pM4PTO3GPP_Ctxt);
+                return err;
+            }
+        }
+    }
+
+    pCallBackCtxt = (M4xVSS_PictureCallbackCtxt*)M4OSA_malloc(sizeof(M4xVSS_PictureCallbackCtxt),
+         M4VS,(M4OSA_Char *) "Pto3gpp callback struct");
+    if(pCallBackCtxt == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalStartConvertPictureTo3gp");
+        return M4ERR_ALLOC;
+    }
+
+    Params.OutputVideoFrameSize = xVSS_context->pSettings->xVSS.outputVideoSize;
+    Params.OutputVideoFormat = xVSS_context->pSettings->xVSS.outputVideoFormat;
+
+    /**
+     * Generate "dummy" amr file containing silence in temporary folder */
+    M4OSA_chrNCopy(out_amr, xVSS_context->pTempPath, 64);
+    M4OSA_chrNCat(out_amr, (M4OSA_Char *)"dummy.amr\0", 10);
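+    /* 10 characters = the 9 characters of "dummy.amr" plus the terminating '\0' */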
+
+    /**
+     * UTF conversion: convert the temporary path into the customer format*/
+    pDecodedPath = out_amr;
+
+    if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+    {
+        M4OSA_UInt32 length = 0;
+        err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void*) out_amr,
+             (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &length);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalStartConvertPictureTo3gp:\
+                 M4xVSS_internalConvertFromUTF8 returns err: 0x%x",err);
+            return err;
+        }
+        pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+    }
+
+    /**
+    * End of the conversion, now use the converted path*/
+
+    err = xVSS_context->pFileWritePtr->openWrite(&pDummyAMRFile, pDecodedPath, M4OSA_kFileWrite);
+
+    /*Commented because of the use of the UTF conversion see above*/
+/*    err = xVSS_context->pFileWritePtr->openWrite(&pDummyAMRFile, out_amr, M4OSA_kFileWrite);
+ */
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: Can't open output dummy amr file %s,\
+             error: 0x%x\n",out_amr, err);
+        return err;
+    }
+
+    err =  xVSS_context->pFileWritePtr->writeData(pDummyAMRFile,
+        (M4OSA_Int8*)M4VSS3GPP_AMR_HEADER, M4VSS3GPP_AMR_HEADER_SIZE);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: Can't write output dummy amr file %s,\
+             error: 0x%x\n",out_amr, err);
+        return err;
+    }
+
+    err =  xVSS_context->pFileWritePtr->writeData(pDummyAMRFile,
+         (M4OSA_Int8*)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048, M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: \
+            Can't write output dummy amr file %s, error: 0x%x\n",out_amr, err);
+        return err;
+    }
+
+    err =  xVSS_context->pFileWritePtr->closeWrite(pDummyAMRFile);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: \
+            Can't close output dummy amr file %s, error: 0x%x\n",out_amr, err);
+        return err;
+    }
+
+    /**
+     * Fill parameters for Pto3GPP with the parameters contained in the current element of the
+     * Pto3GPP parameters chained list and with default parameters */
+/*+ New Encoder bitrates */
+    if(xVSS_context->pSettings->xVSS.outputVideoBitrate == 0) {
+        Params.OutputVideoBitrate    = M4VIDEOEDITING_kVARIABLE_KBPS;
+    }
+    else {
+          Params.OutputVideoBitrate = xVSS_context->pSettings->xVSS.outputVideoBitrate;
+    }
+    M4OSA_TRACE1_1("M4xVSS_internalStartConvertPicTo3GP: video bitrate = %d",
+        Params.OutputVideoBitrate);
+/*- New Encoder bitrates */
+    Params.OutputFileMaxSize    = M4PTO3GPP_kUNLIMITED;
+    Params.pPictureCallbackFct    = M4xVSS_PictureCallbackFct;
+    Params.pPictureCallbackCtxt    = pCallBackCtxt;
+    /*FB: change to use the converted path (UTF conversion) see the conversion above*/
+    /*Fix :- Adding Audio Track in Image as input :AudioTarckFile Setting to NULL */
+    Params.pInputAudioTrackFile    = M4OSA_NULL;//(M4OSA_Void*)pDecodedPath;//out_amr;
+    Params.AudioPaddingMode        = M4PTO3GPP_kAudioPaddingMode_Loop;
+    Params.AudioFileFormat        = M4VIDEOEDITING_kFileType_AMR;
+    Params.pOutput3gppFile        = xVSS_context->pPTo3GPPcurrentParams->pFileOut;
+    Params.pTemporaryFile        = xVSS_context->pPTo3GPPcurrentParams->pFileTemp;
+    /*+PR No:  blrnxpsw#223*/
+    /*Increasing frequency of Frame, calculating Nos of Frame = duration /FPS */
+    /*Other changes made is @ M4xVSS_API.c @ line 3841 in M4xVSS_SendCommand*/
+    /*If case check for PanZoom removed */
+    Params.NbVideoFrames            = (M4OSA_UInt32)
+        (xVSS_context->pPTo3GPPcurrentParams->duration \
+            / xVSS_context->pPTo3GPPcurrentParams->framerate); /* */
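+    /* Note: 'framerate' here actually holds the duration of one picture in ms (it is passed back
+        as the picture duration in the callback), so duration / framerate gives the frame count */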
+    pCallBackCtxt->m_timeDuration    = xVSS_context->pPTo3GPPcurrentParams->framerate;
+    /*-PR No:  blrnxpsw#223*/
+    pCallBackCtxt->m_ImageCounter    = 0;
+    pCallBackCtxt->m_FileIn            = xVSS_context->pPTo3GPPcurrentParams->pFileIn;
+    pCallBackCtxt->m_NbImage        = Params.NbVideoFrames;
+    pCallBackCtxt->m_pFileReadPtr    = xVSS_context->pFileReadPtr;
+    pCallBackCtxt->m_pDecodedPlane    = M4OSA_NULL;
+    pCallBackCtxt->m_pPto3GPPparams    = xVSS_context->pPTo3GPPcurrentParams;
+    pCallBackCtxt->m_air_context    = M4OSA_NULL;
+    pCallBackCtxt->m_mediaRendering = xVSS_context->pPTo3GPPcurrentParams->MediaRendering;
+
+    /**
+     * Set the input and output files */
+    err = M4PTO3GPP_Open(pM4PTO3GPP_Ctxt, &Params);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Open returned: 0x%x\n",err);
+        if(pCallBackCtxt != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)pCallBackCtxt);
+            pCallBackCtxt = M4OSA_NULL;
+        }
+        M4PTO3GPP_CleanUp(pM4PTO3GPP_Ctxt);
+        return err;
+    }
+
+    /**
+     * Save context to be able to call Pto3GPP step function in M4xVSS_step function */
+    xVSS_context->pM4PTO3GPP_Ctxt = pM4PTO3GPP_Ctxt;
+    xVSS_context->pCallBackCtxt = pCallBackCtxt;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_internalStopConvertPictureTo3gp(M4OSA_Context pContext)
+ * @brief    This function cleans up Pto3GPP
+ * @note
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalStopConvertPictureTo3gp(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+    M4OSA_Char out_amr[64];
+    /*UTF conversion support*/
+    M4OSA_Char* pDecodedPath = M4OSA_NULL;
+
+    /**
+    * Free the PTO3GPP callback context */
+    if(M4OSA_NULL != xVSS_context->pCallBackCtxt)
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCallBackCtxt);
+        xVSS_context->pCallBackCtxt = M4OSA_NULL;
+    }
+
+    /**
+     * Finalize the output file */
+    err = M4PTO3GPP_Close(xVSS_context->pM4PTO3GPP_Ctxt);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Close returned 0x%x\n",err);
+        M4PTO3GPP_CleanUp(xVSS_context->pM4PTO3GPP_Ctxt);
+        return err;
+    }
+
+    /**
+     * Free this M4PTO3GPP instance */
+    err = M4PTO3GPP_CleanUp(xVSS_context->pM4PTO3GPP_Ctxt);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_CleanUp returned 0x%x\n",err);
+        return err;
+    }
+
+    /**
+     * Remove dummy.amr file */
+    M4OSA_chrNCopy(out_amr, xVSS_context->pTempPath, 64);
+    M4OSA_chrNCat(out_amr, (M4OSA_Char *)"dummy.amr\0", 10);
+
+    /**
+     * UTF conversion: convert the temporary path into the customer format*/
+    pDecodedPath = out_amr;
+
+    if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+    {
+        M4OSA_UInt32 length = 0;
+        err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void*) out_amr,
+             (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &length);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalStopConvertPictureTo3gp:\
+                 M4xVSS_internalConvertFromUTF8 returns err: 0x%x",err);
+            return err;
+        }
+        pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+    }
+    /**
+    * End of the conversion, now use the decoded path*/
+    M4OSA_fileExtraDelete(pDecodedPath);
+
+    /*Commented because of the use of the UTF conversion*/
+/*    M4OSA_fileExtraDelete(out_amr);
+ */
+
+    xVSS_context->pM4PTO3GPP_Ctxt = M4OSA_NULL;
+    xVSS_context->pCallBackCtxt = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx)
+ * @brief    This function converts an RGB565 plane to YUV420 planar
+ * @note    It is used only for framing effect
+ *            It allocates output YUV planes
+ * @param    framingCtx    (IN) The framing struct containing input RGB565 plane
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ * @return    M4ERR_ALLOC: Allocation error (no more memory)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx)
+{
+    M4OSA_ERR err;
+
+    /**
+     * Allocate output YUV planes */
+    framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_malloc(3*sizeof(M4VIFI_ImagePlane),
+         M4VS, (M4OSA_Char *)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
+    if(framingCtx->FramingYuv == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
+        return M4ERR_ALLOC;
+    }
+    framingCtx->FramingYuv[0].u_width = framingCtx->FramingRgb->u_width;
+    framingCtx->FramingYuv[0].u_height = framingCtx->FramingRgb->u_height;
+    framingCtx->FramingYuv[0].u_topleft = 0;
+    framingCtx->FramingYuv[0].u_stride = framingCtx->FramingRgb->u_width;
+    framingCtx->FramingYuv[0].pac_data =
+         (M4VIFI_UInt8*)M4OSA_malloc((framingCtx->FramingYuv[0].u_width\
+            *framingCtx->FramingYuv[0].u_height*3)>>1, M4VS, (M4OSA_Char *)\
+                "Alloc for the Convertion output YUV");;
+    if(framingCtx->FramingYuv[0].pac_data == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
+        return M4ERR_ALLOC;
+    }
+    framingCtx->FramingYuv[1].u_width = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[1].u_height = (framingCtx->FramingRgb->u_height)>>1;
+    framingCtx->FramingYuv[1].u_topleft = 0;
+    framingCtx->FramingYuv[1].u_stride = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[1].pac_data = framingCtx->FramingYuv[0].pac_data \
+        + framingCtx->FramingYuv[0].u_width * framingCtx->FramingYuv[0].u_height;
+    framingCtx->FramingYuv[2].u_width = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[2].u_height = (framingCtx->FramingRgb->u_height)>>1;
+    framingCtx->FramingYuv[2].u_topleft = 0;
+    framingCtx->FramingYuv[2].u_stride = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[2].pac_data = framingCtx->FramingYuv[1].pac_data \
+        + framingCtx->FramingYuv[1].u_width * framingCtx->FramingYuv[1].u_height;
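+    /* The three planes share the single (width*height*3)/2 buffer allocated above:
+       the full-resolution Y plane first, then the half-resolution U and V planes */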
+
+    /**
+     * Convert input RGB 565 to YUV 420 to be able to merge it with output video in framing
+      effect */
+    err = M4VIFI_xVSS_RGB565toYUV420(M4OSA_NULL, framingCtx->FramingRgb, framingCtx->FramingYuv);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalConvertRGBtoYUV:\
+             error when converting from RGB to YUV: 0x%x\n", err);
+    }
+
+    framingCtx->duration = 0;
+    framingCtx->previousClipTime = -1;
+    framingCtx->previewOffsetClipTime = -1;
+
+    /**
+     * Only one element in the chained list (no animated image with RGB buffer...) */
+    framingCtx->pCurrent = framingCtx;
+    framingCtx->pNext = framingCtx;
+
+    return M4NO_ERROR;
+}
+
+M4OSA_ERR M4xVSS_internalSetPlaneTransparent(M4OSA_UInt8* planeIn, M4OSA_UInt32 size)
+{
+    M4OSA_UInt32 i;
+    M4OSA_UInt8* plane = planeIn;
+    M4OSA_UInt8 transparent1 = (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
+    M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;
+
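+    /* Fill the plane with the 16-bit TRANSPARENT_COLOR value, most significant byte first */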
+    for(i=0; i<(size>>1); i++)
+    {
+        *plane++ = transparent1;
+        *plane++ = transparent2;
+    }
+
+    return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pContext,
+ *                                                M4VSS3GPP_EffectSettings* pEffect,
+ *                                                M4xVSS_FramingStruct* framingCtx,
+ *                                                M4VIDEOEDITING_VideoFrameSize OutputVideoResolution)
+ *
+ * @brief    This function converts an ARGB8888 input file to YUV420 when used for the framing effect
+ * @note    The input ARGB8888 file path is contained in the pEffect structure
+ *            If the ARGB8888 must be resized to fit the output video size, this function
+ *            will do it.
+ * @param    pContext    (IN) The integrator own context
+ * @param    pEffect        (IN) The effect structure containing all information on
+ *                        the file to decode, resizing ...
+ * @param    framingCtx    (IN/OUT) Structure in which the output RGB will be stored
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ * @return    M4ERR_ALLOC: Allocation error (no more memory)
+ ******************************************************************************
+ */
+
+
+M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pContext,
+                                                               M4VSS3GPP_EffectSettings* pEffect,
+                                                               M4xVSS_FramingStruct* framingCtx,
+                                                               M4VIDEOEDITING_VideoFrameSize\
+                                                                 OutputVideoResolution)
+{
+    M4OSA_ERR err;
+    M4OSA_Context pARGBIn;
+    M4OSA_UInt32 file_size;
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_UInt32 width, height, width_out, height_out;
+    M4OSA_Void* pFile = pEffect->xVSS.pFramingFilePath;
+    M4OSA_UInt8 transparent1 = (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
+    M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;
+    /*UTF conversion support*/
+    M4OSA_Char* pDecodedPath = M4OSA_NULL;
+    M4OSA_UInt32 i = 0,j = 0;
+    M4VIFI_ImagePlane rgbPlane;
+    M4OSA_UInt32 frameSize_argb=(framingCtx->width * framingCtx->height * 4);
+    M4OSA_UInt32 frameSize = (framingCtx->width * framingCtx->height * 3); //Size of RGB888 data
+    M4OSA_UInt8 *pTmpData = (M4OSA_UInt8*) M4OSA_malloc(frameSize_argb, M4VS, (M4OSA_Char*)\
+        "Image argb data");
+    M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: Entering ");
+    M4OSA_TRACE1_2("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect width and height %d %d ",
+        framingCtx->width,framingCtx->height);
+    if(pTmpData == M4OSA_NULL) {
+        M4OSA_TRACE1_0("Failed to allocate memory for Image clip");
+        return M4ERR_ALLOC;
+    }
+    /**
+     * UTF conversion: convert the file path into the customer format*/
+    pDecodedPath = pFile;
+
+    if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+    {
+        M4OSA_UInt32 length = 0;
+        err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void*) pFile,
+             (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &length);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalDecodePNG:\
+                 M4xVSS_internalConvertFromUTF8 returns err: 0x%x",err);
+            return err;
+        }
+        pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+    }
+
+    /**
+    * End of the conversion, now use the decoded path*/
+
+     /* Open input ARGB8888 file and store it into memory */
+    err = xVSS_context->pFileReadPtr->openRead(&pARGBIn, pDecodedPath, M4OSA_kFileRead);
+
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("Can't open input ARGB8888 file %s, error: 0x%x\n",pFile, err);
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        return err;
+    }
+
+    err = xVSS_context->pFileReadPtr->readData(pARGBIn,(M4OSA_MemAddr8)pTmpData, &frameSize_argb);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("Can't read input ARGB8888 file %s, error: 0x%x\n",pFile, err);
+        xVSS_context->pFileReadPtr->closeRead(pARGBIn);
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        return err;
+    }
+
+
+    err =  xVSS_context->pFileReadPtr->closeRead(pARGBIn);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("Can't close input png file %s, error: 0x%x\n",pFile, err);
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        return err;
+    }
+
+    /* rgbPlane.pac_data = (M4VIFI_UInt8*)M4OSA_malloc(frameSize, M4VS,\
+        (M4OSA_Char*)"Image clip RGB888 data"); */
+    /* temp fix for a crash in the filter: allocate 2 extra widths (2 * framingCtx->width bytes) */
+    rgbPlane.pac_data = (M4VIFI_UInt8*)M4OSA_malloc(((frameSize)+ (2 * framingCtx->width)),
+         M4VS, (M4OSA_Char*)"Image clip RGB888 data");
+    if(rgbPlane.pac_data == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Failed to allocate memory for Image clip");
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        return M4ERR_ALLOC;
+    }
+
+    rgbPlane.u_height = ((framingCtx->height+1)>>1)<<1;
+    rgbPlane.u_width = ((framingCtx->width+1)>>1)<<1;
+    rgbPlane.u_stride = rgbPlane.u_width*3;
+    rgbPlane.u_topleft = 0;
+
+    M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+          Remove the alpha channel  ");
+      /** Remove the alpha channel */
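+    /* ARGB8888 stores 4 bytes per pixel; the loop below drops every 4th byte (the alpha byte
+        at offset i%4 == 0), keeping the 3 RGB bytes of each pixel */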
+    for (i=0, j = 0; i < frameSize_argb; i++) {
+        if ((i % 4) == 0) continue;
+        rgbPlane.pac_data[j] = pTmpData[i];
+        j++;
+    }
+
+    M4OSA_free((M4OSA_MemAddr32)pTmpData);
+    pTmpData = M4OSA_NULL;
+    /**
+     * Check if output sizes are odd */
+    if(rgbPlane.u_height % 2 != 0)
+    {
+
+        M4VIFI_UInt8* output_pac_data = rgbPlane.pac_data;
+        M4OSA_UInt32 i;
+        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+             output height is odd  ");
+        output_pac_data +=rgbPlane.u_width * rgbPlane.u_height*3;
+        for(i=0;i<rgbPlane.u_width;i++)
+        {
+            *output_pac_data++ = transparent1;
+            *output_pac_data++ = transparent2;
+        }
+
+        /**
+         * We just add one line filled with the transparent color at the bottom of the image */
+        rgbPlane.u_height++;
+    }
+    if(rgbPlane.u_width % 2 != 0)
+    {
+
+        /**
+         * We add a new column filled with the transparent color, which requires copying every RGB line */
+        M4OSA_UInt32 i;
+        M4VIFI_UInt8* newRGBpac_data;
+        M4VIFI_UInt8* output_pac_data, *input_pac_data;
+
+        rgbPlane.u_width++;
+        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: \
+             output width is odd  ");
+        /**
+         * We need to allocate a new RGB output buffer in which all decoded data
+          + white line will be copied */
+        newRGBpac_data = (M4VIFI_UInt8*)M4OSA_malloc(rgbPlane.u_height*rgbPlane.u_width*3\
+            *sizeof(M4VIFI_UInt8), M4VS, (M4OSA_Char *)"New Framing GIF Output pac_data RGB");
+        if(newRGBpac_data == M4OSA_NULL)
+        {
+            M4OSA_TRACE1_0("Allocation error in \
+                M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+            /**
+             * Destroy SPS instance */
+            //M4SPS_destroy(pSPSContext);
+            return M4ERR_ALLOC;
+        }
+
+        output_pac_data= newRGBpac_data;
+        input_pac_data = rgbPlane.pac_data;
+
+        for(i=0;i<rgbPlane.u_height;i++)
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8)output_pac_data, (M4OSA_MemAddr8)input_pac_data,
+                 (rgbPlane.u_width-1)*3);
+            output_pac_data += ((rgbPlane.u_width-1)*3);
+            /* Put the pixel to transparency color */
+            *output_pac_data++ = transparent1;
+            *output_pac_data++ = transparent2;
+            input_pac_data += ((rgbPlane.u_width-1)*3);
+        }
+
+        /* Free the original RGB buffer before switching to the padded one, to avoid leaking it */
+        M4OSA_free((M4OSA_MemAddr32)rgbPlane.pac_data);
+        rgbPlane.pac_data = newRGBpac_data;
+    }
+
+    /**
+     * Initialize chained list parameters */
+    framingCtx->duration = 0;
+    framingCtx->previousClipTime = -1;
+    framingCtx->previewOffsetClipTime = -1;
+
+    /**
+     * Only one element in the chained list (no animated image ...) */
+    framingCtx->pCurrent = framingCtx;
+    framingCtx->pNext = framingCtx;
+
+    /**
+     * Get output width/height */
+     switch(OutputVideoResolution)
+    //switch(xVSS_context->pSettings->xVSS.outputVideoSize)
+    {
+    case M4VIDEOEDITING_kSQCIF:
+        width_out = 128;
+        height_out = 96;
+        break;
+    case M4VIDEOEDITING_kQQVGA:
+        width_out = 160;
+        height_out = 120;
+        break;
+    case M4VIDEOEDITING_kQCIF:
+        width_out = 176;
+        height_out = 144;
+        break;
+    case M4VIDEOEDITING_kQVGA:
+        width_out = 320;
+        height_out = 240;
+        break;
+    case M4VIDEOEDITING_kCIF:
+        width_out = 352;
+        height_out = 288;
+        break;
+    case M4VIDEOEDITING_kVGA:
+        width_out = 640;
+        height_out = 480;
+        break;
+    case M4VIDEOEDITING_kWVGA:
+        width_out = 800;
+        height_out = 480;
+        break;
+    case M4VIDEOEDITING_kNTSC:
+        width_out = 720;
+        height_out = 480;
+        break;
+    case M4VIDEOEDITING_k640_360:
+        width_out = 640;
+        height_out = 360;
+        break;
+    case M4VIDEOEDITING_k854_480:
+        // StageFright encoders require %16 resolution
+        width_out = M4ENCODER_854_480_Width;
+        height_out = 480;
+        break;
+    case M4VIDEOEDITING_kHD1280:
+        width_out = 1280;
+        height_out = 720;
+        break;
+    case M4VIDEOEDITING_kHD1080:
+        // StageFright encoders require %16 resolution
+        width_out = M4ENCODER_HD1080_Width;
+        height_out = 720;
+        break;
+    case M4VIDEOEDITING_kHD960:
+        width_out = 960;
+        height_out = 720;
+        break;
+
+    /**
+     * If output video size is not given, we take QCIF size,
+     * should not happen, because already done in M4xVSS_sendCommand */
+    default:
+        width_out = 176;
+        height_out = 144;
+        break;
+    }
+
+
+    /**
+     * Allocate output planes structures */
+    framingCtx->FramingRgb = (M4VIFI_ImagePlane*)M4OSA_malloc(sizeof(M4VIFI_ImagePlane), M4VS,
+         (M4OSA_Char *)"Framing Output plane RGB");
+    if(framingCtx->FramingRgb == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+    /**
+     * Resize RGB if needed */
+    if((pEffect->xVSS.bResize) &&
+         (rgbPlane.u_width != width_out || rgbPlane.u_height != height_out))
+    {
+        width = width_out;
+        height = height_out;
+
+        M4OSA_TRACE1_2("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: \
+             New Width and height %d %d  ",width,height);
+
+        framingCtx->FramingRgb->u_height = height_out;
+        framingCtx->FramingRgb->u_width = width_out;
+        framingCtx->FramingRgb->u_stride = framingCtx->FramingRgb->u_width*3;
+        framingCtx->FramingRgb->u_topleft = 0;
+
+        framingCtx->FramingRgb->pac_data =
+             (M4VIFI_UInt8*)M4OSA_malloc(framingCtx->FramingRgb->u_height*framingCtx->\
+                FramingRgb->u_width*3*sizeof(M4VIFI_UInt8), M4VS,
+                  (M4OSA_Char *)"Framing Output pac_data RGB");
+        if(framingCtx->FramingRgb->pac_data == M4OSA_NULL)
+        {
+            M4OSA_TRACE1_0("Allocation error in \
+                M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+            M4OSA_free((M4OSA_MemAddr32)pTmpData);
+            pTmpData = M4OSA_NULL;
+            return M4ERR_ALLOC;
+        }
+
+        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:  Resizing Needed ");
+        M4OSA_TRACE1_2("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+              rgbPlane.u_height & rgbPlane.u_width %d %d",rgbPlane.u_height,rgbPlane.u_width);
+        err = M4VIFI_ResizeBilinearRGB888toRGB888(M4OSA_NULL, &rgbPlane,framingCtx->FramingRgb);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect :\
+                when resizing RGB plane: 0x%x\n", err);
+            return err;
+        }
+
+        if(rgbPlane.pac_data != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane.pac_data);
+            rgbPlane.pac_data = M4OSA_NULL;
+
+        }
+
+    }
+    else
+    {
+
+        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+              Resizing Not Needed ");
+        width = framingCtx->width;
+        height =    framingCtx->height;
+        framingCtx->FramingRgb->u_height = height;
+        framingCtx->FramingRgb->u_width = width;
+        framingCtx->FramingRgb->u_stride = framingCtx->FramingRgb->u_width*3;
+        framingCtx->FramingRgb->u_topleft = 0;
+        framingCtx->FramingRgb->pac_data = rgbPlane.pac_data;
+    }
+
+
+    if(pEffect->xVSS.bResize)
+    {
+        /**
+         * Force topleft to 0 for pure framing effect */
+        framingCtx->topleft_x = 0;
+        framingCtx->topleft_y = 0;
+    }
+
+
+
+    /**
+     * Convert  RGB output to YUV 420 to be able to merge it with output video in framing
+     effect */
+    framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_malloc(3*sizeof(M4VIFI_ImagePlane), M4VS,
+         (M4OSA_Char *)"Framing Output plane YUV");
+    if(framingCtx->FramingYuv == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+        return M4ERR_ALLOC;
+    }
+    framingCtx->FramingYuv[0].u_width = ((width+1)>>1)<<1;
+    framingCtx->FramingYuv[0].u_height = ((height+1)>>1)<<1;
+    framingCtx->FramingYuv[0].u_topleft = 0;
+    framingCtx->FramingYuv[0].u_stride = ((width+1)>>1)<<1;
+    framingCtx->FramingYuv[0].pac_data = (M4VIFI_UInt8*)M4OSA_malloc
+        ((framingCtx->FramingYuv[0].u_width*framingCtx->FramingYuv[0].u_height*3)>>1, M4VS,
+            (M4OSA_Char *)"Alloc for the output YUV");
+    if(framingCtx->FramingYuv[0].pac_data == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+        return M4ERR_ALLOC;
+    }
+    framingCtx->FramingYuv[1].u_width = (((width+1)>>1)<<1)>>1;
+    framingCtx->FramingYuv[1].u_height = (((height+1)>>1)<<1)>>1;
+    framingCtx->FramingYuv[1].u_topleft = 0;
+    framingCtx->FramingYuv[1].u_stride = (((width+1)>>1)<<1)>>1;
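+    /* U and V planes have half the luma width and height (4:2:0 chroma subsampling) */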
+
+    framingCtx->FramingYuv[1].pac_data = (M4VIFI_UInt8*)M4OSA_malloc\
+        (((framingCtx->FramingYuv[0].u_width)/2*(framingCtx->FramingYuv[0].u_height)/2), M4VS,
+             (M4OSA_Char *)"Alloc for the output YUV");;
+
+    framingCtx->FramingYuv[2].u_width = (((width+1)>>1)<<1)>>1;
+    framingCtx->FramingYuv[2].u_height = (((height+1)>>1)<<1)>>1;
+    framingCtx->FramingYuv[2].u_topleft = 0;
+    framingCtx->FramingYuv[2].u_stride = (((width+1)>>1)<<1)>>1;
+
+    framingCtx->FramingYuv[2].pac_data = (M4VIFI_UInt8*)M4OSA_malloc
+        (((framingCtx->FramingYuv[0].u_width)/2*(framingCtx->FramingYuv[0].u_height)/2), M4VS,
+            (M4OSA_Char *)"Alloc for the  output YUV");;
+
+
+
+        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+              convert RGB to YUV ");
+
+    err = M4VIFI_RGB888toYUV420(M4OSA_NULL, framingCtx->FramingRgb,  framingCtx->FramingYuv);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("SPS png: error when converting from RGB to YUV: 0x%x\n", err);
+    }
+
+        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:  Leaving ");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalGenerateEditedFile(M4OSA_Context pContext)
+ *
+ * @brief    This function prepares VSS for editing
+ * @note    It also set special xVSS effect as external effects for the VSS
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ * @return    M4ERR_ALLOC: Allocation error (no more memory)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalGenerateEditedFile(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4VSS3GPP_EditContext pVssCtxt;
+    M4OSA_UInt32 i,j;
+    M4OSA_ERR err;
+
+    /**
+     * Create a VSS 3GPP edition instance */
+    err = M4VSS3GPP_editInit( &pVssCtxt, xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile: M4VSS3GPP_editInit returned 0x%x\n",
+            err);
+        M4VSS3GPP_editCleanUp(pVssCtxt);
+        return err;
+    }
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+    /* replay recorded external decoder registrations on the VSS3GPP */
+    for (i=0; i<M4VD_kVideoType_NB; i++)
+    {
+        if (xVSS_context->registeredExternalDecs[i].registered)
+        {
+            err = M4VSS3GPP_editRegisterExternalVideoDecoder(pVssCtxt, i,
+                    xVSS_context->registeredExternalDecs[i].pDecoderInterface,
+                    xVSS_context->registeredExternalDecs[i].pUserData);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile: \
+                    M4VSS3GPP_editRegisterExternalVideoDecoder() returns 0x%x!", err);
+                M4VSS3GPP_editCleanUp(pVssCtxt);
+                return err;
+            }
+        }
+    }
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    /* replay recorded external encoder registrations on the VSS3GPP */
+    for (i=0; i<M4VE_kEncoderType_NB; i++)
+    {
+        if (xVSS_context->registeredExternalEncs[i].registered)
+        {
+            err = M4VSS3GPP_editRegisterExternalVideoEncoder(pVssCtxt, i,
+                    xVSS_context->registeredExternalEncs[i].pEncoderInterface,
+                    xVSS_context->registeredExternalEncs[i].pUserData);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile:\
+                     M4VSS3GPP_editRegisterExternalVideoEncoder() returns 0x%x!", err);
+                M4VSS3GPP_editCleanUp(pVssCtxt);
+                return err;
+            }
+        }
+    }
+
+    /* In case of MMS use case, we fill directly into the VSS context the targeted bitrate */
+    if(xVSS_context->targetedBitrate != 0)
+    {
+        M4VSS3GPP_InternalEditContext* pVSSContext = (M4VSS3GPP_InternalEditContext*)pVssCtxt;
+
+        pVSSContext->bIsMMS = M4OSA_TRUE;
+        pVSSContext->uiMMSVideoBitrate = xVSS_context->targetedBitrate;
+        pVSSContext->MMSvideoFramerate = xVSS_context->pSettings->videoFrameRate;
+    }
+
+    /*Warning: since the adding of the UTF conversion, pSettings has been changed in the next
+    part in  pCurrentEditSettings (there is a specific current editing structure for the saving,
+     as for the preview)*/
+
+    /**
+     * Set the external video effect functions, for saving mode (to be moved to
+      M4xVSS_saveStart() ?)*/
+    for (i=0; i<xVSS_context->pCurrentEditSettings->uiClipNumber; i++)
+    {
+        for (j=0; j<xVSS_context->pCurrentEditSettings->nbEffects; j++)
+        {
+            if (M4xVSS_kVideoEffectType_BlackAndWhite ==
+            xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                // (M4OSA_Void*)M4xVSS_kVideoEffectType_BlackAndWhite;
+                /*commented FB*/
+                /**
+                 * We do not need to set the color context, it is already set
+                 during sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Pink ==
+                xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                // (M4OSA_Void*)M4xVSS_kVideoEffectType_Pink; /**< we don't
+                // use any function context */
+                /*commented FB*/
+                /**
+                 * We do not need to set the color context,
+                  it is already set during sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Green ==
+                 xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                    M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                    // (M4OSA_Void*)M4xVSS_kVideoEffectType_Green;
+                     /**< we don't use any function context */
+                /*commented FB*/
+                /**
+                 * We do not need to set the color context, it is already set during
+                  sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Sepia ==
+                 xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                // (M4OSA_Void*)M4xVSS_kVideoEffectType_Sepia;
+                /**< we don't use any function context */
+                /*commented FB*/
+                /**
+                 * We do not need to set the color context, it is already set during
+                 sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Fifties ==
+             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectFifties;
+                /**
+                 * We do not need to set the framing context, it is already set during
+                 sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Negative ==
+             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                // (M4OSA_Void*)M4xVSS_kVideoEffectType_Negative;
+                 /**< we don't use any function context */
+                /*commented FB*/
+                /**
+                 * We do not need to set the color context, it is already set during
+                  sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Framing ==
+             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectFraming;
+                /**
+                 * We do not need to set the framing context, it is already set during
+                 sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_ZoomIn ==
+             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectZoom;
+                xVSS_context->pCurrentEditSettings->Effects[j].pExtVideoEffectFctCtxt =
+                 (M4OSA_Void*)M4xVSS_kVideoEffectType_ZoomIn; /**< we don't use any
+                 function context */
+            }
+            if (M4xVSS_kVideoEffectType_ZoomOut ==
+             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectZoom;
+                xVSS_context->pCurrentEditSettings->Effects[j].pExtVideoEffectFctCtxt =
+                 (M4OSA_Void*)M4xVSS_kVideoEffectType_ZoomOut; /**< we don't use any
+                 function context */
+            }
+            if (M4xVSS_kVideoEffectType_ColorRGB16 ==
+             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                // (M4OSA_Void*)M4xVSS_kVideoEffectType_ColorRGB16;
+                /**< we don't use any function context */
+                /**
+                 * We do not need to set the color context, it is already set during
+                 sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Gradient ==
+             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                // (M4OSA_Void*)M4xVSS_kVideoEffectType_ColorRGB16;
+                /**< we don't use any function context */
+                /**
+                 * We do not need to set the color context, it is already set during
+                 sendCommand function */
+            }
+
+        }
+    }
+
+    /**
+     * Open the VSS 3GPP */
+    err = M4VSS3GPP_editOpen(pVssCtxt, xVSS_context->pCurrentEditSettings);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile:\
+             M4VSS3GPP_editOpen returned 0x%x\n",err);
+        M4VSS3GPP_editCleanUp(pVssCtxt);
+        return err;
+    }
+
+    /**
+     * Save VSS context to be able to close / free VSS later */
+    xVSS_context->pCurrentEditContext = pVssCtxt;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalCloseEditedFile(M4OSA_Context pContext)
+ *
+ * @brief    This function cleans up VSS
+ * @note
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalCloseEditedFile(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4VSS3GPP_EditContext pVssCtxt = xVSS_context->pCurrentEditContext;
+    M4OSA_ERR err;
+
+    if(xVSS_context->pCurrentEditContext != M4OSA_NULL)
+    {
+        /**
+         * Close the VSS 3GPP */
+        err = M4VSS3GPP_editClose(pVssCtxt);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalCloseEditedFile:\
+                 M4VSS3GPP_editClose returned 0x%x\n",err);
+            M4VSS3GPP_editCleanUp(pVssCtxt);
+            return err;
+        }
+
+        /**
+         * Free this VSS3GPP edition instance */
+        err = M4VSS3GPP_editCleanUp(pVssCtxt);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalCloseEditedFile: \
+                M4VSS3GPP_editCleanUp returned 0x%x\n",err);
+            return err;
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalGenerateAudioMixFile(M4OSA_Context pContext)
+ *
+ * @brief    This function prepares VSS for audio mixing
+ * @note    It takes its parameters from the BGM settings in the xVSS internal context
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ * @return    M4ERR_ALLOC: Allocation error (no more memory)
+ ******************************************************************************
+ */
+/***
+ * FB: the function has been modified since the structure used for the saving is now the
+ *  pCurrentEditSettings and not the pSettings
+ * This change has been added for the UTF support
+ * All the "xVSS_context->pSettings" has been replaced by "xVSS_context->pCurrentEditSettings"
+ ***/
+M4OSA_ERR M4xVSS_internalGenerateAudioMixFile(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4VSS3GPP_AudioMixingSettings* pAudioMixSettings;
+    M4VSS3GPP_AudioMixingContext pAudioMixingCtxt;
+    M4OSA_ERR err;
+    M4VIDEOEDITING_ClipProperties fileProperties;
+
+    /**
+     * Allocate audio mixing settings structure and fill it with BGM parameters */
+    pAudioMixSettings = (M4VSS3GPP_AudioMixingSettings*)M4OSA_malloc
+        (sizeof(M4VSS3GPP_AudioMixingSettings), M4VS, (M4OSA_Char *)"pAudioMixSettings");
+    if(pAudioMixSettings == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalGenerateAudioMixFile");
+        return M4ERR_ALLOC;
+    }
+
+    if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->FileType ==
+         M4VIDEOEDITING_kFileType_3GPP)
+    {
+        err = M4xVSS_internalGetProperties((M4OSA_Context)xVSS_context,
+             (M4OSA_Char*)xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile,
+                 &fileProperties);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalGenerateAudioMixFile:\
+                 impossible to retrieve audio BGM properties ->\
+                     reencoding audio background music", err);
+            fileProperties.AudioStreamType =
+                 xVSS_context->pCurrentEditSettings->xVSS.outputAudioFormat+1;
+                  /* To force BGM encoding */
+        }
+    }
+
+    pAudioMixSettings->bRemoveOriginal = M4OSA_FALSE;
+    pAudioMixSettings->AddedAudioFileType =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->FileType;
+    pAudioMixSettings->pAddedAudioTrackFile =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile;
+    pAudioMixSettings->uiAddVolume =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiAddVolume;
+
+    pAudioMixSettings->outputAudioFormat = xVSS_context->pSettings->xVSS.outputAudioFormat;
+    pAudioMixSettings->outputASF = xVSS_context->pSettings->xVSS.outputAudioSamplFreq;
+    pAudioMixSettings->outputAudioBitrate = xVSS_context->pSettings->xVSS.outputAudioBitrate;
+    pAudioMixSettings->uiSamplingFrequency =
+     xVSS_context->pSettings->xVSS.pBGMtrack->uiSamplingFrequency;
+    pAudioMixSettings->uiNumChannels = xVSS_context->pSettings->xVSS.pBGMtrack->uiNumChannels;
+
+    pAudioMixSettings->b_DuckingNeedeed =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->b_DuckingNeedeed;
+    pAudioMixSettings->fBTVolLevel =
+     (M4OSA_Float )xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiAddVolume/100;
+    pAudioMixSettings->InDucking_threshold =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->InDucking_threshold;
+    pAudioMixSettings->InDucking_lowVolume =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->lowVolume/100;
+    pAudioMixSettings->fPTVolLevel =
+     (M4OSA_Float)xVSS_context->pSettings->PTVolLevel/100;
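+    /* The volume levels above are given as percentages (0-100) and scaled down by 100 here */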
+    pAudioMixSettings->bLoop = xVSS_context->pSettings->xVSS.pBGMtrack->bLoop;
+
+    if(xVSS_context->pSettings->xVSS.bAudioMono)
+    {
+        pAudioMixSettings->outputNBChannels = 1;
+    }
+    else
+    {
+        pAudioMixSettings->outputNBChannels = 2;
+    }
+
+    /**
+     * Fill audio mix settings with BGM parameters */
+    pAudioMixSettings->uiBeginLoop =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiBeginLoop;
+    pAudioMixSettings->uiEndLoop =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiEndLoop;
+    pAudioMixSettings->uiAddCts =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiAddCts;
+
+    /**
+     * Output file of the audio mixer will be final file (audio mixing is the last step) */
+    pAudioMixSettings->pOutputClipFile = xVSS_context->pOutputFile;
+    pAudioMixSettings->pTemporaryFile = xVSS_context->pTemporaryFile;
+
+    /**
+     * Input file of the audio mixer is a temporary file containing all audio/video editions */
+    pAudioMixSettings->pOriginalClipFile = xVSS_context->pCurrentEditSettings->pOutputFile;
+
+    /**
+     * Save audio mixing settings pointer to be able to free it in
+     M4xVSS_internalCloseAudioMixedFile function */
+    xVSS_context->pAudioMixSettings = pAudioMixSettings;
+
+    /**
+     * Create a VSS 3GPP audio mixing instance */
+    err = M4VSS3GPP_audioMixingInit(&pAudioMixingCtxt, pAudioMixSettings,
+         xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
+
+    /**
+     * Save audio mixing context to be able to call audio mixing step function in
+      M4xVSS_step function */
+    xVSS_context->pAudioMixContext = pAudioMixingCtxt;
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalGenerateAudioMixFile:\
+             M4VSS3GPP_audioMixingInit returned 0x%x\n",err);
+        //M4VSS3GPP_audioMixingCleanUp(pAudioMixingCtxt);
+        return err;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalCloseAudioMixedFile(M4OSA_Context pContext)
+ *
+ * @brief    This function cleans up VSS for audio mixing
+ * @note
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalCloseAudioMixedFile(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+
+    /**
+     * Free this VSS3GPP audio mixing instance */
+    if(xVSS_context->pAudioMixContext != M4OSA_NULL)
+    {
+        err = M4VSS3GPP_audioMixingCleanUp(xVSS_context->pAudioMixContext);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalCloseAudioMixedFile:\
+                 M4VSS3GPP_audioMixingCleanUp returned 0x%x\n",err);
+            return err;
+        }
+    }
+
+    /**
+     * Free VSS audio mixing settings */
+    if(xVSS_context->pAudioMixSettings != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pAudioMixSettings);
+        xVSS_context->pAudioMixSettings = M4OSA_NULL;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalFreePreview(M4OSA_Context pContext)
+ *
+ * @brief    This function cleans up preview edition structure used to generate
+ *            preview.3gp file given to the VPS
+ * @note    It also free the preview structure given to the VPS
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalFreePreview(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_UInt8 i;
+
+    /**
+     * Free clip/transition settings */
+    for(i=0; i<xVSS_context->pCurrentEditSettings->uiClipNumber; i++)
+    {
+        M4xVSS_FreeClipSettings(xVSS_context->pCurrentEditSettings->pClipList[i]);
+
+        M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pClipList[i]));
+        xVSS_context->pCurrentEditSettings->pClipList[i] = M4OSA_NULL;
+
+        /**
+         * Because there is 1 less transition than clip number */
+        if(i != xVSS_context->pCurrentEditSettings->uiClipNumber-1)
+        {
+            M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pTransitionList[i]));
+            xVSS_context->pCurrentEditSettings->pTransitionList[i] = M4OSA_NULL;
+        }
+    }
+
+    /**
+     * Free clip/transition list */
+    if(xVSS_context->pCurrentEditSettings->pClipList != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pClipList));
+        xVSS_context->pCurrentEditSettings->pClipList = M4OSA_NULL;
+    }
+    if(xVSS_context->pCurrentEditSettings->pTransitionList != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pTransitionList));
+        xVSS_context->pCurrentEditSettings->pTransitionList = M4OSA_NULL;
+    }
+
+    /**
+     * Free output preview file path */
+    if(xVSS_context->pCurrentEditSettings->pOutputFile != M4OSA_NULL)
+    {
+        M4OSA_free(xVSS_context->pCurrentEditSettings->pOutputFile);
+        xVSS_context->pCurrentEditSettings->pOutputFile = M4OSA_NULL;
+    }
+
+    /**
+     * Free temporary preview file path */
+    if(xVSS_context->pCurrentEditSettings->pTemporaryFile != M4OSA_NULL)
+    {
+        M4OSA_fileExtraDelete(xVSS_context->pCurrentEditSettings->pTemporaryFile);
+        M4OSA_free(xVSS_context->pCurrentEditSettings->pTemporaryFile);
+        xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
+    }
+
+    /**
+     * Free "local" BGM settings */
+    if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack != M4OSA_NULL)
+    {
+        if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL)
+        {
+            M4OSA_free(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile);
+            xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+        }
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack);
+        xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack = M4OSA_NULL;
+    }
+
+    /**
+     * Free current edit settings structure */
+    if(xVSS_context->pCurrentEditSettings != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCurrentEditSettings);
+        xVSS_context->pCurrentEditSettings = M4OSA_NULL;
+    }
+
+    /**
+     * Free preview effects given to application */
+    if(M4OSA_NULL != xVSS_context->pPreviewSettings->Effects)
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pPreviewSettings->Effects);
+        xVSS_context->pPreviewSettings->Effects = M4OSA_NULL;
+        xVSS_context->pPreviewSettings->nbEffects = 0;
+    }
+
+    return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalFreeSaving(M4OSA_Context pContext)
+ *
+ * @brief    This function cleans up the edit settings structure used during saving to
+ *            generate the output 3gp file given to the VPS
+ * @note
+ * @param    pContext    (IN) The integrator's own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalFreeSaving(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_UInt8 i;
+
+    if(xVSS_context->pCurrentEditSettings != M4OSA_NULL)
+    {
+        /**
+         * Free clip/transition settings */
+        for(i=0; i<xVSS_context->pCurrentEditSettings->uiClipNumber; i++)
+        {
+            M4xVSS_FreeClipSettings(xVSS_context->pCurrentEditSettings->pClipList[i]);
+
+            M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pClipList[i]));
+            xVSS_context->pCurrentEditSettings->pClipList[i] = M4OSA_NULL;
+
+            /**
+             * Because there is one transition fewer than the number of clips */
+            if(i != xVSS_context->pCurrentEditSettings->uiClipNumber-1)
+            {
+                M4OSA_free((M4OSA_MemAddr32)\
+                    (xVSS_context->pCurrentEditSettings->pTransitionList[i]));
+                xVSS_context->pCurrentEditSettings->pTransitionList[i] = M4OSA_NULL;
+            }
+        }
+
+        /**
+         * Free clip/transition list */
+        if(xVSS_context->pCurrentEditSettings->pClipList != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pClipList));
+            xVSS_context->pCurrentEditSettings->pClipList = M4OSA_NULL;
+        }
+        if(xVSS_context->pCurrentEditSettings->pTransitionList != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pTransitionList));
+            xVSS_context->pCurrentEditSettings->pTransitionList = M4OSA_NULL;
+        }
+
+        if(xVSS_context->pCurrentEditSettings->Effects != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->Effects));
+            xVSS_context->pCurrentEditSettings->Effects = M4OSA_NULL;
+            xVSS_context->pCurrentEditSettings->nbEffects = 0;
+        }
+
+        /**
+         * Free output saving file path */
+        if(xVSS_context->pCurrentEditSettings->pOutputFile != M4OSA_NULL)
+        {
+            if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack != M4OSA_NULL)
+            {
+                M4OSA_fileExtraDelete(xVSS_context->pCurrentEditSettings->pOutputFile);
+                M4OSA_free(xVSS_context->pCurrentEditSettings->pOutputFile);
+            }
+            if(xVSS_context->pOutputFile != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+            xVSS_context->pCurrentEditSettings->pOutputFile = M4OSA_NULL;
+        }
+
+        /**
+         * Free temporary saving file path */
+        if(xVSS_context->pCurrentEditSettings->pTemporaryFile != M4OSA_NULL)
+        {
+            M4OSA_fileExtraDelete(xVSS_context->pCurrentEditSettings->pTemporaryFile);
+            M4OSA_free(xVSS_context->pCurrentEditSettings->pTemporaryFile);
+            xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
+        }
+
+        /**
+         * Free "local" BGM settings */
+        if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack != M4OSA_NULL)
+        {
+            if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL)
+            {
+                M4OSA_free(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile);
+                xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+            }
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack);
+            xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack = M4OSA_NULL;
+        }
+
+        /**
+         * Free current edit settings structure */
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCurrentEditSettings);
+        xVSS_context->pCurrentEditSettings = M4OSA_NULL;
+    }
+
+    return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_freeSettings(M4VSS3GPP_EditSettings* pSettings)
+ *
+ * @brief    This function cleans up an M4VSS3GPP_EditSettings structure
+ * @note
+ * @param    pSettings    (IN) Pointer to the M4VSS3GPP_EditSettings structure to free
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_freeSettings(M4VSS3GPP_EditSettings* pSettings)
+{
+    M4OSA_UInt8 i,j;
+
+    /**
+     * For each clip ... */
+    for(i=0; i<pSettings->uiClipNumber; i++)
+    {
+        /**
+         * ... free clip settings */
+        if(pSettings->pClipList[i] != M4OSA_NULL)
+        {
+            M4xVSS_FreeClipSettings(pSettings->pClipList[i]);
+
+            M4OSA_free((M4OSA_MemAddr32)(pSettings->pClipList[i]));
+            pSettings->pClipList[i] = M4OSA_NULL;
+        }
+
+        /**
+         * ... free transition settings */
+        if(i < pSettings->uiClipNumber-1) /* Because there is one transition fewer than clips */
+        {
+            if(pSettings->pTransitionList[i] != M4OSA_NULL)
+            {
+                switch (pSettings->pTransitionList[i]->VideoTransitionType)
+                {
+                    case M4xVSS_kVideoTransitionType_AlphaMagic:
+
+                        /**
+                         * In case of an Alpha Magic transition,
+                         * some extra parameters need to be freed */
+                        if(pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt\
+                             != M4OSA_NULL)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)(((M4xVSS_internal_AlphaMagicSettings*)\
+                                pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt)->\
+                                    pPlane->pac_data));
+                            ((M4xVSS_internal_AlphaMagicSettings*)pSettings->pTransitionList[i\
+                                ]->pExtVideoTransitionFctCtxt)->pPlane->pac_data = M4OSA_NULL;
+
+                            M4OSA_free((M4OSA_MemAddr32)(((M4xVSS_internal_AlphaMagicSettings*)\
+                                pSettings->pTransitionList[i]->\
+                                    pExtVideoTransitionFctCtxt)->pPlane));
+                            ((M4xVSS_internal_AlphaMagicSettings*)pSettings->pTransitionList[i]\
+                                ->pExtVideoTransitionFctCtxt)->pPlane = M4OSA_NULL;
+
+                            M4OSA_free((M4OSA_MemAddr32)(pSettings->pTransitionList[i]->\
+                                pExtVideoTransitionFctCtxt));
+                            pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt = M4OSA_NULL;
+
+                            for(j=i+1;j<pSettings->uiClipNumber-1;j++)
+                            {
+                                if(pSettings->pTransitionList[j] != M4OSA_NULL)
+                                {
+                                    if(pSettings->pTransitionList[j]->VideoTransitionType ==
+                                     M4xVSS_kVideoTransitionType_AlphaMagic)
+                                    {
+                                        M4OSA_UInt32 pCmpResult=0;
+                                        M4OSA_chrCompare(pSettings->pTransitionList[i]->\
+                                            xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                                pAlphaFilePath,
+                                                pSettings->pTransitionList[j]->\
+                                                xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                                pAlphaFilePath, (M4OSA_Int32 *)&pCmpResult);
+                                        if(pCmpResult == 0)
+                                        {
+                                            /* Free the extra internal alpha magic structure
+                                            and set it to NULL to avoid re-freeing it */
+                                            M4OSA_free((M4OSA_MemAddr32)(pSettings->\
+                                                pTransitionList[j]->pExtVideoTransitionFctCtxt));
+                                            pSettings->pTransitionList[j]->\
+                                                pExtVideoTransitionFctCtxt = M4OSA_NULL;
+                                        }
+                                    }
+                                }
+                            }
+                        }
+
+                        if(pSettings->pTransitionList[i]->\
+                            xVSS.transitionSpecific.pAlphaMagicSettings != M4OSA_NULL)
+                        {
+                            if(pSettings->pTransitionList[i]->\
+                                xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                    pAlphaFilePath != M4OSA_NULL)
+                            {
+                                M4OSA_free((M4OSA_MemAddr32)pSettings->\
+                                    pTransitionList[i]->\
+                                        xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                            pAlphaFilePath);
+                                pSettings->pTransitionList[i]->\
+                                    xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                        pAlphaFilePath = M4OSA_NULL;
+                            }
+                            M4OSA_free((M4OSA_MemAddr32)pSettings->pTransitionList[i]->\
+                                xVSS.transitionSpecific.pAlphaMagicSettings);
+                            pSettings->pTransitionList[i]->\
+                                xVSS.transitionSpecific.pAlphaMagicSettings = M4OSA_NULL;
+
+                        }
+
+                    break;
+
+
+                    case M4xVSS_kVideoTransitionType_SlideTransition:
+                        if (M4OSA_NULL != pSettings->pTransitionList[i]->\
+                            xVSS.transitionSpecific.pSlideTransitionSettings)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)pSettings->pTransitionList[i]->\
+                                xVSS.transitionSpecific.pSlideTransitionSettings);
+                            pSettings->pTransitionList[i]->\
+                                xVSS.transitionSpecific.pSlideTransitionSettings = M4OSA_NULL;
+                        }
+                        if(pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt != M4OSA_NULL)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)(pSettings->pTransitionList[i]->\
+                                pExtVideoTransitionFctCtxt));
+                            pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt = M4OSA_NULL;
+                        }
+                    break;
+                    default:
+                    break;
+
+                }
+                /**
+                 * Free transition settings structure */
+                M4OSA_free((M4OSA_MemAddr32)(pSettings->pTransitionList[i]));
+                pSettings->pTransitionList[i] = M4OSA_NULL;
+            }
+        }
+    }
+
+    /**
+     * Free clip list */
+    if(pSettings->pClipList != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)(pSettings->pClipList));
+        pSettings->pClipList = M4OSA_NULL;
+    }
+
+    /**
+     * Free transition list */
+    if(pSettings->pTransitionList != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)(pSettings->pTransitionList));
+        pSettings->pTransitionList = M4OSA_NULL;
+    }
+
+    /**
+     * RC: Free effects list */
+    if(pSettings->Effects != M4OSA_NULL)
+    {
+        for(i=0; i<pSettings->nbEffects; i++)
+        {
+            /**
+             * For each clip, free framing structure if needed */
+            if(pSettings->Effects[i].VideoEffectType == M4xVSS_kVideoEffectType_Framing
+                || pSettings->Effects[i].VideoEffectType == M4xVSS_kVideoEffectType_Text)
+            {
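+                /* The framing context layout depends on DECODE_GIF_ON_SAVING: either a single
+                 * M4xVSS_FramingContext whose first/last frame sub-contexts are freed below,
+                 * or a linked list of M4xVSS_FramingStruct walked via pNext until it wraps
+                 * back to the first element */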
+#ifdef DECODE_GIF_ON_SAVING
+                M4xVSS_FramingContext* framingCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
+#else
+                M4xVSS_FramingStruct* framingCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
+                M4xVSS_FramingStruct* framingCtx_save;
+                M4xVSS_Framing3102Struct* framingCtx_first = framingCtx;
+#endif
+
+#ifdef DECODE_GIF_ON_SAVING
+                if(framingCtx != M4OSA_NULL) /* Bugfix 1.2.0: crash when trying to free a
+                 non-existent pointer */
+                {
+                    if(framingCtx->aFramingCtx != M4OSA_NULL)
+                    {
+                        if(pSettings->Effects[i].xVSS.pFramingBuffer == M4OSA_NULL)
+                        {
+                            if(framingCtx->aFramingCtx->FramingRgb != M4OSA_NULL)
+                            {
+                                M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx->\
+                                    FramingRgb->pac_data);
+                                framingCtx->aFramingCtx->FramingRgb->pac_data = M4OSA_NULL;
+                                M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx->FramingRgb);
+                                framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
+                            }
+                        }
+                        if(framingCtx->aFramingCtx->FramingYuv != M4OSA_NULL)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx->\
+                                FramingYuv[0].pac_data);
+                            framingCtx->aFramingCtx->FramingYuv[0].pac_data = M4OSA_NULL;
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx->\
+                                FramingYuv[1].pac_data);
+                            framingCtx->aFramingCtx->FramingYuv[1].pac_data = M4OSA_NULL;
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx->\
+                                FramingYuv[2].pac_data);
+                            framingCtx->aFramingCtx->FramingYuv[2].pac_data = M4OSA_NULL;
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx->FramingYuv);
+                            framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
+                        }
+                        M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx);
+                        framingCtx->aFramingCtx = M4OSA_NULL;
+                    }
+                    if(framingCtx->aFramingCtx_last != M4OSA_NULL)
+                    {
+                        if(pSettings->Effects[i].xVSS.pFramingBuffer == M4OSA_NULL)
+                        {
+                            if(framingCtx->aFramingCtx_last->FramingRgb != M4OSA_NULL)
+                            {
+                                M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx_last->\
+                                    FramingRgb->pac_data);
+                                framingCtx->aFramingCtx_last->FramingRgb->pac_data = M4OSA_NULL;
+                                M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx_last->\
+                                    FramingRgb);
+                                framingCtx->aFramingCtx_last->FramingRgb = M4OSA_NULL;
+                            }
+                        }
+                        if(framingCtx->aFramingCtx_last->FramingYuv != M4OSA_NULL)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx_last->\
+                                FramingYuv[0].pac_data);
+                            framingCtx->aFramingCtx_last->FramingYuv[0].pac_data = M4OSA_NULL;
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx_last->FramingYuv);
+                            framingCtx->aFramingCtx_last->FramingYuv = M4OSA_NULL;
+                        }
+                        M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx_last);
+                        framingCtx->aFramingCtx_last = M4OSA_NULL;
+                    }
+                    if(framingCtx->pEffectFilePath != M4OSA_NULL)
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)framingCtx->pEffectFilePath);
+                        framingCtx->pEffectFilePath = M4OSA_NULL;
+                    }
+                    /* In case these are still allocated */
+                    if(framingCtx->pSPSContext != M4OSA_NULL)
+                    {
+                    //    M4SPS_destroy(framingCtx->pSPSContext);
+                        framingCtx->pSPSContext = M4OSA_NULL;
+#if 0
+                        if(framingCtx->inputStream.data_buffer  != M4OSA_NULL)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->inputStream.data_buffer);
+                            framingCtx->inputStream.data_buffer = M4OSA_NULL;
+                        }
+#endif
+                    }
+                    /*Alpha blending structure*/
+                    if(framingCtx->alphaBlendingStruct  != M4OSA_NULL)
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)framingCtx->alphaBlendingStruct);
+                        framingCtx->alphaBlendingStruct = M4OSA_NULL;
+                    }
+
+                    M4OSA_free((M4OSA_MemAddr32)framingCtx);
+                    framingCtx = M4OSA_NULL;
+                }
+#else
+                do
+                {
+                    if(framingCtx != M4OSA_NULL) /* Bugfix 1.2.0: crash when trying to free a
+                    non-existent pointer */
+                    {
+                        if(pSettings->Effects[i].xVSS.pFramingBuffer == M4OSA_NULL)
+                        {
+                            if(framingCtx->FramingRgb != M4OSA_NULL)
+                            {
+                                M4OSA_free((M4OSA_MemAddr32)framingCtx->FramingRgb->pac_data);
+                                framingCtx->FramingRgb->pac_data = M4OSA_NULL;
+                                M4OSA_free((M4OSA_MemAddr32)framingCtx->FramingRgb);
+                                framingCtx->FramingRgb = M4OSA_NULL;
+                            }
+                        }
+                        if(framingCtx->FramingYuv != M4OSA_NULL)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->FramingYuv[0].pac_data);
+                            framingCtx->FramingYuv[0].pac_data = M4OSA_NULL;
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->FramingYuv);
+                            framingCtx->FramingYuv = M4OSA_NULL;
+                        }
+                        framingCtx_save = framingCtx->pNext;
+                        M4OSA_free((M4OSA_MemAddr32)framingCtx);
+                        framingCtx = M4OSA_NULL;
+                        framingCtx = framingCtx_save;
+                    }
+                    else
+                    {
+                        /*FB: bug fix P4ME00003002*/
+                        break;
+                    }
+                } while(framingCtx_first != framingCtx);
+#endif
+            }
+            else if( M4xVSS_kVideoEffectType_Fifties == pSettings->Effects[i].VideoEffectType)
+            {
+                /* Free Fifties context */
+                M4xVSS_FiftiesStruct* FiftiesCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
+
+                if(FiftiesCtx != M4OSA_NULL)
+                {
+                    M4OSA_free((M4OSA_MemAddr32)FiftiesCtx);
+                    FiftiesCtx = M4OSA_NULL;
+                }
+
+            }
+            else if( M4xVSS_kVideoEffectType_ColorRGB16 == pSettings->Effects[i].VideoEffectType
+                || M4xVSS_kVideoEffectType_BlackAndWhite == pSettings->Effects[i].VideoEffectType
+                || M4xVSS_kVideoEffectType_Pink == pSettings->Effects[i].VideoEffectType
+                || M4xVSS_kVideoEffectType_Green == pSettings->Effects[i].VideoEffectType
+                || M4xVSS_kVideoEffectType_Sepia == pSettings->Effects[i].VideoEffectType
+                || M4xVSS_kVideoEffectType_Negative== pSettings->Effects[i].VideoEffectType
+                || M4xVSS_kVideoEffectType_Gradient== pSettings->Effects[i].VideoEffectType)
+            {
+                /* Free Color context */
+                M4xVSS_ColorStruct* ColorCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
+
+                if(ColorCtx != M4OSA_NULL)
+                {
+                    M4OSA_free((M4OSA_MemAddr32)ColorCtx);
+                    ColorCtx = M4OSA_NULL;
+                }
+            }
+
+            /* Free simple fields */
+            if(pSettings->Effects[i].xVSS.pFramingFilePath != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pSettings->Effects[i].xVSS.pFramingFilePath);
+                pSettings->Effects[i].xVSS.pFramingFilePath = M4OSA_NULL;
+            }
+            if(pSettings->Effects[i].xVSS.pFramingBuffer != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pSettings->Effects[i].xVSS.pFramingBuffer);
+                pSettings->Effects[i].xVSS.pFramingBuffer = M4OSA_NULL;
+            }
+            if(pSettings->Effects[i].xVSS.pTextBuffer != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pSettings->Effects[i].xVSS.pTextBuffer);
+                pSettings->Effects[i].xVSS.pTextBuffer = M4OSA_NULL;
+            }
+        }
+        M4OSA_free((M4OSA_MemAddr32)pSettings->Effects);
+        pSettings->Effects = M4OSA_NULL;
+    }
+
+    return M4NO_ERROR;
+}
+
+M4OSA_ERR M4xVSS_freeCommand(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+//    M4OSA_UInt8 i,j;
+
+    /* Free "local" BGM settings */
+    if(xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL)
+    {
+        if(xVSS_context->pSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL)
+        {
+            M4OSA_free(xVSS_context->pSettings->xVSS.pBGMtrack->pFile);
+            xVSS_context->pSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+        }
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings->xVSS.pBGMtrack);
+        xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
+    }
+#if 0
+    /* Parse transitions to free internal "alpha magic" settings structure */
+    /**
+     * In case there is twice or more the same Alpha Magic effect, the effect context
+     * may be freed twice or more.
+     * So, we parse all remaining transition settings to know if the context can be
+     * "re-freed", and if yes, we put its context to NULL to avoid freeing it again */
+    for(i=0; i<xVSS_context->pSettings->uiClipNumber-1; i++)
+    {
+        if(xVSS_context->pSettings->pTransitionList[i] != M4OSA_NULL)
+        {
+            switch (xVSS_context->pSettings->pTransitionList[i]->VideoTransitionType)
+            {
+                case M4xVSS_kVideoTransitionType_AlphaMagic:
+                    /**
+                     * In case of Alpha Magic transition, some extra parameters need to be freed */
+                    if(xVSS_context->pSettings->pTransitionList[i]->\
+                        pExtVideoTransitionFctCtxt != M4OSA_NULL)
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)(((M4xVSS_internal_AlphaMagicSettings*)\
+                            xVSS_context->pSettings->pTransitionList[i]->\
+                                pExtVideoTransitionFctCtxt)->pPlane->pac_data));
+                        ((M4xVSS_internal_AlphaMagicSettings*)xVSS_context->\
+                            pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt)->\
+                                pPlane->pac_data = M4OSA_NULL;
+
+                        M4OSA_free((M4OSA_MemAddr32)(((M4xVSS_internal_AlphaMagicSettings*)\
+                            xVSS_context->pSettings->pTransitionList[i]->\
+                                pExtVideoTransitionFctCtxt)->pPlane));
+                        ((M4xVSS_internal_AlphaMagicSettings*)xVSS_context->\
+                            pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt)->\
+                                pPlane = M4OSA_NULL;
+
+                        M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pSettings->\
+                            pTransitionList[i]->pExtVideoTransitionFctCtxt));
+                        xVSS_context->pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt
+                             = M4OSA_NULL;
+
+                        for(j=i+1;j<xVSS_context->pSettings->uiClipNumber-1;j++)
+                        {
+                            if(xVSS_context->pSettings->pTransitionList[j] != M4OSA_NULL)
+                            {
+                                if(xVSS_context->pSettings->pTransitionList[j]->\
+                                    VideoTransitionType == M4xVSS_kVideoTransitionType_AlphaMagic)
+                                {
+                                    M4OSA_UInt32 pCmpResult=0;
+                                    M4OSA_chrCompare(xVSS_context->pSettings->pTransitionList[i]->\
+                                        xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                            pAlphaFilePath,
+                                        xVSS_context->pSettings->pTransitionList[j]->\
+                                            xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                                pAlphaFilePath, &pCmpResult);
+                                    if(pCmpResult == 0)
+                                        {
+                                        /* Free extra internal alpha magic structure and put it
+                                         to NULL to avoid refreeing it */
+                                        M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pSettings->\
+                                            pTransitionList[j]->pExtVideoTransitionFctCtxt));
+                                        xVSS_context->pSettings->pTransitionList[j]->\
+                                            pExtVideoTransitionFctCtxt = M4OSA_NULL;
+                                    }
+                                }
+                            }
+                        }
+                    }
+                break;
+
+                case M4xVSS_kVideoTransitionType_SlideTransition:
+                    if(xVSS_context->pSettings->pTransitionList[i]->\
+                        pExtVideoTransitionFctCtxt != M4OSA_NULL)
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pSettings->\
+                            pTransitionList[i]->pExtVideoTransitionFctCtxt));
+                        xVSS_context->pSettings->pTransitionList[i]->\
+                            pExtVideoTransitionFctCtxt = M4OSA_NULL;
+                    }
+                break;
+            }
+        }
+    }
+#endif
+
+    M4xVSS_freeSettings(xVSS_context->pSettings);
+
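+    /* Walk the Pto3GPP (picture-to-3gp) parameter list: free the input/output/temporary file
+     * paths, deleting the corresponding temporary files on disk, then free each list element */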
+    if(xVSS_context->pPTo3GPPparamsList != M4OSA_NULL)
+    {
+        M4xVSS_Pto3GPP_params* pParams = xVSS_context->pPTo3GPPparamsList;
+        M4xVSS_Pto3GPP_params* pParams_sauv;
+
+        while(pParams != M4OSA_NULL)
+        {
+            if(pParams->pFileIn != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pParams->pFileIn);
+                pParams->pFileIn = M4OSA_NULL;
+            }
+            if(pParams->pFileOut != M4OSA_NULL)
+            {
+                /* Delete temporary file */
+                M4OSA_fileExtraDelete(pParams->pFileOut);
+                M4OSA_free((M4OSA_MemAddr32)pParams->pFileOut);
+                pParams->pFileOut = M4OSA_NULL;
+            }
+            if(pParams->pFileTemp != M4OSA_NULL)
+            {
+                /* Delete temporary file */
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+                M4OSA_fileExtraDelete(pParams->pFileTemp);
+                M4OSA_free((M4OSA_MemAddr32)pParams->pFileTemp);
+#endif/*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+                pParams->pFileTemp = M4OSA_NULL;
+            }
+            pParams_sauv = pParams;
+            pParams = pParams->pNext;
+            M4OSA_free((M4OSA_MemAddr32)pParams_sauv);
+            pParams_sauv = M4OSA_NULL;
+        }
+    }
+
+    if(xVSS_context->pMCSparamsList != M4OSA_NULL)
+    {
+        M4xVSS_MCS_params* pParams = xVSS_context->pMCSparamsList;
+        M4xVSS_MCS_params* pParams_sauv;
+
+        while(pParams != M4OSA_NULL)
+        {
+            if(pParams->pFileIn != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pParams->pFileIn);
+                pParams->pFileIn = M4OSA_NULL;
+            }
+            if(pParams->pFileOut != M4OSA_NULL)
+            {
+                /* Delete temporary file */
+                M4OSA_fileExtraDelete(pParams->pFileOut);
+                M4OSA_free((M4OSA_MemAddr32)pParams->pFileOut);
+                pParams->pFileOut = M4OSA_NULL;
+            }
+            if(pParams->pFileTemp != M4OSA_NULL)
+            {
+                /* Delete temporary file */
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+                M4OSA_fileExtraDelete(pParams->pFileTemp);
+                M4OSA_free((M4OSA_MemAddr32)pParams->pFileTemp);
+#endif/*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+                pParams->pFileTemp = M4OSA_NULL;
+            }
+            pParams_sauv = pParams;
+            pParams = pParams->pNext;
+            M4OSA_free((M4OSA_MemAddr32)pParams_sauv);
+            pParams_sauv = M4OSA_NULL;
+        }
+    }
+
+    if(xVSS_context->pcmPreviewFile != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pcmPreviewFile);
+        xVSS_context->pcmPreviewFile = M4OSA_NULL;
+    }
+    if(xVSS_context->pSettings->pOutputFile != M4OSA_NULL
+        && xVSS_context->pOutputFile != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings->pOutputFile);
+        xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+        xVSS_context->pOutputFile = M4OSA_NULL;
+    }
+
+    /* Reinit all context variables */
+    xVSS_context->previousClipNumber = 0;
+    xVSS_context->editingStep = M4xVSS_kMicroStateEditing;
+    xVSS_context->analyseStep = M4xVSS_kMicroStateAnalysePto3GPP;
+    xVSS_context->pPTo3GPPparamsList = M4OSA_NULL;
+    xVSS_context->pPTo3GPPcurrentParams = M4OSA_NULL;
+    xVSS_context->pMCSparamsList = M4OSA_NULL;
+    xVSS_context->pMCScurrentParams = M4OSA_NULL;
+    xVSS_context->tempFileIndex = 0;
+    xVSS_context->targetedTimescale = 0;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalGetProperties(M4OSA_Context pContext,
+ *                                    M4OSA_Char* pFile,
+ *                                    M4VIDEOEDITING_ClipProperties *pFileProperties)
+ *
+ * @brief    This function retrieves the properties of an input 3GP file using the MCS
+ * @note
+ * @param    pContext        (IN) The integrator's own context
+ * @param    pFile            (IN) 3GP file to analyse
+ * @param    pFileProperties    (IN/OUT) Pointer to a structure that will contain
+ *                            the 3GP file properties
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalGetProperties(M4OSA_Context pContext, M4OSA_Char* pFile,
+                                       M4VIDEOEDITING_ClipProperties *pFileProperties)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+    M4MCS_Context mcs_context;
+
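+    /* Short-lived MCS session: init, open the clip in "normal" mode so that the exact
+     * duration is computed, read its properties, then abort to release the MCS context */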
+    err = M4MCS_init(&mcs_context, xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalGetProperties: Error in M4MCS_init: 0x%x", err);
+        return err;
+    }
+
+    /*open the MCS in the "normal opening" mode to retrieve the exact duration*/
+    err = M4MCS_open_normalMode(mcs_context, pFile, M4VIDEOEDITING_kFileType_3GPP,
+        M4OSA_NULL, M4OSA_NULL);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalGetProperties: Error in M4MCS_open: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    err = M4MCS_getInputFileProperties(mcs_context, pFileProperties);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_getInputFileProperties: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    err = M4MCS_abort(mcs_context);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalGetProperties: Error in M4MCS_abort: 0x%x", err);
+        return err;
+    }
+
+    return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalGetTargetedTimeScale(M4OSA_Context pContext,
+ *                                                M4VSS3GPP_EditSettings* pSettings,
+ *                                                M4OSA_UInt32* pTargetedTimeScale)
+ *
+ * @brief    This function retrieves the targeted time scale
+ * @note
+ * @param    pContext            (IN)    The integrator's own context
+ * @param    pSettings            (IN)    Pointer to the edit settings to analyse
+ * @param    pTargetedTimeScale    (OUT)    Targeted time scale
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalGetTargetedTimeScale(M4OSA_Context pContext,
+                                                 M4VSS3GPP_EditSettings* pSettings,
+                                                  M4OSA_UInt32* pTargetedTimeScale)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+    M4OSA_UInt32 totalDuration = 0;
+    M4OSA_UInt8 i = 0;
+    M4OSA_UInt32 tempTimeScale = 0, tempDuration = 0;
+
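+    /* Strategy: keep the video timescale of the MPEG-4/3GPP clip with the longest effective
+     * (cut) duration; a still-image (ARGB8888) input forces 30, and 30 is also enforced as a
+     * minimum so the shell encoder does not loop forever on a too-small timescale */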
+    for(i=0;i<pSettings->uiClipNumber;i++)
+    {
+        /*search timescale only in mpeg4 case*/
+        if(pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_3GPP
+            || pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_MP4)
+        {
+            M4VIDEOEDITING_ClipProperties fileProperties;
+
+            /*UTF conversion support*/
+            M4OSA_Char* pDecodedPath = M4OSA_NULL;
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            pDecodedPath = pSettings->pClipList[i]->pFile;
+
+            if(xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
+                && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+            {
+                M4OSA_UInt32 length = 0;
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                     (M4OSA_Void*) pSettings->pClipList[i]->pFile,
+                        (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
+                             &length);
+                if(err != M4NO_ERROR)
+                {
+                    M4OSA_TRACE1_1("M4xVSS_Init:\
+                         M4xVSS_internalConvertToUTF8 returns err: 0x%x",err);
+                    return err;
+                }
+                pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+
+            /*End of the conversion: use the decoded path*/
+            err = M4xVSS_internalGetProperties(xVSS_context, pDecodedPath, &fileProperties);
+
+            /*get input file properties*/
+            /*err = M4xVSS_internalGetProperties(xVSS_context, pSettings->\
+                pClipList[i]->pFile, &fileProperties);*/
+            if(M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalGetTargetedTimeScale:\
+                     M4xVSS_internalGetProperties returned: 0x%x", err);
+                return err;
+            }
+            if(fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+            {
+                if(pSettings->pClipList[i]->uiEndCutTime > 0)
+                {
+                    if(tempDuration < (pSettings->pClipList[i]->uiEndCutTime \
+                        - pSettings->pClipList[i]->uiBeginCutTime))
+                    {
+                        tempTimeScale = fileProperties.uiVideoTimeScale;
+                        tempDuration = (pSettings->pClipList[i]->uiEndCutTime\
+                             - pSettings->pClipList[i]->uiBeginCutTime);
+                    }
+                }
+                else
+                {
+                    if(tempDuration < (fileProperties.uiClipDuration\
+                         - pSettings->pClipList[i]->uiBeginCutTime))
+                    {
+                        tempTimeScale = fileProperties.uiVideoTimeScale;
+                        tempDuration = (fileProperties.uiClipDuration\
+                             - pSettings->pClipList[i]->uiBeginCutTime);
+                    }
+                }
+            }
+        }
+        if(pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_ARGB8888)
+        {
+            /*the timescale is 30 for PTO3GP*/
+            *pTargetedTimeScale = 30;
+            return M4NO_ERROR;
+
+        }
+    }
+
+    if(tempTimeScale >= 30)/*Define a minimum time scale; otherwise, if the timescale is too
+    low, there will be an infinite loop in the shell encoder*/
+    {
+        *pTargetedTimeScale = tempTimeScale;
+    }
+    else
+    {
+        *pTargetedTimeScale = 30;
+    }
+
+    return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype    M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
+ *                                                    M4VIFI_ImagePlane *PlaneIn,
+ *                                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                                    M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief    This function applies a color effect on an input YUV420 planar frame
+ * @note
+ * @param    pFunctionContext (IN) Contains which color to apply (not very clean ...)
+ * @param    PlaneIn            (IN) Input YUV420 planar
+ * @param    PlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-100)
+ * @param    uiEffectKind    (IN) Unused
+ *
+ * @return    M4VIFI_OK:    No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
+                                             M4VIFI_ImagePlane *PlaneIn,
+                                             M4VIFI_ImagePlane *PlaneOut,
+                                             M4VSS3GPP_ExternalProgress *pProgress,
+                                             M4OSA_UInt32 uiEffectKind)
+{
+    M4VIFI_Int32 plane_number;
+    M4VIFI_UInt32 i,j;
+    M4VIFI_UInt8 *p_buf_src, *p_buf_dest;
+    M4xVSS_ColorStruct* ColorContext = (M4xVSS_ColorStruct*)pFunctionContext;
+
+    for (plane_number = 0; plane_number < 3; plane_number++)
+    {
+        p_buf_src = &(PlaneIn[plane_number].pac_data[PlaneIn[plane_number].u_topleft]);
+        p_buf_dest = &(PlaneOut[plane_number].pac_data[PlaneOut[plane_number].u_topleft]);
+        for (i = 0; i < PlaneOut[plane_number].u_height; i++)
+        {
+            /**
+             * Chrominance */
+            if(plane_number==1 || plane_number==2)
+            {
+                //switch ((M4OSA_UInt32)pFunctionContext)
+                // commented because a structure for the effects context exist
+                switch (ColorContext->colorEffectType)
+                {
+                    case M4xVSS_kVideoEffectType_BlackAndWhite:
+                        M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                         PlaneIn[plane_number].u_width, 128);
+                        break;
+                    case M4xVSS_kVideoEffectType_Pink:
+                        M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                         PlaneIn[plane_number].u_width, 255);
+                        break;
+                    case M4xVSS_kVideoEffectType_Green:
+                        M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                         PlaneIn[plane_number].u_width, 0);
+                        break;
+                    case M4xVSS_kVideoEffectType_Sepia:
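+                        /* Sepia: luma is copied unchanged (default case of the luminance
+                         * branch below); forcing U=117 and V=139 gives a warm brownish tint */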
+                        if(plane_number==1)
+                        {
+                            M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                             PlaneIn[plane_number].u_width, 117);
+                        }
+                        else
+                        {
+                            M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                             PlaneIn[plane_number].u_width, 139);
+                        }
+                        break;
+                    case M4xVSS_kVideoEffectType_Negative:
+                        M4OSA_memcpy((M4OSA_MemAddr8)p_buf_dest,
+                         (M4OSA_MemAddr8)p_buf_src ,PlaneOut[plane_number].u_width);
+                        break;
+
+                    case M4xVSS_kVideoEffectType_ColorRGB16:
+                        {
+                            M4OSA_UInt16 r = 0,g = 0,b = 0,y = 0,u = 0,v = 0;
+
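+                            /* RGB565 layout: bits 15-11 = red, bits 10-5 = green,
+                               bits 4-0 = blue (see the masks below) */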
+                            /*first get the r, g, b*/
+                            b = (ColorContext->rgb16ColorData &  0x001f);
+                            g = (ColorContext->rgb16ColorData &  0x07e0)>>5;
+                            r = (ColorContext->rgb16ColorData &  0xf800)>>11;
+
+                            /*keep y, but replace u and v*/
+                            if(plane_number==1)
+                            {
+                                /*then convert to u*/
+                                u = U16(r, g, b);
+                                M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                                 PlaneIn[plane_number].u_width, (M4OSA_UInt8)u);
+                            }
+                            if(plane_number==2)
+                            {
+                                /*then convert to v*/
+                                v = V16(r, g, b);
+                                M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                                 PlaneIn[plane_number].u_width, (M4OSA_UInt8)v);
+                            }
+                        }
+                        break;
+                    case M4xVSS_kVideoEffectType_Gradient:
+                        {
+                            M4OSA_UInt16 r = 0,g = 0,b = 0,y = 0,u = 0,v = 0;
+
+                            /*first get the r, g, b*/
+                            b = (ColorContext->rgb16ColorData &  0x001f);
+                            g = (ColorContext->rgb16ColorData &  0x07e0)>>5;
+                            r = (ColorContext->rgb16ColorData &  0xf800)>>11;
+
+                            /*for color gradation*/
+                            b = (M4OSA_UInt16)( b - ((b*i)/PlaneIn[plane_number].u_height));
+                            g = (M4OSA_UInt16)(g - ((g*i)/PlaneIn[plane_number].u_height));
+                            r = (M4OSA_UInt16)(r - ((r*i)/PlaneIn[plane_number].u_height));
+
+                            /*keep y, but replace u and v*/
+                            if(plane_number==1)
+                            {
+                                /*then convert to u*/
+                                u = U16(r, g, b);
+                                M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                                 PlaneIn[plane_number].u_width, (M4OSA_UInt8)u);
+                            }
+                            if(plane_number==2)
+                            {
+                                /*then convert to v*/
+                                v = V16(r, g, b);
+                                M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                                 PlaneIn[plane_number].u_width, (M4OSA_UInt8)v);
+                            }
+                        }
+                        break;
+                        default:
+                        break;
+                }
+            }
+            /**
+             * Luminance */
+            else
+            {
+                //switch ((M4OSA_UInt32)pFunctionContext)
+                // commented out because a structure for the effects context exists
+                switch (ColorContext->colorEffectType)
+                {
+                case M4xVSS_kVideoEffectType_Negative:
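+                    /* Negative: invert each luma sample; the chroma planes were copied
+                     * unchanged in the chrominance branch above */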
+                    for(j=0;j<PlaneOut[plane_number].u_width;j++)
+                    {
+                            p_buf_dest[j] = 255 - p_buf_src[j];
+                    }
+                    break;
+                default:
+                    M4OSA_memcpy((M4OSA_MemAddr8)p_buf_dest,
+                     (M4OSA_MemAddr8)p_buf_src ,PlaneOut[plane_number].u_width);
+                    break;
+                }
+            }
+            p_buf_src += PlaneIn[plane_number].u_stride;
+            p_buf_dest += PlaneOut[plane_number].u_stride;
+        }
+    }
+
+    return M4VIFI_OK;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4VSS3GPP_externalVideoEffectFraming(M4OSA_Void *userData,
+ *                                                    M4VIFI_ImagePlane PlaneIn[3],
+ *                                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                                    M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief    This function adds a fixed or animated image on top of an input YUV420 planar frame
+ * @note
+ * @param    userData        (IN) Framing context (the overlay image and its settings)
+ * @param    PlaneIn            (IN) Input YUV420 planar
+ * @param    PlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-100)
+ * @param    uiEffectKind    (IN) Unused
+ *
+ * @return    M4VIFI_OK:    No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_externalVideoEffectFraming( M4OSA_Void *userData,
+                                                M4VIFI_ImagePlane PlaneIn[3],
+                                                M4VIFI_ImagePlane *PlaneOut,
+                                                M4VSS3GPP_ExternalProgress *pProgress,
+                                                M4OSA_UInt32 uiEffectKind )
+{
+    M4VIFI_UInt32 x,y;
+
+    M4VIFI_UInt8 *p_in_Y = PlaneIn[0].pac_data;
+    M4VIFI_UInt8 *p_in_U = PlaneIn[1].pac_data;
+    M4VIFI_UInt8 *p_in_V = PlaneIn[2].pac_data;
+
+    M4xVSS_FramingStruct* Framing = M4OSA_NULL;
+    M4xVSS_FramingStruct* currentFraming = M4OSA_NULL;
+    M4VIFI_UInt8 *FramingRGB = M4OSA_NULL;
+
+    M4VIFI_UInt8 *p_out0;
+    M4VIFI_UInt8 *p_out1;
+    M4VIFI_UInt8 *p_out2;
+
+    M4VIFI_UInt32 topleft[2];
+
+    M4OSA_UInt8 transparent1 = (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
+    M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;
+
+#ifndef DECODE_GIF_ON_SAVING
+    Framing = (M4xVSS_FramingStruct *)userData;
+    currentFraming = (M4xVSS_FramingStruct *)Framing->pCurrent;
+    FramingRGB = Framing->FramingRgb->pac_data;
+#endif /*DECODE_GIF_ON_SAVING*/
+
+    /*FB*/
+#ifdef DECODE_GIF_ON_SAVING
+    M4OSA_ERR err;
+    Framing = (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
+#if 0
+    if(Framing == M4OSA_NULL)
+    {
+        ((M4xVSS_FramingContext*)userData)->clipTime = pProgress->uiOutputTime;
+        err = M4xVSS_internalDecodeGIF(userData);
+        if(M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4VSS3GPP_externalVideoEffectFraming:\
+             Error in M4xVSS_internalDecodeGIF: 0x%x", err);
+            return err;
+        }
+        Framing = (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
+        /* Initializes first GIF time */
+        ((M4xVSS_FramingContext*)userData)->current_gif_time = pProgress->uiOutputTime;
+    }
+#endif
+    currentFraming = (M4xVSS_FramingStruct *)Framing;
+    FramingRGB = Framing->FramingRgb->pac_data;
+#endif /*DECODE_GIF_ON_SAVING*/
+    /*end FB*/
+
+    /**
+     * Initialize input / output plane pointers */
+    p_in_Y += PlaneIn[0].u_topleft;
+    p_in_U += PlaneIn[1].u_topleft;
+    p_in_V += PlaneIn[2].u_topleft;
+
+    p_out0 = PlaneOut[0].pac_data;
+    p_out1 = PlaneOut[1].pac_data;
+    p_out2 = PlaneOut[2].pac_data;
+
+    /**
+     * Depending on time, initialize Framing frame to use */
+    if(Framing->previousClipTime == -1)
+    {
+        Framing->previousClipTime = pProgress->uiOutputTime;
+    }
+
+    /**
+     * If the current clip time has reached the duration of one frame of the framing picture,
+     * we need to step to the next framing picture */
+#if 0
+    if(((M4xVSS_FramingContext*)userData)->b_animated == M4OSA_TRUE)
+    {
+        while((((M4xVSS_FramingContext*)userData)->current_gif_time + currentFraming->duration)\
+         < pProgress->uiOutputTime)
+        {
+#ifdef DECODE_GIF_ON_SAVING
+            ((M4xVSS_FramingContext*)userData)->clipTime = pProgress->uiOutputTime;
+            err = M4xVSS_internalDecodeGIF(userData);
+            if(M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4VSS3GPP_externalVideoEffectFraming:\
+                 Error in M4xVSS_internalDecodeGIF: 0x%x", err);
+                return err;
+            }
+            if(currentFraming->duration != 0)
+            {
+                ((M4xVSS_FramingContext*)userData)->current_gif_time += currentFraming->duration;
+            }
+            else
+            {
+                ((M4xVSS_FramingContext*)userData)->current_gif_time \
+                 += pProgress->uiOutputTime - Framing->previousClipTime;
+            }
+            Framing = (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
+            currentFraming = (M4xVSS_FramingStruct *)Framing;
+            FramingRGB = Framing->FramingRgb->pac_data;
+#else
+            Framing->pCurrent = currentFraming->pNext;
+            currentFraming = Framing->pCurrent;
+#endif /*DECODE_GIF_ON_SAVING*/
+        }
+    }
+#endif
+
+    Framing->previousClipTime = pProgress->uiOutputTime;
+    FramingRGB = currentFraming->FramingRgb->pac_data;
+    topleft[0] = currentFraming->topleft_x;
+    topleft[1] = currentFraming->topleft_y;
+
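+    /* Walk every luma pixel of the input; the chroma planes are 4:2:0 subsampled, hence the
+     * (y>>1)/(x>>1) indexing used below when reading and writing U and V */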
+    for( x=0 ;x < PlaneIn[0].u_height ; x++)
+    {
+        for( y=0 ;y < PlaneIn[0].u_width ; y++)
+        {
+            /**
+             * To handle framing with input size != output size,
+             * the framing is applied only where the coordinates match between the framing
+             * picture (placed at topleft) and the input plane */
+            if( y < (topleft[0] + currentFraming->FramingYuv[0].u_width)  &&
+                y >= topleft[0] &&
+                x < (topleft[1] + currentFraming->FramingYuv[0].u_height) &&
+                x >= topleft[1])
+            {
+                /*Alpha blending support*/
+                M4OSA_Float alphaBlending = 1;
+                M4xVSS_internalEffectsAlphaBlending*  alphaBlendingStruct =\
+                 (M4xVSS_internalEffectsAlphaBlending*)\
+                    ((M4xVSS_FramingContext*)userData)->alphaBlendingStruct;
+
+                if(alphaBlendingStruct != M4OSA_NULL)
+                {
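+                    /* Three-phase alpha ramp: fade in from m_start to m_middle, hold at
+                     * m_middle, then fade out to m_end. Values are percentages (hence the
+                     * /100); uiProgress is assumed to span 0..1000 over the effect duration. */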
+                    if(pProgress->uiProgress >= 0 && pProgress->uiProgress \
+                    < (M4OSA_UInt32)(alphaBlendingStruct->m_fadeInTime*10))
+                    {
+                        alphaBlending = ((M4OSA_Float)(alphaBlendingStruct->m_middle\
+                         - alphaBlendingStruct->m_start)\
+                            *pProgress->uiProgress/(alphaBlendingStruct->m_fadeInTime*10));
+                        alphaBlending += alphaBlendingStruct->m_start;
+                        alphaBlending /= 100;
+                    }
+                    else if(pProgress->uiProgress >= (M4OSA_UInt32)(alphaBlendingStruct->\
+                    m_fadeInTime*10) && pProgress->uiProgress < 1000\
+                     - (M4OSA_UInt32)(alphaBlendingStruct->m_fadeOutTime*10))
+                    {
+                        alphaBlending = (M4OSA_Float)\
+                        ((M4OSA_Float)alphaBlendingStruct->m_middle/100);
+                    }
+                    else if(pProgress->uiProgress >= 1000 - (M4OSA_UInt32)\
+                    (alphaBlendingStruct->m_fadeOutTime*10))
+                    {
+                        alphaBlending = ((M4OSA_Float)(alphaBlendingStruct->m_middle \
+                        - alphaBlendingStruct->m_end))*(1000 - pProgress->uiProgress)\
+                        /(alphaBlendingStruct->m_fadeOutTime*10);
+                        alphaBlending += alphaBlendingStruct->m_end;
+                        alphaBlending /= 100;
+                    }
+                }
+                /**/
+
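+                /* Framing pixels whose RGB565 value matches TRANSPARENT_COLOR are treated as
+                 * fully transparent: the input pixel is copied through unchanged */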
+                if((*(FramingRGB)==transparent1) && (*(FramingRGB+1)==transparent2))
+                {
+                    *( p_out0+y+x*PlaneOut[0].u_stride)=(*(p_in_Y+y+x*PlaneIn[0].u_stride));
+                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=
+                        (*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride));
+                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=
+                        (*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride));
+                }
+                else
+                {
+                    *( p_out0+y+x*PlaneOut[0].u_stride)=
+                        (*(currentFraming->FramingYuv[0].pac_data+(y-topleft[0])\
+                            +(x-topleft[1])*currentFraming->FramingYuv[0].u_stride))*alphaBlending;
+                    *( p_out0+y+x*PlaneOut[0].u_stride)+=
+                        (*(p_in_Y+y+x*PlaneIn[0].u_stride))*(1-alphaBlending);
+                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=
+                        (*(currentFraming->FramingYuv[1].pac_data+((y-topleft[0])>>1)\
+                            +((x-topleft[1])>>1)*currentFraming->FramingYuv[1].u_stride))\
+                                *alphaBlending;
+                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)+=
+                        (*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride))*(1-alphaBlending);
+                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=
+                        (*(currentFraming->FramingYuv[2].pac_data+((y-topleft[0])>>1)\
+                            +((x-topleft[1])>>1)*currentFraming->FramingYuv[2].u_stride))\
+                                *alphaBlending;
+                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)+=
+                        (*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride))*(1-alphaBlending);
+                }
+                if( PlaneIn[0].u_width < (topleft[0] + currentFraming->FramingYuv[0].u_width) &&
+                    y == PlaneIn[0].u_width-1)
+                {
+                    FramingRGB = FramingRGB + 2 \
+                        * (topleft[0] + currentFraming->FramingYuv[0].u_width \
+                            - PlaneIn[0].u_width + 1);
+                }
+                else
+                {
+                    FramingRGB = FramingRGB + 2;
+                }
+            }
+            /**
+             * Just copy input plane to output plane */
+            else
+            {
+                *( p_out0+y+x*PlaneOut[0].u_stride)=*(p_in_Y+y+x*PlaneIn[0].u_stride);
+                *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=
+                    *(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride);
+                *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=
+                    *(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride);
+            }
+        }
+    }
+
+#ifdef DECODE_GIF_ON_SAVING
+#if 0
+    if(pProgress->bIsLast == M4OSA_TRUE
+        && (M4OSA_Bool)((M4xVSS_FramingContext*)userData)->b_IsFileGif == M4OSA_TRUE)
+    {
+        M4xVSS_internalDecodeGIF_Cleaning((M4xVSS_FramingContext*)userData);
+    }
+#endif
+#endif /*DECODE_GIF_ON_SAVING*/
+
+    return M4VIFI_OK;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype    M4VSS3GPP_externalVideoEffectFifties(M4OSA_Void *pFunctionContext,
+ *                                                    M4VIFI_ImagePlane *PlaneIn,
+ *                                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                                    M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief    This function makes a video look as if it had been shot in the fifties
+ * @note
+ * @param    pUserData       (IN) Context
+ * @param    pPlaneIn        (IN) Input YUV420 planar
+ * @param    pPlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-1000)
+ * @param    uiEffectKind    (IN) Unused
+ *
+ * @return    M4VIFI_OK:            No error
+ * @return  M4ERR_PARAMETER:    pFiftiesData, pPlaneOut or pProgress are NULL (DEBUG only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_externalVideoEffectFifties( M4OSA_Void *pUserData,
+                                                M4VIFI_ImagePlane *pPlaneIn,
+                                                M4VIFI_ImagePlane *pPlaneOut,
+                                                M4VSS3GPP_ExternalProgress *pProgress,
+                                                M4OSA_UInt32 uiEffectKind )
+{
+    M4VIFI_UInt32 x, y, xShift;
+    M4VIFI_UInt8 *pInY = pPlaneIn[0].pac_data;
+    M4VIFI_UInt8 *pOutY, *pInYbegin;
+    M4VIFI_UInt8 *pInCr,* pOutCr;
+    M4VIFI_Int32 plane_number;
+
+    /* Internal context*/
+    M4xVSS_FiftiesStruct* p_FiftiesData = (M4xVSS_FiftiesStruct *)pUserData;
+
+    /* Check the inputs (debug only) */
+    M4OSA_DEBUG_IF2((p_FiftiesData == M4OSA_NULL),M4ERR_PARAMETER,
+         "xVSS: p_FiftiesData is M4OSA_NULL in M4VSS3GPP_externalVideoEffectFifties");
+    M4OSA_DEBUG_IF2((pPlaneOut == M4OSA_NULL),M4ERR_PARAMETER,
+         "xVSS: p_PlaneOut is M4OSA_NULL in M4VSS3GPP_externalVideoEffectFifties");
+    M4OSA_DEBUG_IF2((pProgress == M4OSA_NULL),M4ERR_PARAMETER,
+        "xVSS: p_Progress is M4OSA_NULL in M4VSS3GPP_externalVideoEffectFifties");
+
+    /* Initialize input / output plane pointers */
+    pInY += pPlaneIn[0].u_topleft;
+    pOutY = pPlaneOut[0].pac_data;
+    pInYbegin  = pInY;
+
+    /* Initialize the random */
+    if(p_FiftiesData->previousClipTime < 0)
+    {
+        M4OSA_randInit();
+        M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->shiftRandomValue), (pPlaneIn[0].u_height) >> 4);
+        M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->stripeRandomValue), (pPlaneIn[0].u_width)<< 2);
+        p_FiftiesData->previousClipTime = pProgress->uiOutputTime;
+    }
+
+    /* Choose random values if we have reached the duration of a partial effect */
+    else if( (pProgress->uiOutputTime - p_FiftiesData->previousClipTime)\
+         > p_FiftiesData->fiftiesEffectDuration)
+    {
+        M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->shiftRandomValue), (pPlaneIn[0].u_height) >> 4);
+        M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->stripeRandomValue), (pPlaneIn[0].u_width)<< 2);
+        p_FiftiesData->previousClipTime = pProgress->uiOutputTime;
+    }
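+    /* How the effect below works: every output luma row is read from a randomly shifted
+     input row (xShift), which makes the picture jump vertically like an old projector.
+     Rows whose shifted source lands in the last few input rows are painted dark (40) to
+     draw the horizontal frame bar between the two image parts, and one random column
+     (stripeRandomValue) is painted bright (90) to imitate a vertical film scratch.
+     The chrominance is forced to a sepia tone (U=117, V=139) by the loop just below. */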
+
+    /* Put in Sepia the chrominance */
+    for (plane_number = 1; plane_number < 3; plane_number++)
+    {
+        pInCr  = pPlaneIn[plane_number].pac_data  + pPlaneIn[plane_number].u_topleft;
+        pOutCr = pPlaneOut[plane_number].pac_data + pPlaneOut[plane_number].u_topleft;
+
+        for (x = 0; x < pPlaneOut[plane_number].u_height; x++)
+        {
+            if (1 == plane_number)
+                M4OSA_memset((M4OSA_MemAddr8)pOutCr, pPlaneIn[plane_number].u_width,
+                     117); /* U value */
+            else
+                M4OSA_memset((M4OSA_MemAddr8)pOutCr, pPlaneIn[plane_number].u_width,
+                     139); /* V value */
+
+            pInCr  += pPlaneIn[plane_number].u_stride;
+            pOutCr += pPlaneOut[plane_number].u_stride;
+        }
+    }
+
+    /* Compute the new pixels values */
+    for( x = 0 ; x < pPlaneIn[0].u_height ; x++)
+    {
+        M4VIFI_UInt8 *p_outYtmp, *p_inYtmp;
+
+        /* Compute the xShift (random value) */
+        if (0 == (p_FiftiesData->shiftRandomValue % 5 ))
+            xShift = (x + p_FiftiesData->shiftRandomValue ) % (pPlaneIn[0].u_height - 1);
+        else
+            xShift = (x + (pPlaneIn[0].u_height - p_FiftiesData->shiftRandomValue) ) \
+                % (pPlaneIn[0].u_height - 1);
+
+        /* Initialize the pointers */
+        p_outYtmp = pOutY + 1;                                    /* yShift of 1 pixel */
+        p_inYtmp  = pInYbegin + (xShift * pPlaneIn[0].u_stride);  /* Apply the xShift */
+
+        for( y = 0 ; y < pPlaneIn[0].u_width ; y++)
+        {
+            /* Set Y value */
+            if (xShift > (pPlaneIn[0].u_height - 4))
+                *p_outYtmp = 40;        /* Add some horizontal black lines between the
+                                        two parts of the image */
+            else if ( y == p_FiftiesData->stripeRandomValue)
+                *p_outYtmp = 90;        /* Add a random vertical line for the bulk */
+            else
+                *p_outYtmp = *p_inYtmp;
+
+
+            /* Go to the next pixel */
+            p_outYtmp++;
+            p_inYtmp++;
+
+            /* Restart at the beginning of the line for the last pixel*/
+            if (y == (pPlaneIn[0].u_width - 2))
+                p_outYtmp = pOutY;
+        }
+
+        /* Go to the next line */
+        pOutY += pPlaneOut[0].u_stride;
+    }
+
+    return M4VIFI_OK;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_externalVideoEffectZoom( )
+ * @brief    Zoom in/out video effect functions.
+ * @note    The external video function is used only if VideoEffectType is set to
+ * M4VSS3GPP_kVideoEffectType_ZoomIn or M4VSS3GPP_kVideoEffectType_ZoomOut.
+ *
+ * @param   pFunctionContext    (IN) The function context, previously set by the integrator
+ * @param    pInputPlanes        (IN) Input YUV420 image: pointer to an array of three valid
+ *                                    image planes (Y, U and V)
+ * @param    pOutputPlanes        (IN/OUT) Output (filtered) YUV420 image: pointer to an array of
+ *                                        three valid image planes (Y, U and V)
+ * @param    pProgress            (IN) Set of information about the video transition progress.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4VSS3GPP_externalVideoEffectZoom(
+    M4OSA_Void *pFunctionContext,
+    M4VIFI_ImagePlane *pInputPlanes,
+    M4VIFI_ImagePlane *pOutputPlanes,
+    M4VSS3GPP_ExternalProgress *pProgress,
+    M4OSA_UInt32 uiEffectKind
+)
+{
+    M4OSA_UInt32 boxWidth;
+    M4OSA_UInt32 boxHeight;
+    M4OSA_UInt32 boxPosX;
+    M4OSA_UInt32 boxPosY;
+    M4OSA_UInt32 ratio = 0;
+    /* successive ratios differ by a factor of about 1.189207 (2^(1/4)) */
+    /* zoom between x1 and x16 */
+    M4OSA_UInt32 ratiotab[17] ={1024,1218,1448,1722,2048,2435,2896,3444,4096,4871,5793,\
+                                6889,8192,9742,11585,13777,16384};
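+    /* ratiotab[i] is roughly 1024 * 2^(i/4) (Q10 fixed point), i.e. successive entries
+     differ by the 1.189207 factor mentioned above, up to ratiotab[16] == 16384 (x16).
+     For example, ratio == 4 selects ratiotab[4] == 2048, so the box computed below is
+     half the input width and height, which becomes a x2 zoom after the final resize. */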
+    M4OSA_UInt32 ik;
+
+    M4VIFI_ImagePlane boxPlane[3];
+
+    if(M4xVSS_kVideoEffectType_ZoomOut == (M4OSA_UInt32)pFunctionContext)
+    {
+        //ratio = 16 - (15 * pProgress->uiProgress)/1000;
+        ratio = 16 - pProgress->uiProgress / 66 ;
+    }
+    else if(M4xVSS_kVideoEffectType_ZoomIn == (M4OSA_UInt32)pFunctionContext)
+    {
+        //ratio = 1 + (15 * pProgress->uiProgress)/1000;
+        ratio = 1 + pProgress->uiProgress / 66 ;
+    }
+
+    for(ik=0;ik<3;ik++){
+
+        boxPlane[ik].u_stride = pInputPlanes[ik].u_stride;
+        boxPlane[ik].pac_data = pInputPlanes[ik].pac_data;
+
+        boxHeight = ( pInputPlanes[ik].u_height << 10 ) / ratiotab[ratio];
+        boxWidth = ( pInputPlanes[ik].u_width << 10 ) / ratiotab[ratio];
+        boxPlane[ik].u_height = (boxHeight)&(~1);
+        boxPlane[ik].u_width = (boxWidth)&(~1);
+
+        boxPosY = (pInputPlanes[ik].u_height >> 1) - (boxPlane[ik].u_height >> 1);
+        boxPosX = (pInputPlanes[ik].u_width >> 1) - (boxPlane[ik].u_width >> 1);
+        boxPlane[ik].u_topleft = boxPosY * boxPlane[ik].u_stride + boxPosX;
+    }
+
+    M4VIFI_ResizeBilinearYUV420toYUV420(M4OSA_NULL, (M4VIFI_ImagePlane*)&boxPlane, pOutputPlanes);
+
+    /**
+     * Return */
+    return(M4NO_ERROR);
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_AlphaMagic( M4OSA_Void *userData,
+ *                                    M4VIFI_ImagePlane PlaneIn1[3],
+ *                                    M4VIFI_ImagePlane PlaneIn2[3],
+ *                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                    M4OSA_UInt32 uiTransitionKind)
+ *
+ * @brief    This function applies the AlphaMagic transition between two input YUV420
+ *           planar frames, using an alpha mask to decide which source each pixel comes from
+ * @note
+ * @param    userData        (IN) Contains a pointer to a settings structure
+ * @param    PlaneIn1        (IN) Input YUV420 planar from video 1
+ * @param    PlaneIn2        (IN) Input YUV420 planar from video 2
+ * @param    PlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-1000)
+ * @param    uiTransitionKind(IN) Unused
+ *
+ * @return    M4VIFI_OK:    No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_AlphaMagic( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+                             M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+                             M4VSS3GPP_ExternalProgress *pProgress, M4OSA_UInt32 uiTransitionKind)
+{
+
+    M4OSA_ERR err;
+
+    M4xVSS_internal_AlphaMagicSettings* alphaContext;
+    M4VIFI_Int32 alphaProgressLevel;
+
+    M4VIFI_ImagePlane* planeswap;
+    M4VIFI_UInt32 x,y;
+
+    M4VIFI_UInt8 *p_out0;
+    M4VIFI_UInt8 *p_out1;
+    M4VIFI_UInt8 *p_out2;
+    M4VIFI_UInt8 *alphaMask;
+    /* "Old image" */
+    M4VIFI_UInt8 *p_in1_Y;
+    M4VIFI_UInt8 *p_in1_U;
+    M4VIFI_UInt8 *p_in1_V;
+    /* "New image" */
+    M4VIFI_UInt8 *p_in2_Y;
+    M4VIFI_UInt8 *p_in2_U;
+    M4VIFI_UInt8 *p_in2_V;
+
+    err = M4NO_ERROR;
+
+    alphaContext = (M4xVSS_internal_AlphaMagicSettings*)userData;
+
+    alphaProgressLevel = (pProgress->uiProgress * 255)/1000;
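+    /* uiProgress is on a 0..1000 scale; remap it to 0..255 so it can be compared directly
+     with the 8-bit values stored in the alpha mask plane. For example uiProgress == 500
+     (mid-transition) gives alphaProgressLevel == 127. */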
+
+    if( alphaContext->isreverse != M4OSA_FALSE)
+    {
+        alphaProgressLevel = 255 - alphaProgressLevel;
+        planeswap = PlaneIn1;
+        PlaneIn1 = PlaneIn2;
+        PlaneIn2 = planeswap;
+    }
+
+    p_out0 = PlaneOut[0].pac_data;
+    p_out1 = PlaneOut[1].pac_data;
+    p_out2 = PlaneOut[2].pac_data;
+
+    alphaMask = alphaContext->pPlane->pac_data;
+
+    /* "Old image" */
+    p_in1_Y = PlaneIn1[0].pac_data;
+    p_in1_U = PlaneIn1[1].pac_data;
+    p_in1_V = PlaneIn1[2].pac_data;
+    /* "New image" */
+    p_in2_Y = PlaneIn2[0].pac_data;
+    p_in2_U = PlaneIn2[1].pac_data;
+    p_in2_V = PlaneIn2[2].pac_data;
+
+     /**
+     * For each row ... */
+    for( y=0; y<PlaneOut->u_height; y++ )
+    {
+        /**
+         * ... and each column of the alpha mask */
+        for( x=0; x<PlaneOut->u_width; x++ )
+        {
+            /**
+             * If the value of the current pixel of the alpha mask is greater than the
+             * current time (the current time is normalized to [0-255]) */
+            if( alphaProgressLevel < alphaMask[x+y*PlaneOut->u_width] )
+            {
+                /* We keep "old image" in output plane */
+                *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in1_Y+x+y*PlaneIn1[0].u_stride);
+                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
+                    *(p_in1_U+(x>>1)+(y>>1)*PlaneIn1[1].u_stride);
+                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+                    *(p_in1_V+(x>>1)+(y>>1)*PlaneIn1[2].u_stride);
+            }
+            else
+            {
+                /* We take "new image" in output plane */
+                *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in2_Y+x+y*PlaneIn2[0].u_stride);
+                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
+                    *(p_in2_U+(x>>1)+(y>>1)*PlaneIn2[1].u_stride);
+                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+                    *(p_in2_V+(x>>1)+(y>>1)*PlaneIn2[2].u_stride);
+            }
+        }
+    }
+
+    return(err);
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_AlphaMagicBlending( M4OSA_Void *userData,
+ *                                    M4VIFI_ImagePlane PlaneIn1[3],
+ *                                    M4VIFI_ImagePlane PlaneIn2[3],
+ *                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                    M4OSA_UInt32 uiTransitionKind)
+ *
+ * @brief    This function applies the AlphaMagic transition between two input YUV420
+ *           planar frames, blending the two sources around the alpha-mask threshold
+ * @note
+ * @param    userData        (IN) Contains a pointer to a settings structure
+ * @param    PlaneIn1        (IN) Input YUV420 planar from video 1
+ * @param    PlaneIn2        (IN) Input YUV420 planar from video 2
+ * @param    PlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-1000)
+ * @param    uiTransitionKind(IN) Unused
+ *
+ * @return    M4VIFI_OK:    No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_AlphaMagicBlending( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+                                     M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+                                     M4VSS3GPP_ExternalProgress *pProgress,
+                                     M4OSA_UInt32 uiTransitionKind)
+{
+    M4OSA_ERR err;
+
+    M4xVSS_internal_AlphaMagicSettings* alphaContext;
+    M4VIFI_Int32 alphaProgressLevel;
+    M4VIFI_Int32 alphaBlendLevelMin;
+    M4VIFI_Int32 alphaBlendLevelMax;
+    M4VIFI_Int32 alphaBlendRange;
+
+    M4VIFI_ImagePlane* planeswap;
+    M4VIFI_UInt32 x,y;
+    M4VIFI_Int32 alphaMaskValue;
+
+    M4VIFI_UInt8 *p_out0;
+    M4VIFI_UInt8 *p_out1;
+    M4VIFI_UInt8 *p_out2;
+    M4VIFI_UInt8 *alphaMask;
+    /* "Old image" */
+    M4VIFI_UInt8 *p_in1_Y;
+    M4VIFI_UInt8 *p_in1_U;
+    M4VIFI_UInt8 *p_in1_V;
+    /* "New image" */
+    M4VIFI_UInt8 *p_in2_Y;
+    M4VIFI_UInt8 *p_in2_U;
+    M4VIFI_UInt8 *p_in2_V;
+
+
+    err = M4NO_ERROR;
+
+    alphaContext = (M4xVSS_internal_AlphaMagicSettings*)userData;
+
+    alphaProgressLevel = (pProgress->uiProgress * 255)/1000;
+
+    if( alphaContext->isreverse != M4OSA_FALSE)
+    {
+        alphaProgressLevel = 255 - alphaProgressLevel;
+        planeswap = PlaneIn1;
+        PlaneIn1 = PlaneIn2;
+        PlaneIn2 = planeswap;
+    }
+
+    alphaBlendLevelMin = alphaProgressLevel-alphaContext->blendingthreshold;
+
+    alphaBlendLevelMax = alphaProgressLevel+alphaContext->blendingthreshold;
+
+    alphaBlendRange = (alphaContext->blendingthreshold)*2;
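+    /* Pixels whose mask value is above alphaBlendLevelMax still show the "old image",
+     pixels at or below alphaBlendLevelMin already show the "new image", and pixels whose
+     mask value falls inside ]min, max] are blended linearly: with a blending threshold of
+     16 and alphaProgressLevel == 127, for instance, mask values in ]111, 143] are mixed,
+     the weight of the old image rising from 0 near 111 to 1 near 143. */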
+
+    p_out0 = PlaneOut[0].pac_data;
+    p_out1 = PlaneOut[1].pac_data;
+    p_out2 = PlaneOut[2].pac_data;
+
+    alphaMask = alphaContext->pPlane->pac_data;
+
+    /* "Old image" */
+    p_in1_Y = PlaneIn1[0].pac_data;
+    p_in1_U = PlaneIn1[1].pac_data;
+    p_in1_V = PlaneIn1[2].pac_data;
+    /* "New image" */
+    p_in2_Y = PlaneIn2[0].pac_data;
+    p_in2_U = PlaneIn2[1].pac_data;
+    p_in2_V = PlaneIn2[2].pac_data;
+
+    /* apply Alpha Magic on each pixel */
+    for( y=0; y<PlaneOut->u_height; y++ )
+    {
+        for( x=0; x<PlaneOut->u_width; x++ )
+        {
+            alphaMaskValue = alphaMask[x+y*PlaneOut->u_width];
+            if( alphaBlendLevelMax < alphaMaskValue )
+            {
+                /* We keep "old image" in output plane */
+                *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in1_Y+x+y*PlaneIn1[0].u_stride);
+                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
+                    *(p_in1_U+(x>>1)+(y>>1)*PlaneIn1[1].u_stride);
+                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+                    *(p_in1_V+(x>>1)+(y>>1)*PlaneIn1[2].u_stride);
+            }
+            else if( (alphaBlendLevelMin < alphaMaskValue)&&
+                    (alphaMaskValue <= alphaBlendLevelMax ) )
+            {
+                /* We blend "old and new image" in output plane */
+                *( p_out0+x+y*PlaneOut[0].u_stride)=(M4VIFI_UInt8)
+                    (( (alphaMaskValue-alphaBlendLevelMin)*( *(p_in1_Y+x+y*PlaneIn1[0].u_stride))
+                        +(alphaBlendLevelMax-alphaMaskValue)\
+                            *( *(p_in2_Y+x+y*PlaneIn2[0].u_stride)) )/alphaBlendRange );
+
+                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=(M4VIFI_UInt8)\
+                    (( (alphaMaskValue-alphaBlendLevelMin)*( *(p_in1_U+(x>>1)+(y>>1)\
+                        *PlaneIn1[1].u_stride))
+                            +(alphaBlendLevelMax-alphaMaskValue)*( *(p_in2_U+(x>>1)+(y>>1)\
+                                *PlaneIn2[1].u_stride)) )/alphaBlendRange );
+
+                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+                    (M4VIFI_UInt8)(( (alphaMaskValue-alphaBlendLevelMin)\
+                        *( *(p_in1_V+(x>>1)+(y>>1)*PlaneIn1[2].u_stride))
+                                +(alphaBlendLevelMax-alphaMaskValue)*( *(p_in2_V+(x>>1)+(y>>1)\
+                                    *PlaneIn2[2].u_stride)) )/alphaBlendRange );
+
+            }
+            else
+            {
+                /* We take "new image" in output plane */
+                *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in2_Y+x+y*PlaneIn2[0].u_stride);
+                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
+                    *(p_in2_U+(x>>1)+(y>>1)*PlaneIn2[1].u_stride);
+                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+                    *(p_in2_V+(x>>1)+(y>>1)*PlaneIn2[2].u_stride);
+            }
+        }
+    }
+
+    return(err);
+}
+
+#define M4XXX_SampleAddress(plane, x, y)  ( (plane).pac_data + (plane).u_topleft + (y)\
+     * (plane).u_stride + (x) )
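+
+/* M4XXX_SampleAddress() returns the address of sample (x, y) within a plane: the start of
+ the pixel data, plus the plane's top-left offset, plus y strides, plus x. All quantities
+ are expressed in samples of that plane, so the same macro works for the subsampled U and
+ V planes provided x and y are already given in chroma units. */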
+
+static void M4XXX_CopyPlane(M4VIFI_ImagePlane* dest, M4VIFI_ImagePlane* source)
+{
+    M4OSA_UInt32    height, width, sourceStride, destStride, y;
+    M4OSA_MemAddr8    sourceWalk, destWalk;
+
+    /* cache the vars used in the loop so as to avoid them being repeatedly fetched and
+     recomputed from memory. */
+    height = dest->u_height;
+    width = dest->u_width;
+
+    sourceWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(*source, 0, 0);
+    sourceStride = source->u_stride;
+
+    destWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(*dest, 0, 0);
+    destStride = dest->u_stride;
+
+    for (y=0; y<height; y++)
+    {
+        M4OSA_memcpy((M4OSA_MemAddr8)destWalk, (M4OSA_MemAddr8)sourceWalk, width);
+        destWalk += destStride;
+        sourceWalk += sourceStride;
+    }
+}
+
+static M4OSA_ERR M4xVSS_VerticalSlideTransition(M4VIFI_ImagePlane* topPlane,
+                                                M4VIFI_ImagePlane* bottomPlane,
+                                                M4VIFI_ImagePlane *PlaneOut,
+                                                M4OSA_UInt32    shiftUV)
+{
+    M4OSA_UInt32 i;
+
+    /* Do three loops, one for each plane type, in order to avoid having too many buffers
+    "hot" at the same time (better for cache). */
+    for (i=0; i<3; i++)
+    {
+        M4OSA_UInt32    topPartHeight, bottomPartHeight, width, sourceStride, destStride, y;
+        M4OSA_MemAddr8    sourceWalk, destWalk;
+
+        /* cache the vars used in the loop so as to avoid them being repeatedly fetched and
+         recomputed from memory. */
+        if (0 == i) /* Y plane */
+        {
+            bottomPartHeight = 2*shiftUV;
+        }
+        else /* U and V planes */
+        {
+            bottomPartHeight = shiftUV;
+        }
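+        /* shiftUV is expressed in chroma (U/V) rows; since YUV420 chroma is subsampled by
+         two vertically, the luma plane has to slide by twice that many rows. */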
+        topPartHeight = PlaneOut[i].u_height - bottomPartHeight;
+        width = PlaneOut[i].u_width;
+
+        sourceWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(topPlane[i], 0, bottomPartHeight);
+        sourceStride = topPlane[i].u_stride;
+
+        destWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(PlaneOut[i], 0, 0);
+        destStride = PlaneOut[i].u_stride;
+
+        /* First the part from the top source clip frame. */
+        for (y=0; y<topPartHeight; y++)
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8)destWalk, (M4OSA_MemAddr8)sourceWalk, width);
+            destWalk += destStride;
+            sourceWalk += sourceStride;
+        }
+
+        /* and now change the vars to copy the part from the bottom source clip frame. */
+        sourceWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(bottomPlane[i], 0, 0);
+        sourceStride = bottomPlane[i].u_stride;
+
+        /* destWalk is already at M4XXX_SampleAddress(PlaneOut[i], 0, topPartHeight) */
+
+        for (y=0; y<bottomPartHeight; y++)
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8)destWalk, (M4OSA_MemAddr8)sourceWalk, width);
+            destWalk += destStride;
+            sourceWalk += sourceStride;
+        }
+    }
+    return M4NO_ERROR;
+}
+
+static M4OSA_ERR M4xVSS_HorizontalSlideTransition(M4VIFI_ImagePlane* leftPlane,
+                                                  M4VIFI_ImagePlane* rightPlane,
+                                                  M4VIFI_ImagePlane *PlaneOut,
+                                                  M4OSA_UInt32    shiftUV)
+{
+    M4OSA_UInt32 i, y;
+    /* If the shift is exactly 0, or exactly the width of the target image, the output is simply
+    the left frame or the right frame, respectively. The general code below handles these cases
+    poorly (they degenerate into 0-size memcopies), so they are special-cased here. */
+
+    if (0 == shiftUV)    /* output left frame */
+    {
+        for (i = 0; i<3; i++) /* for each YUV plane */
+        {
+            M4XXX_CopyPlane(&(PlaneOut[i]), &(leftPlane[i]));
+        }
+
+        return M4NO_ERROR;
+    }
+
+    if (PlaneOut[1].u_width == shiftUV) /* output right frame */
+    {
+        for (i = 0; i<3; i++) /* for each YUV plane */
+        {
+            M4XXX_CopyPlane(&(PlaneOut[i]), &(rightPlane[i]));
+        }
+
+        return M4NO_ERROR;
+    }
+
+
+    /* Do three loops, one for each plane type, in order to avoid having too many buffers
+    "hot" at the same time (better for cache). */
+    for (i=0; i<3; i++)
+    {
+        M4OSA_UInt32    height, leftPartWidth, rightPartWidth;
+        M4OSA_UInt32    leftStride,    rightStride,    destStride;
+        M4OSA_MemAddr8    leftWalk,    rightWalk,    destWalkLeft, destWalkRight;
+
+        /* cache the vars used in the loop so as to avoid them being repeatedly fetched
+        and recomputed from memory. */
+        height = PlaneOut[i].u_height;
+
+        if (0 == i) /* Y plane */
+        {
+            rightPartWidth = 2*shiftUV;
+        }
+        else /* U and V planes */
+        {
+            rightPartWidth = shiftUV;
+        }
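+        /* Same idea as in the vertical slide: shiftUV counts chroma columns, so the luma
+         plane is shifted horizontally by 2*shiftUV samples. */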
+        leftPartWidth = PlaneOut[i].u_width - rightPartWidth;
+
+        leftWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(leftPlane[i], rightPartWidth, 0);
+        leftStride = leftPlane[i].u_stride;
+
+        rightWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(rightPlane[i], 0, 0);
+        rightStride = rightPlane[i].u_stride;
+
+        destWalkLeft = (M4OSA_MemAddr8)M4XXX_SampleAddress(PlaneOut[i], 0, 0);
+        destWalkRight = (M4OSA_MemAddr8)M4XXX_SampleAddress(PlaneOut[i], leftPartWidth, 0);
+        destStride = PlaneOut[i].u_stride;
+
+        for (y=0; y<height; y++)
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8)destWalkLeft, (M4OSA_MemAddr8)leftWalk, leftPartWidth);
+            leftWalk += leftStride;
+
+            M4OSA_memcpy((M4OSA_MemAddr8)destWalkRight, (M4OSA_MemAddr8)rightWalk, rightPartWidth);
+            rightWalk += rightStride;
+
+            destWalkLeft += destStride;
+            destWalkRight += destStride;
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+
+M4OSA_ERR M4xVSS_SlideTransition( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+                                  M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+                                  M4VSS3GPP_ExternalProgress *pProgress,
+                                  M4OSA_UInt32 uiTransitionKind)
+{
+    M4xVSS_internal_SlideTransitionSettings* settings =
+         (M4xVSS_internal_SlideTransitionSettings*)userData;
+    M4OSA_UInt32    shiftUV;
+
+    M4OSA_TRACE1_0("inside M4xVSS_SlideTransition");
+    if ((M4xVSS_SlideTransition_RightOutLeftIn == settings->direction)
+        || (M4xVSS_SlideTransition_LeftOutRightIn == settings->direction) )
+    {
+        /* horizontal slide */
+        shiftUV = ((PlaneOut[1]).u_width * pProgress->uiProgress)/1000;
+        M4OSA_TRACE1_2("M4xVSS_SlideTransition upper: shiftUV = %d,progress = %d",
+            shiftUV,pProgress->uiProgress );
+        if (M4xVSS_SlideTransition_RightOutLeftIn == settings->direction)
+        {
+            /* Put the previous clip frame on the right and the next clip frame on the left,
+            and reverse shiftUV (since it is a shift measured from the left frame) so that the
+            transition starts on the right frame, i.e. on the previous clip. */
+            return M4xVSS_HorizontalSlideTransition(PlaneIn2, PlaneIn1, PlaneOut,
+                 (PlaneOut[1]).u_width - shiftUV);
+        }
+        else /* Left out, right in*/
+        {
+            return M4xVSS_HorizontalSlideTransition(PlaneIn1, PlaneIn2, PlaneOut, shiftUV);
+        }
+    }
+    else
+    {
+        /* vertical slide */
+        shiftUV = ((PlaneOut[1]).u_height * pProgress->uiProgress)/1000;
+        M4OSA_TRACE1_2("M4xVSS_SlideTransition bottom: shiftUV = %d,progress = %d",shiftUV,
+            pProgress->uiProgress );
+        if (M4xVSS_SlideTransition_TopOutBottomIn == settings->direction)
+        {
+            /* Put the previous clip frame top, the next clip frame bottom. */
+            return M4xVSS_VerticalSlideTransition(PlaneIn1, PlaneIn2, PlaneOut, shiftUV);
+        }
+        else /* Bottom out, top in */
+        {
+            return M4xVSS_VerticalSlideTransition(PlaneIn2, PlaneIn1, PlaneOut,
+                (PlaneOut[1]).u_height - shiftUV);
+        }
+    }
+
+    /* Note: it might be worthwhile to do some parameter checking, see if dimensions match, etc.,
+    at least in debug mode. */
+}
+
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_FadeBlackTransition(M4OSA_Void *userData,
+ *                                                    M4VIFI_ImagePlane PlaneIn1[3],
+ *                                                    M4VIFI_ImagePlane PlaneIn2[3],
+ *                                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                                    M4OSA_UInt32 uiTransitionKind)
+ *
+ * @brief    This function applies a fade to black on the outgoing clip followed by a
+ *           fade from black on the incoming clip
+ * @note
+ * @param    userData        (IN) Context (unused)
+ * @param    PlaneIn1        (IN) Input YUV420 planar from video 1
+ * @param    PlaneIn2        (IN) Input YUV420 planar from video 2
+ * @param    PlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-1000)
+ * @param    uiTransitionKind    (IN) Unused
+ *
+ * @return    M4VIFI_OK:    No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_FadeBlackTransition(M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+                                     M4VIFI_ImagePlane PlaneIn2[3],
+                                     M4VIFI_ImagePlane *PlaneOut,
+                                     M4VSS3GPP_ExternalProgress *pProgress,
+                                     M4OSA_UInt32 uiTransitionKind)
+{
+    M4OSA_Int32 tmp = 0;
+    M4OSA_ERR err = M4NO_ERROR;
+
+
+    if((pProgress->uiProgress) < 500)
+    {
+        /**
+         * Compute where we are in the effect (scale is 0->1024) */
+        tmp = (M4OSA_Int32)((1.0 - ((M4OSA_Float)(pProgress->uiProgress*2)/1000)) * 1024 );
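+        /* The scale runs from 1024 (frame unchanged) down to 0 (fully dark) over the first
+         half of the transition; e.g. uiProgress == 250 gives tmp == (1.0 - 0.5) * 1024 == 512,
+         i.e. the outgoing frame at half brightness. */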
+
+        /**
+         * Apply the darkening effect */
+        err = M4VFL_modifyLumaWithScale( (M4ViComImagePlane*)PlaneIn1,
+             (M4ViComImagePlane*)PlaneOut, tmp, M4OSA_NULL);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4xVSS_FadeBlackTransition: M4VFL_modifyLumaWithScale returns\
+                 error 0x%x, returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR", err);
+            return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
+        }
+    }
+    else
+    {
+        /**
+         * Compute where we are in the effect (scale is 0->1024). */
+        tmp = (M4OSA_Int32)( (((M4OSA_Float)(((pProgress->uiProgress-500)*2))/1000)) * 1024 );
+
+        /**
+         * Apply the darkening effect */
+        err = M4VFL_modifyLumaWithScale((M4ViComImagePlane*)PlaneIn2,
+             (M4ViComImagePlane*)PlaneOut, tmp, M4OSA_NULL);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4xVSS_FadeBlackTransition:\
+                 M4VFL_modifyLumaWithScale returns error 0x%x,\
+                     returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR", err);
+            return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
+        }
+    }
+
+
+    return M4VIFI_OK;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalConvertToUTF8(M4OSA_Context pContext,
+ *                                                        M4OSA_Void* pBufferIn,
+ *                                                        M4OSA_Void* pBufferOut,
+ *                                                        M4OSA_UInt32* convertedSize)
+ *
+ * @brief    This function converts from the customer character format to UTF-8
+ * @note
+ * @param    pContext        (IN)    The integrator own context
+ * @param    pBufferIn        (IN)    Buffer to convert
+ * @param    pBufferOut        (OUT)    Converted buffer
+ * @param    convertedSize    (OUT)    Size of the converted buffer
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalConvertToUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
+                                       M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+
+    pBufferOut = pBufferIn;
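+    /* pBufferOut is received by value, so this assignment (and the one made after a
+     successful conversion below) only changes the local copy; callers are presumably
+     expected to pick up the converted string from
+     xVSS_context->UTFConversionContext.pTempOutConversionBuffer. */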
+    if(xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
+        && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+    {
+        M4OSA_UInt32 ConvertedSize = xVSS_context->UTFConversionContext.m_TempOutConversionSize;
+
+        M4OSA_memset((M4OSA_MemAddr8)xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            ,(M4OSA_UInt32)xVSS_context->UTFConversionContext.m_TempOutConversionSize,0);
+
+        err = xVSS_context->UTFConversionContext.pConvToUTF8Fct((M4OSA_Void*)pBufferIn,
+            (M4OSA_UInt8*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
+                 (M4OSA_UInt32*)&ConvertedSize);
+        if(err == M4xVSSWAR_BUFFER_OUT_TOO_SMALL)
+        {
+            M4OSA_TRACE2_1("M4xVSS_internalConvertToUTF8: pConvToUTF8Fct return 0x%x",err);
+
+            /*free too small buffer*/
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->\
+                UTFConversionContext.pTempOutConversionBuffer);
+
+            /*re-allocate the buffer*/
+            xVSS_context->UTFConversionContext.pTempOutConversionBuffer    =
+                 (M4OSA_Void*)M4OSA_malloc(ConvertedSize*sizeof(M4OSA_UInt8), M4VA,
+                     (M4OSA_Char *)"M4xVSS_internalConvertToUTF8: UTF conversion buffer");
+            if(M4OSA_NULL == xVSS_context->UTFConversionContext.pTempOutConversionBuffer)
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertToUTF8");
+                return M4ERR_ALLOC;
+            }
+            xVSS_context->UTFConversionContext.m_TempOutConversionSize = ConvertedSize;
+
+            M4OSA_memset((M4OSA_MemAddr8)xVSS_context->\
+                UTFConversionContext.pTempOutConversionBuffer,(M4OSA_UInt32)xVSS_context->\
+                    UTFConversionContext.m_TempOutConversionSize,0);
+
+            err = xVSS_context->UTFConversionContext.pConvToUTF8Fct((M4OSA_Void*)pBufferIn,
+                (M4OSA_Void*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
+                    (M4OSA_UInt32*)&ConvertedSize);
+            if(err != M4NO_ERROR)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalConvertToUTF8: pConvToUTF8Fct return 0x%x",err);
+                return err;
+            }
+        }
+        else if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalConvertToUTF8: pConvToUTF8Fct return 0x%x",err);
+            return err;
+        }
+        /*decoded path*/
+        pBufferOut = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+        (*convertedSize) = ConvertedSize;
+    }
+    return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalConvertFromUTF8(M4OSA_Context pContext,
+ *                                                        M4OSA_Void* pBufferIn,
+ *                                                        M4OSA_Void* pBufferOut,
+ *                                                        M4OSA_UInt32* convertedSize)
+ *
+ * @brief    This function converts from UTF-8 to the customer character format
+ * @note
+ * @param    pContext    (IN) The integrator own context
+ * @param    pBufferIn        (IN)    Buffer to convert
+ * @param    pBufferOut        (OUT)    Converted buffer
+ * @param    convertedSize    (OUT)    Size of the converted buffer
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalConvertFromUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
+                                        M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+
+    pBufferOut = pBufferIn;
+    if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+        && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+    {
+        M4OSA_UInt32 ConvertedSize = xVSS_context->UTFConversionContext.m_TempOutConversionSize;
+
+        M4OSA_memset((M4OSA_MemAddr8)xVSS_context->\
+            UTFConversionContext.pTempOutConversionBuffer,(M4OSA_UInt32)xVSS_context->\
+                UTFConversionContext.m_TempOutConversionSize,0);
+
+        err = xVSS_context->UTFConversionContext.pConvFromUTF8Fct\
+            ((M4OSA_Void*)pBufferIn,(M4OSA_UInt8*)xVSS_context->\
+                UTFConversionContext.pTempOutConversionBuffer, (M4OSA_UInt32*)&ConvertedSize);
+        if(err == M4xVSSWAR_BUFFER_OUT_TOO_SMALL)
+        {
+            M4OSA_TRACE2_1("M4xVSS_internalConvertFromUTF8: pConvFromUTF8Fct return 0x%x",err);
+
+            /*free too small buffer*/
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->\
+                UTFConversionContext.pTempOutConversionBuffer);
+
+            /*re-allocate the buffer*/
+            xVSS_context->UTFConversionContext.pTempOutConversionBuffer    =
+                (M4OSA_Void*)M4OSA_malloc(ConvertedSize*sizeof(M4OSA_UInt8), M4VA,
+                     (M4OSA_Char *)"M4xVSS_internalConvertFromUTF8: UTF conversion buffer");
+            if(M4OSA_NULL == xVSS_context->UTFConversionContext.pTempOutConversionBuffer)
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertFromUTF8");
+                return M4ERR_ALLOC;
+            }
+            xVSS_context->UTFConversionContext.m_TempOutConversionSize = ConvertedSize;
+
+            M4OSA_memset((M4OSA_MemAddr8)xVSS_context->\
+                UTFConversionContext.pTempOutConversionBuffer,(M4OSA_UInt32)xVSS_context->\
+                    UTFConversionContext.m_TempOutConversionSize,0);
+
+            err = xVSS_context->UTFConversionContext.pConvFromUTF8Fct((M4OSA_Void*)pBufferIn,
+                (M4OSA_Void*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
+                     (M4OSA_UInt32*)&ConvertedSize);
+            if(err != M4NO_ERROR)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalConvertFromUTF8: pConvFromUTF8Fct return 0x%x",err);
+                return err;
+            }
+        }
+        else if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalConvertFromUTF8: pConvFromUTF8Fct return 0x%x",err);
+            return err;
+        }
+        /*decoded path*/
+        pBufferOut = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+        (*convertedSize) = ConvertedSize;
+    }
+
+
+    return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/glvaudioresampler.c b/libvideoeditor/vss/src/glvaudioresampler.c
new file mode 100755
index 0000000..852329e
--- /dev/null
+++ b/libvideoeditor/vss/src/glvaudioresampler.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    glvaudioresampler.c
+ * @brief   Audio sample-rate conversion (resampling) used by the video editor
+ * @note
+ ******************************************************************************
+ */
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h"        /**< OSAL memory management */
+#include "M4OSA_Debug.h"        /**< OSAL debug management */
+#include "M4OSA_CoreID.h"
+#include "gLVAudioResampler.h"
+
+
+static void resampleStereo16(int32_t* out, int16_t* input, long outFrameCount,
+                                LVAudioResampler *resampler) ;
+static void resampleMono16(int32_t* out, int16_t* input, long outFrameCount,
+                             LVAudioResampler *resampler) ;
+
+int32_t LVAudioResamplerCreate(int bitDepth, int inChannelCount,
+        int32_t sampleRate, int quality)
+{
+    int32_t context;
+    LVAudioResampler *resampler;
+
+    resampler = (LVAudioResampler *)M4OSA_malloc(sizeof(LVAudioResampler), M4VSS3GPP,
+         (M4OSA_Char *)"LVAudioResampler");
+    context = (int32_t)resampler;
+
+    if (quality == DEFAULT)
+        quality = LOW_QUALITY;
+
+
+    switch (quality) {
+    default:
+    case LOW_QUALITY:
+        resampler->mQuality = LOW_QUALITY;
+        LVResampler_LowQualityInit(bitDepth, inChannelCount, sampleRate, context);
+        break;
+    case MED_QUALITY:
+        resampler->mQuality = MED_QUALITY;
+        break;
+    case HIGH_QUALITY:
+        resampler->mQuality = HIGH_QUALITY;
+        break;
+    }
+
+    return (context);
+}
+
+static int32_t Interp(int32_t x0, int32_t x1, uint32_t f) {
+    int32_t t_datta;
+    t_datta = x0 + (((x1 - x0) * (int32_t)(f >> kPreInterpShift)) >> kNumInterpBits);
+    return t_datta;
+}
+static void Advance(long* index, uint32_t* frac, uint32_t inc) {
+    *frac += inc;
+    *index += (long)(*frac >> kNumPhaseBits);
+    *frac &= kPhaseMask;
+}
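+
+/* The resampler walks the input with a fixed-point phase accumulator: the integer sample
+ position lives in 'index' and the fractional position in 'frac' (kNumPhaseBits bits).
+ Advance() adds the phase increment, carries any whole samples from the fraction into the
+ index and masks the fraction back down with kPhaseMask. Interp() linearly interpolates
+ between the two neighbouring samples x0 and x1, using only the top bits of the fraction
+ (f >> kPreInterpShift) so that the multiply stays cheap. */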
+
+void LVResampler_LowQualityInit(int bitDepth, int inChannelCount,
+        int32_t sampleRate, int32_t context )
+{
+    LVAudioResampler *resampler = (LVAudioResampler *) context;
+    resampler->mBitDepth = bitDepth;
+    resampler->mChannelCount = inChannelCount;
+    resampler->mSampleRate = sampleRate;
+    resampler->mInSampleRate = sampleRate;
+    resampler->mInputIndex = 0;
+    resampler->mPhaseFraction = 0;
+    // sanity check on format
+    if ((bitDepth != 16) ||(inChannelCount < 1) || (inChannelCount > 2))
+    {
+        //LOGE("Unsupported sample format, %d bits, %d channels", bitDepth,
+        //  inChannelCount);
+        // LOG_ASSERT(0);
+    }
+    // initialize common members
+    resampler->mVolume[0] =
+        resampler->mVolume[1] = 0;
+    resampler->mBuffer.frameCount = 0;
+    // save format for quick lookup
+    if (inChannelCount == 1)
+    {
+        resampler->mFormat = 1;//MONO_16_BIT;
+    }
+    else
+    {
+        resampler->mFormat = 2;//STEREO_16_BIT;
+    }
+}
+
+void LVAudiosetSampleRate(int32_t context,int32_t inSampleRate)
+{
+    LVAudioResampler *resampler = (LVAudioResampler *)context;
+    long temp;
+    temp = kPhaseMultiplier;
+
+    resampler->mInSampleRate = inSampleRate;
+    resampler->mPhaseIncrement = (uint32_t)((temp / resampler->mSampleRate)* inSampleRate );
+}
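+
+/* mPhaseIncrement holds the input/output sample-rate ratio in the phase accumulator's
+ fixed-point format (kPhaseMultiplier is presumably 1 << kNumPhaseBits, defined in the
+ header). For example, feeding 16 kHz input to a 32 kHz output rate yields an increment of
+ about kPhaseMultiplier/2: the input position advances by half an input sample per output
+ sample produced. */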
+void LVAudiosetVolume(int32_t context, int16_t left, int16_t right)
+{
+    LVAudioResampler *resampler = (LVAudioResampler *)context;
+    // TODO: Implement anti-zipper filter
+    resampler->mVolume[0] = left;
+    resampler->mVolume[1] = right;
+}
+
+
+
+static  int16_t clamp16(int32_t sample)
+{
+    if ((sample>>15) ^ (sample>>31))
+        sample = 0x7FFF ^ (sample>>31);
+    return sample;
+}
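+
+/* clamp16() saturates a 32-bit intermediate sample to the signed 16-bit range:
+ (sample>>15) ^ (sample>>31) is non-zero exactly when the value does not fit in 16 bits,
+ and 0x7FFF ^ (sample>>31) then gives 0x7FFF for positive overflow and 0xFFFF8000 (-32768
+ after truncation to int16_t) for negative overflow. For example clamp16(40000) == 32767
+ and clamp16(-40000) == -32768. */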
+
+
+static void DitherAndClamp(int32_t* out, int32_t const *sums, long c)
+{
+    long i;
+        //ditherAndClamp((int32_t*)reSampledBuffer, pTmpBuffer, outBufferSize/2);
+    for ( i=0 ; i<c ; i++)
+    {
+        int32_t l = *sums++;
+        int32_t r = *sums++;
+        int32_t nl = l >> 12;
+        int32_t nr = r >> 12;
+        l = clamp16(nl);
+        r = clamp16(nr);
+        *out++ = (r<<16) | (l & 0xFFFF);
+    }
+
+}
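+
+/* Despite its name, this helper performs no dithering: it drops the fixed-point gain that
+ vl/vr applied in the resampling loops (the >> 12), saturates each channel to 16 bits and
+ packs the pair into one 32-bit word with the right channel in the upper half-word. Reading
+ mVolume as a 4.12 gain follows the usual Android resampler convention and is an assumption
+ here, since LVAudiosetVolume() simply stores whatever values it is given. */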
+
+void LVAudioresample_LowQuality(int16_t* out,
+                                int16_t* input,
+                                long outFrameCount,
+                                int32_t context)
+{
+    LVAudioResampler *resampler = (LVAudioResampler *)context;
+
+    int32_t     *tempBuff = (int32_t *)M4OSA_malloc(
+                (outFrameCount * sizeof(int32_t) * 2),
+                M4VSS3GPP, (M4OSA_Char *)"tempBuff");
+
+    M4OSA_memset((M4OSA_MemAddr8)tempBuff,
+                (outFrameCount * sizeof(int32_t) * 2), 0);
+
+    switch (resampler->mChannelCount)
+    {
+        case 1:
+             resampleMono16(tempBuff, input, outFrameCount, resampler);
+            break;
+        case 2:
+            resampleStereo16(tempBuff, input, outFrameCount, resampler);
+            break;
+    }
+
+    // Dither and Clamp
+    DitherAndClamp((int32_t*)out, tempBuff, outFrameCount);
+
+    M4OSA_free((M4OSA_MemAddr32)tempBuff);
+}
+
+void resampleStereo16(int32_t* out, int16_t* input,long outFrameCount,
+                        LVAudioResampler *resampler)
+{
+
+    int32_t vl = resampler->mVolume[0];
+    int32_t vr = resampler->mVolume[1];
+
+    long inputIndex = resampler->mInputIndex;
+    uint32_t phaseFraction = resampler->mPhaseFraction;
+    uint32_t phaseIncrement = resampler->mPhaseIncrement;
+    long outputIndex = 0;
+
+
+    long outputSampleCount = outFrameCount * 2;
+    long inFrameCount = (outFrameCount* resampler->mInSampleRate)/resampler->mSampleRate;
+    int16_t *in;
+
+    resampler->mBuffer.i16 = input;
+
+    // LOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d\n",
+    //      outFrameCount, inputIndex, phaseFraction, phaseIncrement);
+
+    while (outputIndex < outputSampleCount)
+    {
+        resampler->mBuffer.frameCount = inFrameCount;
+        resampler->mX0L = 0;
+        resampler->mX0R = 0;
+        inputIndex = 0;
+
+        in = resampler->mBuffer.i16;
+
+        // handle boundary case
+        while (inputIndex == 0) {
+            // LOGE("boundary case\n");
+            out[outputIndex++] += vl * Interp(resampler->mX0L, in[0], phaseFraction);
+            out[outputIndex++] += vr * Interp(resampler->mX0R, in[1], phaseFraction);
+            Advance(&inputIndex, &phaseFraction, phaseIncrement);
+            if (outputIndex == outputSampleCount)
+                break;
+        }
+
+        // process input samples
+        while (outputIndex < outputSampleCount && inputIndex < resampler->mBuffer.frameCount) {
+            out[outputIndex++] += vl * Interp(in[inputIndex*2-2],
+                    in[inputIndex*2], phaseFraction);
+            out[outputIndex++] += vr * Interp(in[inputIndex*2-1],
+                    in[inputIndex*2+1], phaseFraction);
+            Advance(&inputIndex, &phaseFraction, phaseIncrement);
+        }
+
+        resampler->mX0L = resampler->mBuffer.i16[resampler->mBuffer.frameCount*2-2];
+        resampler->mX0R = resampler->mBuffer.i16[resampler->mBuffer.frameCount*2-1];
+    }
+
+resampleStereo16_exit:
+    // save state
+    resampler->mInputIndex = inputIndex;
+    resampler->mPhaseFraction = phaseFraction;
+}
+
+
+void resampleMono16(int32_t* out, int16_t* input,long outFrameCount, LVAudioResampler *resampler/*,
+        AudioBufferProvider* provider*/)
+{
+
+    int32_t vl = resampler->mVolume[0];
+    int32_t vr = resampler->mVolume[1];
+    int16_t *in;
+
+    long inputIndex = resampler->mInputIndex;
+    uint32_t phaseFraction = resampler->mPhaseFraction;
+    uint32_t phaseIncrement = resampler->mPhaseIncrement;
+    long outputIndex = 0;
+    long outputSampleCount = outFrameCount * 2;
+    long inFrameCount = (outFrameCount*resampler->mInSampleRate)/resampler->mSampleRate;
+
+    resampler->mBuffer.i16 = input;
+    resampler->mBuffer.i8 = (int8_t *)input;
+    resampler->mBuffer.raw = (void *)input;
+
+    // LOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d\n",
+    //      outFrameCount, inputIndex, phaseFraction, phaseIncrement);
+    while (outputIndex < outputSampleCount) {
+        // buffer is empty, fetch a new one
+        while (resampler->mBuffer.frameCount == 0) {
+            resampler->mBuffer.frameCount = inFrameCount;
+            //provider->getNextBuffer(&mBuffer);
+
+            if (resampler->mBuffer.raw == M4OSA_NULL) {
+                resampler->mInputIndex = inputIndex;
+                resampler->mPhaseFraction = phaseFraction;
+                goto resampleMono16_exit;
+            }
+            resampler->mX0L = 0;
+            // LOGE("New buffer fetched: %d frames\n", mBuffer.frameCount);
+            if (resampler->mBuffer.frameCount >  inputIndex)
+                break;
+
+            inputIndex -= resampler->mBuffer.frameCount;
+            resampler->mX0L = resampler->mBuffer.i16[resampler->mBuffer.frameCount-1];
+            //provider->releaseBuffer(&resampler->mBuffer);
+            // mBuffer.frameCount == 0 now so we reload a new buffer
+        }
+
+        in = resampler->mBuffer.i16;
+
+        // handle boundary case
+        while (inputIndex == 0) {
+            // LOGE("boundary case\n");
+            int32_t sample = Interp(resampler->mX0L, in[0], phaseFraction);
+            out[outputIndex++] += vl * sample;
+            out[outputIndex++] += vr * sample;
+            Advance(&inputIndex, &phaseFraction, phaseIncrement);
+            if (outputIndex == outputSampleCount)
+                break;
+        }
+
+        // process input samples
+        while (outputIndex < outputSampleCount && inputIndex < resampler->mBuffer.frameCount) {
+            int32_t sample = Interp(in[inputIndex-1], in[inputIndex],
+                    phaseFraction);
+            out[outputIndex++] += vl * sample;
+            out[outputIndex++] += vr * sample;
+            Advance(&inputIndex, &phaseFraction, phaseIncrement);
+        }
+
+        // LOGE("loop done - outputIndex=%d, inputIndex=%d\n", outputIndex, inputIndex);
+        // if done with buffer, save samples
+        if (inputIndex >= resampler->mBuffer.frameCount) {
+            inputIndex -= resampler->mBuffer.frameCount;
+
+            // LOGE("buffer done, new input index %d", inputIndex);
+            resampler->mX0L = resampler->mBuffer.i16[resampler->mBuffer.frameCount-1];
+        }
+    }
+
+resampleMono16_exit:
+    // save state
+    resampler->mInputIndex = inputIndex;
+    resampler->mPhaseFraction = phaseFraction;
+}
+