Removed unwanted line in M4READER_Amr.h

Upload VSS core files for Honeycomb

Change-Id: I61206ae2398ce8ac544c6fb01a76fe8917bce75b
diff --git a/libvideoeditor/Android.mk b/libvideoeditor/Android.mk
new file mode 100755
index 0000000..7ae3545
--- /dev/null
+++ b/libvideoeditor/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)

diff --git a/libvideoeditor/vss/3gpwriter/Android.mk b/libvideoeditor/vss/3gpwriter/Android.mk
new file mode 100755
index 0000000..5053e7d
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Types.h b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Types.h
new file mode 100755
index 0000000..9eb3166
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Types.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file    M4MP4W_Types.h
+ * @brief   Definition of types for the core MP4 writer
+ ******************************************************************************
+ */
+
+#ifndef M4MP4W_TYPES_H
+#define M4MP4W_TYPES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "NXPSW_CompilerSwitches.h"
+
+#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
+
+/* includes */
+#include "M4OSA_Types.h"
+#include "M4OSA_FileWriter.h"
+#include "M4OSA_FileReader.h"
+#include "M4SYS_Stream.h"
+
+/**
+ ******************************************************************************
+ * structure    M4MP4C_FtypBox
+ * @brief       Information to build the 'ftyp' atom
+ ******************************************************************************
+ */
+#define M4MPAC_FTYP_TAG 0x66747970 /* 'ftyp' */
+#define M4MPAC_MAX_COMPATIBLE_BRANDS 10
+typedef struct
+{
+    /* All brand fields are actually char[4] stored in big-endian integer format */
+
+    M4OSA_UInt32    major_brand;         /* generally '3gp4'            */
+    M4OSA_UInt32    minor_version;       /* generally '0000' or 'x.x '  */
+    M4OSA_UInt32    nbCompatibleBrands;  /* number of compatible brands */
+    M4OSA_UInt32    compatible_brands[M4MPAC_MAX_COMPATIBLE_BRANDS];   /* array of max compatible
+                                                                       brands */
+} M4MP4C_FtypBox;
+
+
+/**
+ ******************************************************************************
+ * structure    M4MP4W_memAddr
+ * @brief        Buffer structure for the MP4 writer
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4OSA_UInt32        size;
+    M4OSA_MemAddr32    addr;
+} M4MP4W_memAddr;
+
+/**
+ ******************************************************************************
+ * Time type for the core MP4 writer
+ ******************************************************************************
+ */
+typedef M4OSA_UInt32 M4MP4W_Time32;
+
+/**
+ ******************************************************************************
+ * enumeration   M4MP4W_State
+ * @brief        This enum defines the core MP4 writer states
+ * @note         These states are used internally, but can be retrieved from outside
+ *               the writer.
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4MP4W_opened            = 0x100,
+    M4MP4W_ready             = 0x200,
+    M4MP4W_writing           = 0x300,
+    M4MP4W_writing_startAU   = 0x301,
+    M4MP4W_closed            = 0x400
+} M4MP4W_State;
+
+/**
+ ******************************************************************************
+ * enumeration    M4MP4W_OptionID
+ * @brief        This enum defines the core MP4 writer options
+ * @note        These options give parameters for the core MP4 writer
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4MP4W_maxAUperChunk        = 0xC101,
+    M4MP4W_maxChunkSize         = 0xC102,
+    M4MP4W_maxChunkInter        = 0xC103,
+    M4MP4W_preWriteCallBack     = 0xC104,
+    M4MP4W_postWriteCallBack    = 0xC105,
+    M4MP4W_maxAUsize            = 0xC106,
+    M4MP4W_IOD                  = 0xC111,
+    M4MP4W_ESD                  = 0xC112,
+    M4MP4W_SDP                  = 0xC113,
+    M4MP4W_trackSize            = 0xC114,
+    M4MP4W_MOOVfirst            = 0xC121,
+    M4MP4W_V2_MOOF              = 0xC131,
+    M4MP4W_V2_tblCompres        = 0xC132,
+    /*warning: unspecified options:*/
+    M4MP4W_maxFileSize          = 0xC152,
+    M4MP4W_CamcoderVersion      = 0xC153, /*000 to 999 !*/
+    M4MP4W_estimateAudioSize    = 0xC154, /*audio AUs are processed after the video, */
+    /*this option MUST NOT be set if the audio frame
+    size is not constant (e.g. if SID frames are present)*/
+    M4MP4W_embeddedString       = 0xC155,
+    M4MP4W_integrationTag       = 0xC156,
+    M4MP4W_maxFileDuration      = 0xC157,
+    M4MP4W_setFtypBox           = 0xC158,
+    M4MP4W_DSI                  = 0xC159,
+    /* H.264 trimming */
+    M4MP4W_MUL_PPS_SPS          = 0xC160,
+    /* H.264 trimming */
+} M4MP4W_OptionID;
+
+/**
+ ******************************************************************************
+ * Audio & video stream IDs
+ ******************************************************************************
+ */
+#define AudioStreamID 1
+#define VideoStreamID 2
+
+/**
+ ******************************************************************************
+ * Default parameters values, that can be modified by M4MP4W_setOption
+ ******************************************************************************
+ */
+#define M4MP4W_DefaultWidth 320
+#define M4MP4W_DefaultHeight 240
+#define M4MP4W_DefaultMaxAuSize  4096 /*bytes*/
+#define M4MP4W_DefaultMaxChunkSize 100000 /*bytes*/
+#define M4MP4W_DefaultInterleaveDur 0 /*ms*/
+
+
+/**
+ ******************************************************************************
+ * structure    M4MP4W_StreamIDsize
+ * @brief        Video plane size
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4SYS_StreamID streamID;
+    M4OSA_UInt16    height;
+    M4OSA_UInt16    width;
+} M4MP4W_StreamIDsize;
+
+/**
+ ******************************************************************************
+ * structure    M4MP4W_TrackData
+ * @brief       Internal core MP4 writer track structure
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4SYS_StreamType    trackType;
+    M4OSA_UInt32        timescale;          /* T (video=1000), (AMR8=8000), (AMR16=16000)*/
+    M4OSA_UInt32        sampleSize;         /* S (video=0)*/
+    M4OSA_UInt32        sttsTableEntryNb;   /* J (audio=1)*/
+    M4MP4W_Time32        lastCTS;           /* CTS of the previous AU,
+                                               init to 0. Gives duration at the end.*/
+    M4OSA_UInt32        sampleNb;           /* K (audio=F)*/
+} M4MP4W_TrackData;
+
+/**
+ ******************************************************************************
+ * structure    M4MP4W_AudioTrackData
+ * @brief       Internal core MP4 writer audio specific structure
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4MP4W_State            microState;
+    M4MP4W_TrackData        CommonData;
+    M4OSA_UChar**           Chunk;
+    M4OSA_UInt32*           chunkSizeTable;
+#ifndef _M4MP4W_MOOV_FIRST
+    M4OSA_UInt32*           chunkOffsetTable;
+#endif /*_M4MP4W_MOOV_FIRST*/
+    M4OSA_UInt32*           chunkSampleNbTable;
+    M4OSA_UInt32*           chunkTimeMsTable;
+    M4OSA_UInt32            currentChunk;       /* Init to 0*/
+    M4OSA_UInt32            currentPos;         /* Init to 0 */
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+    M4OSA_UInt32            currentStsc;        /* Init to 0 */
+#endif
+    M4MP4W_Time32           sampleDuration;     /* Check (AMR8=160), (AMR16=320)*/
+    M4OSA_UInt32            MaxChunkSize;       /* Init to M4MP4W_Mp4FileData.MaxChunkSize*/
+    M4OSA_UInt32            MaxAUSize;          /* Init to M4MP4W_Mp4FileData.MaxAUSize*/
+    M4OSA_UInt32            LastAllocatedChunk;
+    /* previously, the audio AU size was assumed to be constant,
+     * which is actually not the case when silence frames (SID) are present.*/
+    /* at the first audio AU, sampleSize is set. It is later reset to 0 if the size is not constant.*/
+    /* So sampleSize should be tested to know whether or not there is a TABLE_STSZ. */
+    M4OSA_UInt32*           TABLE_STSZ; /* table size is 4K*/
+    M4OSA_UInt32            nbOfAllocatedStszBlocks;
+    M4OSA_UInt32*           TABLE_STTS;
+    M4OSA_UInt32            nbOfAllocatedSttsBlocks;
+    M4OSA_UInt32            maxBitrate;     /*not used in amr case*/
+    M4OSA_UInt32            avgBitrate;     /*not used in amr case*/
+    M4OSA_UChar*            DSI;            /* Decoder Specific Info: May be M4OSA_NULL
+                                            (defaulted) for AMR */
+    M4OSA_UInt8             dsiSize;        /* DSI size, always 9 bytes for AMR */
+} M4MP4W_AudioTrackData;
+
+
+/**
+ ******************************************************************************
+ * structure    M4MP4W_VideoTrackData
+ * @brief        Internal core MP4 writer video specific structure
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4MP4W_State            microState;
+    M4MP4W_TrackData        CommonData;
+    M4OSA_UChar**           Chunk;
+    M4OSA_UInt32*           chunkSizeTable;
+#ifndef _M4MP4W_MOOV_FIRST
+    M4OSA_UInt32*           chunkOffsetTable;
+#endif /*_M4MP4W_MOOV_FIRST*/
+    M4OSA_UInt32*           chunkSampleNbTable;
+    M4MP4W_Time32*          chunkTimeMsTable;
+    M4OSA_UInt32            currentChunk;            /* Init to 0*/
+    M4OSA_UInt32            currentPos ;             /* Init to 0*/
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+    M4OSA_UInt32            currentStsc;             /* Init to 0*/
+#endif
+    M4OSA_UInt32            stssTableEntryNb ;       /* N*/
+    M4OSA_UInt16            width;                   /* X*/
+    M4OSA_UInt16            height;                  /* Y*/
+    M4OSA_UInt32*           TABLE_STTS;              /* table size is J*/
+    M4OSA_UInt32            nbOfAllocatedSttsBlocks;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+    M4OSA_UInt16*           TABLE_STSZ;              /* table size is 2K*/
+#else
+    M4OSA_UInt32*           TABLE_STSZ;              /* table size is 4K*/
+#endif
+    M4OSA_UInt32            nbOfAllocatedStszBlocks;
+    M4OSA_UInt32*           TABLE_STSS;              /* table size is N*/
+    M4OSA_UInt32            nbOfAllocatedStssBlocks;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+    M4OSA_UInt32            MaxAUperChunk;           /*Init to 0, i.e. not used*/
+#endif
+    M4OSA_UInt32            MaxChunkSize;            /*Init to M4MP4W_Mp4FileData.MaxChunkSize*/
+    M4OSA_UInt32            MaxAUSize;               /*Init to M4MP4W_Mp4FileData.MaxAUSize*/
+    M4OSA_UInt32            LastAllocatedChunk;
+    M4OSA_UInt32            maxBitrate;
+    M4OSA_UInt32            avgBitrate;
+    M4OSA_UChar*            DSI;            /* Decoder Specific Info: May be M4OSA_NULL
+                                            (defaulted) for H263*/
+    M4OSA_UInt8             dsiSize;        /* DSI size, always 7 bytes for H263 */
+} M4MP4W_VideoTrackData;
+
+/**
+ ******************************************************************************
+ * structure    M4MP4W_Mp4FileData
+ * @brief       Internal core MP4 writer private context structure
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4MP4W_State                  state;
+    M4OSA_Char*                   url;
+    M4OSA_UInt32                  duration;    /* D in ms, max duration of audio&video*/
+    M4OSA_UInt32                  filesize;    /* actual filesize in bytes*/
+    M4MP4W_AudioTrackData*        audioTrackPtr;
+    M4OSA_Bool                    hasAudio;
+    M4MP4W_VideoTrackData*        videoTrackPtr;
+    M4OSA_Bool                    hasVideo;
+    M4OSA_UInt32                  MaxChunkSize;       /* Init to 100000*/
+    M4OSA_UInt32                  MaxAUSize;          /* Init to 4096*/
+    M4OSA_UInt32                  MaxFileSize;        /* Init to 0, i.e. not used*/
+    M4MP4W_Time32                 InterleaveDur;      /* Init to 0, i.e. not used, ms*/
+    /* M4MP4W_WriteCallBack            PreWriteCallBack;*/    /*Init to M4OSA_NULL*/
+    /* M4MP4W_WriteCallBack            PostWriteCallBack;*/ /*Init to M4OSA_NULL*/
+    M4OSA_FileWriterPointer*      fileWriterFunctions;
+    M4OSA_FileReadPointer*        fileReaderFunctions;
+    M4OSA_UInt32                  camcoderVersion;
+    M4OSA_Bool                    estimateAudioSize;  /* default is false*/
+    M4OSA_UInt32                  audioMsChunkDur;    /* in ms, set only if estimateAudioSize
+                                                         is true*/
+    M4OSA_UInt32                  audioMsStopTime;    /* time to stop audio, set only if
+                                                         estimateAudioSize is true*/
+    M4OSA_Context                 fileWriterContext;
+#ifndef _M4MP4W_MOOV_FIRST
+    M4OSA_UInt32                  absoluteCurrentPos; /* new field for offset update*/
+#endif /*_M4MP4W_MOOV_FIRST*/
+    M4OSA_UChar*                  embeddedString;     /* 16 bytes string, default value
+                                                         written if NULL*/
+    M4OSA_UChar*                  integrationTag;     /* 60 bytes string, memset to 0 if NULL */
+    M4OSA_UInt32                  MaxFileDuration;    /* Init to 0, i.e. not used*/
+    M4MP4C_FtypBox                ftyp;               /* ftyp atom, if not defined set major_brand
+                                                            = 0, will use default box */
+#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
+    M4OSA_Char*                    safetyFileUrl;
+    M4OSA_Bool                        cleanSafetyFile;
+#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+    M4OSA_Bool                               bMULPPSSPS;
+} M4MP4W_Mp4FileData;
+
+#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*M4MP4W_TYPES_H*/
+
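As an illustration of the structure above, here is a minimal sketch (not part of this change) of how the optional 'ftyp' box could be populated and handed to the writer through the M4MP4W_setFtypBox option declared in M4MP4W_Writer.h; the pMP4Context handle, the 'isom' compatible brand and the omitted error handling are assumptions of the example:

    M4MP4C_FtypBox ftyp;
    M4OSA_ERR      err;

    /* brands are char[4] values packed as big-endian integers, per the struct comment */
    ftyp.major_brand          = 0x33677034;   /* '3gp4' */
    ftyp.minor_version        = 0;
    ftyp.nbCompatibleBrands   = 2;
    ftyp.compatible_brands[0] = 0x33677034;   /* '3gp4' */
    ftyp.compatible_brands[1] = 0x69736F6D;   /* 'isom' (illustrative) */

    err = M4MP4W_setOption(pMP4Context, M4MP4W_setFtypBox, (M4OSA_DataOption)&ftyp);
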
diff --git a/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Utils.h b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Utils.h
new file mode 100755
index 0000000..c7d55f2
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Utils.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file    M4MP4W_Utils.h
+ * @brief   Utilities and private functions declaration for the MP4 writer
+ ******************************************************************************
+ */
+
+#ifndef M4MP4W_UTILS_H
+#define M4MP4W_UTILS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "NXPSW_CompilerSwitches.h"
+
+#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
+
+/* includes */
+#include "M4OSA_Types.h"
+#include "M4OSA_FileWriter.h"
+
+
+/**
+ ******************************************************************************
+ * Utility functions to write data in big endian
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_putByte(M4OSA_UChar c,    M4OSA_FileWriterPointer* fileFunction,
+                         M4OSA_Context context);
+M4OSA_ERR M4MP4W_putBE16(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
+                         M4OSA_Context context);
+M4OSA_ERR M4MP4W_putBE24(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
+                         M4OSA_Context context);
+M4OSA_ERR M4MP4W_putBE32(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
+                         M4OSA_Context context);
+
+/**
+ ******************************************************************************
+ * Write a bulk of data into the specified file, size is given in bytes
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_putBlock(const M4OSA_UChar* Block, M4OSA_UInt32 size,
+                          M4OSA_FileWriterPointer* fileFunction, M4OSA_Context context);
+
+/**
+ ******************************************************************************
+ * Convert the 'nb' unsigned integers in 'tab' table from LE into BE
+ ******************************************************************************
+ */
+void M4MP4W_table32ToBE(M4OSA_UInt32* tab, M4OSA_UInt32 nb);
+
+/**
+ ******************************************************************************
+ * Convert an unsigned 32 bits integer from LE into BE
+ ******************************************************************************
+ */
+void M4MP4W_convertInt32BE(M4OSA_UInt32* valPtr);
+
+/**
+ ******************************************************************************
+ * Re-allocation function
+ ******************************************************************************
+ */
+void* M4MP4W_realloc(M4OSA_MemAddr32 ptr, M4OSA_UInt32 oldSize, M4OSA_UInt32 newSize);
+
+/**
+ ******************************************************************************
+ * De-allocate the context
+ * This method is no longer in the writer external interface, but is called from
+ * the function M4MP4W_closeWrite
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_freeContext(M4OSA_Context context);
+
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+/**
+ ******************************************************************************
+ * Put/get the high and low u16 parts of a u32 variable
+ ******************************************************************************
+ */
+M4OSA_Void M4MP4W_put32_Hi(M4OSA_UInt32* tab, M4OSA_UInt16 Hi);
+M4OSA_Void M4MP4W_put32_Lo(M4OSA_UInt32* tab, M4OSA_UInt16 Lo);
+M4OSA_UInt16 M4MP4W_get32_Hi(M4OSA_UInt32* tab);
+M4OSA_UInt16 M4MP4W_get32_Lo(M4OSA_UInt32* tab);
+#endif
+
+#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*M4MP4W_UTILS_H*/
+
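For reference, a minimal sketch of the in-place LE-to-BE conversion that M4MP4W_convertInt32BE declares above; the real implementation lives in M4MP4W_Utils.c and may differ in detail:

    static void sketch_convertInt32BE(M4OSA_UInt32* valPtr)
    {
        M4OSA_UInt32 v = *valPtr;

        /* swap the four bytes so the most significant byte is stored first */
        *valPtr = ((v & 0x000000FFu) << 24) |
                  ((v & 0x0000FF00u) <<  8) |
                  ((v & 0x00FF0000u) >>  8) |
                  ((v & 0xFF000000u) >> 24);
    }
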
diff --git a/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Writer.h b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Writer.h
new file mode 100755
index 0000000..715a7c6
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/inc/M4MP4W_Writer.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file    M4MP4W_Writer.h
+ * @brief   Core MP4 writer interface
+ * @note    This file declares the MP4 writer interface functions.
+ *          The MP4 writer specific types are defined in file M4MP4W_Types.h
+ ******************************************************************************
+ */
+#ifndef M4MP4W_WRITER_H
+#define M4MP4W_WRITER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "NXPSW_CompilerSwitches.h"
+
+#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
+
+/* includes */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_FileWriter.h"
+#include "M4OSA_FileReader.h"
+#include "M4SYS_AccessUnit.h"
+#include "M4MP4W_Types.h"
+
+/**
+ ******************************************************************************
+ * MP4W Errors & Warnings definition
+ ******************************************************************************
+ */
+#define M4WAR_MP4W_OVERSIZE         M4OSA_ERR_CREATE(M4_WAR, M4MP4_WRITER ,0x000001)
+#define M4WAR_MP4W_NOT_EVALUABLE    M4OSA_ERR_CREATE(M4_WAR, M4MP4_WRITER ,0x000002)
+
+/**
+ ******************************************************************************
+ * @brief    Get MP4W version
+ * @param    major            (OUT) Pointer to the 'major' version number.
+ * @param    minor            (OUT) Pointer to the 'minor' version number.
+ * @param    revision         (OUT) Pointer to the 'revision' number.
+ * @return   M4NO_ERROR:         No error
+ * @return   M4ERR_PARAMETER:    At least one parameter is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_getVersion(M4OSA_UInt8* major,
+                            M4OSA_UInt8* minor,
+                            M4OSA_UInt8* revision);
+
+/**
+ ******************************************************************************
+ * @brief    Initiation of the MP4 file creation
+ * @param    contextPtr             (OUT) Pointer to the MP4 writer context to create.
+ * @param    outputFileDescriptor   (IN)  Descriptor of the output file to open.
+ * @param    fileWriterFunction     (IN)  Pointer to structure containing the set of
+ *                                          OSAL file write functions.
+ * @param    tempFileDescriptor     (IN)  Descriptor of the temporary file to open.
+ * @param    fileReaderFunction     (IN)  Pointer to structure containing the set of
+ *                                          OSAL file read functions.
+ * @return    M4NO_ERROR:         No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is null or incorrect
+ * @return    M4ERR_ALLOC:        Memory allocation failed
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_openWrite( M4OSA_Context*                  contextPtr,
+                            void*                           outputFileDescriptor,
+                            M4OSA_FileWriterPointer*        fileWriterFunction,
+                            void*                           tempFileDescriptor,
+                            M4OSA_FileReadPointer*          fileReaderFunction );
+
+/**
+ ******************************************************************************
+ * @brief    Add a new track
+ * @param    context              (IN/OUT)  MP4 writer context.
+ * @param    streamDescPtr        (IN)      Pointer to the structure containing the
+                                            parameters for the new track.
+ * @return    M4NO_ERROR:         No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is null or incorrect
+ * @return    M4ERR_ALLOC:        Memory allocation failed
+ * @return    M4ERR_STATE:        Invalid state
+ * @return    M4ERR_BAD_CONTEXT:  An audio (resp.video) stream has already been added
+ *                                to this context while attempting to add another one,
+ *                                which is forbidden.
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_addStream( M4OSA_Context                context,
+                            M4SYS_StreamDescription*     streamDescPtr);
+
+/**
+ ******************************************************************************
+ * @brief   Signal to the core MP4 writer that there are no more tracks to add
+ * @param   context             (IN/OUT) MP4 writer context.
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is null or incorrect
+ * @return  M4ERR_ALLOC:        Memory allocation failed
+ * @return  M4ERR_STATE:        Invalid state
+ * @return  M4ERR_BAD_CONTEXT:  Audio size estimation is required but fewer than two
+ *                              streams have been added.
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_startWriting( M4OSA_Context context);
+
+/**
+ ******************************************************************************
+ * @brief   Asks the core MP4 writer to initiate the access unit creation in
+ *          the streamID track
+ * @param   context             (IN/OUT) MP4 writer context.
+ * @param   streamID            (IN) Stream ID of the track.
+ * @param   auPtr               (IN/OUT) Access unit.
+ * @return    M4NO_ERROR:         No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is null or incorrect
+ * @return    M4ERR_BAD_STREAM_ID:Unknown stream ID
+ * @return    M4ERR_ALLOC:        Memory allocation failed
+ * @return    M4ERR_STATE:        Invalid state
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_startAU( M4OSA_Context        context,
+                          M4SYS_StreamID       streamID,
+                          M4SYS_AccessUnit*    auPtr);
+
+/**
+ ******************************************************************************
+ * @brief   Ask the core MP4 writer to write the access unit in the streamID track
+ * @note    If M4WAR_MP4W_OVERSIZE is returned, M4MP4W_startAU must not be called again;
+ *          call M4MP4W_closeWrite() directly instead.
+ * @param   context             (IN/OUT)   MP4 writer context.
+ * @param   streamID            (IN)       Stream ID of the track.
+ * @param   auPtr               (IN/OUT)   Access unit.
+ * @return    M4NO_ERROR:                 No error
+ * @return    M4ERR_PARAMETER:            At least one parameter is null or incorrect
+ * @return    M4ERR_BAD_STREAM_ID:        Unknown stream ID
+ * @return    M4ERR_ALLOC:                Memory allocation failed
+ * @return    M4ERR_STATE:                Invalid state
+ * @return    M4WAR_MP4W_NOT_EVALUABLE:   It is not possible to evaluate audio size if audio
+ *                                        samples don't have a constant size.
+ * @return    M4WAR_MP4W_OVERSIZE:        Max file size was reached
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_processAU( M4OSA_Context        context,
+                            M4SYS_StreamID       streamID,
+                            M4SYS_AccessUnit*    auPtr);
+
+/**
+ ******************************************************************************
+ * @brief     Close the MP4 file
+ * @note      In previous versions of the MP4 writer, the M4MP4W_freeContext method
+ *            was in the interface, which is not the case anymore.
+ *            The context is now always deallocated in the M4MP4W_closeWrite function.
+ * @param     context             (IN/OUT) MP4 writer context.
+ * @return    M4NO_ERROR:         No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is null or incorrect
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_closeWrite( M4OSA_Context context);
+
+/**
+ ******************************************************************************
+ * @brief    Ask the core MP4 writer to return the value associated with the optionID
+ * @param    context                (IN)    MP4 writer context.
+ * @param    option                 (IN)    Option ID.
+ * @param    valuePtr               (OUT)   Pointer to the option value.
+ * @return    M4NO_ERROR:             No error
+ * @return    M4ERR_PARAMETER:        At least one parameter is null or incorrect
+ * @return    M4ERR_NOT_IMPLEMENTED:  Not implemented in the current version
+ * @return    M4ERR_BAD_OPTION_ID:    Unknown optionID
+ * @return    M4ERR_BAD_STREAM_ID:    Bad stream ID in the option value
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_getOption( M4OSA_Context        context,
+                            M4OSA_OptionID        option,
+                            M4OSA_DataOption    *valuePtr);
+
+/**
+ ******************************************************************************
+ * @brief    Ask the core MP4 writer to set the value associated with the optionID.
+ * @param    context              (IN/OUT)  MP4 writer context.
+ * @param    option               (IN)      Option ID.
+ * @param    value                (IN)      Option value.
+ * @return    M4NO_ERROR:             No error
+ * @return    M4ERR_PARAMETER:        At least one parameter is null or incorrect
+ * @return    M4ERR_NOT_IMPLEMENTED:  Not implemented in the current version
+ * @return    M4ERR_BAD_OPTION_ID:    Unknown optionID
+ * @return    M4ERR_BAD_STREAM_ID:    Bad stream ID in the option value
+ * @return    M4ERR_ALLOC:            A memory allocation failed
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_setOption( M4OSA_Context       context,
+                            M4OSA_OptionID      option,
+                            M4OSA_DataOption    value);
+
+/**
+ ******************************************************************************
+ * @brief    Ask the core MP4 writer to return its state.
+ * @note     By selecting a specific streamID (not null), the caller can obtain
+ *           the state of a specific stream. By using 0 as streamID the returned
+ *           state is not stream specific.
+ * @param    context                (IN/OUT) MP4 writer context.
+ * @param    statePtr               (OUT)    Pointer to the state enumeration.
+ * @param    streamID               (IN)     Stream ID of the stream whose micro-state is
+ *                                           retrieved (0 for the global state).
+ * @return    M4NO_ERROR:             No error
+ * @return    M4ERR_BAD_STREAM_ID:    Unknown stream ID
+ * @return    M4ERR_PARAMETER:        At least one parameter is null or incorrect
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_getState( M4OSA_Context    context,
+                           M4MP4W_State*    statePtr,
+                           M4SYS_StreamID   streamID);
+
+/**
+ ******************************************************************************
+ * @brief    Get the currently expected file size
+ * @param    context             (IN/OUT) MP4 writer context.
+ * @param    currentFileSize     (OUT)    Pointer to the currently expected file size.
+ * @return   M4NO_ERROR:         No error
+ * @return   M4ERR_PARAMETER:    At least one parameter is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4MP4W_getCurrentFileSize( M4OSA_Context        context,
+                                     M4OSA_UInt32*        currentFileSize);
+
+#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+
+#endif /*M4MP4W_WRITER_H*/
+
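Taken together, the interface above implies the call sequence sketched below; the file descriptors, OSAL function-pointer tables, stream description, loop condition and access-unit filling are assumptions of the example, and error checking is omitted:

    M4OSA_Context     ctx = M4OSA_NULL;
    M4SYS_AccessUnit  au;
    M4OSA_ERR         err;

    err = M4MP4W_openWrite(&ctx, outputFileDescriptor, &fileWriterFcts,
                           tempFileDescriptor, &fileReaderFcts);
    err = M4MP4W_addStream(ctx, &videoStreamDesc);   /* one audio and/or one video track */
    err = M4MP4W_startWriting(ctx);                  /* no more tracks may be added after this */

    while (moreAccessUnits)
    {
        err = M4MP4W_startAU(ctx, VideoStreamID, &au);
        /* ... fill the access unit payload and timestamps ... */
        err = M4MP4W_processAU(ctx, VideoStreamID, &au);
    }

    err = M4MP4W_closeWrite(ctx);                    /* also deallocates the writer context */
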
diff --git a/libvideoeditor/vss/3gpwriter/src/Android.mk b/libvideoeditor/vss/3gpwriter/src/Android.mk
new file mode 100755
index 0000000..ff7e005
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/src/Android.mk
@@ -0,0 +1,62 @@
+#
+# Copyright (C) 2011 NXP Software
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+#
+# lib3gpwriter
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE:= libvideoeditor_3gpwriter
+
+LOCAL_SRC_FILES:=          \
+      M4MP4W_Interface.c \
+      M4MP4W_Utils.c \
+      M4MP4W_Writer.c
+
+LOCAL_MODULE_TAGS := development
+
+LOCAL_SHARED_LIBRARIES := libcutils libutils
+
+LOCAL_STATIC_LIBRARIES := \
+    libvideoeditor_osal
+
+LOCAL_C_INCLUDES += \
+    $(TOP)/frameworks/media/libvideoeditor/osal/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/3gpwriter/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/common/inc
+
+ifeq ($(TARGET_SIMULATOR),true)
+else
+    LOCAL_SHARED_LIBRARIES += libdl
+endif
+
+# All of the shared libraries we link against.
+LOCAL_LDLIBS := \
+    -lpthread -ldl
+
+LOCAL_CFLAGS += -Wno-multichar \
+    -DDUPLICATE_STTS_IN_LAST_AU
+
+# Don't prelink this library.  For more efficient code, you may want
+# to add this library to the prelink map and set this to true.
+LOCAL_PRELINK_MODULE := false
+
+include $(BUILD_STATIC_LIBRARY)
+
diff --git a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Interface.c b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Interface.c
new file mode 100755
index 0000000..c719fe6
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Interface.c
@@ -0,0 +1,916 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file    M4MP4W_Interface.c
+ * @brief    3GPP file writer interface
+ * @note    This implementation follows the common interface defined
+ *          in "M4WRITER_common.h".
+ ******************************************************************************
+*/
+
+#include "NXPSW_CompilerSwitches.h"
+
+/**
+ * OSAL includes */
+#include "M4OSA_Types.h"            /**< OSAL basic types definiton */
+#include "M4OSA_FileWriter.h"        /**< Include for OSAL file accesses implementation */
+#include "M4OSA_Memory.h"            /**< Include for OSAL memory accesses implementation */
+#include "M4OSA_Debug.h"            /**< OSAL debug tools */
+#include "M4OSA_CharStar.h"            /**< For M4OSA_chrLength() */
+
+/**
+ * Writer includes */
+#include "M4WRITER_common.h"        /**< Definition of the writer common interface that
+                                          this module follows */
+
+#ifdef _M4MP4W_USE_CST_MEMORY_WRITER
+#include "M4MP4W_Types_CstMem.h"    /**< MP4/3GP core writer types */
+#include "M4MP4W_Writer_CstMem.h"    /**< MP4/3GP core writer functions */
+#else
+#include "M4MP4W_Types.h"            /**< MP4/3GP core writer types */
+#include "M4MP4W_Writer.h"            /**< MP4/3GP core writer functions */
+#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
+
+/**
+ * Specific errors for this module */
+#define M4WRITER_3GP_ERR_UNSUPPORTED_STREAM_TYPE \
+                M4OSA_ERR_CREATE(M4_ERR, M4WRITER_3GP, 0x000001)
+
+
+/**
+ ******************************************************************************
+ * structure    M4WRITER_3GP_InternalContext
+ * @brief        This structure defines the writer context (private)
+ * @note        This structure is used for all writer calls to store the context
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_Context    pMP4Context;    /**< MP4 writer context */
+    M4OSA_UInt32    maxAUsizes;        /**< the maximum AU size possible */
+} M4WRITER_3GP_InternalContext;
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_openWrite(M4WRITER_Context* pContext, void* outputFileDescriptor,
+ *                                  M4OSA_FileWriterPointer* pFileWriterPointer,
+ *                                  void* tempFileDescriptor, M4OSA_FileReadPointer* pFileReaderPointer)
+ * @brief    Open a writer session.
+ * @note
+ * @param    pContext:     (OUT) Execution context of the 3GP writer, allocated by this function.
+ * @param    outputFileDescriptor (IN)  Descriptor of the output file to create.
+ * @param    pFileWriterPointer     (IN)  Pointer to structure containing the set of OSAL
+ *                                       file write functions.
+ * @param    tempFileDescriptor     (IN)  Descriptor of the temporary file to open
+ *                                        (NULL if not used)
+ * @param    pFileReaderPointer     (IN)  Pointer to structure containing the set of OSAL file read
+ *                                      functions (NULL if not used)
+ * @return    M4NO_ERROR:  there is no error
+ * @return    M4ERR_ALLOC: there is no more available memory
+ * @return    M4ERR_PARAMETER: pContext or pFileWriterPointer is M4OSA_NULL (debug only)
+ * @return    any error returned by the MP4 core writer openWrite (Its coreID is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_openWrite( M4WRITER_Context* pContext,
+                                  void* outputFileDescriptor,
+                                  M4OSA_FileWriterPointer* pFileWriterPointer,
+                                  void* tempFileDescriptor,
+                                  M4OSA_FileReadPointer* pFileReaderPointer )
+{
+    M4WRITER_3GP_InternalContext* apContext;
+    M4OSA_ERR err;
+
+    M4OSA_TRACE1_0("M4WRITER_3GP_openWrite");
+
+    /**
+     *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext),M4ERR_PARAMETER,
+         "M4WRITER_3GP_openWrite: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWriterPointer),M4ERR_PARAMETER,
+         "M4WRITER_3GP_openWrite: pFileWriterPointer is M4OSA_NULL");
+
+    /**
+     *    Allocate memory for the context */
+    *pContext=M4OSA_NULL;
+    apContext = (M4WRITER_3GP_InternalContext*)M4OSA_malloc(
+                    sizeof(M4WRITER_3GP_InternalContext),
+                    M4WRITER_3GP,
+                    (M4OSA_Char *)"M4WRITER_3GP_InternalContext");
+
+    if (M4OSA_NULL == apContext)
+    {
+        M4OSA_TRACE1_0("M4WRITER_3GP_openWrite:\
+             unable to allocate context, returning M4ERR_ALLOC");
+        return (M4OSA_ERR)M4ERR_ALLOC;
+    }
+
+    /**
+     *    Reset context variables */
+    apContext->pMP4Context = M4OSA_NULL;
+    apContext->maxAUsizes = 0;
+
+    /**
+     *    Return the writer context */
+    *pContext = (M4WRITER_Context *)apContext;
+
+    /**
+     *    Launch the openWrite of the MP4 writer */
+    M4OSA_TRACE3_0("M4WRITER_3GP_openWrite: calling M4MP4W_openWrite()");
+
+    err = M4MP4W_openWrite(&apContext->pMP4Context, outputFileDescriptor,
+            pFileWriterPointer, tempFileDescriptor, pFileReaderPointer );
+
+    if (M4OSA_ERR_IS_ERROR(err))
+    {
+        M4OSA_TRACE1_1("M4WRITER_3GP_openWrite: "
+                       "M4MP4W_openWrite returns error 0x%x", err);
+    }
+
+    M4OSA_TRACE2_1("M4WRITER_3GP_openWrite: returning 0x%x", err);
+
+    return err;
+}
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_startWriting(M4WRITER_Context pContext)
+ * @brief    Indicates to the writer that the setup session is ended and that
+ *          we will start to write.
+ * @note
+ * @param     pContext:   (IN) Execution context of the 3GP writer,
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return    any error returned by the MP4 core writer startWriting (Its
+ *            coreID is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_startWriting(M4WRITER_Context pContext)
+{
+    M4WRITER_3GP_InternalContext* apContext =
+                (M4WRITER_3GP_InternalContext*)pContext;
+
+    M4OSA_ERR err;
+
+    M4OSA_TRACE1_1("M4WRITER_3GP_startWriting: pContext=0x%x", pContext);
+
+    /**
+     *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == apContext),M4ERR_PARAMETER,
+         "M4WRITER_3GP_startWriting: pContext is M4OSA_NULL");
+
+    /**
+     *    Call the MP4 core writer */
+    M4OSA_TRACE3_0("M4WRITER_3GP_startWriting: calling M4MP4W_startWriting()");
+    err = M4MP4W_startWriting(apContext->pMP4Context);
+    if (M4OSA_ERR_IS_ERROR(err))
+    {
+        M4OSA_TRACE1_1("M4MP4W_startWriting returns error 0x%x", err);
+    }
+
+    M4OSA_TRACE2_1("M4WRITER_3GP_startWriting: returning 0x%x", err);
+    return err;
+}
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_addStream(
+ *     M4WRITER_Context pContext,
+ *     M4SYS_StreamDescription *pStreamDescription)
+ * @brief     Add a stream (audio or video).
+ * @note      Decoder specific info properties are correctly set before calling
+ *            the core writer add function
+ * @param     pContext:   (IN) Execution context of the 3GP writer,
+ * @param     pStreamDescription:   (IN) Stream description.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext or pStreamDescription is M4OSA_NULL
+ *            (debug only)
+ * @return    any error returned by the MP4 core writer addStream
+ *            (Its coreID is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_addStream(M4WRITER_Context pContext,
+                                 M4SYS_StreamDescription* pStreamDescription)
+{
+    M4WRITER_3GP_InternalContext *apContext =
+        (M4WRITER_3GP_InternalContext *)pContext;
+
+    M4OSA_ERR err;
+    M4WRITER_StreamVideoInfos *pVideoInfo = M4OSA_NULL;
+    M4WRITER_StreamAudioInfos *pAudioInfo = M4OSA_NULL;
+    M4MP4W_StreamIDsize sizeValue;
+
+    M4OSA_TRACE1_2("M4WRITER_3GP_addStream: pContext=0x%x, "
+                   "pStreamDescription=0x%x",
+                   pContext, pStreamDescription);
+
+    /**
+     *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == apContext),M4ERR_PARAMETER,
+         "M4WRITER_3GP_addStream: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pStreamDescription),M4ERR_PARAMETER,
+         "M4WRITER_3GP_addStream: pStreamDescription is M4OSA_NULL");
+
+    /**
+     *    Adapt audio/video stream infos */
+    switch (pStreamDescription->streamType)
+    {
+        case M4SYS_kMPEG_4:
+        case M4SYS_kH264:
+        case M4SYS_kH263:
+            M4OSA_TRACE3_1("M4WRITER_3GP_addStream: "
+                    "adding a Video stream (streamType=0x%x)",
+                    pStreamDescription->streamType);
+            /**
+             *    Common descriptions */
+            pStreamDescription->streamID = VideoStreamID;    /**< The only values checked by our
+                                                                  core writer are streamID */
+            pStreamDescription->timeScale = 1000;            /**< and timeScale */
+
+/* Not recommended for video editing -> explicitly write the 'bitr' box into 'd263' */
+/* Rem : it is REL 5 of 3gpp documentation */
+//            /**
+//             * Average bit-rate must not be set in H263 to be compatible with Platform4 */
+//            if (M4SYS_kH263 == pStreamDescription->streamType)
+//            {
+//                pStreamDescription->averageBitrate = -1;
+//            }
+
+            /**
+             *    Decoder specific info */
+            pVideoInfo = (M4WRITER_StreamVideoInfos *)pStreamDescription->decoderSpecificInfo;
+            pStreamDescription->decoderSpecificInfoSize = pVideoInfo->Header.Size;
+            pStreamDescription->decoderSpecificInfo = (M4OSA_MemAddr32)pVideoInfo->Header.pBuf;
+            M4OSA_TRACE3_2("M4WRITER_3GP_addStream: Video: DSI=0x%x, DSIsize=%d",
+                 pVideoInfo->Header.pBuf, pVideoInfo->Header.Size);
+            break;
+
+        case M4SYS_kAMR:
+        case M4SYS_kAMR_WB:
+        case M4SYS_kAAC:
+        case M4SYS_kEVRC:
+            M4OSA_TRACE3_1("M4WRITER_3GP_addStream: adding an Audio stream (streamType=0x%x)",
+                 pStreamDescription->streamType);
+            /**
+             *    Common descriptions */
+            pStreamDescription->streamID = AudioStreamID;    /**< The only value checked by our
+                                                                 core writer is streamID */
+
+            /**
+             *    Decoder specific info */
+            pAudioInfo = (M4WRITER_StreamAudioInfos *)pStreamDescription->decoderSpecificInfo;
+            pStreamDescription->decoderSpecificInfoSize = pAudioInfo->Header.Size;
+            pStreamDescription->decoderSpecificInfo = (M4OSA_MemAddr32)pAudioInfo->Header.pBuf;
+            M4OSA_TRACE3_2("M4WRITER_3GP_addStream: Audio: DSI=0x%x, DSIsize=%d",
+                 pAudioInfo->Header.pBuf, pAudioInfo->Header.Size);
+            break;
+
+        default:
+            M4OSA_TRACE1_1("M4WRITER_3GP_addStream:\
+                 returning M4WRITER_3GP_ERR_UNSUPPORTED_STREAM_TYPE (streamType=0x%x)",
+                     pStreamDescription->streamType);
+            return (M4OSA_ERR)M4WRITER_3GP_ERR_UNSUPPORTED_STREAM_TYPE;
+            break;
+    }
+
+    /**
+     *    Call the MP4 core writer */
+    M4OSA_TRACE3_0("M4WRITER_3GP_addStream: calling M4MP4W_addStream()");
+    err = M4MP4W_addStream(apContext->pMP4Context,pStreamDescription);
+    if (M4OSA_ERR_IS_ERROR(err))
+    {
+        M4OSA_TRACE1_1("M4WRITER_3GP_addStream: M4MP4W_addStream returns error 0x%x", err);
+        M4OSA_TRACE1_1("M4WRITER_3GP_addStream: returning 0x%x", err);
+        return (err);
+    }
+
+    /**
+     *    For Video, set the M4MP4W_trackSize Option */
+    switch (pStreamDescription->streamType)
+    {
+        case M4SYS_kMPEG_4:
+        case M4SYS_kH264:
+        case M4SYS_kH263:
+            sizeValue.streamID = VideoStreamID;
+            sizeValue.height = (M4OSA_UInt16)(pVideoInfo->height);
+            sizeValue.width  = (M4OSA_UInt16)(pVideoInfo->width);
+            M4OSA_TRACE3_2("M4WRITER_3GP_addStream: Video: height=%d, width=%d",
+                 sizeValue.height, sizeValue.width);
+
+            M4OSA_TRACE3_0("M4WRITER_3GP_addStream: calling M4MP4W_setOption(M4MP4W_trackSize)");
+            err = M4MP4W_setOption( apContext->pMP4Context, M4MP4W_trackSize,
+                 (M4OSA_DataOption)&sizeValue);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4WRITER_3GP_addStream: M4MP4W_setOption returns error 0x%x",
+                     err);
+            }
+            break;
+        default:
+            break;
+    }
+
+    M4OSA_TRACE2_1("M4WRITER_3GP_addStream: returning 0x%x", err);
+    return err;
+}
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_closeWrite(M4WRITER_Context pContext)
+ * @brief    Close the writer. The context is freed here.
+ * @note
+ * @param     pContext:   (IN) Execution context of the 3GP writer,
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return    any error returned by the MP4 core writer closeWrite (Its coreID
+ *            is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_closeWrite(M4WRITER_Context pContext)
+{
+    M4WRITER_3GP_InternalContext* apContext=(M4WRITER_3GP_InternalContext*)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE1_1("M4WRITER_3GP_closeWrite called with pContext=0x%x", pContext);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == apContext),M4ERR_PARAMETER,
+         "M4WRITER_3GP_closeWrite: pContext is M4OSA_NULL");
+
+    /**
+     *    Call the MP4 core writer */
+    if (M4OSA_NULL != apContext->pMP4Context)
+    {
+        M4OSA_TRACE3_0("M4WRITER_3GP_closeWrite: calling M4MP4W_closeWrite()");
+        err = M4MP4W_closeWrite(apContext->pMP4Context);
+        if (M4OSA_ERR_IS_ERROR(err))
+        {
+            M4OSA_TRACE1_1("M4WRITER_3GP_closeWrite: M4MP4W_closeWrite returns error 0x%x", err);
+        }
+    }
+
+    /**
+     *    Deallocate our own context */
+    M4OSA_free((M4OSA_MemAddr32)apContext);
+
+    M4OSA_TRACE2_1("M4WRITER_3GP_closeWrite: returning 0x%x", err);
+    return err;
+}
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_setOption(
+ *        M4WRITER_Context pContext, M4OSA_UInt32 optionID,
+ *        M4OSA_DataOption optionValue)
+ * @brief     This function asks the writer to set the value associated with
+ *            the optionID. The caller is responsible for allocating/
+ *            de-allocating the memory of the value field.
+ * @note      The options handled by the component depend on the implementation
+ *            of the component.
+ * @param     pContext:     (IN) Execution context of the 3GP writer,
+ * @param     optionID:     (IN) ID of the option to set.
+ * @param     optionValue:  (IN) Value of the option to set.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return    M4ERR_BAD_OPTION_ID: the ID of the option is not valid.
+ * @return    any error returned by the MP4 core writer setOption (Its coreID
+ *            is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_setOption(
+        M4WRITER_Context pContext, M4OSA_UInt32 optionID,
+        M4OSA_DataOption optionValue)
+{
+    M4WRITER_3GP_InternalContext* apContext =
+            (M4WRITER_3GP_InternalContext*)pContext;
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4MP4W_memAddr memval;
+    M4SYS_StreamIDValue optval;
+
+    M4OSA_TRACE2_3("M4WRITER_3GP_setOption: pContext=0x%x, optionID=0x%x,\
+         optionValue=0x%x", pContext, optionID, optionValue);
+
+    /**
+     *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL==apContext),M4ERR_PARAMETER,
+         "M4WRITER_3GP_setOption: pContext is M4OSA_NULL");
+
+    switch (optionID)
+    {
+        /**
+         *    Maximum Access Unit size */
+        case M4WRITER_kMaxAUSize:
+            M4OSA_TRACE2_0("setting M4WRITER_kMaxAUSize option");
+            err = M4MP4W_setOption(
+                    apContext->pMP4Context,M4MP4W_maxAUsize, optionValue);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_maxAUsize) "
+                               "returns error 0x%x", err);
+            }
+            break;
+        /**
+         *    Maximum chunk size */
+        case M4WRITER_kMaxChunckSize:
+            M4OSA_TRACE2_0("setting M4WRITER_kMaxChunckSize option");
+            err = M4MP4W_setOption(
+                apContext->pMP4Context,M4MP4W_maxChunkSize, optionValue);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_maxChunkSize)\
+                     returns error 0x%x", err);
+            }
+            break;
+        /**
+         *    File string signature */
+        case M4WRITER_kEmbeddedString:
+            M4OSA_TRACE2_0("setting M4WRITER_kEmbeddedString option");
+            /* The given M4OSA_DataOption must actually
+               be a text string */
+            memval.addr = (M4OSA_MemAddr32)optionValue;
+            /**< this is max string size copied by the core */
+            memval.size = 16;
+            err = M4MP4W_setOption(
+                apContext->pMP4Context,M4MP4W_embeddedString, &memval);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_embeddedString)\
+                     returns error 0x%x", err);
+            }
+            break;
+        /**
+         *    File integration tag */
+        case M4WRITER_kIntegrationTag:
+            M4OSA_TRACE2_0("setting M4WRITER_kIntegrationTag option");
+            /* The given M4OSA_DataOption must actually
+               be a text string */
+            memval.addr = (M4OSA_MemAddr32)optionValue;
+            /**< this is max string size copied by the core */
+            memval.size = M4OSA_chrLength(optionValue);
+            err = M4MP4W_setOption(
+                apContext->pMP4Context,M4MP4W_integrationTag, &memval);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_integrationTag)"
+                               " returns error 0x%x", err);
+            }
+            break;
+        /**
+         *    File version signature */
+        case M4WRITER_kEmbeddedVersion:
+            M4OSA_TRACE2_0("setting M4WRITER_kEmbeddedVersion option");
+            /* The given M4OSA_DataOption must actually
+               be a version number */
+
+            /**< Here 0 means both streams */
+            optval.streamID = 0;
+            /**< version number */
+            optval.value = *(M4OSA_UInt32*)optionValue;
+            err = M4MP4W_setOption(
+                apContext->pMP4Context,M4MP4W_CamcoderVersion, &optval);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_CamcoderVersion)"
+                               " returns error 0x%x", err);
+            }
+            break;
+        /**
+         *    Some options are read-only */
+        case M4WRITER_kFileSize:
+        case M4WRITER_kFileSizeAudioEstimated:
+            M4OSA_TRACE2_1("trying to set a read-only option! (ID=0x%x)",
+                    optionID);
+            return (M4OSA_ERR)M4ERR_READ_ONLY;
+            break;
+        /**
+         *    Maximum filesize limitation */
+        case M4WRITER_kMaxFileSize:
+            M4OSA_TRACE2_0("setting M4WRITER_kMaxFileSize option");
+            err = M4MP4W_setOption(
+                apContext->pMP4Context,M4MP4W_maxFileSize, optionValue);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_maxFileSize)\
+                     returns error 0x%x", err);
+            }
+            break;
+
+        /**
+         *    Maximum file duration limitation */
+        case M4WRITER_kMaxFileDuration:
+            M4OSA_TRACE2_0("setting M4WRITER_kMaxFileDuration option");
+            err = M4MP4W_setOption(
+                apContext->pMP4Context,M4MP4W_maxFileDuration, optionValue);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_maxFileDuration)"
+                               " returns error 0x%x", err);
+            }
+            break;
+
+        /**
+         *    Set 'ftyp' atom */
+        case M4WRITER_kSetFtypBox:
+            M4OSA_TRACE2_0("setting M4WRITER_kSetFtypBox option");
+            err = M4MP4W_setOption(
+                apContext->pMP4Context, M4MP4W_setFtypBox, optionValue);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_setFtypBox)\
+                     returns error 0x%x", err);
+            }
+            break;
+
+        /**
+         *    Decoder Specific Info */
+        case M4WRITER_kDSI:
+            M4OSA_TRACE2_0("setting M4WRITER_kDSI option");
+            err = M4MP4W_setOption(
+                apContext->pMP4Context, M4MP4W_DSI, optionValue);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_DSI)"
+                               " returns error 0x%x", err);
+            }
+            break;
+        /*+ H.264 Trimming  */
+        case M4WRITER_kMUL_PPS_SPS:
+            M4OSA_TRACE2_0("setting M4WRITER_kMUL_PPS_SPS option");
+            err = M4MP4W_setOption(
+                apContext->pMP4Context, M4MP4W_MUL_PPS_SPS, optionValue);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_setOption(M4MP4W_MUL_PPS_SPS)"
+                               " returns error 0x%x", err);
+            }
+            break;
+        /*- H.264 Trimming  */
+
+        /**
+         *    Unknown option */
+        default:
+            M4OSA_TRACE2_1("trying to set an unknown option!"
+                           " (optionID=0x%x)", optionID);
+            return (M4OSA_ERR)M4ERR_BAD_OPTION_ID;
+            break;
+    }
+
+    M4OSA_TRACE3_1("M4WRITER_3GP_setOption: returning 0x%x", err);
+    return err;
+}
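+
+/* Illustrative sketch only (not part of the writer): how a caller might use
+ * this function to set a file size limit. 'ctx' is an assumed, already-opened
+ * writer context and the 4 MB limit is an arbitrary example value; the value
+ * is assumed to be passed by address, as with the other integer options above.
+ *
+ *     M4OSA_UInt32 maxSize = 4 * 1024 * 1024;
+ *     M4OSA_ERR err = M4WRITER_3GP_setOption(ctx, M4WRITER_kMaxFileSize,
+ *                                            (M4OSA_DataOption)&maxSize);
+ *     if (M4OSA_ERR_IS_ERROR(err))
+ *     {
+ *         M4OSA_TRACE1_1("could not set the max file size, err=0x%x", err);
+ *     }
+ */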
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_getOption(
+ *     M4WRITER_Context pContext, M4OSA_UInt32 optionID,
+ *     M4OSA_DataOption optionValue)
+ * @brief     This function asks the writer to return the value associated with
+ *            the optionID. The caller is responsible for allocating/
+ *            de-allocating the memory of the value field.
+ * @note      The options handled by the component depend on the implementation
+ *            of the component.
+ * @param     pContext:     (IN) Execution context of the 3GP writer,
+ * @param     optionID:     (IN) Id of the option to get.
+ * @param     optionValue:  (OUT) Value of the option to get.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only)
+ * @return    M4ERR_BAD_OPTION_ID: the ID of the option is not valid.
+ * @return    M4ERR_NOT_IMPLEMENTED: This option is not implemented yet.
+ * @return    any error returned by the MP4 core writer getOption (Its coreID
+ *            is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_getOption(
+        M4WRITER_Context pContext, M4OSA_UInt32 optionID,
+        M4OSA_DataOption optionValue)
+{
+    M4WRITER_3GP_InternalContext* apContext =
+            (M4WRITER_3GP_InternalContext*)pContext;
+
+    M4OSA_ERR err;
+
+    M4OSA_TRACE2_3("M4WRITER_3GP_getOption: pContext=0x%x, optionID=0x%x,"
+                   " optionValue=0x%x", pContext, optionID, optionValue);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == apContext),M4ERR_PARAMETER,
+         "M4WRITER_3GP_getOption: pContext is M4OSA_NULL");
+
+    switch (optionID)
+    {
+        /**
+         *    Maximum Access Unit size */
+        case M4WRITER_kMaxAUSize:
+            M4OSA_TRACE2_0("getting M4WRITER_kMaxAUSize option");
+            err = M4MP4W_getOption(apContext->pMP4Context,M4MP4W_maxAUsize,
+                (M4OSA_DataOption*)&optionValue);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_getOption(M4MP4W_maxAUsize)"
+                               " returns error 0x%x", err);
+            }
+            break;
+        /**
+         *    Maximum chunk size */
+        case M4WRITER_kMaxChunckSize:
+            M4OSA_TRACE2_0("getting M4WRITER_kMaxChunckSize option");
+            err = M4MP4W_getOption(apContext->pMP4Context,M4MP4W_maxChunkSize,
+                (M4OSA_DataOption*)&optionValue);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_getOption(M4MP4W_maxChunkSize)"
+                               " returns error 0x%x", err);
+            }
+            break;
+        /**
+         *    The file size option */
+        case M4WRITER_kFileSize:
+            M4OSA_TRACE2_0("getting M4WRITER_kFileSize option");
+            /* get the current file size */
+            err = M4MP4W_getCurrentFileSize(
+                apContext->pMP4Context, (M4OSA_UInt32*)optionValue);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_getCurrentFileSize"
+                               " returns error 0x%x", err);
+            }
+            break;
+        /**
+         *    The estimated file size with audio: the estimation is now
+         *    handled internally by the MP4 core writer */
+        case M4WRITER_kFileSizeAudioEstimated:
+            M4OSA_TRACE2_0("getting M4WRITER_kFileSizeAudioEstimated option");
+            /* get the current file size ... */
+            err = M4MP4W_getCurrentFileSize(
+                apContext->pMP4Context, (M4OSA_UInt32*)optionValue);
+            if (M4OSA_ERR_IS_ERROR(err))
+            {
+                M4OSA_TRACE1_1("M4MP4W_getCurrentFileSize"
+                               " returns error 0x%x", err);
+            }
+            // No longer needed: the 3GP writer has its own mechanism
+            ///* ... add the estimated next max AU size */
+            //*((M4OSA_UInt32*)optionValue) += apContext->maxAUsizes;
+            break;
+        /**
+         *    Unknown option */
+        default:
+            M4OSA_TRACE2_1("trying to get an unknown option!"
+                           " (optionID=0x%x)", optionID);
+            return (M4OSA_ERR)M4ERR_BAD_OPTION_ID;
+            break;
+    }
+
+    M4OSA_TRACE3_1("M4WRITER_3GP_getOption: returning 0x%x", err);
+    return err;
+}
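+
+/* Illustrative sketch only (not part of the writer): polling the current file
+ * size during recording. 'ctx' is an assumed, already-opened writer context;
+ * M4WRITER_kFileSize fills the M4OSA_UInt32 whose address is passed as the
+ * M4OSA_DataOption.
+ *
+ *     M4OSA_UInt32 curSize = 0;
+ *     M4OSA_ERR err = M4WRITER_3GP_getOption(ctx, M4WRITER_kFileSize,
+ *                                            (M4OSA_DataOption)&curSize);
+ *     if (M4NO_ERROR == err)
+ *     {
+ *         M4OSA_TRACE2_1("current 3GP file size = %d bytes", curSize);
+ *     }
+ */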
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_startAU(
+ *          M4WRITER_Context pContext, M4SYS_StreamID streamID,
+ *          M4SYS_AccessUnit* pAU)
+ * @brief     Prepare an Access Unit to be ready to store data
+ * @note
+ * @param     pContext: (IN) Execution context of the 3GP writer,
+ * @param     streamID: (IN) Id of the stream to which the Access Unit
+ *            is related.
+ * @param     pAU:      (IN/OUT) Access Unit to be prepared.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext or pAU is M4OSA_NULL (debug only)
+ * @return    M4ERR_BAD_STREAM_ID: streamID is neither VideoStreamID nor
+ *            AudioStreamID (debug only)
+ * @return    any error returned by the MP4 core writer startAU (Its coreID
+ *            is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_startAU(
+        M4WRITER_Context pContext, M4SYS_StreamID streamID,
+        M4SYS_AccessUnit* pAU)
+{
+    M4WRITER_3GP_InternalContext* apContext =
+            (M4WRITER_3GP_InternalContext*)pContext;
+
+    M4OSA_ERR err;
+
+    M4OSA_TRACE2_3("M4WRITER_3GP_startAU: pContext=0x%x, streamID=%d, pAU=0x%x",
+         pContext, streamID, pAU);
+
+    /**
+     *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == apContext), M4ERR_PARAMETER,
+         "M4WRITER_3GP_startAU: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pAU), M4ERR_PARAMETER,
+         "M4WRITER_3GP_startAU: pAU is M4OSA_NULL");
+    M4OSA_DEBUG_IF2(
+         ((VideoStreamID != streamID) && (AudioStreamID != streamID)),
+         M4ERR_BAD_STREAM_ID,
+         "M4WRITER_3GP_startAU: Wrong streamID");
+
+    /**
+     * Call the MP4 writer */
+    M4OSA_TRACE3_0("M4WRITER_3GP_startAU: calling M4MP4W_startAU()");
+    err = M4MP4W_startAU(apContext->pMP4Context, streamID, pAU);
+    if (M4OSA_ERR_IS_ERROR(err))
+    {
+        M4OSA_TRACE1_1("M4MP4W_startAU returns error 0x%x", err);
+    }
+
+    M4OSA_TRACE3_2("AU: dataAddress=0x%x, size=%d",
+         pAU->dataAddress, pAU->size);
+
+    /* Convert oversize to a request toward VES automaton */
+    if (M4WAR_MP4W_OVERSIZE == err)
+    {
+        err = M4WAR_WRITER_STOP_REQ;
+    }
+
+    M4OSA_TRACE3_1("M4WRITER_3GP_startAU: returning 0x%x", err);
+    return err;
+}
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_processAU(
+ *          M4WRITER_Context pContext, M4SYS_StreamID streamID,
+ *          M4SYS_AccessUnit* pAU)
+ * @brief     Write an Access Unit
+ * @note
+ * @param     pContext: (IN) Execution context of the 3GP writer,
+ * @param     streamID: (IN) Id of the stream to which the Access Unit
+ *            is related.
+ * @param     pAU:      (IN/OUT) Access Unit to be written
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext or pAU is M4OSA_NULL (debug only)
+ * @return    M4ERR_BAD_STREAM_ID: streamID is neither VideoStreamID nor
+ *            AudioStreamID (debug only)
+ * @return    any error returned by the MP4 core writer processAU
+ *            (Its coreID is M4MP4_WRITER)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_processAU(
+        M4WRITER_Context pContext, M4SYS_StreamID streamID,
+        M4SYS_AccessUnit* pAU)
+{
+    M4WRITER_3GP_InternalContext* apContext =
+        (M4WRITER_3GP_InternalContext*)pContext;
+
+    M4OSA_ERR err;
+
+    M4OSA_TRACE2_3("M4WRITER_3GP_processAU: "
+                   "pContext=0x%x, streamID=%d, pAU=0x%x",
+                    pContext, streamID, pAU);
+
+    /**
+     *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == apContext), M4ERR_PARAMETER,
+         "M4WRITER_3GP_processAU: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pAU), M4ERR_PARAMETER,
+         "M4WRITER_3GP_processAU: pAU is M4OSA_NULL");
+    M4OSA_DEBUG_IF2(
+         ((VideoStreamID != streamID) && (AudioStreamID != streamID)),
+         M4ERR_BAD_STREAM_ID,
+         "M4WRITER_3GP_processAU: Wrong streamID");
+
+    M4OSA_TRACE3_4("M4WRITER_3GP_processAU: AU: "
+         "dataAddress=0x%x, size=%d, CTS=%d, nbFrag=%d",
+         pAU->dataAddress, pAU->size, (M4OSA_UInt32)pAU->CTS, pAU->nbFrag);
+
+    if(pAU->size > apContext->maxAUsizes)
+    {
+        apContext->maxAUsizes = pAU->size;
+    }
+    /**
+     * Call the MP4 writer */
+    M4OSA_TRACE3_0("M4WRITER_3GP_processAU: calling M4MP4W_processAU()");
+    err = M4MP4W_processAU(apContext->pMP4Context, streamID, pAU);
+    if (M4OSA_ERR_IS_ERROR(err))
+    {
+        M4OSA_TRACE1_1("M4MP4W_processAU returns error 0x%x", err);
+    }
+
+    /* Convert oversize to a request toward VES automaton */
+    if(M4WAR_MP4W_OVERSIZE == err)
+    {
+        err = M4WAR_WRITER_STOP_REQ;
+    }
+
+    M4OSA_TRACE3_1("M4WRITER_3GP_processAU: returning 0x%x", err);
+    return err;
+}
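+
+/* Illustrative sketch only (not part of the writer): the startAU/processAU
+ * sequence for one video access unit. 'ctx' is an assumed open context with a
+ * video stream already added; 'pFrameData', 'frameSize' and 'frameCts' stand
+ * for encoder output and its composition timestamp, and any other AU fields
+ * are assumed to be initialized as the integration requires.
+ *
+ *     M4SYS_AccessUnit au;
+ *     err = M4WRITER_3GP_startAU(ctx, VideoStreamID, &au);
+ *     if (M4NO_ERROR == err)
+ *     {
+ *         // au.dataAddress now points to a buffer provided by the writer
+ *         M4OSA_memcpy((M4OSA_MemAddr8)au.dataAddress,
+ *                      (M4OSA_MemAddr8)pFrameData, frameSize);
+ *         au.size   = frameSize;
+ *         au.CTS    = frameCts;
+ *         au.nbFrag = 0;
+ *         err = M4WRITER_3GP_processAU(ctx, VideoStreamID, &au);
+ *     }
+ */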
+
+
+/******************************************************************************
+ * M4OSA_ERR M4WRITER_3GP_getInterfaces(
+ *      M4WRITER_OutputFileType* Type,
+ *      M4WRITER_GlobalInterface** SrcGlobalInterface,
+ *      M4WRITER_DataInterface** SrcDataInterface)
+ * @brief     Get the 3GPP writer common interface
+ * @note      Retrieves the set of functions needed to use the 3GPP writer.
+ *            It follows the common writer interface.
+ * @param     Type: (OUT) return the type of this writer. Will always be
+ *            M4WRITER_k3GPP.
+ * @param     SrcGlobalInterface: (OUT) Main set of functions to use this
+ *            3GPP writer
+ * @param     SrcDataInterface:   (OUT) Set of data-related functions to use
+ *            this 3GPP writer
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_ALLOC: there is no more available memory
+ * @return    M4ERR_PARAMETER: At least one of the parameters is M4OSA_NULL
+ *            (debug only)
+ ******************************************************************************
+*/
+M4OSA_ERR M4WRITER_3GP_getInterfaces(
+        M4WRITER_OutputFileType* Type,
+        M4WRITER_GlobalInterface** SrcGlobalInterface,
+        M4WRITER_DataInterface** SrcDataInterface)
+{
+    M4WRITER_GlobalInterface *pGlobal;
+    M4WRITER_DataInterface *pData;
+
+    M4OSA_TRACE2_3("M4WRITER_3GP_getInterfaces: "
+         "Type=0x%x, SrcGlobalInterface=0x%x,"
+         " SrcDataInterface=0x%x", Type, SrcGlobalInterface, SrcDataInterface);
+
+    /**
+     *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Type), M4ERR_PARAMETER,
+         "M4WRITER_3GP_getInterfaces: Type is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == SrcGlobalInterface), M4ERR_PARAMETER,
+         "M4WRITER_3GP_getInterfaces: SrcGlobalInterface is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == SrcDataInterface), M4ERR_PARAMETER,
+         "M4WRITER_3GP_getInterfaces: SrcDataInterface is M4OSA_NULL");
+
+    /**
+     *    Set the output type */
+    *Type = M4WRITER_k3GPP;
+
+    /**
+     *    Allocate the global interface structure */
+    pGlobal = (M4WRITER_GlobalInterface*)M4OSA_malloc(
+                sizeof(M4WRITER_GlobalInterface),
+                M4WRITER_3GP, (M4OSA_Char *)"M4WRITER_GlobalInterface");
+    if (M4OSA_NULL == pGlobal)
+    {
+        M4OSA_TRACE1_0("unable to allocate M4WRITER_GlobalInterface,"
+                       " returning M4ERR_ALLOC");
+        *SrcGlobalInterface = M4OSA_NULL;
+        *SrcDataInterface = M4OSA_NULL;
+        return (M4OSA_ERR)M4ERR_ALLOC;
+    }
+
+    /**
+     *    Allocate the data interface structure */
+    pData =
+        (M4WRITER_DataInterface *)M4OSA_malloc(sizeof(M4WRITER_DataInterface),
+        M4WRITER_3GP, (M4OSA_Char *)"M4WRITER_DataInterface");
+    if (M4OSA_NULL == pData)
+    {
+        M4OSA_TRACE1_0("unable to allocate M4WRITER_DataInterface,"
+                       " returning M4ERR_ALLOC");
+        M4OSA_free((M4OSA_MemAddr32)pGlobal);
+        *SrcGlobalInterface = M4OSA_NULL;
+        *SrcDataInterface = M4OSA_NULL;
+        return (M4OSA_ERR)M4ERR_ALLOC;
+    }
+
+    /**
+     *    Fill the global interface structure */
+    pGlobal->pFctOpen = M4WRITER_3GP_openWrite;
+    pGlobal->pFctAddStream = M4WRITER_3GP_addStream;
+    pGlobal->pFctStartWriting = M4WRITER_3GP_startWriting;
+    pGlobal->pFctCloseWrite = M4WRITER_3GP_closeWrite;
+    pGlobal->pFctSetOption = M4WRITER_3GP_setOption;
+    pGlobal->pFctGetOption = M4WRITER_3GP_getOption;
+
+    /**
+     *    Fill the data interface structure */
+    pData->pStartAU = M4WRITER_3GP_startAU;
+    pData->pProcessAU = M4WRITER_3GP_processAU;
+
+    /**
+     *    Set the return values */
+    *SrcGlobalInterface = pGlobal;
+    *SrcDataInterface = pData;
+
+    M4OSA_TRACE2_0("M4WRITER_3GP_getInterfaces: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
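+
+/* Illustrative sketch only: retrieving and releasing the interfaces. The two
+ * returned structures are owned by the caller, which presumably frees them
+ * with M4OSA_free() once the writer is no longer needed (they are allocated
+ * with M4OSA_malloc() above).
+ *
+ *     M4WRITER_OutputFileType   type;
+ *     M4WRITER_GlobalInterface *pGlobalItf = M4OSA_NULL;
+ *     M4WRITER_DataInterface   *pDataItf   = M4OSA_NULL;
+ *     M4OSA_ERR err = M4WRITER_3GP_getInterfaces(&type, &pGlobalItf, &pDataItf);
+ *     if (M4NO_ERROR == err)
+ *     {
+ *         // use pGlobalItf->pFctOpen, pFctAddStream, ... then:
+ *         M4OSA_free((M4OSA_MemAddr32)pGlobalItf);
+ *         M4OSA_free((M4OSA_MemAddr32)pDataItf);
+ *     }
+ */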
+
diff --git a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Utils.c b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Utils.c
new file mode 100755
index 0000000..5da85eb
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Utils.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+******************************************************************************
+ * @file    M4MP4W_Utils.c
+ * @brief   Utilities and private functions for the MP4 writer
+******************************************************************************
+*/
+
+#include "NXPSW_CompilerSwitches.h"
+
+#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
+
+#include "M4MP4W_Utils.h"
+#include "M4OSA_Error.h"
+#include "M4MP4W_Types.h"
+
+#define ERR_CHECK(exp, err) if (!(exp)) { return err; }
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_putByte(M4OSA_UChar c, M4OSA_FileWriterPointer* fileFunction,
+                         M4OSA_Context context)
+/*******************************************************************************/
+{
+    M4OSA_ERR err = fileFunction->writeData(context, (M4OSA_MemAddr8)&c, 1);
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_putBE16(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
+                         M4OSA_Context context)
+/*******************************************************************************/
+{
+    M4OSA_ERR err;
+    err = M4MP4W_putByte((M4OSA_UChar)(val >> 8), fileFunction, context);
+    ERR_CHECK(err == M4NO_ERROR, err);
+    err = M4MP4W_putByte((M4OSA_UChar)val, fileFunction, context);
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_putBE24(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
+                         M4OSA_Context context)
+/*******************************************************************************/
+{
+    M4OSA_ERR err;
+    err = M4MP4W_putByte((M4OSA_UChar)(val >> 16), fileFunction, context);
+    ERR_CHECK(err == M4NO_ERROR, err);
+    err = M4MP4W_putByte((M4OSA_UChar)(val >> 8), fileFunction, context);
+    ERR_CHECK(err == M4NO_ERROR, err);
+    err = M4MP4W_putByte((M4OSA_UChar)val, fileFunction, context);
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_putBE32(M4OSA_UInt32 val, M4OSA_FileWriterPointer* fileFunction,
+                         M4OSA_Context context)
+/*******************************************************************************/
+{
+    M4OSA_ERR err;
+    err = M4MP4W_putByte((M4OSA_UChar)(val >> 24), fileFunction, context);
+    ERR_CHECK(err == M4NO_ERROR, err);
+    err = M4MP4W_putByte((M4OSA_UChar)(val >> 16), fileFunction, context);
+    ERR_CHECK(err == M4NO_ERROR, err);
+    err = M4MP4W_putByte((M4OSA_UChar)(val >> 8), fileFunction, context);
+    ERR_CHECK(err == M4NO_ERROR, err);
+    err = M4MP4W_putByte((M4OSA_UChar)val, fileFunction, context);
+    return err;
+}
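+
+/* For instance, M4MP4W_putBE32(0x00000018, ...) emits the bytes 00 00 00 18,
+   i.e. a 24-byte atom size in the big-endian order required by MP4. */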
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_putBlock(const M4OSA_UChar* Block, M4OSA_UInt32 size,
+                           M4OSA_FileWriterPointer* fileFunction, M4OSA_Context context)
+/*******************************************************************************/
+{
+    M4OSA_ERR err = fileFunction->writeData(context, (M4OSA_MemAddr8)Block, size);
+    return err;
+}
+
+/*******************************************************************************/
+void M4MP4W_convertInt32BE(M4OSA_UInt32* valPtr)
+/*******************************************************************************/
+{
+    M4OSA_UChar a, b;
+    M4OSA_UChar* c = (M4OSA_UChar*)valPtr;
+    a       = *(c);
+    b       = *(c+1);
+    *(c)   = *(c+3);
+    *(c+1) = *(c+2);
+    *(c+2) = b;
+    *(c+3) = a;
+}
+
+/*******************************************************************************/
+void M4MP4W_table32ToBE(M4OSA_UInt32* tab, M4OSA_UInt32 nb)
+/*******************************************************************************/
+{
+    M4OSA_UInt32 i;
+    for (i=0; i<nb; i++)
+        M4MP4W_convertInt32BE(&(tab)[i]);
+}
+
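+/* Note: unlike a standard realloc(), this helper always frees the original
+   buffer, even when the new allocation fails and M4OSA_NULL is returned. */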
+/*******************************************************************************/
+void* M4MP4W_realloc(M4OSA_MemAddr32 ptr, M4OSA_UInt32 oldSize, M4OSA_UInt32 newSize)
+/*******************************************************************************/
+{
+    M4OSA_MemAddr32 ptr2 = (M4OSA_MemAddr32)M4OSA_malloc(newSize, M4MP4_WRITER,
+                                                          (M4OSA_Char *)"realloc");
+    if (M4OSA_NULL != ptr2)
+    {
+        M4OSA_memcpy((M4OSA_MemAddr8)ptr2, (M4OSA_MemAddr8)ptr, oldSize);
+    }
+    M4OSA_free(ptr);
+    return ptr2;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_freeContext(M4OSA_Context context)
+/*******************************************************************************/
+{
+#ifdef _M4MP4W_MOOV_FIRST
+    M4OSA_UInt32 i;
+#endif /*_M4MP4W_MOOV_FIRST*/
+    M4MP4W_Mp4FileData* mMp4FileDataPtr = (M4MP4W_Mp4FileData*)context;
+    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+    /*freeContext is now called after closeWrite*/
+    ERR_CHECK( mMp4FileDataPtr->state == M4MP4W_closed, M4ERR_STATE);
+    mMp4FileDataPtr->state = M4MP4W_closed;
+
+    if (mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL)
+    {
+        /*delete also other chunks if any*/
+        /*for (i=0; i<=mMp4FileDataPtr->audioTrackPtr->currentChunk; i++)*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+        for (i=0; i<=mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk; i++)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->Chunk[i]);
+        }
+#else
+        if ((M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->Chunk) &&
+             (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->Chunk[0]))
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->Chunk[0]);
+        }
+        if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable);
+        }
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+        /*now dynamic*/
+        if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->Chunk)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->Chunk);
+        }
+        if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->chunkSizeTable)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->chunkSizeTable);
+        }
+        if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable);
+        }
+        if (M4OSA_NULL != mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable);
+        }
+
+        if (mMp4FileDataPtr->audioTrackPtr->TABLE_STTS != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->TABLE_STTS);
+        }
+
+        if (mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ);
+        }
+
+        if (mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->DSI);
+            mMp4FileDataPtr->audioTrackPtr->DSI = M4OSA_NULL;
+        }
+
+        M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr);
+        mMp4FileDataPtr->audioTrackPtr = M4OSA_NULL;
+    }
+    if (mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL)
+    {
+        /*delete also other chunks if any*/
+        /*for (i=0; i<=mMp4FileDataPtr->videoTrackPtr->currentChunk; i++)*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+        for (i=0; i<=mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk; i++)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->Chunk[i]);
+        }
+#else
+        if ((M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->Chunk) &&
+             (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->Chunk[0]))
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->Chunk[0]);
+        }
+        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable);
+        }
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+        /*now dynamic*/
+        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->Chunk)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->Chunk);
+        }
+        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->chunkSizeTable)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->chunkSizeTable);
+        }
+        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable);
+        }
+        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable);
+        }
+
+        if (mMp4FileDataPtr->videoTrackPtr->DSI != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->DSI);
+            mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
+        }
+
+        /*now dynamic*/
+        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->TABLE_STTS)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->TABLE_STTS);
+        }
+        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ);
+        }
+        if (M4OSA_NULL != mMp4FileDataPtr->videoTrackPtr->TABLE_STSS)
+        {
+            M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->TABLE_STSS);
+        }
+
+        M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr);
+        mMp4FileDataPtr->videoTrackPtr = M4OSA_NULL;
+    }
+
+    if (mMp4FileDataPtr->embeddedString != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->embeddedString);
+        mMp4FileDataPtr->embeddedString = M4OSA_NULL;
+    }
+
+    M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr);
+
+    return M4NO_ERROR;
+}
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
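+/* Accessors for the phone-optimized tables: each 32-bit entry is assumed to
+   pack two 16-bit halves, and these helpers read/write the high and low half
+   words in place. */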
+/*******************************************************************************/
+M4OSA_Void M4MP4W_put32_Hi(M4OSA_UInt32* tab, M4OSA_UInt16 Hi)
+/*******************************************************************************/
+{
+    *tab &= 0xFFFF;
+    *tab |= Hi<<16;
+}
+
+/*******************************************************************************/
+M4OSA_Void M4MP4W_put32_Lo(M4OSA_UInt32* tab, M4OSA_UInt16 Lo)
+/*******************************************************************************/
+{
+    *tab &= 0xFFFF0000;
+    *tab |= Lo;
+}
+
+/*******************************************************************************/
+M4OSA_UInt16 M4MP4W_get32_Hi(M4OSA_UInt32* tab)
+/*******************************************************************************/
+{
+    return (*tab >> 16) & 0xFFFF;
+}
+
+/*******************************************************************************/
+M4OSA_UInt16 M4MP4W_get32_Lo(M4OSA_UInt32* tab)
+/*******************************************************************************/
+{
+    return *tab & 0xFFFF;
+}
+#endif
+
+#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
+
diff --git a/libvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c
new file mode 100755
index 0000000..b37dded
--- /dev/null
+++ b/libvideoeditor/vss/3gpwriter/src/M4MP4W_Writer.c
@@ -0,0 +1,5382 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4MP4W_Writer.c
+ * @brief   Implementation of the core MP4 writer
+ ******************************************************************************
+ */
+
+#include "NXPSW_CompilerSwitches.h"
+
+#ifndef _M4MP4W_USE_CST_MEMORY_WRITER
+
+#include "M4OSA_Error.h"
+#include "M4OSA_Debug.h"
+#include "M4MP4W_Writer.h"
+#include "M4MP4W_Utils.h"
+
+/* Check optimisation flags : BEGIN */
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+#ifdef _M4MP4W_MOOV_FIRST
+#error "_M4MP4W_OPTIMIZE_FOR_PHONE should not be used with _M4MP4W_MOOV_FIRST"
+
+#endif
+
+#endif
+
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+#ifndef _M4MP4W_OPTIMIZE_FOR_PHONE
+#error "_M4MP4W_UNBUFFERED_VIDEO should be used with _M4MP4W_OPTIMIZE_FOR_PHONE"
+
+#endif
+
+#endif
+/* Check optimisation flags : END */
+
+#ifndef _M4MP4W_DONT_USE_TIME_H
+#include <time.h>
+
+#endif /*_M4MP4W_DONT_USE_TIME_H*/
+
+/*MACROS*/
+#define MAJOR_VERSION 3
+#define MINOR_VERSION 3
+#define REVISION 0
+
+#define ERR_CHECK(exp, err) if (!(exp)) { return err; }
+#define CLEANUPonERR(func) if ((err = func) != M4NO_ERROR) goto cleanup
+
+#define max(a,b) (((a) > (b)) ? (a) : (b))
+
+/***************/
+/*Static blocks*/
+/***************/
+
+/*CommonBlocks*/
+
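+/* Default 'ftyp' atom, 24 bytes: size, 'ftyp', major brand '3gp7',
+   minor version 0x00000300, then compatible brands '3gp7' and 'isom' */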
+const M4OSA_UChar Default_ftyp [] =
+{
+    0x00, 0x00, 0x00, 0x18, 'f', 't', 'y', 'p', '3', 'g', 'p', '7', 0x00, 0x00,
+    0x03, 0x00, '3', 'g', 'p', '7', 'i', 's', 'o', 'm'
+};
+
+const M4OSA_UChar CommonBlock2 [] =
+{
+    'm', 'd', 'a', 't'
+};
+
+const M4OSA_UChar CommonBlock3 [] =
+{
+    'm', 'o', 'o', 'v', 0x00, 0x00, 0x00, 0x6C, 'm', 'v', 'h', 'd', 0x00,
+    0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar CommonBlock4 [] =
+{
+    0x00, 0x00, 0x03, 0xE8
+};
+
+const M4OSA_UChar CommonBlock5 [] =
+{
+    0x00, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03
+};
+
+const M4OSA_UChar CommonBlock6 [] =
+{
+    't', 'r', 'a', 'k', 0x00, 0x00, 0x00, 0x5C, 't', 'k', 'h', 'd', 0x00,
+    0x00, 0x00, 0x01
+};
+
+const M4OSA_UChar CommonBlock7 [] =
+{
+    0x00, 0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar CommonBlock7bis [] =
+{
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x01, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x40, 0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar CommonBlock8 [] =
+{
+    'm', 'd', 'i', 'a', 0x00, 0x00, 0x00, 0x20, 'm', 'd', 'h', 'd', 0x00,
+    0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar CommonBlock9 [] =
+{
+    0x55, 0xC4, 0x00, 0x00
+};
+
+const M4OSA_UChar CommonBlock10 [] =
+{
+    'm', 'i', 'n', 'f', 0x00, 0x00, 0x00, 0x24, 'd', 'i', 'n', 'f', 0x00,
+    0x00, 0x00, 0x1C, 'd', 'r', 'e', 'f', 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x01, 0x00, 0x00, 0x00, 0x0C, 'u', 'r', 'l', ' ', 0x00, 0x00, 0x00,
+    0x01
+};
+
+const M4OSA_UChar CommonBlock11 [] =
+{
+    's', 't', 'b', 'l'
+};
+
+const M4OSA_UChar CommonBlock12 [] =
+{
+    's', 't', 't', 's', 0x00, 0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar SampleDescriptionHeader [] =
+{
+    's', 't', 's', 'd', 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+};
+
+const M4OSA_UChar SampleDescriptionEntryStart [] =
+{
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar CommonBlock15 [] =
+{
+    's', 't', 's', 'z', 0x00, 0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar CommonBlock16 [] =
+{
+    's', 't', 's', 'c', 0x00, 0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar CommonBlock17 [] =
+{
+    's', 't', 'c', 'o', 0x00, 0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar BlockSignatureSkipHeader [] =
+{
+    0x00, 0x00, 0x00, 0x5E, 's', 'k', 'i', 'p'
+};
+/* due to current limitations, size must be 16 */
+const M4OSA_UChar BlockSignatureSkipDefaultEmbeddedString [] =
+{
+    'N', 'X', 'P', 'S', 'W', ' ', 'C', 'A', 'M', 'C', 'O', 'R', 'D', 'E',
+    'R', ' '
+};
+/* follows the version (like " 3.0.2"), then " -- " */
+/* due to current limitations, size must be 60 */
+const M4OSA_UChar BlockSignatureSkipDefaultIntegrationTag [] =
+{
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+/*VideoBlocks*/
+/* 320*240, now no longer hardcoded */
+/* const M4OSA_UChar VideoBlock1[] =
+    { 0x01, 0x40, 0x00, 0x00, 0x00, 0xF0, 0x00, 0x00 }; */
+const M4OSA_UChar VideoBlock1_1 [] =
+{
+    0x00, 0x00, 0x00, 0x21, 'h', 'd', 'l', 'r', 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 'v', 'i', 'd', 'e', 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar SampleDescriptionEntryVideoBoilerplate1 [] =
+{
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar SampleDescriptionEntryVideoBoilerplate2 [] =
+{
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x18, 0xFF, 0xFF
+};
+
+const M4OSA_UChar VideoBlock4 [] =
+{
+    's', 't', 's', 's', 0x00, 0x00, 0x00, 0x00
+}; /*STSS*/
+
+const M4OSA_UChar VideoBlock5 [] =
+{
+    0x00, 0x00, 0x00, 0x14, 'v', 'm', 'h', 'd', 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar VideoResolutions [] =
+{
+    0x00, 0x48, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00
+};
+
+/*Mp4vBlocks*/
+const M4OSA_UChar Mp4vBlock1 [] =
+{
+    'm', 'p', '4', 'v'
+};
+
+const M4OSA_UChar Mp4vBlock3 [] =
+{
+    0x20, 0x11
+};
+
+/*H263Blocks*/
+const M4OSA_UChar H263Block1 [] =
+{
+    's', '2', '6', '3'
+};
+
+const M4OSA_UChar H263Block2 [] =
+{
+    0x00, 0x00, 0x00, 0x0F, 'd', '2', '6', '3'
+};
+
+const M4OSA_UChar H263Block2_bitr [] =
+{
+    0x00, 0x00, 0x00, 0x1F, 'd', '2', '6', '3'
+};
+
+const M4OSA_UChar H263Block3 [] =
+{
+    'P', 'H', 'L', 'P', 0x00, 0x0A, 0x00
+};
+
+const M4OSA_UChar H263Block4 [] =
+{
+    0x00, 0x00, 0x00, 0x10, 'b', 'i', 't', 'r'
+};
+
+/*H264Blocks*/
+const M4OSA_UChar H264Block1 [] =
+{
+    'a', 'v', 'c', '1'
+};
+
+/* Formerly stored the full avcC configuration: version (=1), profile (=66),
+   compatibility (=0), level (=10), '111111' + NAL length field size (= 4 - 1),
+   '111' + number of PPS (=1). Only the 'avcC' fourcc is kept below. */
+
+const M4OSA_UChar H264Block2 [] =
+{
+        /* the hardcoded DSI values were removed; only the fourcc remains */
+        'a' , 'v' , 'c' , 'C'
+};
+
+/*AMRBlocks*/
+const M4OSA_UChar AMRBlock1 [] =
+{
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar AMRBlock1_1 [] =
+{
+    0x00, 0x00, 0x00, 0x21, 'h', 'd', 'l', 'r', 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 's', 'o', 'u', 'n', 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar AudioSampleDescEntryBoilerplate [] =
+{
+    0x00, 0x02, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00
+};
+
+const M4OSA_UChar AMRDSIHeader [] =
+{
+    0x00, 0x00, 0x00, 0x11, 'd', 'a', 'm', 'r'
+};
+
+const M4OSA_UChar AMRDefaultDSI [] =
+{
+    'P', 'H', 'L', 'P', 0x00, 0x00, 0x80, 0x00, 0x01
+};
+
+const M4OSA_UChar AMRBlock4 [] =
+{
+    0x00, 0x00, 0x00, 0x10, 's', 'm', 'h', 'd', 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00
+};
+
+/*AMR8Blocks*/
+const M4OSA_UChar AMR8Block1 [] =
+{
+    's', 'a', 'm', 'r'
+};
+
+/*AMR16Blocks*/
+/*const M4OSA_UChar AMR16Block1[] = { 's', 'a', 'w', 'b'};*/
+
+/*AACBlocks*/
+const M4OSA_UChar AACBlock1 [] =
+{
+    'm', 'p', '4', 'a'
+};
+
+const M4OSA_UChar AACBlock2 [] =
+{
+    0x40, 0x15
+};
+
+/*MPEGConfigBlocks (AAC & MP4V)*/
+const M4OSA_UChar MPEGConfigBlock0 [] =
+{
+    'e', 's', 'd', 's', 0x00, 0x00, 0x00, 0x00, 0x03
+};
+
+const M4OSA_UChar MPEGConfigBlock1 [] =
+{
+    0x00, 0x00, 0x00, 0x04
+};
+
+const M4OSA_UChar MPEGConfigBlock2 [] = { 0x05 };
+const M4OSA_UChar MPEGConfigBlock3 [] =
+{
+    0x06, 0x01, 0x02
+};
+
+/*EVRCBlocks*/
+const M4OSA_UChar EVRCBlock3_1 [] =
+{
+    0x00, 0x00, 0x00, 0x0E, 'd', 'e', 'v', 'c'
+};
+
+const M4OSA_UChar EVRCBlock3_2 [] =
+{
+    'P', 'H', 'L', 'P', 0x00, 0x00
+};
+
+/*EVRC8Blocks*/
+const M4OSA_UChar EVRC8Block1 [] =
+{
+    's', 'e', 'v', 'c'
+};
+
+/***********/
+/* Methods */
+/***********/
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_getVersion(M4OSA_UInt8 *major, M4OSA_UInt8 *minor,
+                            M4OSA_UInt8 *revision )
+/*******************************************************************************/
+{
+    ERR_CHECK(M4OSA_NULL != major, M4ERR_PARAMETER);
+    ERR_CHECK(M4OSA_NULL != minor, M4ERR_PARAMETER);
+    ERR_CHECK(M4OSA_NULL != revision, M4ERR_PARAMETER);
+
+    *major = MAJOR_VERSION;
+    *minor = MINOR_VERSION;
+    *revision = REVISION;
+
+    return M4NO_ERROR;
+}
+
+static M4OSA_UInt32 M4MP4W_STTS_ALLOC_SIZE;
+static M4OSA_UInt32 M4MP4W_STSZ_ALLOC_SIZE;
+static M4OSA_UInt32 M4MP4W_STSS_ALLOC_SIZE;
+static M4OSA_UInt32 M4MP4W_CHUNK_ALLOC_NB;
+static M4OSA_UInt32 M4MP4W_STTS_AUDIO_ALLOC_SIZE;
+static M4OSA_UInt32 M4MP4W_STSZ_AUDIO_ALLOC_SIZE;
+static M4OSA_UInt32 M4MP4W_CHUNK_AUDIO_ALLOC_NB;
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+/* the stsc[ ] table is split at 12 bits */
+#define M4MP4W_VIDEO_MAX_AU_PER_CHUNK 4095 /* 0 = not used */
+
+#else
+#define M4MP4W_VIDEO_MAX_AU_PER_CHUNK 10   /* 0 = not used */
+
+#endif
+
+#endif
+
+/*******************************************************************************/
+
+M4OSA_ERR M4MP4W_initializeAllocationParameters(M4MP4W_Mp4FileData *Ptr )
+/*******************************************************************************/
+{
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+    M4OSA_UInt32 maxMemory, vesMemory;
+    M4OSA_UInt32 nbVideoFrames, nbAudioFrames;
+    M4OSA_UInt32 averageVideoChunk;
+
+    /*-----------*/
+    /* NB_FRAMES */
+    /*-----------*/
+
+    /* magical formula : memory = vesMemory + 12 * framerate * duration */
+
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+
+    vesMemory = 0x32000; /* 200 kB */
+
+#else
+
+    vesMemory = 0x3E800; /* 250 kB */
+
+#endif
+
+#define VIDEO_POOL_MEMORY 1000000
+
+    maxMemory = VIDEO_POOL_MEMORY;
+
+    if (maxMemory < vesMemory) {
+        return M4ERR_ALLOC;
+    }
+
+    nbVideoFrames = ( maxMemory - vesMemory) / 12;
+
+    M4OSA_TRACE1_1("M4MP4W: %d images max", nbVideoFrames);
+
+    /* VIDEO */
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+    /* assume an average of 25 frames per chunk: reference = 15 fps * 2s * 0.8 */
+
+    averageVideoChunk = 2500;
+
+#else
+
+    if (M4MP4W_VIDEO_MAX_AU_PER_CHUNK > 0)
+    {
+        averageVideoChunk = 100 * M4MP4W_VIDEO_MAX_AU_PER_CHUNK - 20
+            * (M4MP4W_VIDEO_MAX_AU_PER_CHUNK - 1); /* margin 20% */
+    }
+    else
+    {
+        /* assume an average of 50 frames per chunk */
+        averageVideoChunk = 5000;
+    }
+
+#endif
+
+    M4MP4W_STTS_ALLOC_SIZE = nbVideoFrames * sizeof(M4OSA_UInt32);
+    M4MP4W_STSZ_ALLOC_SIZE = nbVideoFrames * sizeof(M4OSA_UInt16);
+    M4MP4W_STSS_ALLOC_SIZE = nbVideoFrames * sizeof(
+        M4OSA_UInt32); /* very conservative (all images are intra) */
+
+    M4MP4W_CHUNK_ALLOC_NB = ( nbVideoFrames * 100) / averageVideoChunk + 1;
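+
+    /* Where the "12" above comes from (approximate per-frame budget):
+       4 bytes of STTS + 2 bytes of STSZ + up to 4 bytes of STSS per stored
+       frame, plus a small share of the per-chunk tables. */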
+
+    /* AUDIO */
+
+    nbAudioFrames = nbVideoFrames;
+    /* audio is 5 fps, which is the smallest framerate for video */
+
+    M4MP4W_STTS_AUDIO_ALLOC_SIZE = 100; /* compressed */
+    M4MP4W_STSZ_AUDIO_ALLOC_SIZE = 100; /* compressed */
+
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+
+    M4MP4W_CHUNK_AUDIO_ALLOC_NB = nbAudioFrames / 10 + 1;
+
+#else
+
+    M4MP4W_CHUNK_AUDIO_ALLOC_NB = nbAudioFrames / 38 + 1;
+
+#endif
+
+    return M4NO_ERROR;
+
+#else
+
+    /* VIDEO 5 min at 25 fps null-enc */
+
+    M4MP4W_STTS_ALLOC_SIZE = 20000;
+    M4MP4W_STSZ_ALLOC_SIZE = 18000;
+    M4MP4W_STSS_ALLOC_SIZE = 5000;
+    M4MP4W_CHUNK_ALLOC_NB = 500;
+
+    /* AUDIO 2 min aac+ null-enc */
+
+    M4MP4W_STTS_AUDIO_ALLOC_SIZE = 32000;
+    M4MP4W_STSZ_AUDIO_ALLOC_SIZE = 20000;
+    M4MP4W_CHUNK_AUDIO_ALLOC_NB = 1000;
+
+    return M4NO_ERROR;
+
+#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_openWrite(M4OSA_Context *contextPtr,
+                           void *outputFileDescriptor,
+                           M4OSA_FileWriterPointer *fileWriterFunction,
+                           void *tempFileDescriptor,
+                           M4OSA_FileReadPointer *fileReaderFunction )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = M4OSA_NULL;
+
+    ERR_CHECK(M4OSA_NULL != contextPtr, M4ERR_PARAMETER);
+    ERR_CHECK(M4OSA_NULL != outputFileDescriptor, M4ERR_PARAMETER);
+    ERR_CHECK(M4OSA_NULL != fileWriterFunction, M4ERR_PARAMETER);
+#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
+    /* Optional, feature won't be used if NULL */
+
+    M4OSA_TRACE2_1("tempFileDescriptor = %p", tempFileDescriptor);
+
+    if (M4OSA_NULL == tempFileDescriptor)
+    {
+        M4OSA_TRACE1_0(
+            "tempFileDescriptor is NULL, RESERVED_MOOV_DISK_SPACE feature not used");
+    }
+
+#else /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+    /* Not used : ERR_CHECK(M4OSA_NULL != tempFileDescriptor, M4ERR_PARAMETER); */
+#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+    /* Not used : ERR_CHECK(M4OSA_NULL != fileReaderFunction, M4ERR_PARAMETER); */
+
+    /* The context reuse mode was suppressed*/
+
+    mMp4FileDataPtr =
+        (M4MP4W_Mp4FileData *)M4OSA_malloc(sizeof(M4MP4W_Mp4FileData),
+        M4MP4_WRITER, (M4OSA_Char *)"MP4 writer context");
+    ERR_CHECK(mMp4FileDataPtr != M4OSA_NULL, M4ERR_ALLOC);
+    mMp4FileDataPtr->url = outputFileDescriptor;
+    mMp4FileDataPtr->audioTrackPtr = M4OSA_NULL;
+    mMp4FileDataPtr->videoTrackPtr = M4OSA_NULL;
+    mMp4FileDataPtr->MaxChunkSize = M4MP4W_DefaultMaxChunkSize; /*default  */
+    mMp4FileDataPtr->MaxAUSize = M4MP4W_DefaultMaxAuSize;       /*default  */
+    mMp4FileDataPtr->InterleaveDur =
+        M4MP4W_DefaultInterleaveDur; /*default = 0, i.e. not used*/
+    mMp4FileDataPtr->MaxFileSize = 0; /*default = 0, i.e. not used*/
+    mMp4FileDataPtr->camcoderVersion = 0; /*default is " 0.0.0"*/
+    mMp4FileDataPtr->embeddedString =
+        M4OSA_NULL; /*default is in BlockSignatureSkipDefaultEmbeddedString */
+    mMp4FileDataPtr->integrationTag = M4OSA_NULL; /*default is 0 */
+    mMp4FileDataPtr->MaxFileDuration = 0; /*default = 0, i.e. not used*/
+
+    mMp4FileDataPtr->fileWriterFunctions = fileWriterFunction;
+    mMp4FileDataPtr->hasAudio = M4OSA_FALSE;
+    mMp4FileDataPtr->hasVideo = M4OSA_FALSE;
+    mMp4FileDataPtr->state = M4MP4W_opened;
+    mMp4FileDataPtr->duration = 0; /*i*/
+    /*patch for integrationTag 174 -> 238 (+64)*/
+    mMp4FileDataPtr->filesize =
+        238; /*initialization with constant part in ftyp+mdat+moov+skip*/
+
+    mMp4FileDataPtr->estimateAudioSize = M4OSA_FALSE;
+    mMp4FileDataPtr->audioMsChunkDur =
+        0; /*set and used only when estimateAudioSize is true*/
+    mMp4FileDataPtr->audioMsStopTime =
+        0; /*set and used only when estimateAudioSize is true*/
+
+    mMp4FileDataPtr->fileWriterContext = M4OSA_NULL;
+    /* + CRLV6775 -H.264 trimming */
+    mMp4FileDataPtr->bMULPPSSPS = M4OSA_FALSE;
+    /* - CRLV6775 -H.264 trimming */
+
+#ifndef _M4MP4W_MOOV_FIRST
+
+    mMp4FileDataPtr->absoluteCurrentPos =
+        32; /*init with ftyp + beginning of mdat size*/
+
+#endif
+
+#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
+
+    mMp4FileDataPtr->safetyFileUrl = tempFileDescriptor;
+    mMp4FileDataPtr->cleanSafetyFile =
+        M4OSA_FALSE; /* No need to clean it just yet. */
+
+#endif               /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+
+    /* ftyp atom */
+
+    M4OSA_memset((M4OSA_MemAddr8) &mMp4FileDataPtr->ftyp,
+        sizeof(mMp4FileDataPtr->ftyp), 0);
+
+    *contextPtr = mMp4FileDataPtr;
+
+    M4MP4W_initializeAllocationParameters(mMp4FileDataPtr);
+
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_addStream(M4OSA_Context context,
+                           M4SYS_StreamDescription *streamDescPtr )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+
+    ERR_CHECK(M4OSA_NULL != context, M4ERR_PARAMETER);
+
+    ERR_CHECK(( mMp4FileDataPtr->state == M4MP4W_opened)
+        || (mMp4FileDataPtr->state == M4MP4W_ready), M4ERR_STATE);
+    mMp4FileDataPtr->state = M4MP4W_ready;
+
+    switch (streamDescPtr->streamType)
+    {
+        case M4SYS_kAMR:
+        case M4SYS_kAAC:
+        case M4SYS_kEVRC:
+            /*Audio*/
+            ERR_CHECK(streamDescPtr->streamID == AudioStreamID,
+                M4ERR_PARAMETER);
+
+            /*check if an audio track has already been added*/
+            ERR_CHECK(mMp4FileDataPtr->hasAudio == M4OSA_FALSE,
+                M4ERR_BAD_CONTEXT);
+
+            /*check if alloc need to be done*/
+            if (mMp4FileDataPtr->audioTrackPtr == M4OSA_NULL)
+            {
+                mMp4FileDataPtr->audioTrackPtr = (M4MP4W_AudioTrackData
+                    *)M4OSA_malloc(sizeof(M4MP4W_AudioTrackData),
+                    M4MP4_WRITER, (M4OSA_Char *)"M4MP4W_AudioTrackData");
+                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL,
+                    M4ERR_ALLOC);
+
+                /**
+                * We must init these pointers in case an alloc below fails */
+                mMp4FileDataPtr->audioTrackPtr->Chunk = M4OSA_NULL;
+                mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable = M4OSA_NULL;
+                mMp4FileDataPtr->audioTrackPtr->chunkSizeTable = M4OSA_NULL;
+                mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable = M4OSA_NULL;
+                mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable = M4OSA_NULL;
+                mMp4FileDataPtr->audioTrackPtr->TABLE_STTS = M4OSA_NULL;
+                mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ = M4OSA_NULL;
+                mMp4FileDataPtr->audioTrackPtr->DSI = M4OSA_NULL;
+
+                /*now dynamic*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+                mMp4FileDataPtr->audioTrackPtr->Chunk =
+                    (M4OSA_UChar ** )M4OSA_malloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
+                    * sizeof(M4OSA_UChar *),
+                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->Chunk");
+                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->Chunk != M4OSA_NULL,
+                    M4ERR_ALLOC);
+
+#else
+
+                mMp4FileDataPtr->audioTrackPtr->Chunk =
+                    (M4OSA_UChar ** )M4OSA_malloc(sizeof(M4OSA_UChar *),
+                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->Chunk");
+                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->Chunk != M4OSA_NULL,
+                    M4ERR_ALLOC);
+                mMp4FileDataPtr->audioTrackPtr->Chunk[0] = M4OSA_NULL;
+
+                mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable =
+                    (M4OSA_UInt32 *)M4OSA_malloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
+                    * sizeof(M4OSA_UInt32),
+                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->chunkOffsetTable");
+                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable
+                    != M4OSA_NULL, M4ERR_ALLOC);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+                mMp4FileDataPtr->audioTrackPtr->TABLE_STTS =
+                    (M4OSA_UInt32 *)M4OSA_malloc(M4MP4W_STTS_AUDIO_ALLOC_SIZE,
+                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->TABLE_STTS");
+                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->TABLE_STTS
+                    != M4OSA_NULL, M4ERR_ALLOC);
+                mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedSttsBlocks = 1;
+
+                mMp4FileDataPtr->audioTrackPtr->chunkSizeTable =
+                    (M4OSA_UInt32 *)M4OSA_malloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
+                    * sizeof(M4OSA_UInt32),
+                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->chunkSizeTable");
+                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkSizeTable
+                    != M4OSA_NULL, M4ERR_ALLOC);
+                mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable =
+                    (M4OSA_UInt32 *)M4OSA_malloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
+                    * sizeof(M4OSA_UInt32),
+                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->chunkSampleNbTable");
+                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable
+                    != M4OSA_NULL, M4ERR_ALLOC);
+                mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable =
+                    (M4OSA_UInt32 *)M4OSA_malloc(M4MP4W_CHUNK_AUDIO_ALLOC_NB
+                    * sizeof(M4OSA_UInt32),
+                    M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->chunkTimeMsTable");
+                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable
+                    != M4OSA_NULL, M4ERR_ALLOC);
+
+                mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk = 0;
+            }
+            mMp4FileDataPtr->hasAudio = M4OSA_TRUE;
+            mMp4FileDataPtr->filesize += 402;
+            mMp4FileDataPtr->audioTrackPtr->MaxChunkSize =
+                mMp4FileDataPtr->MaxChunkSize; /* init value */
+            mMp4FileDataPtr->audioTrackPtr->MaxAUSize =
+                mMp4FileDataPtr->MaxAUSize;
+            mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS = 0;
+            mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb = 0;
+            mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize = 0;
+            mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb = 1;
+            mMp4FileDataPtr->audioTrackPtr->CommonData.timescale =
+                streamDescPtr->timeScale;
+            mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[0] = 0;     /*init*/
+            mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable[0] = 0; /*init*/
+            mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable[0] = 0;   /*init*/
+            mMp4FileDataPtr->audioTrackPtr->currentChunk =
+                0; /*1st chunk is Chunk[0]*/
+            mMp4FileDataPtr->audioTrackPtr->currentPos = 0;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+            mMp4FileDataPtr->audioTrackPtr->currentStsc = 0;
+
+#endif
+
+            mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_ready;
+            mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks = 0;
+            mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ = M4OSA_NULL;
+
+            mMp4FileDataPtr->audioTrackPtr->avgBitrate =
+                streamDescPtr->averageBitrate;
+            mMp4FileDataPtr->audioTrackPtr->maxBitrate =
+                streamDescPtr->maxBitrate;
+
+            if (streamDescPtr->streamType == M4SYS_kAMR)
+            {
+
+                mMp4FileDataPtr->audioTrackPtr->CommonData.trackType =
+                    M4SYS_kAMR;
+                ERR_CHECK(streamDescPtr->timeScale == 8000, M4ERR_PARAMETER);
+                mMp4FileDataPtr->audioTrackPtr->sampleDuration =
+                    160; /*AMR8+timescale=8000 => sample duration 160 constant*/
+
+                /*Use given DSI if passed, else use default value*/
+                if (streamDescPtr->decoderSpecificInfoSize != 0)
+                {
+                    /*amr DSI is 9 bytes long !*/
+                    mMp4FileDataPtr->audioTrackPtr->dsiSize =
+                        9; /*always 9 for amr*/
+                    ERR_CHECK(streamDescPtr->decoderSpecificInfoSize == 9,
+                        M4ERR_PARAMETER);
+                    mMp4FileDataPtr->audioTrackPtr->DSI =
+                        (M4OSA_UChar *)M4OSA_malloc(9, M4MP4_WRITER,
+                        (M4OSA_Char *)"audioTrackPtr->DSI");
+                    ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL,
+                        M4ERR_ALLOC);
+                    M4OSA_memcpy(
+                        (M4OSA_MemAddr8)mMp4FileDataPtr->audioTrackPtr->DSI,
+                        (M4OSA_MemAddr8)streamDescPtr->decoderSpecificInfo,
+                        9);
+                }
+                else
+                {
+                    mMp4FileDataPtr->audioTrackPtr->DSI =
+                        M4OSA_NULL; /*default static block will be used*/
+                    mMp4FileDataPtr->audioTrackPtr->dsiSize =
+                        0; /*but the actual static dsi is 9 bytes !*/
+                }
+            }
+            else if (streamDescPtr->streamType == M4SYS_kEVRC)
+            {
+
+                mMp4FileDataPtr->audioTrackPtr->CommonData.trackType =
+                    M4SYS_kEVRC;
+                ERR_CHECK(streamDescPtr->timeScale == 8000, M4ERR_PARAMETER);
+                mMp4FileDataPtr->audioTrackPtr->sampleDuration =
+                    160; /*EVRC+timescale=8000 => sample duration 160 constant*/
+
+                /*Use given DSI if passed, else use default value*/
+                if (streamDescPtr->decoderSpecificInfoSize != 0)
+                {
+                    /*evrc DSI is 6 bytes long !*/
+                    mMp4FileDataPtr->audioTrackPtr->dsiSize =
+                        6; /*always 6 for evrc*/
+                    ERR_CHECK(streamDescPtr->decoderSpecificInfoSize == 6,
+                        M4ERR_PARAMETER);
+                    mMp4FileDataPtr->audioTrackPtr->DSI =
+                        (M4OSA_UChar *)M4OSA_malloc(6, M4MP4_WRITER,
+                        (M4OSA_Char *)"audioTrackPtr->DSI");
+                    ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL,
+                        M4ERR_ALLOC);
+                    M4OSA_memcpy(
+                        (M4OSA_MemAddr8)mMp4FileDataPtr->audioTrackPtr->DSI,
+                        (M4OSA_MemAddr8)streamDescPtr->decoderSpecificInfo,
+                        6);
+                }
+                else
+                {
+                    mMp4FileDataPtr->audioTrackPtr->DSI =
+                        M4OSA_NULL; /*default static block will be used*/
+                    mMp4FileDataPtr->audioTrackPtr->dsiSize =
+                        0; /*but the actual static dsi is 6 bytes !*/
+                }
+            }
+            else /*M4SYS_kAAC*/
+            {
+                /*avg bitrate should be set*/
+                ERR_CHECK(streamDescPtr->averageBitrate != -1, M4ERR_PARAMETER);
+                ERR_CHECK(streamDescPtr->maxBitrate != -1, M4ERR_PARAMETER);
+
+                mMp4FileDataPtr->audioTrackPtr->CommonData.trackType =
+                    M4SYS_kAAC;
+                mMp4FileDataPtr->audioTrackPtr->sampleDuration =
+                    0; /*don't know for aac, so set 0*/
+
+                mMp4FileDataPtr->audioTrackPtr->dsiSize =
+                    (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize;
+
+                if (mMp4FileDataPtr->audioTrackPtr->dsiSize != 0)
+                {
+                    mMp4FileDataPtr->audioTrackPtr->DSI =
+                        (M4OSA_UChar *)M4OSA_malloc(
+                        streamDescPtr->decoderSpecificInfoSize,
+                        M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->DSI");
+                    ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL,
+                        M4ERR_ALLOC);
+                    M4OSA_memcpy(
+                        (M4OSA_MemAddr8)mMp4FileDataPtr->audioTrackPtr->DSI,
+                        (M4OSA_MemAddr8)streamDescPtr->decoderSpecificInfo,
+                        streamDescPtr->decoderSpecificInfoSize);
+                }
+                else
+                {
+                    /*no dsi: return bad parameter ?*/
+                    return M4ERR_PARAMETER;
+                }
+            }
+
+            break;
+
+        case (M4SYS_kMPEG_4):
+        case (M4SYS_kH264):
+        case (M4SYS_kH263):
+            /*Video*/
+            ERR_CHECK(streamDescPtr->streamID == VideoStreamID,
+                M4ERR_PARAMETER);
+
+            /*check if a video track has already been added*/
+            ERR_CHECK(mMp4FileDataPtr->hasVideo == M4OSA_FALSE,
+                M4ERR_BAD_CONTEXT);
+
+            /*check if allocation needs to be done*/
+            if (mMp4FileDataPtr->videoTrackPtr == M4OSA_NULL)
+            {
+                mMp4FileDataPtr->videoTrackPtr = (M4MP4W_VideoTrackData
+                    *)M4OSA_malloc(sizeof(M4MP4W_VideoTrackData),
+                    M4MP4_WRITER, (M4OSA_Char *)"M4MP4W_VideoTrackData");
+                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL,
+                    M4ERR_ALLOC);
+
+                /**
+                * We must init these pointers in case an alloc below fails */
+                mMp4FileDataPtr->videoTrackPtr->Chunk = M4OSA_NULL;
+                mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable = M4OSA_NULL;
+                mMp4FileDataPtr->videoTrackPtr->chunkSizeTable = M4OSA_NULL;
+                mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable = M4OSA_NULL;
+                mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable = M4OSA_NULL;
+                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS = M4OSA_NULL;
+                mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ = M4OSA_NULL;
+                mMp4FileDataPtr->videoTrackPtr->TABLE_STSS = M4OSA_NULL;
+                mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
+
+                /*now dynamic*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+                mMp4FileDataPtr->videoTrackPtr->Chunk =
+                    (M4OSA_UChar ** )M4OSA_malloc(M4MP4W_CHUNK_ALLOC_NB
+                    * sizeof(M4OSA_UChar *),
+                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->Chunk");
+                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk != M4OSA_NULL,
+                    M4ERR_ALLOC);
+
+#else
+                /*re-use the same chunk and flush it when full*/
+
+                mMp4FileDataPtr->videoTrackPtr->Chunk =
+                    (M4OSA_UChar ** )M4OSA_malloc(sizeof(M4OSA_UChar *),
+                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->Chunk");
+                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk != M4OSA_NULL,
+                    M4ERR_ALLOC);
+                mMp4FileDataPtr->videoTrackPtr->Chunk[0] = M4OSA_NULL;
+
+                mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable =
+                    (M4OSA_UInt32 *)M4OSA_malloc(M4MP4W_CHUNK_ALLOC_NB
+                    * sizeof(M4OSA_UInt32),
+                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->chunkOffsetTable");
+                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable
+                    != M4OSA_NULL, M4ERR_ALLOC);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk != M4OSA_NULL,
+                    M4ERR_ALLOC);
+                mMp4FileDataPtr->videoTrackPtr->chunkSizeTable =
+                    (M4OSA_UInt32 *)M4OSA_malloc(M4MP4W_CHUNK_ALLOC_NB
+                    * sizeof(M4OSA_UInt32),
+                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->chunkSizeTable");
+                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkSizeTable
+                    != M4OSA_NULL, M4ERR_ALLOC);
+                mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable =
+                    (M4OSA_UInt32 *)M4OSA_malloc(M4MP4W_CHUNK_ALLOC_NB
+                    * sizeof(M4OSA_UInt32),
+                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->chunkSampleNbTable");
+                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable
+                    != M4OSA_NULL, M4ERR_ALLOC);
+                mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable =
+                    (M4MP4W_Time32 *)M4OSA_malloc(M4MP4W_CHUNK_ALLOC_NB
+                    * sizeof(M4MP4W_Time32),
+                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->chunkTimeMsTable");
+                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable
+                    != M4OSA_NULL, M4ERR_ALLOC);
+
+                mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk = 0;
+                /*tables are now dynamic*/
+                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS =
+                    (M4OSA_UInt32 *)M4OSA_malloc(M4MP4W_STTS_ALLOC_SIZE,
+                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->TABLE_STTS");
+                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STTS
+                    != M4OSA_NULL, M4ERR_ALLOC);
+                mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedSttsBlocks = 1;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+                mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ =
+                    (M4OSA_UInt16 *)M4OSA_malloc(M4MP4W_STSZ_ALLOC_SIZE,
+                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->TABLE_STSZ");
+
+#else
+
+                mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ =
+                    (M4OSA_UInt32 *)M4OSA_malloc(M4MP4W_STSZ_ALLOC_SIZE,
+                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->TABLE_STSZ");
+
+#endif
+
+                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ
+                    != M4OSA_NULL, M4ERR_ALLOC);
+                mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks = 1;
+                mMp4FileDataPtr->videoTrackPtr->TABLE_STSS =
+                    (M4OSA_UInt32 *)M4OSA_malloc(M4MP4W_STSS_ALLOC_SIZE,
+                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->TABLE_STSS");
+                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STSS
+                    != M4OSA_NULL, M4ERR_ALLOC);
+                mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStssBlocks = 1;
+            }
+            mMp4FileDataPtr->hasVideo = M4OSA_TRUE;
+            mMp4FileDataPtr->filesize += 462;
+            mMp4FileDataPtr->videoTrackPtr->width = M4MP4W_DefaultWidth;
+            mMp4FileDataPtr->videoTrackPtr->height = M4MP4W_DefaultHeight;
+            mMp4FileDataPtr->videoTrackPtr->MaxAUSize =
+                mMp4FileDataPtr->MaxAUSize;
+            mMp4FileDataPtr->videoTrackPtr->CommonData.trackType =
+                streamDescPtr->streamType;
+            mMp4FileDataPtr->videoTrackPtr->MaxChunkSize =
+                mMp4FileDataPtr->MaxChunkSize; /* init value */
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+            mMp4FileDataPtr->videoTrackPtr->MaxAUperChunk =
+                M4MP4W_VIDEO_MAX_AU_PER_CHUNK;
+
+#endif
+
+            ERR_CHECK(streamDescPtr->timeScale == 1000, M4ERR_PARAMETER);
+            mMp4FileDataPtr->videoTrackPtr->CommonData.timescale = 1000;
+            mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS = 0;
+            mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb = 0;
+            mMp4FileDataPtr->videoTrackPtr->CommonData.sampleSize = 0;
+            mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb = 1;
+            mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[0] = 0;     /*init*/
+            mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable[0] = 0; /*init*/
+            mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable[0] = 0;   /*init*/
+            mMp4FileDataPtr->videoTrackPtr->currentChunk =
+                0; /*1st chunk is Chunk[0]*/
+            mMp4FileDataPtr->videoTrackPtr->currentPos = 0;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+            mMp4FileDataPtr->videoTrackPtr->currentStsc = 0;
+
+#endif
+
+            mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb = 0;
+            mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_ready;
+
+            if (streamDescPtr->streamType == M4SYS_kH263)
+            {
+                if (( streamDescPtr->averageBitrate == -1)
+                    || (streamDescPtr->maxBitrate == -1))
+                {
+                    /*the bitrate will not be written if the bitrate information
+                     is not fully set */
+                    mMp4FileDataPtr->videoTrackPtr->avgBitrate = -1;
+                    mMp4FileDataPtr->videoTrackPtr->maxBitrate = -1;
+                }
+                else
+                {
+                    /*proprietary storage of h263 bitrate.
+                     Warning: not the actual bitrate (bit set to 1).*/
+                    mMp4FileDataPtr->videoTrackPtr->avgBitrate =
+                        streamDescPtr->averageBitrate;
+                    mMp4FileDataPtr->videoTrackPtr->maxBitrate =
+                        streamDescPtr->maxBitrate;
+                }
+
+                if (( 0 != streamDescPtr->decoderSpecificInfoSize)
+                    && (M4OSA_NULL != streamDescPtr->decoderSpecificInfo))
+                {
+                    /*the decoder specific info is supposed to always be 7 bytes long */
+                    ERR_CHECK(streamDescPtr->decoderSpecificInfoSize == 7,
+                        M4ERR_PARAMETER);
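+                    /* Note: these 7 bytes presumably correspond to the 'd263'
+                     box payload (4-byte vendor code, 1-byte decoder version,
+                     1-byte H.263 level, 1-byte H.263 profile); this is an
+                     assumption, the bytes are simply stored as-is. */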
+                    mMp4FileDataPtr->videoTrackPtr->dsiSize =
+                        (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize;
+                    mMp4FileDataPtr->videoTrackPtr->DSI =
+                        (M4OSA_UChar *)M4OSA_malloc(
+                        streamDescPtr->decoderSpecificInfoSize,
+                        M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+                    ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI != M4OSA_NULL,
+                        M4ERR_ALLOC);
+                    M4OSA_memcpy(
+                        (M4OSA_MemAddr8)mMp4FileDataPtr->videoTrackPtr->DSI,
+                        (M4OSA_MemAddr8)streamDescPtr->decoderSpecificInfo,
+                        streamDescPtr->decoderSpecificInfoSize);
+                }
+                else
+                {
+                    /*use the default dsi*/
+                    mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
+                    mMp4FileDataPtr->videoTrackPtr->dsiSize = 0;
+                }
+            }
+
+            if (streamDescPtr->streamType == M4SYS_kMPEG_4)
+            {
+                mMp4FileDataPtr->filesize += 22; /*extra bytes compared to the H.263 case*/
+                /* allow DSI to be M4OSA_NULL, in which case the actual DSI will be
+                 set by setOption. */
+                if (( 0 == streamDescPtr->decoderSpecificInfoSize)
+                    || (M4OSA_NULL == streamDescPtr->decoderSpecificInfo))
+                {
+                    mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
+                    mMp4FileDataPtr->videoTrackPtr->dsiSize = 0;
+                }
+                else
+                {
+                    /*MP4V specific*/
+                    /*the decoder specific info size is supposed to always be
+                        < 105 bytes so that the ESD size can be coded on 1 byte*/
+                    /*(this should not be restrictive because the DSI is always shorter)*/
+                    ERR_CHECK(streamDescPtr->decoderSpecificInfoSize < 105,
+                        M4ERR_PARAMETER);
+                    mMp4FileDataPtr->videoTrackPtr->dsiSize =
+                        (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize;
+                    mMp4FileDataPtr->videoTrackPtr->DSI =
+                        (M4OSA_UChar *)M4OSA_malloc(
+                        streamDescPtr->decoderSpecificInfoSize,
+                        M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+                    ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI != M4OSA_NULL,
+                        M4ERR_ALLOC);
+                    M4OSA_memcpy(
+                        (M4OSA_MemAddr8)mMp4FileDataPtr->videoTrackPtr->DSI,
+                        (M4OSA_MemAddr8)streamDescPtr->decoderSpecificInfo,
+                        streamDescPtr->decoderSpecificInfoSize);
+                    mMp4FileDataPtr->filesize +=
+                        streamDescPtr->decoderSpecificInfoSize;
+                }
+                /*avg bitrate should be set*/
+                ERR_CHECK(streamDescPtr->averageBitrate != -1, M4ERR_PARAMETER);
+                mMp4FileDataPtr->videoTrackPtr->avgBitrate =
+                    streamDescPtr->averageBitrate;
+                mMp4FileDataPtr->videoTrackPtr->maxBitrate =
+                    streamDescPtr->averageBitrate;
+            }
+
+            if (streamDescPtr->streamType == M4SYS_kH264)
+            {
+                /* H264 specific information */
+                mMp4FileDataPtr->videoTrackPtr->avgBitrate =
+                    streamDescPtr->averageBitrate;
+                mMp4FileDataPtr->videoTrackPtr->maxBitrate =
+                    streamDescPtr->maxBitrate;
+
+                if ((0 != streamDescPtr->decoderSpecificInfoSize)
+                    && (M4OSA_NULL != streamDescPtr->decoderSpecificInfo))
+                {
+                    /* + H.264 trimming */
+                    if (M4OSA_TRUE == mMp4FileDataPtr->bMULPPSSPS)
+                    {
+                        M4OSA_UInt16 SPSLength, PPSLength;
+                        M4OSA_UInt16 *DSI;
+                        /* Store the DSI size */
+                        mMp4FileDataPtr->videoTrackPtr->dsiSize =
+                            (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize
+                            - 24;
+
+                        /* Copy the DSI (SPS + PPS) */
+                        mMp4FileDataPtr->videoTrackPtr->DSI =
+                            (M4OSA_UChar *)M4OSA_malloc(
+                            streamDescPtr->decoderSpecificInfoSize,
+                            M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+                        ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
+                            != M4OSA_NULL, M4ERR_ALLOC);
+
+                        DSI =
+                            (M4OSA_UInt16 *)streamDescPtr->decoderSpecificInfo;
+                        SPSLength = DSI[6];
+                        PPSLength = DSI[10];
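+                        /* Assumed layout of the incoming DSI in this mode: a
+                         28-byte header holding the SPS length at byte offset
+                         12 and the PPS length at byte offset 20, followed by
+                         the SPS and then the PPS. The DSI stored below is
+                         rebuilt as [SPS length][SPS][PPS length][PPS], which
+                         is why 24 header bytes are subtracted above. */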
+                        M4OSA_memcpy(
+                            (M4OSA_MemAddr8)mMp4FileDataPtr->videoTrackPtr->DSI,
+                            (M4OSA_MemAddr8)(streamDescPtr->
+                            decoderSpecificInfo)+12, 2);
+                        M4OSA_memcpy(
+                            (M4OSA_MemAddr8)(mMp4FileDataPtr->videoTrackPtr->
+                            DSI)+2, (M4OSA_MemAddr8)(streamDescPtr->
+                            decoderSpecificInfo)+28, SPSLength);
+
+                        M4OSA_memcpy(
+                            (M4OSA_MemAddr8)(mMp4FileDataPtr->videoTrackPtr->
+                            DSI)+2 + SPSLength,
+                            (M4OSA_MemAddr8)(streamDescPtr->
+                            decoderSpecificInfo)+20, 2);
+                        M4OSA_memcpy(
+                            (M4OSA_MemAddr8)(mMp4FileDataPtr->videoTrackPtr->
+                            DSI)+4 + SPSLength,
+                            (M4OSA_MemAddr8)(streamDescPtr->
+                            decoderSpecificInfo)+28 + SPSLength,
+                            PPSLength);
+                        /* - H.264 trimming */
+                    }
+                    else
+                    {
+                        /* Store the DSI size */
+                        mMp4FileDataPtr->videoTrackPtr->dsiSize =
+                            (M4OSA_UInt8)streamDescPtr->decoderSpecificInfoSize;
+
+                        /* Copy the DSI (SPS + PPS) */
+                        mMp4FileDataPtr->videoTrackPtr->DSI =
+                            (M4OSA_UChar *)M4OSA_malloc(
+                            streamDescPtr->decoderSpecificInfoSize,
+                            M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+                        ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
+                            != M4OSA_NULL, M4ERR_ALLOC);
+                        M4OSA_memcpy(
+                            (M4OSA_MemAddr8)mMp4FileDataPtr->videoTrackPtr->DSI,
+                            (M4OSA_MemAddr8)streamDescPtr->
+                            decoderSpecificInfo,
+                            streamDescPtr->decoderSpecificInfoSize);
+                    }
+                }
+                else
+                {
+                    /*use the default dsi*/
+                    mMp4FileDataPtr->videoTrackPtr->DSI = M4OSA_NULL;
+                    mMp4FileDataPtr->videoTrackPtr->dsiSize = 0;
+                }
+            }
+            break;
+
+        default:
+            err = M4ERR_PARAMETER;
+    }
+
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_startWriting( M4OSA_Context context )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 fileModeAccess = M4OSA_kFileWrite | M4OSA_kFileCreate;
+    M4OSA_UInt32 i;
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+    ERR_CHECK((mMp4FileDataPtr->state == M4MP4W_ready), M4ERR_STATE);
+    mMp4FileDataPtr->state = M4MP4W_writing;
+
+    /*audio microstate */
+    /*    if (mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL)*/
+    if (mMp4FileDataPtr->hasAudio)
+    {
+        ERR_CHECK((mMp4FileDataPtr->audioTrackPtr->microState == M4MP4W_ready),
+            M4ERR_STATE);
+        mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_writing;
+
+        /* First audio chunk allocation */
+        mMp4FileDataPtr->audioTrackPtr->Chunk[0] = (M4OSA_UChar
+            *)M4OSA_malloc(mMp4FileDataPtr->audioTrackPtr->MaxChunkSize,
+            M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->Chunk[0]");
+        ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->Chunk[0] != M4OSA_NULL,
+            M4ERR_ALLOC);
+    }
+
+    /*video microstate*/
+    /*    if (mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL)*/
+    if (mMp4FileDataPtr->hasVideo)
+    {
+        ERR_CHECK((mMp4FileDataPtr->videoTrackPtr->microState == M4MP4W_ready),
+            M4ERR_STATE);
+        mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_writing;
+
+        /* First video chunk allocation */
+        mMp4FileDataPtr->videoTrackPtr->Chunk[0] = (M4OSA_UChar
+            *)M4OSA_malloc(mMp4FileDataPtr->videoTrackPtr->MaxChunkSize,
+            M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->Chunk[0]");
+        ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk[0] != M4OSA_NULL,
+            M4ERR_ALLOC);
+    }
+
+    if (mMp4FileDataPtr->estimateAudioSize == M4OSA_TRUE)
+    {
+        /*set audioMsChunkDur (duration in ms before a new chunk is created)
+         for audio size estimation*/
+        ERR_CHECK(mMp4FileDataPtr->hasVideo, M4ERR_BAD_CONTEXT);
+        ERR_CHECK(mMp4FileDataPtr->hasAudio, M4ERR_BAD_CONTEXT);
+
+        mMp4FileDataPtr->audioMsChunkDur =
+            20 * mMp4FileDataPtr->audioTrackPtr->MaxChunkSize
+            / mMp4FileDataPtr->audioTrackPtr->MaxAUSize;
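+        /* Illustration with hypothetical values: MaxChunkSize = 4000 bytes
+         and MaxAUSize = 32 bytes give audioMsChunkDur = 20 * 4000 / 32 =
+         2500 ms, i.e. the chunk fills up after about 2.5 s of 20 ms audio
+         AUs of maximum size; a shorter InterleaveDur overrides this below. */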
+
+        if (( mMp4FileDataPtr->InterleaveDur != 0)
+            && (mMp4FileDataPtr->InterleaveDur
+            < 20 *mMp4FileDataPtr->audioTrackPtr->MaxChunkSize
+            / mMp4FileDataPtr->audioTrackPtr->MaxAUSize))
+        {
+            mMp4FileDataPtr->audioMsChunkDur = mMp4FileDataPtr->InterleaveDur;
+        }
+    }
+
+#ifndef _M4MP4W_MOOV_FIRST
+
+    /*open file in write binary mode*/
+
+    err = mMp4FileDataPtr->fileWriterFunctions->openWrite(
+        &mMp4FileDataPtr->fileWriterContext,
+        mMp4FileDataPtr->url, fileModeAccess);
+    ERR_CHECK((M4NO_ERROR == err), err);
+
+    /*ftyp atom*/
+    if (mMp4FileDataPtr->ftyp.major_brand != 0)
+    {
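+        /* The box size is 8 bytes of header (size field + 'ftyp' tag), plus
+         4 bytes of major_brand, 4 of minor_version and 4 per compatible
+         brand, hence 16 + 4 * nbCompatibleBrands. */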
+        /* Put customized ftyp box */
+        err =
+            M4MP4W_putBE32(16 + (mMp4FileDataPtr->ftyp.nbCompatibleBrands * 4),
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext);
+        ERR_CHECK((M4NO_ERROR == err), err);
+        err = M4MP4W_putBE32(M4MPAC_FTYP_TAG,
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext);
+        ERR_CHECK((M4NO_ERROR == err), err);
+        err = M4MP4W_putBE32(mMp4FileDataPtr->ftyp.major_brand,
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext);
+        ERR_CHECK((M4NO_ERROR == err), err);
+        err = M4MP4W_putBE32(mMp4FileDataPtr->ftyp.minor_version,
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext);
+        ERR_CHECK((M4NO_ERROR == err), err);
+
+        for ( i = 0; i < mMp4FileDataPtr->ftyp.nbCompatibleBrands; i++ )
+        {
+            err = M4MP4W_putBE32(mMp4FileDataPtr->ftyp.compatible_brands[i],
+                mMp4FileDataPtr->fileWriterFunctions,
+                mMp4FileDataPtr->fileWriterContext);
+            ERR_CHECK((M4NO_ERROR == err), err);
+        }
+    }
+    else
+    {
+        /* Put default ftyp box */
+        err = M4MP4W_putBlock(Default_ftyp, sizeof(Default_ftyp),
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext);
+        ERR_CHECK((M4NO_ERROR == err), err);
+    }
+
+    /*init the mdat size field with 0; the right value is set just before the file is closed*/
+    err = M4MP4W_putBE32(0, mMp4FileDataPtr->fileWriterFunctions,
+        mMp4FileDataPtr->fileWriterContext);
+    ERR_CHECK((M4NO_ERROR == err), err);
+    err = M4MP4W_putBlock(CommonBlock2, sizeof(CommonBlock2),
+        mMp4FileDataPtr->fileWriterFunctions,
+        mMp4FileDataPtr->fileWriterContext);
+    ERR_CHECK((M4NO_ERROR == err), err);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
+
+    if (0 != mMp4FileDataPtr->MaxFileSize
+        && M4OSA_NULL != mMp4FileDataPtr->safetyFileUrl)
+    {
+        M4OSA_ERR err2 = M4NO_ERROR;
+        M4OSA_Context safetyFileContext = M4OSA_NULL;
+        M4OSA_UInt32 safetyFileSize = 0, addendum = 0;
+        M4OSA_UChar dummyData[100]; /* To fill the safety file with */
+
+        err =
+            mMp4FileDataPtr->fileWriterFunctions->openWrite(&safetyFileContext,
+            mMp4FileDataPtr->safetyFileUrl, fileModeAccess);
+        ERR_CHECK((M4NO_ERROR == err), err);
+
+        mMp4FileDataPtr->cleanSafetyFile = M4OSA_TRUE;
+
+        /* 10% seems to be a reasonable worst case, but also provision for 1kb of moov overhead.*/
+        safetyFileSize = 1000 + (mMp4FileDataPtr->MaxFileSize * 10 + 99) / 100;
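+        /* Illustration with a hypothetical value: MaxFileSize = 3 000 000
+         bytes reserves 1000 + 300 000 = 301 000 bytes here, before the
+         pending-chunk sizes are added below. */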
+
+        /* Here we add space to take into account the fact we have to flush any pending
+        chunk in closeWrite, this space is the sum of the maximum chunk sizes, for each
+        track. */
+
+#ifndef _M4MP4W_UNBUFFERED_VIDEO
+
+        if (mMp4FileDataPtr->hasVideo)
+        {
+            safetyFileSize += mMp4FileDataPtr->videoTrackPtr->MaxChunkSize;
+        }
+
+#endif
+
+        if (mMp4FileDataPtr->hasAudio)
+        {
+            safetyFileSize += mMp4FileDataPtr->audioTrackPtr->MaxChunkSize;
+        }
+
+        M4OSA_memset(dummyData, sizeof(dummyData),
+            0xCA); /* For extra safety. */
+
+        for ( i = 0;
+            i < (safetyFileSize + sizeof(dummyData) - 1) / sizeof(dummyData);
+            i++ )
+        {
+            err = mMp4FileDataPtr->fileWriterFunctions->writeData(
+                safetyFileContext, dummyData, sizeof(dummyData));
+
+            if (M4NO_ERROR != err)
+                break;
+            /* Don't return from the function yet, as we need to close the file first. */
+        }
+
+        /* No need to keep the safety file open. */
+        err2 =
+            mMp4FileDataPtr->fileWriterFunctions->closeWrite(safetyFileContext);
+
+        if (M4NO_ERROR != err)
+        {
+            return err;
+        }
+        else
+            ERR_CHECK((M4NO_ERROR == err2), err2);
+
+        M4OSA_TRACE1_0("Safety file correctly created");
+    }
+#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_newAudioChunk( M4OSA_Context context,
+                               M4OSA_UInt32 *leftSpaceInChunk )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+    M4OSA_Double scale_audio;
+
+#ifndef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+    M4OSA_UInt32 reallocNb;
+
+#endif
+
+    /* video-only file: no audio track to flush */
+
+    if (mMp4FileDataPtr->audioTrackPtr == M4OSA_NULL)
+        return M4NO_ERROR;
+
+    M4OSA_TRACE1_0(" M4MP4W_newAudioChunk - flush audio");
+    M4OSA_TRACE1_2("current chunk = %d  offset = 0x%x",
+        mMp4FileDataPtr->audioTrackPtr->currentChunk,
+        mMp4FileDataPtr->absoluteCurrentPos);
+
+    scale_audio = 1000.0 / mMp4FileDataPtr->audioTrackPtr->CommonData.timescale;
+
+#ifndef _M4MP4W_MOOV_FIRST
+    /*flush chunk*/
+
+    err = M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->Chunk[0],
+        mMp4FileDataPtr->audioTrackPtr->currentPos,
+        mMp4FileDataPtr->fileWriterFunctions,
+        mMp4FileDataPtr->fileWriterContext);
+
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_FilePosition temp = mMp4FileDataPtr->absoluteCurrentPos;
+        M4OSA_TRACE2_1(
+            "M4MP4W_newAudioChunk: putBlock error when flushing chunk: %#X",
+            err);
+        /* Ouch, we got an error writing to the file, but we need to properly react so that the
+         state is still consistent and we can properly close the file so that what has been
+         recorded so far is not lost. Yay error recovery! */
+
+        /* First, we do not know where we are in the file. Put us back at where we were before
+        attempting to write the data. That way, we're consistent with the chunk state data. */
+        err = mMp4FileDataPtr->fileWriterFunctions->seek(
+            mMp4FileDataPtr->fileWriterContext,
+            M4OSA_kFileSeekBeginning, &temp);
+
+        M4OSA_TRACE2_3(
+            "Backtracking to position 0x%08X, seek returned %d and position %08X",
+            mMp4FileDataPtr->absoluteCurrentPos, err, temp);
+
+        /* Then, do not update any info whatsoever in the writing state. This will have the
+         consequence that it will be as if the chunk has not been flushed yet, and therefore
+         it will be done as part of closeWrite (where there could be room to do so,
+         if some emergency room is freed for that purpose). */
+
+        /* And lastly (for here), return that we've reached the limit of available space. */
+
+        return M4WAR_MP4W_OVERSIZE;
+    }
+
+    /*update chunk offset*/
+    mMp4FileDataPtr->audioTrackPtr->
+        chunkOffsetTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+        mMp4FileDataPtr->absoluteCurrentPos;
+
+    /*add chunk size to absoluteCurrentPos*/
+    mMp4FileDataPtr->absoluteCurrentPos +=
+        mMp4FileDataPtr->audioTrackPtr->currentPos;
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    /*update chunk info */
+
+    mMp4FileDataPtr->audioTrackPtr->
+        chunkSizeTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+        mMp4FileDataPtr->audioTrackPtr->currentPos;
+    mMp4FileDataPtr->audioTrackPtr->
+        chunkTimeMsTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+        mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS;
+
+    mMp4FileDataPtr->audioTrackPtr->currentChunk += 1;
+    /*if the amount of audio data is not estimated*/
+    if (mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
+        mMp4FileDataPtr->filesize += 16;
+
+    /*alloc new chunk*/
+    /*only if not already allocated*/
+    if (mMp4FileDataPtr->audioTrackPtr->currentChunk
+            > mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk)
+    {
+        /*update LastAllocatedChunk ( -> = currentChunk)*/
+        mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk += 1;
+
+        /*max nb of chunk is now dynamic*/
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+        if (mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk
+            + 3 > M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+        {
+            M4OSA_TRACE1_0("M4MP4W_newAudioChunk : audio chunk table is full");
+            return M4WAR_MP4W_OVERSIZE;
+        }
+
+#else
+
+        if (((mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk)
+            % M4MP4W_CHUNK_AUDIO_ALLOC_NB) == 0)
+        {
+            reallocNb = mMp4FileDataPtr->audioTrackPtr->LastAllocatedChunk
+                + M4MP4W_CHUNK_AUDIO_ALLOC_NB;
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+            mMp4FileDataPtr->audioTrackPtr->Chunk =
+                (M4OSA_UChar ** )M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->Chunk,
+                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+                * sizeof(M4OSA_UChar *),
+                reallocNb * sizeof(M4OSA_UChar *));
+            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->Chunk != M4OSA_NULL,
+                M4ERR_ALLOC);
+
+#else
+
+            mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable =
+                (M4OSA_UInt32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
+                chunkOffsetTable,
+                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+                * sizeof(M4OSA_UInt32),
+                reallocNb * sizeof(M4OSA_UInt32));
+            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+            mMp4FileDataPtr->audioTrackPtr->chunkSizeTable =
+                (M4OSA_UInt32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
+                chunkSizeTable,
+                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+                * sizeof(M4OSA_UInt32),
+                reallocNb * sizeof(M4OSA_UInt32));
+            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkSizeTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+
+            mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable =
+                (M4OSA_UInt32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
+                chunkSampleNbTable,
+                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+                * sizeof(M4OSA_UInt32),
+                reallocNb * sizeof(M4OSA_UInt32));
+            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+
+            mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable =
+                (M4MP4W_Time32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
+                chunkTimeMsTable,
+                ( reallocNb - M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+                * sizeof(M4MP4W_Time32),
+                reallocNb * sizeof(M4MP4W_Time32));
+            ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+        }
+#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+        mMp4FileDataPtr->audioTrackPtr->
+            Chunk[mMp4FileDataPtr->audioTrackPtr->currentChunk] = (M4OSA_UChar
+            *)M4OSA_malloc(mMp4FileDataPtr->audioTrackPtr->MaxChunkSize,
+            M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->currentChunk");
+        ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->
+            Chunk[mMp4FileDataPtr->audioTrackPtr->currentChunk]
+        != M4OSA_NULL, M4ERR_ALLOC);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    }
+
+    /*update leftSpaceInChunk, currentPos and currentChunkDur*/
+    *leftSpaceInChunk = mMp4FileDataPtr->audioTrackPtr->MaxChunkSize;
+    mMp4FileDataPtr->audioTrackPtr->currentPos = 0;
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+    /* check whether to use a new stsc entry or not */
+
+    if (mMp4FileDataPtr->audioTrackPtr->currentStsc > 0)
+    {
+        if (( mMp4FileDataPtr->audioTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->
+            currentStsc] & 0xFFF) != (mMp4FileDataPtr->audioTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentStsc
+            - 1] & 0xFFF))
+            mMp4FileDataPtr->audioTrackPtr->currentStsc += 1;
+    }
+    else
+        mMp4FileDataPtr->audioTrackPtr->currentStsc += 1;
+
+    /* max nb of chunk is now dynamic */
+    if (mMp4FileDataPtr->audioTrackPtr->currentStsc
+        + 3 > M4MP4W_CHUNK_AUDIO_ALLOC_NB)
+    {
+        M4OSA_TRACE1_0("M4MP4W_newAudioChunk : audio stsc table is full");
+        return M4WAR_MP4W_OVERSIZE;
+    }
+
+    /* set nb of samples in the new chunk to 0 */
+    mMp4FileDataPtr->audioTrackPtr->
+        chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentStsc] =
+        0 + (mMp4FileDataPtr->audioTrackPtr->currentChunk << 12);
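+    /* Each chunkSampleNbTable entry packs the chunk index in the upper bits
+     (hence the << 12) and the per-chunk sample count in the lower 12 bits,
+     which is why the comparisons above mask with 0xFFF. */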
+
+#else
+    /*set nb of samples in the new chunk to 0*/
+
+    mMp4FileDataPtr->audioTrackPtr->
+        chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] = 0;
+
+#endif
+
+    /*set time of the new chunk to lastCTS (for initialization; it is updated
+    later with the CTS of the last sample in the chunk)*/
+
+    mMp4FileDataPtr->audioTrackPtr->
+        chunkTimeMsTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+        (M4OSA_UInt32)(mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS
+        * scale_audio);
+
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_newVideoChunk( M4OSA_Context context,
+                               M4OSA_UInt32 *leftSpaceInChunk )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+    M4OSA_Double scale_video;
+
+#ifndef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+    M4OSA_UInt32 reallocNb;
+
+#endif
+
+    /* audio-only file: no video track to flush */
+
+    if (mMp4FileDataPtr->videoTrackPtr == M4OSA_NULL)
+        return M4NO_ERROR;
+
+    M4OSA_TRACE1_0("M4MP4W_newVideoChunk - flush video");
+    M4OSA_TRACE1_2("current chunk = %d  offset = 0x%x",
+        mMp4FileDataPtr->videoTrackPtr->currentChunk,
+        mMp4FileDataPtr->absoluteCurrentPos);
+
+    scale_video = 1000.0 / mMp4FileDataPtr->videoTrackPtr->CommonData.timescale;
+
+#ifndef _M4MP4W_MOOV_FIRST
+
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+    /* samples are already written to file */
+#else
+    /*flush chunk*/
+
+    err = M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->Chunk[0],
+        mMp4FileDataPtr->videoTrackPtr->currentPos,
+        mMp4FileDataPtr->fileWriterFunctions,
+        mMp4FileDataPtr->fileWriterContext);
+
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_FilePosition temp = mMp4FileDataPtr->absoluteCurrentPos;
+        M4OSA_TRACE2_1(
+            "M4MP4W_newVideoChunk: putBlock error when flushing chunk: %#X",
+            err);
+        /* Ouch, we got an error writing to the file, but we need to properly react so that the
+         state is still consistent and we can properly close the file so that what has been
+         recorded so far is not lost. Yay error recovery! */
+
+        /* First, we do not know where we are in the file. Put us back at where we were before
+        attempting to write the data. That way, we're consistent with the chunk state data. */
+        err = mMp4FileDataPtr->fileWriterFunctions->seek(
+            mMp4FileDataPtr->fileWriterContext,
+            M4OSA_kFileSeekBeginning, &temp);
+
+        M4OSA_TRACE2_3(
+            "Backtracking to position 0x%08X, seek returned %d and position %08X",
+            mMp4FileDataPtr->absoluteCurrentPos, err, temp);
+        /* Then, do not update any info whatsoever in the writing state. This will have the
+         consequence that it will be as if the chunk has not been flushed yet, and therefore it
+         will be done as part of closeWrite (where there could be room to do so, if some
+         emergency room is freed for that purpose). */
+
+        /* And lastly (for here), return that we've reached the limit of available space.
+         We don't care about the error originally returned by putBlock. */
+
+        return M4WAR_MP4W_OVERSIZE;
+    }
+
+#endif
+
+    /*update chunk offset*/
+
+    mMp4FileDataPtr->videoTrackPtr->
+        chunkOffsetTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+        mMp4FileDataPtr->absoluteCurrentPos;
+
+    /*add chunk size to absoluteCurrentPos*/
+    mMp4FileDataPtr->absoluteCurrentPos +=
+        mMp4FileDataPtr->videoTrackPtr->currentPos;
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    /*update chunk info before to go for a new one*/
+
+    mMp4FileDataPtr->videoTrackPtr->
+        chunkSizeTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+        mMp4FileDataPtr->videoTrackPtr->currentPos;
+    mMp4FileDataPtr->videoTrackPtr->
+        chunkTimeMsTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+        (M4OSA_UInt32)(mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS
+        * scale_video);
+
+    mMp4FileDataPtr->videoTrackPtr->currentChunk += 1;
+    mMp4FileDataPtr->filesize += 16;
+
+    /*alloc new chunk*/
+    /*only if not already allocated*/
+    if (mMp4FileDataPtr->videoTrackPtr->currentChunk
+        > mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk)
+    {
+        /*update LastAllocatedChunk ( -> = currentChunk)*/
+        mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk += 1;
+
+        /*max nb of chunk is now dynamic*/
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+        if ( mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk
+            + 3 > M4MP4W_CHUNK_ALLOC_NB)
+        {
+            M4OSA_TRACE1_0("M4MP4W_newVideoChunk : video chunk table is full");
+            return M4WAR_MP4W_OVERSIZE;
+        }
+
+#else
+
+        if (((mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk)
+            % M4MP4W_CHUNK_ALLOC_NB) == 0)
+        {
+            reallocNb = mMp4FileDataPtr->videoTrackPtr->LastAllocatedChunk
+                + M4MP4W_CHUNK_ALLOC_NB;
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+            mMp4FileDataPtr->videoTrackPtr->Chunk =
+                (M4OSA_UChar ** )M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->Chunk,
+                ( reallocNb
+                - M4MP4W_CHUNK_ALLOC_NB) * sizeof(M4OSA_UChar *),
+                reallocNb * sizeof(M4OSA_UChar *));
+            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->Chunk != M4OSA_NULL,
+                M4ERR_ALLOC);
+
+#else
+
+            mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable =
+                (M4OSA_UInt32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
+                chunkOffsetTable, ( reallocNb - M4MP4W_CHUNK_ALLOC_NB)
+                * sizeof(M4OSA_UInt32),
+                reallocNb * sizeof(M4OSA_UInt32));
+            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+            mMp4FileDataPtr->videoTrackPtr->chunkSizeTable =
+                (M4OSA_UInt32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
+                chunkSizeTable, ( reallocNb - M4MP4W_CHUNK_ALLOC_NB)
+                * sizeof(M4OSA_UInt32),
+                reallocNb * sizeof(M4OSA_UInt32));
+            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkSizeTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+
+            mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable =
+                (M4OSA_UInt32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
+                chunkSampleNbTable, ( reallocNb - M4MP4W_CHUNK_ALLOC_NB)
+                * sizeof(M4OSA_UInt32),
+                reallocNb * sizeof(M4OSA_UInt32));
+            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+
+            mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable =
+                (M4MP4W_Time32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
+                chunkTimeMsTable, ( reallocNb
+                - M4MP4W_CHUNK_ALLOC_NB) * sizeof(M4MP4W_Time32),
+                reallocNb * sizeof(M4MP4W_Time32));
+            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable
+                != M4OSA_NULL, M4ERR_ALLOC);
+        }
+#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+        mMp4FileDataPtr->videoTrackPtr->
+            Chunk[mMp4FileDataPtr->videoTrackPtr->currentChunk] = (M4OSA_UChar
+            *)M4OSA_malloc(mMp4FileDataPtr->videoTrackPtr->MaxChunkSize,
+            M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->MaxChunkSize");
+        ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->
+            Chunk[mMp4FileDataPtr->videoTrackPtr->currentChunk]
+        != M4OSA_NULL, M4ERR_ALLOC);
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    }
+
+    /*update leftSpaceInChunk, currentPos and currentChunkDur*/
+    *leftSpaceInChunk = mMp4FileDataPtr->videoTrackPtr->MaxChunkSize;
+    mMp4FileDataPtr->videoTrackPtr->currentPos = 0;
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+    /* check whether to use a new stsc entry or not */
+
+    if (mMp4FileDataPtr->videoTrackPtr->currentStsc > 0)
+    {
+        if ((mMp4FileDataPtr->videoTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->
+            currentStsc] & 0xFFF) != (mMp4FileDataPtr->videoTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentStsc
+            - 1] & 0xFFF))
+            mMp4FileDataPtr->videoTrackPtr->currentStsc += 1;
+    }
+    else
+        mMp4FileDataPtr->videoTrackPtr->currentStsc += 1;
+
+    /* max nb of chunk is now dynamic */
+    if (mMp4FileDataPtr->videoTrackPtr->currentStsc
+        + 3 > M4MP4W_CHUNK_ALLOC_NB)
+    {
+        M4OSA_TRACE1_0("M4MP4W_newVideoChunk : video stsc table is full");
+        return M4WAR_MP4W_OVERSIZE;
+    }
+
+    /* set nb of samples in the new chunk to 0 */
+    mMp4FileDataPtr->videoTrackPtr->
+        chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentStsc] =
+        0 + (mMp4FileDataPtr->videoTrackPtr->currentChunk << 12);
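+    /* As for audio: chunk index in the upper bits (<< 12), per-chunk sample
+     count in the lower 12 bits, masked with 0xFFF when compared. */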
+
+#else
+    /*set nb of samples in the new chunk to 0*/
+
+    mMp4FileDataPtr->videoTrackPtr->
+        chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] = 0;
+
+#endif
+
+    /*set time of the new chunk to lastCTS (for initialization; it is updated
+    later with the CTS of the last sample in the chunk)*/
+
+    mMp4FileDataPtr->videoTrackPtr->
+        chunkTimeMsTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+        (M4OSA_UInt32)(mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS
+        * scale_video);
+
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_startAU( M4OSA_Context context, M4SYS_StreamID streamID,
+                         M4SYS_AccessUnit *auPtr )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+
+    M4OSA_UInt32 leftSpaceInChunk;
+    M4MP4W_Time32 chunkDurMs;
+
+    M4OSA_Double scale_audio;
+    M4OSA_Double scale_video;
+
+    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+    ERR_CHECK(auPtr != M4OSA_NULL, M4ERR_PARAMETER);
+
+    M4OSA_TRACE2_0("----- M4MP4W_startAU -----");
+
+    /*check macro state*/
+    ERR_CHECK((mMp4FileDataPtr->state == M4MP4W_writing), M4ERR_STATE);
+
+    if (streamID == AudioStreamID) /*audio stream*/
+    {
+        M4OSA_TRACE2_0("M4MP4W_startAU -> audio");
+
+        scale_audio =
+            1000.0 / mMp4FileDataPtr->audioTrackPtr->CommonData.timescale;
+
+        /*audio microstate*/
+        ERR_CHECK((mMp4FileDataPtr->audioTrackPtr->microState
+            == M4MP4W_writing), M4ERR_STATE);
+        mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_writing_startAU;
+
+        leftSpaceInChunk = mMp4FileDataPtr->audioTrackPtr->MaxChunkSize
+            - mMp4FileDataPtr->audioTrackPtr->currentPos;
+
+        M4OSA_TRACE2_2("audio %d  %d",
+            mMp4FileDataPtr->audioTrackPtr->currentPos, leftSpaceInChunk);
+
+        chunkDurMs =
+            (M4OSA_UInt32)(( mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS
+            * scale_audio) - mMp4FileDataPtr->audioTrackPtr->
+            chunkTimeMsTable[mMp4FileDataPtr->audioTrackPtr->
+            currentChunk]);
+
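+        /* A new audio chunk is started when the current one cannot hold
+         another AU of maximum size, or when the interleave duration (if set)
+         has elapsed since the chunk was opened. */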
+        if ((leftSpaceInChunk < mMp4FileDataPtr->audioTrackPtr->MaxAUSize)
+            || (( mMp4FileDataPtr->InterleaveDur != 0)
+            && (chunkDurMs >= mMp4FileDataPtr->InterleaveDur)))
+        {
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+            /* only if there is at least 1 video sample in the chunk */
+
+            if ((mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL)
+                && (mMp4FileDataPtr->videoTrackPtr->currentPos > 0))
+            {
+                /* close the opened video chunk before creating a new audio one */
+                err = M4MP4W_newVideoChunk(context, &leftSpaceInChunk);
+
+                if (err != M4NO_ERROR)
+                    return err;
+            }
+
+#endif
+            /* not enough space in current chunk: create a new one */
+
+            err = M4MP4W_newAudioChunk(context, &leftSpaceInChunk);
+
+            if (err != M4NO_ERROR)
+                return err;
+        }
+
+        auPtr->size = leftSpaceInChunk;
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+        auPtr->dataAddress = (M4OSA_MemAddr32)(mMp4FileDataPtr->audioTrackPtr->
+            Chunk[mMp4FileDataPtr->audioTrackPtr->currentChunk]
+        + mMp4FileDataPtr->audioTrackPtr->currentPos);
+
+#else
+
+        auPtr->dataAddress =
+            (M4OSA_MemAddr32)(mMp4FileDataPtr->audioTrackPtr->Chunk[0]
+        + mMp4FileDataPtr->audioTrackPtr->currentPos);
+
+#endif                                   /*_M4MP4W_MOOV_FIRST*/
+
+    }
+    else if (streamID == VideoStreamID) /*video stream*/
+    {
+        M4OSA_TRACE2_0("M4MP4W_startAU -> video");
+
+        scale_video =
+            1000.0 / mMp4FileDataPtr->videoTrackPtr->CommonData.timescale;
+
+        /*video microstate*/
+        ERR_CHECK((mMp4FileDataPtr->videoTrackPtr->microState
+            == M4MP4W_writing), M4ERR_STATE);
+        mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_writing_startAU;
+
+        leftSpaceInChunk = mMp4FileDataPtr->videoTrackPtr->MaxChunkSize
+            - mMp4FileDataPtr->videoTrackPtr->currentPos;
+
+        chunkDurMs =
+            (M4OSA_UInt32)(( mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS
+            * scale_video) - mMp4FileDataPtr->videoTrackPtr->
+            chunkTimeMsTable[mMp4FileDataPtr->videoTrackPtr->
+            currentChunk]);
+
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+
+        leftSpaceInChunk = mMp4FileDataPtr->videoTrackPtr->MaxChunkSize;
+
+#endif
+
+        M4OSA_TRACE2_2("video %d  %d",
+            mMp4FileDataPtr->videoTrackPtr->currentPos, leftSpaceInChunk);
+
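+        /* A new video chunk is started when the current one cannot hold
+         another AU of maximum size, when the interleave duration (if set)
+         has elapsed, or, in phone-optimized builds, when the MaxAUperChunk
+         limit has been reached. */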
+        if (( leftSpaceInChunk < mMp4FileDataPtr->videoTrackPtr->MaxAUSize)
+            || (( mMp4FileDataPtr->InterleaveDur != 0)
+            && (chunkDurMs >= mMp4FileDataPtr->InterleaveDur))
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+            || (( mMp4FileDataPtr->videoTrackPtr->MaxAUperChunk != 0)
+            && (( mMp4FileDataPtr->videoTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->
+            currentStsc] & 0xFFF)
+            == mMp4FileDataPtr->videoTrackPtr->MaxAUperChunk))
+
+#endif
+
+            )
+        {
+            /*not enough space in current chunk: create a new one*/
+            err = M4MP4W_newVideoChunk(context, &leftSpaceInChunk);
+
+            if (err != M4NO_ERROR)
+                return err;
+        }
+
+        M4OSA_TRACE2_3("startAU: size 0x%x pos 0x%x chunk %u", auPtr->size,
+            mMp4FileDataPtr->videoTrackPtr->currentPos,
+            mMp4FileDataPtr->videoTrackPtr->currentChunk);
+
+        M4OSA_TRACE3_1("adr = 0x%p", auPtr->dataAddress);
+
+        if (auPtr->dataAddress)
+        {
+            M4OSA_TRACE3_3(" data = %08X %08X %08X", auPtr->dataAddress[0],
+                auPtr->dataAddress[1], auPtr->dataAddress[2]);
+        }
+
+        auPtr->size = leftSpaceInChunk;
+#ifdef _M4MP4W_MOOV_FIRST
+
+        if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+            == M4SYS_kH264)
+            auPtr->dataAddress =
+            (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->
+            Chunk[mMp4FileDataPtr->videoTrackPtr->currentChunk]
+        + mMp4FileDataPtr->videoTrackPtr->currentPos + 4);
+        else
+            auPtr->dataAddress =
+            (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->
+            Chunk[mMp4FileDataPtr->videoTrackPtr->currentChunk]
+        + mMp4FileDataPtr->videoTrackPtr->currentPos);
+
+#else
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+
+        if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+            == M4SYS_kH264)
+            auPtr->dataAddress =
+            (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->Chunk[0] + 4);
+        else
+            auPtr->dataAddress =
+            (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->Chunk[0]);
+
+#else
+
+        if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+            == M4SYS_kH264)
+            auPtr->dataAddress =
+            (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->Chunk[0]
+        + mMp4FileDataPtr->videoTrackPtr->currentPos
+            + 4); /* In H264, each AU must start with the length of the NALU, coded on 4 bytes */
+        else
+            auPtr->dataAddress =
+            (M4OSA_MemAddr32)(mMp4FileDataPtr->videoTrackPtr->Chunk[0]
+        + mMp4FileDataPtr->videoTrackPtr->currentPos);
+
+#endif /*_M4MP4W_UNBUFFERED_VIDEO*/
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    }
+    else
+        return M4ERR_BAD_STREAM_ID;
+
+    M4OSA_TRACE1_3("M4MPW_startAU: start address:%p, size:%lu, stream:%d",
+        auPtr->dataAddress, auPtr->size, streamID);
+
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_processAU( M4OSA_Context context, M4SYS_StreamID streamID,
+                           M4SYS_AccessUnit *auPtr )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4MP4W_Time32 delta;
+    M4MP4W_Time32 lastSampleDur;
+    M4OSA_UInt32 i;
+    /*expectedSize is the max filesize to forecast when adding a new AU:*/
+    M4OSA_UInt32 expectedSize =
+        32; /*initialized with an estimation of the max metadata space needed for an AU.*/
+    M4OSA_Double scale_audio = 0.0;
+    M4OSA_Double scale_video = 0.0;
+
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+    /*check macro state*/
+    ERR_CHECK((mMp4FileDataPtr->state == M4MP4W_writing), M4ERR_STATE);
+
+    M4OSA_TRACE2_0("M4MP4W_processAU");
+
+    if (streamID == AudioStreamID)
+        scale_audio =
+        1000.0 / mMp4FileDataPtr->audioTrackPtr->CommonData.timescale;
+
+    if (streamID == VideoStreamID)
+        scale_video =
+        1000.0 / mMp4FileDataPtr->videoTrackPtr->CommonData.timescale;
+
+    /* PL 27/10/2008: after the resurgence of the AAC 128 bug, I added a debug check that
+     the encoded data didn't overflow the available space in the AU */
+
+    switch( streamID )
+    {
+        case AudioStreamID:
+            M4OSA_DEBUG_IF1(auPtr->size
+                + mMp4FileDataPtr->audioTrackPtr->currentPos
+            > mMp4FileDataPtr->audioTrackPtr->MaxChunkSize,
+            M4ERR_CONTEXT_FAILED,
+            "Uh oh. Buffer overflow in the writer. Abandon ship!");
+            M4OSA_DEBUG_IF2(auPtr->size
+                > mMp4FileDataPtr->audioTrackPtr->MaxAUSize,
+                M4ERR_CONTEXT_FAILED,
+                "Oops. An AU went over the declared Max AU size.\
+                 You might wish to investigate that.");
+            break;
+
+        case VideoStreamID:
+            M4OSA_DEBUG_IF1(auPtr->size
+                + mMp4FileDataPtr->videoTrackPtr->currentPos
+                    > mMp4FileDataPtr->videoTrackPtr->MaxChunkSize,
+                    M4ERR_CONTEXT_FAILED,
+                    "Uh oh. Buffer overflow in the writer. Abandon ship!");
+            M4OSA_DEBUG_IF2(auPtr->size
+                    > mMp4FileDataPtr->videoTrackPtr->MaxAUSize,
+                    M4ERR_CONTEXT_FAILED,
+                    "Oops. An AU went over the declared Max AU size.\
+                     You might wish to investigate that.");
+            break;
+    }
+
+    /*only if not in the audio-with-estimateAudioSize case
+    (otherwise the size has already been estimated at this point)*/
+    if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
+        || (streamID == VideoStreamID))
+    {
+        /*check filesize if needed*/
+        if (mMp4FileDataPtr->MaxFileSize != 0)
+        {
+            expectedSize += mMp4FileDataPtr->filesize + auPtr->size;
+
+            if ((streamID == VideoStreamID)
+                && (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+                == M4SYS_kH264))
+            {
+                expectedSize += 4;
+            }
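+            /* The extra 4 bytes account for the NALU length field written
+             before each H.264 AU (see M4MP4W_startAU). */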
+
+            if (expectedSize > mMp4FileDataPtr->MaxFileSize)
+            {
+                M4OSA_TRACE1_0("processAU : !! FILESIZE EXCEEDED !!");
+
+                /* patch for autostop if MaxFileSize is exceeded */
+                M4OSA_TRACE1_0("M4MP4W_processAU : stop at targeted filesize");
+                return M4WAR_MP4W_OVERSIZE;
+            }
+        }
+    }
+
+    /*case where audioMsStopTime has already been set during video processing;
+     now check it for audio*/
+    if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_TRUE)
+        && (streamID == AudioStreamID))
+    {
+        if (mMp4FileDataPtr->audioMsStopTime <= (auPtr->CTS *scale_audio))
+        {
+            /* bugfix: if a new chunk was just created, cancel it before closing */
+            if ((mMp4FileDataPtr->audioTrackPtr->currentChunk != 0)
+                && (mMp4FileDataPtr->audioTrackPtr->currentPos == 0))
+            {
+                mMp4FileDataPtr->audioTrackPtr->currentChunk--;
+            }
+            M4OSA_TRACE1_0("M4MP4W_processAU : audio stop time reached");
+            return M4WAR_MP4W_OVERSIZE;
+        }
+    }
+
+    if (streamID == AudioStreamID) /*audio stream*/
+    {
+        M4OSA_TRACE2_0("M4MP4W_processAU -> audio");
+
+        /*audio microstate*/
+        ERR_CHECK((mMp4FileDataPtr->audioTrackPtr->microState
+            == M4MP4W_writing_startAU), M4ERR_STATE);
+        mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_writing;
+
+        mMp4FileDataPtr->audioTrackPtr->currentPos += auPtr->size;
+        /* Warning: time conversion cast 64to32! */
+        delta = (M4MP4W_Time32)auPtr->CTS
+            - mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS;
+
+        /* DEBUG stts entries which are equal to 0 */
+        M4OSA_TRACE2_1("A_DELTA = %ld\n", delta);
+
+        if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
+            == 0) /*test if first AU*/
+        {
+            /*set au size*/
+            mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize = auPtr->size;
+
+            /*sample duration is a priori constant in the audio case, */
+            /*but if at least one AU has a different size, an stsz table will be created */
+
+            /*mMp4FileDataPtr->audioTrackPtr->sampleDuration = delta; */
+            /*TODO test sample duration? (should be 20ms in AMR8, 160 ticks with timescale 8000) */
+        }
+        else
+        {
+            /*check if au size is constant (audio) */
+            /*0 sample size means non constant size*/
+            if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize != 0)
+            {
+                if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize
+                    != auPtr->size)
+                {
+                    /*first AU with a different size => non-constant size => STSZ table needed*/
+                    /*compute the number of blocks of size M4MP4W_STSZ_AUDIO_ALLOC_SIZE to allocate*/
+                    mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks =
+                        1 + mMp4FileDataPtr->audioTrackPtr->
+                        CommonData.sampleNb
+                        * 4 / M4MP4W_STSZ_AUDIO_ALLOC_SIZE;
+                    mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ =
+                        (M4OSA_UInt32 *)M4OSA_malloc(
+                        mMp4FileDataPtr->audioTrackPtr->
+                        nbOfAllocatedStszBlocks
+                        * M4MP4W_STSZ_AUDIO_ALLOC_SIZE,
+                        M4MP4_WRITER, (M4OSA_Char *)"audioTrackPtr->TABLE_STSZ");
+                    ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ
+                        != M4OSA_NULL, M4ERR_ALLOC);
+
+                    for ( i = 0;
+                        i < mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+                        i++ )
+                    {
+                        mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ[i] =
+                            mMp4FileDataPtr->audioTrackPtr->
+                            CommonData.sampleSize;
+                    }
+                    mMp4FileDataPtr->audioTrackPtr->
+                        TABLE_STSZ[mMp4FileDataPtr->audioTrackPtr->
+                        CommonData.sampleNb] = auPtr->size;
+                    mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize =
+                        0; /*used as a flag in that case*/
+                    /*more bytes in the file in that case:*/
+                    if (mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
+                        mMp4FileDataPtr->filesize +=
+                        4 * mMp4FileDataPtr->audioTrackPtr->
+                        CommonData.sampleNb;
+                }
+            }
+            /*else table already exists*/
+            else
+            {
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+                if (4 *(mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb + 3)
+                    >= mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks
+                    *M4MP4W_STSZ_AUDIO_ALLOC_SIZE)
+                {
+                    M4OSA_TRACE1_0(
+                        "M4MP4W_processAU : audio stsz table is full");
+                    return M4WAR_MP4W_OVERSIZE;
+                }
+
+#else
+
+                if (4 *mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
+                    >= mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks
+                    *M4MP4W_STSZ_AUDIO_ALLOC_SIZE)
+                {
+                    mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedStszBlocks +=
+                        1;
+                    mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ =
+                        (M4OSA_UInt32 *)M4MP4W_realloc(
+                        (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
+                        TABLE_STSZ, ( mMp4FileDataPtr->audioTrackPtr->
+                        nbOfAllocatedStszBlocks - 1)
+                        * M4MP4W_STSZ_AUDIO_ALLOC_SIZE,
+                        mMp4FileDataPtr->audioTrackPtr->
+                        nbOfAllocatedStszBlocks
+                        * M4MP4W_STSZ_AUDIO_ALLOC_SIZE);
+                    ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ
+                        != M4OSA_NULL, M4ERR_ALLOC);
+                }
+
+#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+                mMp4FileDataPtr->audioTrackPtr->
+                    TABLE_STSZ[mMp4FileDataPtr->audioTrackPtr->
+                    CommonData.sampleNb] = auPtr->size;
+
+                if (mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
+                    mMp4FileDataPtr->filesize += 4;
+            }
+        }
+
+        if (delta > mMp4FileDataPtr->audioTrackPtr->sampleDuration)
+        {
+            /* keep track of real sample duration*/
+            mMp4FileDataPtr->audioTrackPtr->sampleDuration = delta;
+        }
+
+        if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
+            == 0) /*test if first AU*/
+        {
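+            /* First audio AU: TABLE_STTS holds (sample count, sample delta) pairs. The delta of
+             the first sample is unknown until the second AU arrives, so store count=1 with a
+             placeholder delta of 0 for now (8 bytes added to the stts atom). */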
+            mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[0] = 1;
+            mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[1] = 0;
+            mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb = 1;
+            mMp4FileDataPtr->filesize += 8;
+        }
+        else if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
+            == 1) /*test if second AU*/
+        {
+#ifndef DUPLICATE_STTS_IN_LAST_AU
+
+            mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[0] += 1;
+
+#endif /*DUPLICATE_STTS_IN_LAST_AU*/
+
+            mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[1] = delta;
+            mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb += 1;
+            mMp4FileDataPtr->filesize += 8;
+        }
+        else
+        {
+            /*retrieve last sample delta*/
+            lastSampleDur = mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2
+                * (mMp4FileDataPtr->audioTrackPtr->
+                CommonData.sttsTableEntryNb - 1) - 1];
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+            if (8 *(mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb
+                + 3) >= mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedSttsBlocks
+                *M4MP4W_STTS_AUDIO_ALLOC_SIZE)
+            {
+                M4OSA_TRACE1_0("M4MP4W_processAU : audio stts table is full");
+                return M4WAR_MP4W_OVERSIZE;
+            }
+
+#else
+
+            if (8 *mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb
+                >= mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedSttsBlocks
+                *M4MP4W_STTS_AUDIO_ALLOC_SIZE)
+            {
+                mMp4FileDataPtr->audioTrackPtr->nbOfAllocatedSttsBlocks += 1;
+                mMp4FileDataPtr->audioTrackPtr->TABLE_STTS =
+                    (M4OSA_UInt32 *)M4MP4W_realloc(
+                    (M4OSA_MemAddr32)mMp4FileDataPtr->audioTrackPtr->
+                    TABLE_STTS, ( mMp4FileDataPtr->audioTrackPtr->
+                    nbOfAllocatedSttsBlocks
+                    - 1) * M4MP4W_STTS_AUDIO_ALLOC_SIZE,
+                    mMp4FileDataPtr->audioTrackPtr->
+                    nbOfAllocatedSttsBlocks
+                    * M4MP4W_STTS_AUDIO_ALLOC_SIZE);
+                ERR_CHECK(mMp4FileDataPtr->audioTrackPtr->TABLE_STTS
+                    != M4OSA_NULL, M4ERR_ALLOC);
+            }
+
+#endif                                   /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+            if (delta != lastSampleDur) /*new entry in the table*/
+            {
+                mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2 *(
+                    mMp4FileDataPtr->audioTrackPtr->
+                    CommonData.sttsTableEntryNb - 1)] = 1;
+                mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2 *(
+                    mMp4FileDataPtr->audioTrackPtr->
+                    CommonData.sttsTableEntryNb - 1) + 1] = delta;
+                mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb +=
+                    1;
+                mMp4FileDataPtr->filesize += 8;
+            }
+            else
+            {
+                /*increase by 1 the number of consecutive AUs with the same duration*/
+                mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2 *(
+                    mMp4FileDataPtr->audioTrackPtr->
+                    CommonData.sttsTableEntryNb - 1) - 2] += 1;
+            }
+        }
+        mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb += 1;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+        mMp4FileDataPtr->audioTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentStsc] +=
+            1;
+
+#else
+
+        mMp4FileDataPtr->audioTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] +=
+            1;
+
+#endif
+        /* Warning: time conversion cast 64to32! */
+
+        mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS =
+            (M4MP4W_Time32)auPtr->CTS;
+    }
+    else if (streamID == VideoStreamID) /*video stream*/
+    {
+        M4OSA_TRACE2_0("M4MP4W_processAU -> video");
+
+        /* In H264, the size of each AU (NALU) must be prepended to its data */
+        if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+            == M4SYS_kH264)
+        {
+            /* Add the size of the NALU in BE */
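+            /* dataAddress is a 32-bit word pointer (M4OSA_MemAddr32): stepping it back by one
+             word reserves the 4 bytes just before the NALU payload where the big-endian length
+             prefix is written below (hence the size += 4). */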
+            M4OSA_MemAddr8 pTmpDataAddress = M4OSA_NULL;
+            auPtr->dataAddress -= 1;
+            pTmpDataAddress = (M4OSA_MemAddr8)auPtr->dataAddress;
+
+            // write the 4-byte NALU size, most significant byte first (big-endian)
+            *pTmpDataAddress++ = (M4OSA_UInt8)((auPtr->size >> 24) & 0x000000FF);
+            *pTmpDataAddress++ = (M4OSA_UInt8)((auPtr->size >> 16) & 0x000000FF);
+            *pTmpDataAddress++ = (M4OSA_UInt8)((auPtr->size >> 8)  & 0x000000FF);
+            *pTmpDataAddress++ = (M4OSA_UInt8)((auPtr->size)       & 0x000000FF);
+
+            auPtr->size += 4;
+        }
+
+        /*video microstate*/
+        ERR_CHECK((mMp4FileDataPtr->videoTrackPtr->microState
+            == M4MP4W_writing_startAU), M4ERR_STATE);
+        mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_writing;
+
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+        /* samples are written to file now */
+
+        err = M4MP4W_putBlock((M4OSA_UChar *)auPtr->dataAddress, auPtr->size,
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext);
+
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_FilePosition temp = mMp4FileDataPtr->absoluteCurrentPos
+                + mMp4FileDataPtr->videoTrackPtr->currentPos;
+            M4OSA_TRACE2_1(
+                "M4MP4W_processAU: putBlock error when writing unbuffered video sample: %#X",
+                err);
+            /* Ouch, we got an error writing to the file, but we need to properly react so that
+             the state is still consistent and we can properly close the file so that what has
+              been recorded so far is not lost. Yay error recovery! */
+
+            /* First, we do not know where we are in the file. Put us back at where we were before
+             attempting to write the data, so that we stay consistent with the chunk and sample
+             state data. absoluteCurrentPos is only updated per chunk and points to the beginning
+             of the chunk, therefore we need to add videoTrackPtr->currentPos to know where we
+             were in the file. */
+            err = mMp4FileDataPtr->fileWriterFunctions->seek(
+                mMp4FileDataPtr->fileWriterContext,
+                M4OSA_kFileSeekBeginning, &temp);
+
+            M4OSA_TRACE2_3(
+                "Backtracking to position 0x%08X, seek returned %d and position %08X",
+                mMp4FileDataPtr->absoluteCurrentPos
+                + mMp4FileDataPtr->videoTrackPtr->currentPos, err, temp);
+
+            /* Then, do not update any info whatsoever in the writing state. This will have the
+             consequence that it will be as if the sample has never been written, so the chunk
+             will be merely closed after the previous sample (the sample we attempted to write
+             here is lost). */
+
+            /* And lastly (for here), return that we've reached the limit of available space.
+             We don't care about the error originally returned by putBlock. */
+
+            return M4WAR_MP4W_OVERSIZE;
+        }
+
+#endif
+
+        mMp4FileDataPtr->videoTrackPtr->currentPos += auPtr->size;
+
+        /* Warning: time conversion cast 64to32! */
+        delta = (M4MP4W_Time32)auPtr->CTS
+            - mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS;
+
+        /* DEBUG stts entries which are equal to 0 */
+        M4OSA_TRACE2_1("V_DELTA = %ld\n", delta);
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+        if (2 *(mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb + 3)
+            >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks
+            *M4MP4W_STSZ_ALLOC_SIZE)
+        {
+            M4OSA_TRACE1_0("M4MP4W_processAU : video stsz table is full");
+            return M4WAR_MP4W_OVERSIZE;
+        }
+
+        mMp4FileDataPtr->videoTrackPtr->
+            TABLE_STSZ[mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb] =
+            (M4OSA_UInt16)auPtr->size;
+        mMp4FileDataPtr->filesize += 4;
+
+#else
+
+        if (4 *mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb
+            >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks
+            *M4MP4W_STSZ_ALLOC_SIZE)
+        {
+            mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks += 1;
+
+            mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ =
+                (M4OSA_UInt32 *)M4MP4W_realloc(
+                (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ,
+                ( mMp4FileDataPtr->videoTrackPtr->
+                nbOfAllocatedStszBlocks
+                - 1) * M4MP4W_STSZ_ALLOC_SIZE,
+                mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStszBlocks
+                * M4MP4W_STSZ_ALLOC_SIZE);
+
+            ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ != M4OSA_NULL,
+                M4ERR_ALLOC);
+        }
+
+        mMp4FileDataPtr->videoTrackPtr->
+            TABLE_STSZ[mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb] =
+            auPtr->size;
+        mMp4FileDataPtr->filesize += 4;
+
+#endif
+
+        if (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb
+            == 0) /*test if first AU*/
+        {
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
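+            /* Phone-optimized build: each video stts entry is packed into a single 32-bit word,
+             sample count in the low half (put32_Lo) and sample delta in the high half (put32_Hi),
+             which halves the table footprint. */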
+            M4MP4W_put32_Lo(&mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[0], 1);
+            M4MP4W_put32_Hi(&mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[0], 0);
+
+#else
+
+            mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[0] = 1;
+            mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1] = 0;
+
+#endif
+
+            mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb = 1;
+            mMp4FileDataPtr->filesize += 8;
+        }
+        else if (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb
+            == 1 ) /*test if second AU*/
+        {
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+            M4MP4W_put32_Hi(&mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[0],
+                (M4OSA_UInt16)delta);
+
+#else
+
+            mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1] = delta;
+
+#endif
+
+        }
+        else
+        {
+            /*retrieve last sample delta*/
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+            lastSampleDur = M4MP4W_get32_Hi(&mMp4FileDataPtr->videoTrackPtr->
+                TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+                CommonData.sttsTableEntryNb - 1]);
+
+            if (4 *(mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb
+                + 3) >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedSttsBlocks
+                *M4MP4W_STTS_ALLOC_SIZE)
+            {
+                M4OSA_TRACE1_0("M4MP4W_processAU : video stts table is full");
+                return M4WAR_MP4W_OVERSIZE;
+            }
+
+#else
+
+            lastSampleDur = mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
+                * (mMp4FileDataPtr->videoTrackPtr->
+                CommonData.sttsTableEntryNb - 1) + 1];
+
+            if (8 *mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb
+                >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedSttsBlocks
+                *M4MP4W_STTS_ALLOC_SIZE)
+            {
+                mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedSttsBlocks += 1;
+                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS =
+                    (M4OSA_UInt32 *)M4MP4W_realloc(
+                    (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
+                    TABLE_STTS, ( mMp4FileDataPtr->videoTrackPtr->
+                    nbOfAllocatedSttsBlocks
+                    - 1) * M4MP4W_STTS_ALLOC_SIZE,
+                    mMp4FileDataPtr->videoTrackPtr->
+                    nbOfAllocatedSttsBlocks
+                    * M4MP4W_STTS_ALLOC_SIZE);
+                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STTS
+                    != M4OSA_NULL, M4ERR_ALLOC);
+            }
+
+#endif                                   /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+            if (delta != lastSampleDur) /*new entry in the table*/
+            {
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+                M4MP4W_put32_Lo(&mMp4FileDataPtr->videoTrackPtr->
+                    TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+                    CommonData.sttsTableEntryNb], 1);
+                M4MP4W_put32_Hi(&mMp4FileDataPtr->videoTrackPtr->
+                    TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+                    CommonData.sttsTableEntryNb], (M4OSA_UInt16)delta);
+
+#else
+
+                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2 *(
+                    mMp4FileDataPtr->videoTrackPtr->
+                    CommonData.sttsTableEntryNb)] = 1;
+                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
+                    *(mMp4FileDataPtr->videoTrackPtr->
+                    CommonData.sttsTableEntryNb)+1] = delta;
+
+#endif
+
+                mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb +=
+                    1;
+                mMp4FileDataPtr->filesize += 8;
+            }
+            else
+            {
+                /*increase by 1 the number of consecutive AUs with the same duration*/
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+                mMp4FileDataPtr->videoTrackPtr->
+                    TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+                    CommonData.sttsTableEntryNb - 1] += 1;
+
+#else
+
+                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2 *(
+                    mMp4FileDataPtr->videoTrackPtr->
+                    CommonData.sttsTableEntryNb - 1)] += 1;
+
+#endif
+
+            }
+        }
+
+        mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb += 1;
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+        mMp4FileDataPtr->videoTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentStsc] +=
+            1;
+
+#else
+
+        mMp4FileDataPtr->videoTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] +=
+            1;
+
+#endif
+
+        if (auPtr->attribute == AU_RAP)
+        {
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+            if (4 *(mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb + 3)
+                >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStssBlocks
+                *M4MP4W_STSS_ALLOC_SIZE)
+            {
+                M4OSA_TRACE1_0("M4MP4W_processAU : video stss table is full");
+                return M4WAR_MP4W_OVERSIZE;
+            }
+
+#else
+
+            if (4 *mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb
+                >= mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStssBlocks
+                *M4MP4W_STSS_ALLOC_SIZE)
+            {
+                mMp4FileDataPtr->videoTrackPtr->nbOfAllocatedStssBlocks += 1;
+                mMp4FileDataPtr->videoTrackPtr->TABLE_STSS =
+                    (M4OSA_UInt32 *)M4MP4W_realloc(
+                    (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->
+                    TABLE_STSS, ( mMp4FileDataPtr->videoTrackPtr->
+                    nbOfAllocatedStssBlocks
+                    - 1) * M4MP4W_STSS_ALLOC_SIZE,
+                    mMp4FileDataPtr->videoTrackPtr->
+                    nbOfAllocatedStssBlocks
+                    * M4MP4W_STSS_ALLOC_SIZE);
+                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->TABLE_STSS
+                    != M4OSA_NULL, M4ERR_ALLOC);
+            }
+
+#endif /*_M4MP4W_OPTIMIZE_FOR_PHONE*/
+
+            mMp4FileDataPtr->videoTrackPtr->
+                TABLE_STSS[mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb] =
+                mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb;
+            mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb += 1;
+            mMp4FileDataPtr->filesize += 4;
+        }
+
+        /* Warning: time conversion cast 64to32! */
+        mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS =
+            (M4MP4W_Time32)auPtr->CTS;
+    }
+    else
+        return M4ERR_BAD_STREAM_ID;
+
+    /* I moved some state modification to after we know the sample has been written correctly. */
+    if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_TRUE)
+        && (streamID == VideoStreamID))
+    {
+        mMp4FileDataPtr->audioMsStopTime =
+            (M4MP4W_Time32)(auPtr->CTS * scale_video);
+    }
+
+    if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_FALSE)
+        || (streamID == VideoStreamID))
+    {
+        /*update fileSize*/
+        mMp4FileDataPtr->filesize += auPtr->size;
+    }
+
+    if ((mMp4FileDataPtr->estimateAudioSize == M4OSA_TRUE)
+        && (streamID == VideoStreamID))
+    {
+        /*update filesize with estimated audio data that will be added later.    */
+        /*Warning: Assumption is made that:                                     */
+        /* - audio samples have constant size (e.g. no sid).                    */
+        /* - max audio sample size has been set, and is the actual sample size. */
+
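+        /* The estimate removed then re-added below assumes 0.05 * MaxAUSize bytes of audio per
+         millisecond of video (50 AMR frames per second) plus 16 / audioMsChunkDur bytes per
+         millisecond of chunk overhead; the estimate made up to the previous lastCTS is replaced
+         by one up to the current CTS. */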
+        ERR_CHECK(mMp4FileDataPtr->audioMsChunkDur != 0,
+            M4WAR_MP4W_NOT_EVALUABLE);
+        mMp4FileDataPtr->filesize -=
+            (M4OSA_UInt32)(( mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS
+            * scale_video) * (0.05/*always 50 AMR samples per second*/
+            *(M4OSA_Double)mMp4FileDataPtr->audioTrackPtr->MaxAUSize
+            + 16/*additional data for a new chunk*/
+            / (M4OSA_Double)mMp4FileDataPtr->audioMsChunkDur));
+
+        mMp4FileDataPtr->filesize += (M4OSA_UInt32)(( auPtr->CTS * scale_video)
+            * (0.05/*always 50 AMR samples per second*/
+            *(M4OSA_Double)mMp4FileDataPtr->audioTrackPtr->MaxAUSize
+            + 16/*additional data for a new chunk*/
+            / (M4OSA_Double)mMp4FileDataPtr->audioMsChunkDur));
+    }
+
+    M4OSA_TRACE1_4("processAU : size 0x%x mode %d filesize %lu limit %lu",
+        auPtr->size, auPtr->attribute, mMp4FileDataPtr->filesize,
+        mMp4FileDataPtr->MaxFileSize);
+
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_closeWrite( M4OSA_Context context )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_ERR err2 = M4NO_ERROR, err3 = M4NO_ERROR;
+
+    /*Warning: test should be done here to ensure context->pContext is not M4OSA_NULL,
+     but C is not C++...*/
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+
+    M4OSA_UChar camcoder_maj, camcoder_min, camcoder_rev; /*camcoder version*/
+    M4OSA_Bool bAudio =
+        (( mMp4FileDataPtr->hasAudio)
+        && (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb
+        != 0)); /*((mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL) &&
+                    (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb != 0));*/
+    M4OSA_Bool bVideo =
+        (( mMp4FileDataPtr->hasVideo)
+        && (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb
+        != 0)); /*((mMp4FileDataPtr->videoTrackPtr != M4OSA_NULL) &&
+                    (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb != 0));*/
+    M4OSA_Bool bH263 = M4OSA_FALSE;
+    M4OSA_Bool bH264 = M4OSA_FALSE;
+    M4OSA_Bool bMP4V = M4OSA_FALSE;
+    M4OSA_Bool bAAC = M4OSA_FALSE;
+    M4OSA_Bool bEVRC = M4OSA_FALSE;
+
+    /*intermediate variables*/
+    M4OSA_UInt32 A, B, N, AB4N;
+
+    /*Trak variables*/
+    M4OSA_UInt32 a_trakId = AudioStreamID; /*     (audio=1)*/
+    /* first trak offset is 32+moovSize, second equals 32+moovSize+1st_track_size*/
+    M4OSA_UInt32 a_trakOffset = 32;
+    M4OSA_UInt32 a_sttsSize = 24;          /* A (audio=24)*/
+    M4OSA_UInt32 a_stszSize = 20;          /* B (audio=20)*/
+    M4OSA_UInt32 a_trakSize = 402;         /*     (audio=402)*/
+    M4OSA_UInt32 a_mdiaSize = 302;         /*     (audio=302)*/
+    M4OSA_UInt32 a_minfSize = 229;         /*     (audio=229)*/
+    M4OSA_UInt32 a_stblSize = 169;         /*     (audio=169)*/
+    M4OSA_UInt32 a_stsdSize = 69;          /*     (audio=69 )*/
+    M4OSA_UInt32 a_esdSize = 53;           /*     (audio=53 )*/
+    M4OSA_UInt32 a_dataSize = 0;           /* temp: At the end, = currentPos*/
+    M4MP4W_Time32 a_trakDuration = 0;      /* equals lastCTS*/
+    M4MP4W_Time32 a_msTrakDuration = 0;
+    M4OSA_UInt32 a_stscSize = 28;          /* 16+12*nbchunksaudio*/
+    M4OSA_UInt32 a_stcoSize = 20;          /* 16+4*nbchunksaudio*/
+
+    M4OSA_UInt32 v_trakId = VideoStreamID; /* (video=2)*/
+    /* first trak offset is 32+moovSize, second equals 32+moovSize+1st_track_size*/
+    M4OSA_UInt32 v_trakOffset = 32;
+    M4OSA_UInt32 v_sttsSize = 0;      /* A (video=16+8J)*/
+    M4OSA_UInt32 v_stszSize = 0;      /* B (video=20+4K)*/
+    M4OSA_UInt32 v_trakSize = 0; /* (h263=A+B+4N+426), (mp4v=A+B+dsi+4N+448) */
+    M4OSA_UInt32 v_mdiaSize = 0; /* (h263=A+B+4N+326), (mp4v=A+B+dsi+4N+348) */
+    M4OSA_UInt32 v_minfSize = 0; /* (h263=A+B+4N+253), (mp4v=A+B+dsi+4N+275) */
+    M4OSA_UInt32 v_stblSize = 0; /* (h263=A+B+4N+189), (mp4v=A+B+dsi+4N+211) */
+    M4OSA_UInt32 v_stsdSize = 0;      /* (h263=117)        , (mp4v=139+dsi    )*/
+    M4OSA_UInt32 v_esdSize = 0;       /* (h263=101)        , (mp4v=153+dsi    )*/
+    M4OSA_UInt32 v_dataSize = 0;      /* temp: At the end, = currentPos*/
+    M4MP4W_Time32 v_trakDuration = 0; /* equals lastCTS*/
+    M4MP4W_Time32 v_msTrakDuration = 0;
+    M4OSA_UInt32 v_stscSize = 28;     /* 16+12*nbchunksvideo*/
+    M4OSA_UInt32 v_stcoSize = 20;     /* 16+4*nbchunksvideo*/
+
+    /*video variables*/
+    M4OSA_UInt32 v_stssSize = 0; /* 4*N+16     STSS*/
+
+    /*aac & mp4v temp variable*/
+    M4OSA_UInt8 dsi = 0;
+
+    /*H264 variables*/
+    M4OSA_UInt32 v_avcCSize = 0; /* dsi+15*/
+
+    /*MP4V variables*/
+    M4OSA_UInt32 v_esdsSize = 0;        /* dsi+37*/
+    M4OSA_UInt8 v_ESDescriptorSize =
+        0; /* dsi+23 (warning: check dsi<105 for coding size on 1 byte)*/
+    M4OSA_UInt8 v_DCDescriptorSize = 0; /* dsi+15*/
+
+    /*AAC variables*/
+    M4OSA_UInt32 a_esdsSize = 0;        /* dsi+37*/
+    M4OSA_UInt8 a_ESDescriptorSize =
+        0; /* dsi+23 (warning: check dsi<105 for coding size on 1 byte)*/
+    M4OSA_UInt8 a_DCDescriptorSize = 0; /* dsi+15*/
+
+    /*General variables*/
+
+    /* audio chunk size + video chunk size*/
+    M4OSA_UInt32 mdatSize = 8;
+    M4OSA_UInt32 moovSize = 116; /* 116 + 402(audio) +    (A+B+4N+426)(h263) or */
+    /*                        (A+B+dsi+4N+448)(mp4v)    */
+    M4OSA_UInt32 creationTime; /* C */
+
+    /*flag to set up the chunk interleave strategy*/
+    M4OSA_Bool bInterleaveAV =
+        (bAudio && bVideo && (mMp4FileDataPtr->InterleaveDur != 0));
+
+    M4OSA_Context fileWriterContext = mMp4FileDataPtr->fileWriterContext;
+
+    M4OSA_UInt32 i;
+
+    M4OSA_Double scale_audio = 0.0;
+    M4OSA_Double scale_video = 0.0;
+    M4MP4W_Time32 delta;
+
+#ifndef _M4MP4W_MOOV_FIRST
+
+    M4OSA_UInt32 filePos;
+    M4OSA_FilePosition moovPos, mdatPos;
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+    /*macro state */
+    mMp4FileDataPtr->state = M4MP4W_closed;
+
+    /*if no data !*/
+    if ((!bAudio) && (!bVideo))
+    {
+        err = M4NO_ERROR; /*would be better to return a warning ?*/
+        goto cleanup;
+    }
+
+#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
+    /* Remove safety file to make room for what needs to be written out here
+    (chunk flushing and moov). */
+
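+    /* Re-creating the safety file and closing it immediately truncates it to zero length
+     (assuming openWrite with M4OSA_kFileCreate truncates an existing file), which releases
+     the disk space it had reserved. */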
+    if (M4OSA_TRUE == mMp4FileDataPtr->cleanSafetyFile)
+    {
+        M4OSA_Context tempContext;
+        err = mMp4FileDataPtr->fileWriterFunctions->openWrite(&tempContext,
+            mMp4FileDataPtr->safetyFileUrl,
+            M4OSA_kFileWrite | M4OSA_kFileCreate);
+
+        if (M4NO_ERROR != err)
+            goto cleanup;
+        err = mMp4FileDataPtr->fileWriterFunctions->closeWrite(tempContext);
+
+        if (M4NO_ERROR != err)
+            goto cleanup;
+        mMp4FileDataPtr->safetyFileUrl = M4OSA_NULL;
+        mMp4FileDataPtr->cleanSafetyFile = M4OSA_FALSE;
+    }
+
+#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+
+    if (bVideo)
+    {
+        if ((M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable)
+            || (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->chunkSizeTable)
+            || (M4OSA_NULL
+            == mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable)
+            || (M4OSA_NULL
+            == mMp4FileDataPtr->videoTrackPtr->chunkTimeMsTable)
+            || (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ)
+            || (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->TABLE_STTS)
+            || (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->TABLE_STSS))
+        {
+            mMp4FileDataPtr->fileWriterFunctions->closeWrite(
+                fileWriterContext); /**< close the stream anyway */
+            M4MP4W_freeContext(context); /**< Free the context content */
+            return M4ERR_ALLOC;
+        }
+
+        /*video microstate*/
+        mMp4FileDataPtr->videoTrackPtr->microState = M4MP4W_closed;
+
+        /*current chunk is the last one and gives the total number of video chunks (-1)*/
+        for ( i = 0; i < mMp4FileDataPtr->videoTrackPtr->currentChunk; i++ )
+        {
+            v_dataSize += mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i];
+        }
+
+#ifndef _M4MP4W_MOOV_FIRST
+#ifndef _M4MP4W_UNBUFFERED_VIDEO
+        /*flush chunk*/
+
+        if (mMp4FileDataPtr->videoTrackPtr->currentPos > 0)
+        {
+            err = M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->Chunk[0],
+                mMp4FileDataPtr->videoTrackPtr->currentPos,
+                mMp4FileDataPtr->fileWriterFunctions,
+                mMp4FileDataPtr->fileWriterContext);
+
+            if (M4NO_ERROR != err)
+                goto cleanup;
+        }
+
+#endif
+
+        M4OSA_TRACE1_0("flush video | CLOSE");
+        M4OSA_TRACE1_3("current chunk = %d  offset = 0x%x size = 0x%08X",
+            mMp4FileDataPtr->videoTrackPtr->currentChunk,
+            mMp4FileDataPtr->absoluteCurrentPos,
+            mMp4FileDataPtr->videoTrackPtr->currentPos);
+
+        /*update chunk offset*/
+        mMp4FileDataPtr->videoTrackPtr->
+            chunkOffsetTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+            mMp4FileDataPtr->absoluteCurrentPos;
+
+        /*add chunk size to absoluteCurrentPos*/
+        mMp4FileDataPtr->absoluteCurrentPos +=
+            mMp4FileDataPtr->videoTrackPtr->currentPos;
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+        /*update last chunk size, and add this value to v_dataSize*/
+
+        mMp4FileDataPtr->videoTrackPtr->
+            chunkSizeTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+            mMp4FileDataPtr->videoTrackPtr->currentPos;
+        v_dataSize +=
+            mMp4FileDataPtr->videoTrackPtr->currentPos; /*add last chunk size*/
+
+        v_trakDuration = mMp4FileDataPtr->videoTrackPtr->
+            CommonData.lastCTS; /* equals lastCTS*/
+
+        /* bugfix: if a new chunk was just created, cancel it before closing */
+        if ((mMp4FileDataPtr->videoTrackPtr->currentChunk != 0)
+            && (mMp4FileDataPtr->videoTrackPtr->currentPos == 0))
+        {
+            mMp4FileDataPtr->videoTrackPtr->currentChunk--;
+        }
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+
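+        /* In the unbuffered-video build the stsc entry appears to keep the per-chunk sample
+         count in its low 12 bits (hence the 0xFFF mask): a zero count means the stsc entry was
+         just opened, so cancel it together with the chunk. */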
+        if ((mMp4FileDataPtr->videoTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->videoTrackPtr->
+            currentStsc] & 0xFFF) == 0)
+        {
+            mMp4FileDataPtr->videoTrackPtr->currentStsc--;
+        }
+
+#endif /*_M4MP4W_UNBUFFERED_VIDEO*/
+
+        /* Last sample duration */
+        /* If we have the file duration we use it, else we duplicate the last AU */
+
+        if (mMp4FileDataPtr->MaxFileDuration > 0)
+        {
+            /* use max file duration to calculate delta of last AU */
+            delta = mMp4FileDataPtr->MaxFileDuration
+                - mMp4FileDataPtr->videoTrackPtr->CommonData.lastCTS;
+            v_trakDuration = mMp4FileDataPtr->MaxFileDuration;
+
+            if (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb > 1)
+            {
+                /* if more than 1 frame, create a new stts entry (else already created) */
+                mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb++;
+            }
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+            M4MP4W_put32_Lo(&mMp4FileDataPtr->videoTrackPtr->
+                TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+                CommonData.sttsTableEntryNb - 1], 1);
+            M4MP4W_put32_Hi(&mMp4FileDataPtr->videoTrackPtr->
+                TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+                CommonData.sttsTableEntryNb - 1], delta);
+
+#else
+
+            mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
+                *(mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb
+                - 1)] = 1;
+            mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
+                *(mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb
+                - 1) + 1] = delta;
+
+#endif
+
+        }
+        else
+        {
+            /* duplicate the delta of the previous frame */
+            if (mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb > 1)
+            {
+                /* if more than 1 frame, duplicate the stts entry (else already exists) */
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+                v_trakDuration +=
+                    M4MP4W_get32_Hi(&mMp4FileDataPtr->videoTrackPtr->
+                    TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+                    CommonData.sttsTableEntryNb - 1]);
+                mMp4FileDataPtr->videoTrackPtr->
+                    TABLE_STTS[mMp4FileDataPtr->videoTrackPtr->
+                    CommonData.sttsTableEntryNb - 1] += 1;
+
+#else
+
+                v_trakDuration += mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2
+                    * (mMp4FileDataPtr->videoTrackPtr->
+                    CommonData.sttsTableEntryNb - 1) + 1];
+                mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[2 *(
+                    mMp4FileDataPtr->videoTrackPtr->
+                    CommonData.sttsTableEntryNb - 1)] += 1;
+
+#endif
+
+            }
+            else
+            {
+                M4OSA_TRACE1_0("M4MP4W_closeWrite : ! videoTrackPtr,\
+                     cannot know the duration of the unique AU !");
+                /* If there is an audio track, we use it as a file duration
+                (and so, as AU duration...) */
+                if (mMp4FileDataPtr->audioTrackPtr != M4OSA_NULL)
+                {
+                    M4OSA_TRACE1_0(
+                        "M4MP4W_closeWrite : ! Let's use the audio track duration !");
+                    mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1] =
+                        (M4OSA_UInt32)(
+                        mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS
+                        * (1000.0 / mMp4FileDataPtr->audioTrackPtr->
+                        CommonData.timescale));
+                    v_trakDuration =
+                        mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1];
+                }
+                /* Else, we use a MAGICAL value (66 ms) */
+                else
+                {
+                    M4OSA_TRACE1_0(
+                        "M4MP4W_closeWrite : ! No audio track -> use magical value (66) !"); /*    */
+                    mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[1] = 66;
+                    v_trakDuration = 66;
+                }
+            }
+        }
+
+        /* Calculate table sizes */
+        A = v_sttsSize = 16 + 8 * mMp4FileDataPtr->videoTrackPtr->
+            CommonData.sttsTableEntryNb; /* A (video=16+8J)*/
+        B = v_stszSize = 20 + 4 * mMp4FileDataPtr->videoTrackPtr->
+            CommonData.sampleNb; /* B (video=20+4K)*/
+        N = mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb;
+        AB4N = A + B + 4 * N;
+
+        scale_video =
+            1000.0 / mMp4FileDataPtr->videoTrackPtr->CommonData.timescale;
+        v_msTrakDuration = (M4OSA_UInt32)(v_trakDuration * scale_video);
+
+        /*Convert integers in the table from LE into BE*/
+#ifndef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+        M4MP4W_table32ToBE(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ,
+            mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb);
+        M4MP4W_table32ToBE(mMp4FileDataPtr->videoTrackPtr->TABLE_STTS,
+            2 * (mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb));
+
+#endif
+
+        M4MP4W_table32ToBE(mMp4FileDataPtr->videoTrackPtr->TABLE_STSS,
+            mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb);
+
+        if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+            == M4SYS_kH263)
+        {
+            bH263 = M4OSA_TRUE;
+            v_trakSize = AB4N + 426; /* (h263=A+B+4N+426)*/
+            v_mdiaSize = AB4N + 326; /* (h263=A+B+4N+326)*/
+            v_minfSize = AB4N + 253; /* (h263=A+B+4N+253)*/
+            v_stblSize = AB4N + 189; /* (h263=A+B+4N+189)*/
+            v_stsdSize = 117;        /* (h263=117)*/
+            v_esdSize = 101;         /* (h263=101)*/
+
+            moovSize += AB4N + 426;
+
+            if (((M4OSA_Int32)mMp4FileDataPtr->videoTrackPtr->avgBitrate) != -1)
+            {
+                /*the optional 'bitr' atom is appended to the dsi,so filesize is 16 bytes bigger*/
+                v_trakSize += 16;
+                v_mdiaSize += 16;
+                v_minfSize += 16;
+                v_stblSize += 16;
+                v_stsdSize += 16;
+                v_esdSize += 16;
+                moovSize += 16;
+            }
+        }
+        else if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+            == M4SYS_kH264)
+        {
+            bH264 = M4OSA_TRUE;
+            /* For H264 there is no default DSI, and its presence is mandatory,
+            so check the DSI has been set*/
+            if (0 == mMp4FileDataPtr->videoTrackPtr->dsiSize
+                || M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->DSI)
+            {
+                M4OSA_TRACE1_0(
+                    "M4MP4W_closeWrite: error, no H264 DSI has been set!");
+                err = M4ERR_STATE;
+                goto cleanup;
+            }
+
+            /*H264 sizes of the atom*/
+
+            // Remove the hardcoded DSI values of H264Block2
+            // TODO: check bMULPPSSPS case
+            v_avcCSize = sizeof(M4OSA_UInt32) + sizeof(H264Block2) +
+                mMp4FileDataPtr->videoTrackPtr->dsiSize;
+
+            v_trakSize = AB4N + v_avcCSize + 411;
+            v_mdiaSize = AB4N + v_avcCSize + 311;
+            v_minfSize = AB4N + v_avcCSize + 238;
+            v_stblSize = AB4N + v_avcCSize + 174;
+            v_stsdSize =        v_avcCSize + 102;
+            v_esdSize  =        v_avcCSize + 86;
+
+            moovSize   += AB4N + v_avcCSize + 411;
+
+        }
+        else if (mMp4FileDataPtr->videoTrackPtr->CommonData.trackType
+            == M4SYS_kMPEG_4)
+        {
+            bMP4V = M4OSA_TRUE;
+            /* For MPEG4 there is no default DSI, and its presence is mandatory,
+            so check the DSI has been set*/
+            if (0 == mMp4FileDataPtr->videoTrackPtr->dsiSize
+                || M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->DSI)
+            {
+                M4OSA_TRACE1_0(
+                    "M4MP4W_closeWrite: error, no MPEG4 DSI has been set!");
+                err = M4ERR_STATE;
+                goto cleanup;
+            }
+
+            /*MP4V variables*/
+            dsi = mMp4FileDataPtr->videoTrackPtr->dsiSize;
+            v_esdsSize = 37 + dsi;         /* dsi+37*/
+            v_ESDescriptorSize =
+                23
+                + dsi; /* dsi+23 (warning: check dsi<105 for coding size on 1 byte)*/
+            v_DCDescriptorSize = 15 + dsi; /* dsi+15*/
+
+            v_trakSize = AB4N + dsi + 448; /* (mp4v=A+B+dsi+4N+448)    */
+            v_mdiaSize = AB4N + dsi + 348; /* (mp4v=A+B+dsi+4N+348)    */
+            v_minfSize = AB4N + dsi + 275; /* (mp4v=A+B+dsi+4N+275)    */
+            v_stblSize = AB4N + dsi + 211; /* (mp4v=A+B+dsi+4N+211)    */
+            v_stsdSize = dsi + 139;        /* (mp4v=139+dsi)*/
+            v_esdSize = dsi + 123;         /* (mp4v=123+dsi)*/
+
+            moovSize += AB4N + dsi + 448;
+        }
+
+        /*video variables*/
+        v_stssSize = 16 + 4 * N; /* 4*N+16     STSS*/
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+        /* stsc update */
+
+        v_stscSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
+        v_stblSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
+        v_minfSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
+        v_mdiaSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
+        v_trakSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
+        moovSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentStsc;
+
+        /* stco update */
+        v_stcoSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+        v_stblSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+        v_minfSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+        v_mdiaSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+        v_trakSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+        moovSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+
+#else
+        /*stsc/stco update*/
+
+        v_stscSize += 12 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+        v_stcoSize += 4 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+        v_stblSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+        v_minfSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+        v_mdiaSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+        v_trakSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+        moovSize += 16 * mMp4FileDataPtr->videoTrackPtr->currentChunk;
+
+#endif
+
+        /*update last chunk time*/
+
+        mMp4FileDataPtr->videoTrackPtr->
+            chunkTimeMsTable[mMp4FileDataPtr->videoTrackPtr->currentChunk] =
+            v_msTrakDuration;
+    }
+
+    if (bAudio)
+    {
+        if ((M4OSA_NULL == mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable)
+            || (M4OSA_NULL == mMp4FileDataPtr->audioTrackPtr->chunkSizeTable)
+            || (M4OSA_NULL
+            == mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable)
+            || (M4OSA_NULL
+            == mMp4FileDataPtr->audioTrackPtr->chunkTimeMsTable)
+            || (M4OSA_NULL == mMp4FileDataPtr->audioTrackPtr->TABLE_STTS))
+        {
+            mMp4FileDataPtr->fileWriterFunctions->closeWrite(
+                fileWriterContext); /**< close the stream anyway */
+            M4MP4W_freeContext(context); /**< Free the context content */
+            return M4ERR_ALLOC;
+        }
+
+        /*audio microstate*/
+        mMp4FileDataPtr->audioTrackPtr->microState = M4MP4W_closed;
+
+        if (mMp4FileDataPtr->audioTrackPtr->CommonData.trackType == M4SYS_kAAC)
+        {
+            bAAC =
+                M4OSA_TRUE; /*else, audio is implicitly AMR in the following*/
+            dsi = mMp4FileDataPtr->audioTrackPtr->dsiSize; /*variable size*/
+
+            a_esdsSize = 37 + dsi;                         /* dsi+37*/
+            a_ESDescriptorSize =
+                23
+                + dsi; /* dsi+23 (warning: check dsi<105 for coding size on 1 byte)*/
+            a_DCDescriptorSize = 15 + dsi;                 /* dsi+15*/
+
+            a_esdSize = dsi + 73; /*overwrite a_esdSize with aac value*/
+            /*add dif. between amr & aac sizes: (- 53 + dsi + 37)*/
+            a_stsdSize += dsi + 20;
+            a_stblSize += dsi + 20;
+            a_minfSize += dsi + 20;
+            a_mdiaSize += dsi + 20;
+            a_trakSize += dsi + 20;
+            moovSize += dsi + 20;
+        }
+
+        if (mMp4FileDataPtr->audioTrackPtr->CommonData.trackType
+            == M4SYS_kEVRC)
+        {
+            bEVRC =
+                M4OSA_TRUE; /*else, audio is implicitly AMR in the following*/
+
+            /* the EVRC dsi is only 6 bytes while the AMR dsi is 9 bytes; all other blocks are unchanged */
+            a_esdSize -= 3;
+            a_stsdSize -= 3;
+            a_stblSize -= 3;
+            a_minfSize -= 3;
+            a_mdiaSize -= 3;
+            a_trakSize -= 3;
+            moovSize -= 3;
+        }
+
+        if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize == 0)
+        {
+            if (M4OSA_NULL == mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ)
+            {
+                mMp4FileDataPtr->fileWriterFunctions->closeWrite(
+                    fileWriterContext); /**< close the stream anyway */
+                M4MP4W_freeContext(context); /**< Free the context content */
+                return M4ERR_ALLOC;
+            }
+            /*Convert integers in the table from LE into BE*/
+            M4MP4W_table32ToBE(mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ,
+                mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb);
+            a_stszSize +=
+                4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+            a_stblSize +=
+                4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+            a_minfSize +=
+                4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+            a_mdiaSize +=
+                4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+            a_trakSize +=
+                4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+            moovSize += 4 * mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb;
+        }
+
+        moovSize += 402;
+
+        /*current chunk is the last one and gives the total number of audio chunks (-1)*/
+        for ( i = 0; i < mMp4FileDataPtr->audioTrackPtr->currentChunk; i++ )
+        {
+            a_dataSize += mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i];
+        }
+
+#ifndef _M4MP4W_MOOV_FIRST
+        /*flush chunk*/
+
+        if (mMp4FileDataPtr->audioTrackPtr->currentPos > 0)
+        {
+            err = M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->Chunk[0],
+                mMp4FileDataPtr->audioTrackPtr->currentPos,
+                mMp4FileDataPtr->fileWriterFunctions,
+                mMp4FileDataPtr->fileWriterContext);
+
+            if (M4NO_ERROR != err)
+                goto cleanup;
+        }
+
+        M4OSA_TRACE1_0("flush audio | CLOSE");
+        M4OSA_TRACE1_2("current chunk = %d  offset = 0x%x",
+            mMp4FileDataPtr->audioTrackPtr->currentChunk,
+            mMp4FileDataPtr->absoluteCurrentPos);
+
+        /*update chunk offset*/
+        mMp4FileDataPtr->audioTrackPtr->
+            chunkOffsetTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+            mMp4FileDataPtr->absoluteCurrentPos;
+
+        /*add chunk size to absoluteCurrentPos*/
+        mMp4FileDataPtr->absoluteCurrentPos +=
+            mMp4FileDataPtr->audioTrackPtr->currentPos;
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+        /*update last chunk size, and add this value to a_dataSize*/
+
+        mMp4FileDataPtr->audioTrackPtr->
+            chunkSizeTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+            mMp4FileDataPtr->audioTrackPtr->currentPos;
+        a_dataSize +=
+            mMp4FileDataPtr->audioTrackPtr->currentPos; /*add last chunk size*/
+
+        /* bugfix: if a new chunk was just created, cancel it before closing */
+        if ((mMp4FileDataPtr->audioTrackPtr->currentChunk != 0)
+            && (mMp4FileDataPtr->audioTrackPtr->currentPos == 0))
+        {
+            mMp4FileDataPtr->audioTrackPtr->currentChunk--;
+        }
+#ifdef _M4MP4W_UNBUFFERED_VIDEO
+
+        if ((mMp4FileDataPtr->audioTrackPtr->
+            chunkSampleNbTable[mMp4FileDataPtr->audioTrackPtr->
+            currentStsc] & 0xFFF) == 0)
+        {
+            mMp4FileDataPtr->audioTrackPtr->currentStsc--;
+        }
+
+#endif                                                          /*_M4MP4W_UNBUFFERED_VIDEO*/
+
+        a_trakDuration = mMp4FileDataPtr->audioTrackPtr->
+            CommonData.lastCTS; /* equals lastCTS*/
+        /* add last sample dur */
+
+        if (mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb != 1)
+        {
+#ifdef DUPLICATE_STTS_IN_LAST_AU
+            /*increase by 1 the number of consecutive AUs with the same duration*/
+
+            mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2
+                *(mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb
+                - 1) - 2] += 1;
+
+#endif /*DUPLICATE_STTS_IN_LAST_AU*/
+
+            a_trakDuration += mMp4FileDataPtr->audioTrackPtr->TABLE_STTS[2
+                * (mMp4FileDataPtr->audioTrackPtr->
+                CommonData.sttsTableEntryNb - 1) - 1];
+        }
+        else if (0 == mMp4FileDataPtr->audioTrackPtr->CommonData.lastCTS)
+        {
+            if (mMp4FileDataPtr->audioTrackPtr->CommonData.trackType
+                == M4SYS_kAMR)
+            {
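+                /* lastCTS is still 0 although audio data was written: derive the duration from
+                 the data size. Each divisor is the AMR-NB frame size in bytes for that bitrate
+                 (speech payload rounded up plus the 1-byte frame-type header), e.g. 32 bytes at
+                 12.2 kbps down to 13 bytes at 4.75 kbps, so a_dataSize / frameSize gives the
+                 number of samples. */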
+                if (12200 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+                {
+                    a_trakDuration = a_dataSize / 32
+                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+                }
+                else if (10200 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+                {
+                    a_trakDuration = a_dataSize / 27
+                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+                }
+                else if (7950 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+                {
+                    a_trakDuration = a_dataSize / 21
+                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+                }
+                else if (7400 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+                {
+                    a_trakDuration = a_dataSize / 20
+                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+                }
+                else if (6700 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+                {
+                    a_trakDuration = a_dataSize / 18
+                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+                }
+                else if (5900 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+                {
+                    a_trakDuration = a_dataSize / 16
+                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+                }
+                else if (5150 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+                {
+                    a_trakDuration = a_dataSize / 14
+                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+                }
+                else if (4750 == mMp4FileDataPtr->audioTrackPtr->avgBitrate)
+                {
+                    a_trakDuration = a_dataSize / 13
+                        * mMp4FileDataPtr->audioTrackPtr->sampleDuration;
+                }
+            }
+        }
+
+        scale_audio =
+            1000.0 / mMp4FileDataPtr->audioTrackPtr->CommonData.timescale;
+        a_msTrakDuration = (M4OSA_UInt32)(a_trakDuration * scale_audio);
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+        /* stsc update */
+
+        a_stscSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
+        a_stblSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
+        a_minfSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
+        a_mdiaSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
+        a_trakSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
+        moovSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentStsc;
+
+        /* stco update */
+        a_stcoSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+        a_stblSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+        a_minfSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+        a_mdiaSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+        a_trakSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+        moovSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+
+#else
+        /*stsc/stco update*/
+
+        a_stscSize += 12 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+        a_stcoSize += 4 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+        a_stblSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+        a_minfSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+        a_mdiaSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+        a_trakSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+        moovSize += 16 * mMp4FileDataPtr->audioTrackPtr->currentChunk;
+
+#endif
+
+        /* compute the new size of stts*/
+
+        a_sttsSize = 16 + 8 * (mMp4FileDataPtr->audioTrackPtr->
+            CommonData.sttsTableEntryNb - 1);
+
+        moovSize += a_sttsSize - 24;
+        a_mdiaSize += a_sttsSize - 24;
+        a_minfSize += a_sttsSize - 24;
+        a_stblSize += a_sttsSize - 24;
+        a_trakSize += a_sttsSize - 24;
+
+        /*update last chunk time*/
+        mMp4FileDataPtr->audioTrackPtr->
+            chunkTimeMsTable[mMp4FileDataPtr->audioTrackPtr->currentChunk] =
+            a_msTrakDuration;
+    }
+
+    /* changing the way the mdat size is computed.
+    The real purpose of the mdat size is to know the amount to skip to get to the next
+    atom, which is the moov; the size of media in the mdat is almost secondary. Therefore,
+    it is of utmost importance that the mdat size "points" to where the moov actually
+    begins. Now, the moov begins right after the last data we wrote, so how could the sum
+    of all chunk sizes be different from the total size of what has been written? Well, it
+    can happen when the writing was unexpectedly stopped (because of lack of disk space,
+    for instance), in this case a chunk may be partially written (the partial write is not
+    necessarily erased) but it may not be reflected in the chunk size list (which may
+    believe it hasn't been written or on the contrary that it has been fully written). In
+    the case of such a mismatch, there is either unused data in the mdat (not very good,
+    but tolerable) or when reading the last chunk it will read the beginning of the moov
+    as part of the chunk (which means the last chunk won't be correctly decoded), both of
+    which are still better than losing the whole recording. In the long run we will probably
+    try to always clean up back to a consistent state, but at any rate it is always safer to
+    compute the mdat size from the position where the moov actually begins, rather than from
+    the size the mdat is thought to have.
+
+    Therefore, I will record where we are just before writing the moov, to serve when
+    updating the mdat size. */
+
+    /* mdatSize += a_dataSize + v_dataSize; *//*TODO allow for multiple chunks*/
+
+    /* End of Pierre Lebeaupin 19/12/2007: changing the way the mdat size is computed. */
+
+    /* first trak offset is 32+moovSize, second equals 32+moovSize+1st_track_size*/
+    a_trakOffset += moovSize;
+    v_trakOffset += moovSize/*+ a_dataSize*/;
+
+    if (bInterleaveAV == M4OSA_FALSE)
+        v_trakOffset += a_dataSize;
+
+    /*system time since 1970 */
+#ifndef _M4MP4W_DONT_USE_TIME_H
+
+    time((time_t *)&creationTime);
+    /*convert into time since 1/1/1904 00h00 (normative)*/
+    creationTime += 2082841761; /*nb of sec between 1904 and 1970*/
+
+#else                                            /*_M4MP4W_DONT_USE_TIME_H*/
+
+    creationTime =
+        0xBBD09100; /* = 7/11/2003 00h00 ; in hexa because of code scrambler limitation with
+                                           large integers */
+
+#endif                                           /*_M4MP4W_DONT_USE_TIME_H*/
+
+    mMp4FileDataPtr->duration =
+        max(a_msTrakDuration, v_msTrakDuration); /*max audio/video*/
+
+#ifdef _M4MP4W_MOOV_FIRST
+    /*open file in write binary mode*/
+
+    err = mMp4FileDataPtr->fileWriterFunctions->openWrite(&fileWriterContext,
+        mMp4FileDataPtr->url, 0x22);
+    ERR_CHECK(err == M4NO_ERROR, err);
+
+    /*ftyp atom*/
+    if (mMp4FileDataPtr->ftyp.major_brand != 0)
+    {
+        M4OSA_UInt32 i;
+
+        /* Put customized ftyp box */
+        CLEANUPonERR(M4MP4W_putBE32(16
+            + (mMp4FileDataPtr->ftyp.nbCompatibleBrands * 4),
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(M4MPAC_FTYP_TAG,
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->ftyp.major_brand,
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->ftyp.minor_version,
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext));
+
+        for ( i = 0; i < mMp4FileDataPtr->ftyp.nbCompatibleBrands; i++ )
+        {
+            CLEANUPonERR(
+                M4MP4W_putBE32(mMp4FileDataPtr->ftyp.compatible_brands[i],
+                mMp4FileDataPtr->fileWriterFunctions,
+                mMp4FileDataPtr->fileWriterContext));
+        }
+    }
+    else
+    {
+        /* Put default ftyp box */
+        CLEANUPonERR(M4MP4W_putBlock(Default_ftyp, sizeof(Default_ftyp),
+            mMp4FileDataPtr->fileWriterFunctions,
+            mMp4FileDataPtr->fileWriterContext));
+    }
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    /* Pierre Lebeaupin 19/12/2007: changing the way the mdat size is computed. */
+#ifndef _M4MP4W_MOOV_FIRST
+    /* seek is used to get the current position relative to the start of the file. */
+    /* M4OSA_INT_TO_FILE_POSITION(0, moovPos);
+    CLEANUPonERR( mMp4FileDataPtr->fileWriterFunctions->seek(mMp4FileDataPtr->fileWriterContext,
+     M4OSA_kFileSeekCurrent, &moovPos) ); */
+    /* ... or rather, seek used to be used for that, but it has been found that this
+    functionality is not reliably implemented (or sometimes not implemented at all) in the
+    various OSALs, so we now avoid using it. */
+    /* Note this new method assumes we're at the end of the file; it will break if we are
+    ever overwriting a larger file. */
+
+    CLEANUPonERR(mMp4FileDataPtr->fileWriterFunctions->getOption(
+        mMp4FileDataPtr->fileWriterContext,
+        M4OSA_kFileWriteGetFileSize, (M4OSA_DataOption *) &moovPos));
+    /* moovPos will be used after writing the moov. */
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+    /* End of Pierre Lebeaupin 19/12/2007: changing the way the mdat size is computed. */
+
+    /*moov*/
+
+    CLEANUPonERR(M4MP4W_putBE32(moovSize, mMp4FileDataPtr->fileWriterFunctions,
+        fileWriterContext));
+    CLEANUPonERR(M4MP4W_putBlock(CommonBlock3, sizeof(CommonBlock3),
+        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+    CLEANUPonERR(M4MP4W_putBE32(creationTime,
+        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+    CLEANUPonERR(M4MP4W_putBE32(creationTime,
+        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+    CLEANUPonERR(M4MP4W_putBlock(CommonBlock4, sizeof(CommonBlock4),
+        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+    CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->duration,
+        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+    CLEANUPonERR(M4MP4W_putBlock(CommonBlock5, sizeof(CommonBlock5),
+        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+    if (bAudio)
+    {
+        CLEANUPonERR(M4MP4W_putBE32(a_trakSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock6, sizeof(CommonBlock6),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(creationTime,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(creationTime,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(a_trakId,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock7, sizeof(CommonBlock7),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(a_msTrakDuration,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock7bis, sizeof(CommonBlock7bis),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(AMRBlock1, sizeof(AMRBlock1),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
+        CLEANUPonERR(M4MP4W_putBE32(a_mdiaSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock8, sizeof(CommonBlock8),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(creationTime,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(creationTime,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(
+            M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->CommonData.timescale,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(a_trakDuration,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock9, sizeof(CommonBlock9),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(AMRBlock1_1, sizeof(AMRBlock1_1),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
+        CLEANUPonERR(M4MP4W_putBE32(a_minfSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock10, sizeof(CommonBlock10),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(a_stblSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock11, sizeof(CommonBlock11),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(a_sttsSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock12, sizeof(CommonBlock12),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+        CLEANUPonERR(M4MP4W_putBE32(
+            mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb - 1,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        /* byte-swap the table data to big-endian */
+        M4MP4W_table32ToBE(mMp4FileDataPtr->audioTrackPtr->TABLE_STTS,
+            2 * (mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb
+            - 1));
+        CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
+            *)mMp4FileDataPtr->audioTrackPtr->TABLE_STTS,
+            ( mMp4FileDataPtr->audioTrackPtr->CommonData.sttsTableEntryNb - 1)
+            * 8,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
+
+        /* stsd */
+        CLEANUPonERR(M4MP4W_putBE32(a_stsdSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionHeader,
+            sizeof(SampleDescriptionHeader),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(a_esdSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+        /* sample desc entry inside stsd */
+        if (bAAC)
+        {
+            CLEANUPonERR(M4MP4W_putBlock(AACBlock1, sizeof(AACBlock1),
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+        }
+        else if (bEVRC)
+        {
+            CLEANUPonERR(M4MP4W_putBlock(EVRC8Block1, sizeof(EVRC8Block1),
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*evrc*/
+        }
+        else                         /*AMR8*/
+        {
+            CLEANUPonERR(M4MP4W_putBlock(AMR8Block1, sizeof(AMR8Block1),
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*amr8*/
+        }
+        CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionEntryStart,
+            sizeof(SampleDescriptionEntryStart),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(AudioSampleDescEntryBoilerplate,
+            sizeof(AudioSampleDescEntryBoilerplate),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
+        CLEANUPonERR(
+            M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->CommonData.timescale
+            << 16,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+        /* DSI inside sample desc entry */
+        if (bAAC)
+        {
+            CLEANUPonERR(M4MP4W_putBE32(a_esdsSize,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock0,
+                sizeof(MPEGConfigBlock0), mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+            CLEANUPonERR(M4MP4W_putByte(a_ESDescriptorSize,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock1,
+                sizeof(MPEGConfigBlock1), mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+            CLEANUPonERR(M4MP4W_putByte(a_DCDescriptorSize,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+            CLEANUPonERR(M4MP4W_putBlock(AACBlock2, sizeof(AACBlock2),
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+            CLEANUPonERR(
+                M4MP4W_putBE24(mMp4FileDataPtr->audioTrackPtr->avgBitrate * 5,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+            CLEANUPonERR(
+                M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->maxBitrate,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+            CLEANUPonERR(
+                M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->avgBitrate,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock2,
+                sizeof(MPEGConfigBlock2), mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+            CLEANUPonERR(M4MP4W_putByte(mMp4FileDataPtr->audioTrackPtr->dsiSize,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+            CLEANUPonERR(M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->DSI,
+                mMp4FileDataPtr->audioTrackPtr->dsiSize,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock3,
+                sizeof(MPEGConfigBlock3), mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*aac*/
+        }
+        else if (bEVRC)
+        {
+            M4OSA_UInt8 localDsi[6];
+            M4OSA_UInt32 localI;
+
+            CLEANUPonERR(M4MP4W_putBlock(EVRCBlock3_1, sizeof(EVRCBlock3_1),
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*audio*/
+
+            /* copy the default block in a local variable*/
+            for ( localI = 0; localI < 6; localI++ )
+            {
+                localDsi[localI] = EVRCBlock3_2[localI];
+            }
+            /* compute the number of frames per sample (AU) */
+            /* and store it in the DSI */
+            /* (assumes one byte is enough to hold the value) */
+            localDsi[5] =
+                (M4OSA_UInt8)(mMp4FileDataPtr->audioTrackPtr->sampleDuration
+                / 160)/*EVRC 1 frame duration*/;
+
+            if (mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL)
+            {
+                /* copy vendor name */
+                for ( localI = 0; localI < 4; localI++ )
+                {
+                    localDsi[localI] = (M4OSA_UInt8)(
+                        mMp4FileDataPtr->audioTrackPtr->DSI[localI]);
+                }
+            }
+            CLEANUPonERR(M4MP4W_putBlock(localDsi, 6,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*audio*/
+        }
+        else                         /*AMR8*/
+        {
+            M4OSA_UInt8 localDsi[9];
+            M4OSA_UInt32 localI;
+
+            CLEANUPonERR(M4MP4W_putBlock(AMRDSIHeader, sizeof(AMRDSIHeader),
+                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+            /* copy the default block in a local variable*/
+            for ( localI = 0; localI < 9; localI++ )
+            {
+                localDsi[localI] = AMRDefaultDSI[localI];
+            }
+            /* compute the number of frames per sample (AU) */
+            /* and store it in the DSI */
+            /* (assumes one byte is enough to hold the value) */
+            /* ALERT! The following line has enormous potential to blow up in our face
+            as soon as anything (sample rate or whatever) changes. This calculation would
+            be MUCH better handled by the VES, or whatever deals with the encoder more
+            directly. */
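+            /* For reference: an AMR-NB frame is 160 samples (20 ms at 8 kHz), so e.g. a
+            sampleDuration of 320 timescale units yields 2 frames per AU here. */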
+            localDsi[8] =
+                (M4OSA_UInt8)(mMp4FileDataPtr->audioTrackPtr->sampleDuration
+                / 160)/*AMR NB 1 frame duration*/;
+
+            if (mMp4FileDataPtr->audioTrackPtr->DSI != M4OSA_NULL)
+            {
+                /* copy vendor name */
+                for ( localI = 0; localI < 4; localI++ )
+                {
+                    localDsi[localI] = (M4OSA_UInt8)(
+                        mMp4FileDataPtr->audioTrackPtr->DSI[localI]);
+                }
+
+                /* copy the Mode Set */
+                for ( localI = 5; localI < 7; localI++ )
+                {
+                    localDsi[localI] = (M4OSA_UInt8)(
+                        mMp4FileDataPtr->audioTrackPtr->DSI[localI]);
+                }
+            }
+            CLEANUPonERR(M4MP4W_putBlock(localDsi, 9,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*audio*/
+        }
+
+        /*end trak*/
+        CLEANUPonERR(M4MP4W_putBE32(a_stszSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock15, sizeof(CommonBlock15),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(
+            mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(
+            M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+        /* a sampleSize of 0 means the AU size is not constant */
+        if (mMp4FileDataPtr->audioTrackPtr->CommonData.sampleSize == 0)
+        {
+            CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
+                *)mMp4FileDataPtr->audioTrackPtr->TABLE_STSZ,
+                mMp4FileDataPtr->audioTrackPtr->CommonData.sampleNb * 4,
+                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        }
+
+        CLEANUPonERR(M4MP4W_putBE32(a_stscSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock16, sizeof(CommonBlock16),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
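+        /* In this build, each chunkSampleNbTable entry packs two stsc fields: the upper
+        bits (>> 12) hold a zero-based chunk index (written as first_chunk after the + 1)
+        and the low 12 bits the samples_per_chunk; e.g. an entry of 0x0200A is emitted as
+        first_chunk = 3, samples_per_chunk = 10. The video track below uses the same
+        packing. */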
+        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->currentStsc
+            + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+        for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentStsc; i++ )
+        {
+            CLEANUPonERR(M4MP4W_putBE32(
+                ( mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable[i]
+            >> 12) + 1, mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext));
+            CLEANUPonERR(M4MP4W_putBE32((mMp4FileDataPtr->audioTrackPtr->
+                chunkSampleNbTable[i] & 0xFFF),
+                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+            CLEANUPonERR(M4MP4W_putBE32(1, mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext));
+        }
+
+#else
+
+        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->currentChunk
+            + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+        for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentChunk; i++ )
+        {
+            CLEANUPonERR(M4MP4W_putBE32(i + 1,
+                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+            CLEANUPonERR(M4MP4W_putBE32(
+                mMp4FileDataPtr->audioTrackPtr->chunkSampleNbTable[i],
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext));
+            CLEANUPonERR(M4MP4W_putBE32(1, mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext));
+        }
+
+#endif
+
+        CLEANUPonERR(M4MP4W_putBE32(a_stcoSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock17, sizeof(CommonBlock17),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->audioTrackPtr->currentChunk
+            + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+        for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentChunk; i++ )
+        {
+            CLEANUPonERR(M4MP4W_putBE32(a_trakOffset,
+                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+            a_trakOffset += mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i];
+
+            if (( bInterleaveAV == M4OSA_TRUE)
+                && (mMp4FileDataPtr->videoTrackPtr->currentChunk >= i))
+            {
+                a_trakOffset +=
+                    mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i];
+            }
+        }
+
+#else
+
+        for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentChunk; i++ )
+        {
+            CLEANUPonERR(M4MP4W_putBE32(
+                mMp4FileDataPtr->audioTrackPtr->chunkOffsetTable[i],
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext));
+        }
+
+#endif                                                                 /*_M4MP4W_MOOV_FIRST*/
+
+        CLEANUPonERR(M4MP4W_putBlock(AMRBlock4, sizeof(AMRBlock4),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*audio*/
+    }
+
+    if (bVideo)
+    {
+        /*trak*/
+        CLEANUPonERR(M4MP4W_putBE32(v_trakSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock6, sizeof(CommonBlock6),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(creationTime,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(creationTime,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(v_trakId,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock7, sizeof(CommonBlock7),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(v_msTrakDuration,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock7bis, sizeof(CommonBlock7bis),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+        /* In the track header width and height are 16.16 fixed point values,
+        so shift left the regular integer value by 16. */
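+        /* e.g. a width of 176 is written as 0x00B00000 (176.0 in 16.16 fixed point). */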
+        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->width << 16,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->height
+            << 16,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+
+        CLEANUPonERR(M4MP4W_putBE32(v_mdiaSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock8, sizeof(CommonBlock8),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(creationTime,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(creationTime,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(
+            M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->CommonData.timescale,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(v_trakDuration,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock9, sizeof(CommonBlock9),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(VideoBlock1_1, sizeof(VideoBlock1_1),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+        CLEANUPonERR(M4MP4W_putBE32(v_minfSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock10, sizeof(CommonBlock10),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(v_stblSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock11, sizeof(CommonBlock11),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(v_sttsSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock12, sizeof(CommonBlock12),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(
+            mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+        for ( i = 0;
+            i < mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb;
+            i++ )
+        {
+            CLEANUPonERR(M4MP4W_putBE32(M4MP4W_get32_Lo(
+                &mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[i]),
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*video*/
+            CLEANUPonERR(M4MP4W_putBE32(M4MP4W_get32_Hi(
+                &mMp4FileDataPtr->videoTrackPtr->TABLE_STTS[i]),
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*video*/
+        }
+
+#else
+
+        CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
+            *)mMp4FileDataPtr->videoTrackPtr->TABLE_STTS,
+            ( mMp4FileDataPtr->videoTrackPtr->CommonData.sttsTableEntryNb) * 8,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+
+#endif
+
+        /* stsd */
+
+        CLEANUPonERR(M4MP4W_putBE32(v_stsdSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionHeader,
+            sizeof(SampleDescriptionHeader),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(v_esdSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+        /* sample desc entry inside stsd */
+        if (bMP4V)
+        {
+            CLEANUPonERR(M4MP4W_putBlock(Mp4vBlock1, sizeof(Mp4vBlock1),
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+        }
+
+        if (bH263)
+        {
+            CLEANUPonERR(M4MP4W_putBlock(H263Block1, sizeof(H263Block1),
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*h263*/
+        }
+
+        if (bH264)
+        {
+            CLEANUPonERR(M4MP4W_putBlock(H264Block1, sizeof(H264Block1),
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*h264*/
+        }
+        CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionEntryStart,
+            sizeof(SampleDescriptionEntryStart),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionEntryVideoBoilerplate1,
+            sizeof(SampleDescriptionEntryVideoBoilerplate1),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+        CLEANUPonERR(M4MP4W_putBE16(mMp4FileDataPtr->videoTrackPtr->width,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+        CLEANUPonERR(M4MP4W_putBE16(mMp4FileDataPtr->videoTrackPtr->height,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+        CLEANUPonERR(M4MP4W_putBlock(VideoResolutions, sizeof(VideoResolutions),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*mp4v*/
+        CLEANUPonERR(M4MP4W_putBlock(SampleDescriptionEntryVideoBoilerplate2,
+            sizeof(SampleDescriptionEntryVideoBoilerplate2),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+
+        /* DSI inside sample desc entry */
+        if (bH263)
+        {
+            /* The h263 dsi given through the api must be 7 bytes, that is, it shall not include
+             the optional bitrate box. However, if the bitrate information is set in the stream
+             handler, a bitrate box is appended here to the dsi */
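+            /* For reference (3GPP TS 26.244): the 7-byte d263 payload is a 4-byte vendor
+            code followed by the decoder version, level and profile bytes; the optional
+            bitr box appended below carries the average then the maximum bitrate, each on
+            32 bits. */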
+            if (((M4OSA_Int32)mMp4FileDataPtr->videoTrackPtr->avgBitrate) != -1)
+            {
+                CLEANUPonERR(M4MP4W_putBlock(H263Block2_bitr,
+                    sizeof(H263Block2_bitr),
+                    mMp4FileDataPtr->fileWriterFunctions,
+                    fileWriterContext)); /* d263 box with bitr atom */
+
+                if (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->DSI)
+                {
+                    CLEANUPonERR(M4MP4W_putBlock(H263Block3, sizeof(H263Block3),
+                        mMp4FileDataPtr->fileWriterFunctions,
+                        fileWriterContext)); /*h263*/
+                }
+                else
+                {
+                    CLEANUPonERR(
+                        M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->DSI,
+                        mMp4FileDataPtr->videoTrackPtr->dsiSize,
+                        mMp4FileDataPtr->fileWriterFunctions,
+                        fileWriterContext));
+                }
+
+                CLEANUPonERR(M4MP4W_putBlock(H263Block4, sizeof(H263Block4),
+                    mMp4FileDataPtr->fileWriterFunctions,
+                    fileWriterContext)); /*h263*/
+                /* Pierre Lebeaupin 2008/04/29: the following two lines used to be swapped;
+                the order was changed (average bitrate first) to conform to 3GPP. */
+                CLEANUPonERR(
+                    M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->avgBitrate,
+                    mMp4FileDataPtr->fileWriterFunctions,
+                    fileWriterContext)); /*h263*/
+                CLEANUPonERR(
+                    M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->maxBitrate,
+                    mMp4FileDataPtr->fileWriterFunctions,
+                    fileWriterContext)); /*h263*/
+            }
+            else
+            {
+                CLEANUPonERR(M4MP4W_putBlock(H263Block2, sizeof(H263Block2),
+                    mMp4FileDataPtr->fileWriterFunctions,
+                    fileWriterContext)); /* d263 box */
+
+                if (M4OSA_NULL == mMp4FileDataPtr->videoTrackPtr->DSI)
+                {
+                    CLEANUPonERR(M4MP4W_putBlock(H263Block3, sizeof(H263Block3),
+                        mMp4FileDataPtr->fileWriterFunctions,
+                        fileWriterContext)); /*h263*/
+                }
+                else
+                {
+                    CLEANUPonERR(
+                        M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->DSI,
+                        mMp4FileDataPtr->videoTrackPtr->dsiSize,
+                        mMp4FileDataPtr->fileWriterFunctions,
+                        fileWriterContext));
+                }
+            }
+        }
+
+        if (bMP4V)
+        {
+            M4OSA_UInt32 bufferSizeDB = 5 * mMp4FileDataPtr->videoTrackPtr->
+                avgBitrate; /*bufferSizeDB set to 5 times the bitrate*/
+
+            CLEANUPonERR(M4MP4W_putBE32(v_esdsSize,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock0,
+                sizeof(MPEGConfigBlock0), mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+            CLEANUPonERR(M4MP4W_putByte(v_ESDescriptorSize,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock1,
+                sizeof(MPEGConfigBlock1), mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+            CLEANUPonERR(M4MP4W_putByte(v_DCDescriptorSize,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+            CLEANUPonERR(M4MP4W_putBlock(Mp4vBlock3, sizeof(Mp4vBlock3),
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+            CLEANUPonERR(M4MP4W_putBE24(bufferSizeDB,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+            CLEANUPonERR(
+                M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->maxBitrate,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+            CLEANUPonERR(
+                M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->avgBitrate,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock2,
+                sizeof(MPEGConfigBlock2), mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+            CLEANUPonERR(M4MP4W_putByte(mMp4FileDataPtr->videoTrackPtr->dsiSize,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+            CLEANUPonERR(M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->DSI,
+                mMp4FileDataPtr->videoTrackPtr->dsiSize,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+            CLEANUPonERR(M4MP4W_putBlock(MPEGConfigBlock3,
+                sizeof(MPEGConfigBlock3), mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*mp4v*/
+        }
+
+        if (bH264)
+        {
+            M4OSA_UInt16 ppsLentgh = 0; /* PPS length */
+            M4OSA_UInt16 spsLentgh = 0; /* SPS length */
+            M4OSA_UChar *tmpDSI = mMp4FileDataPtr->videoTrackPtr->DSI; /* DSI */
+            M4OSA_UInt16 NumberOfPPS;
+            M4OSA_UInt16 lCntPPS;
+
+            /* Put the avcC (header + DSI) size */
+            CLEANUPonERR(M4MP4W_putBE32(v_avcCSize,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*h264*/
+            /* Put the avcC header */
+            CLEANUPonERR(M4MP4W_putBlock(H264Block2, sizeof(H264Block2),
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*h264*/
+            /* Put the DSI (SPS + PPS) in the 3gp format. The DSI is expected to already
+            be a complete avcC block, so it is written as-is below. */
+
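+            /* DSI[0] is the avcC configurationVersion (always 1) and DSI[1] the
+            AVCProfileIndication; 0x42 (66) is the Baseline profile expected here. */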
+            if ((0x01 != mMp4FileDataPtr->videoTrackPtr->DSI[0]) ||
+                 (0x42 != mMp4FileDataPtr->videoTrackPtr->DSI[1]))
+            {
+                M4OSA_TRACE1_2("!!! M4MP4W_closeWrite ERROR : invalid AVCC 0x%X 0x%X",
+                    mMp4FileDataPtr->videoTrackPtr->DSI[0],
+                    mMp4FileDataPtr->videoTrackPtr->DSI[1]);
+                /* go through cleanup so the file writer context gets closed */
+                err = M4ERR_PARAMETER;
+                goto cleanup;
+            }
+            // Do not strip the DSI
+            CLEANUPonERR( M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->DSI,
+                mMp4FileDataPtr->videoTrackPtr->dsiSize,
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext) );/*h264*/
+
+        }
+
+        /*end trak*/
+        CLEANUPonERR(M4MP4W_putBE32(v_stszSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock15, sizeof(CommonBlock15),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(
+            mMp4FileDataPtr->videoTrackPtr->CommonData.sampleSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(
+            M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+        for ( i = 0; i < mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb;
+            i++ )
+        {
+            CLEANUPonERR(
+                M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ[i],
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext)); /*video*/
+        }
+
+#else
+
+        CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
+            *)mMp4FileDataPtr->videoTrackPtr->TABLE_STSZ,
+            mMp4FileDataPtr->videoTrackPtr->CommonData.sampleNb * 4,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+
+#endif
+
+        CLEANUPonERR(M4MP4W_putBE32(v_stscSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock16, sizeof(CommonBlock16),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+#ifdef _M4MP4W_OPTIMIZE_FOR_PHONE
+
+        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->currentStsc
+            + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+        for ( i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentStsc; i++ )
+        {
+            CLEANUPonERR(M4MP4W_putBE32(
+                ( mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable[i]
+            >> 12) + 1, mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext));
+            CLEANUPonERR(M4MP4W_putBE32((mMp4FileDataPtr->videoTrackPtr->
+                chunkSampleNbTable[i] & 0xFFF),
+                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+            CLEANUPonERR(M4MP4W_putBE32(1, mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext));
+        }
+
+#else
+
+        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->currentChunk
+            + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+        for (i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentChunk; i++)
+        {
+            CLEANUPonERR(M4MP4W_putBE32(i + 1,
+                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+            CLEANUPonERR(M4MP4W_putBE32(
+                mMp4FileDataPtr->videoTrackPtr->chunkSampleNbTable[i],
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext));
+            CLEANUPonERR(M4MP4W_putBE32(1, mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext));
+        }
+
+#endif
+
+        CLEANUPonERR(M4MP4W_putBE32(v_stcoSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBlock(CommonBlock17, sizeof(CommonBlock17),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+        CLEANUPonERR(M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->currentChunk
+            + 1, mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+#ifdef _M4MP4W_MOOV_FIRST
+
+        for (i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentChunk; i++)
+        {
+            if (( bInterleaveAV == M4OSA_TRUE)
+                && (mMp4FileDataPtr->audioTrackPtr->currentChunk >= i))
+            {
+                v_trakOffset +=
+                    mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i];
+            }
+            CLEANUPonERR(M4MP4W_putBE32(v_trakOffset,
+                mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+            v_trakOffset += mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i];
+        }
+
+#else
+
+        for ( i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentChunk; i++ )
+        {
+            CLEANUPonERR(M4MP4W_putBE32(
+                mMp4FileDataPtr->videoTrackPtr->chunkOffsetTable[i],
+                mMp4FileDataPtr->fileWriterFunctions,
+                fileWriterContext));
+        }
+
+#endif                                                                 /*_M4MP4W_MOOV_FIRST*/
+
+        CLEANUPonERR(M4MP4W_putBE32(v_stssSize,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+        CLEANUPonERR(M4MP4W_putBlock(VideoBlock4, sizeof(VideoBlock4),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+        CLEANUPonERR(
+            M4MP4W_putBE32(mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb,
+            mMp4FileDataPtr->fileWriterFunctions,
+            fileWriterContext)); /*video*/
+        CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar
+            *)mMp4FileDataPtr->videoTrackPtr->TABLE_STSS,
+            mMp4FileDataPtr->videoTrackPtr->stssTableEntryNb * 4,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+        CLEANUPonERR(M4MP4W_putBlock(VideoBlock5, sizeof(VideoBlock5),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext)); /*video*/
+    }
+#ifdef _M4MP4W_MOOV_FIRST
+    /*mdat*/
+
+    CLEANUPonERR(M4MP4W_putBE32(mdatSize, mMp4FileDataPtr->fileWriterFunctions,
+        fileWriterContext));
+    CLEANUPonERR(M4MP4W_putBlock(CommonBlock2, sizeof(CommonBlock2),
+        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+    /*write data, according to the interleave mode (default is not interleaved)*/
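+    /* Non-interleaved: all audio chunks are written first, then all video chunks.
+    Interleaved: one audio chunk then one video chunk per index, for as long as each
+    track still has chunks. */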
+    if (bInterleaveAV == M4OSA_FALSE)
+    {
+        if (bAudio)
+        {
+            for ( i = 0; i <= mMp4FileDataPtr->audioTrackPtr->currentChunk;
+                i++ )
+            {
+                CLEANUPonERR(
+                    M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->Chunk[i],
+                    mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i],
+                    mMp4FileDataPtr->fileWriterFunctions,
+                    fileWriterContext)); /*audio (previously a_dataSize)*/
+            }
+        }
+
+        if (bVideo)
+        {
+            for ( i = 0; i <= mMp4FileDataPtr->videoTrackPtr->currentChunk;
+                i++ )
+            {
+                CLEANUPonERR(
+                    M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->Chunk[i],
+                    mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i],
+                    mMp4FileDataPtr->fileWriterFunctions,
+                    fileWriterContext)); /*video (previously a_dataSize)*/
+            }
+        }
+    }
+    else /*in this mode, we have audio and video to interleave*/
+    {
+        for ( i = 0; i <= max(mMp4FileDataPtr->audioTrackPtr->currentChunk,
+            mMp4FileDataPtr->videoTrackPtr->currentChunk); i++ )
+        {
+            if (i <= mMp4FileDataPtr->audioTrackPtr->currentChunk)
+            {
+                CLEANUPonERR(
+                    M4MP4W_putBlock(mMp4FileDataPtr->audioTrackPtr->Chunk[i],
+                    mMp4FileDataPtr->audioTrackPtr->chunkSizeTable[i],
+                    mMp4FileDataPtr->fileWriterFunctions,
+                    fileWriterContext)); /*audio (previously a_dataSize)*/
+            }
+
+            if (i <= mMp4FileDataPtr->videoTrackPtr->currentChunk)
+            {
+                CLEANUPonERR(
+                    M4MP4W_putBlock(mMp4FileDataPtr->videoTrackPtr->Chunk[i],
+                    mMp4FileDataPtr->videoTrackPtr->chunkSizeTable[i],
+                    mMp4FileDataPtr->fileWriterFunctions,
+                    fileWriterContext)); /*video (previously a_dataSize)*/
+            }
+        }
+    }
+
+#endif /*_M4MP4W_MOOV_FIRST*/
+
+    /*skip*/
+
+    CLEANUPonERR(M4MP4W_putBlock(BlockSignatureSkipHeader,
+        sizeof(BlockSignatureSkipHeader), mMp4FileDataPtr->fileWriterFunctions,
+        fileWriterContext));
+
+    /* Write embedded string */
+    if (mMp4FileDataPtr->embeddedString == M4OSA_NULL)
+    {
+        CLEANUPonERR(M4MP4W_putBlock(BlockSignatureSkipDefaultEmbeddedString,
+            sizeof(BlockSignatureSkipDefaultEmbeddedString),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+    }
+    else
+    {
+        CLEANUPonERR(M4MP4W_putBlock(mMp4FileDataPtr->embeddedString, 16,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+    }
+
+    /* Write ves core version */
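+    /* camcoderVersion is packed as major*100 + minor*10 + revision, so e.g. 300 is
+    written below as the 6 characters " 3.0.0". */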
+    camcoder_maj = (M4OSA_UChar)(mMp4FileDataPtr->camcoderVersion / 100);
+    camcoder_min =
+        (M4OSA_UChar)(( mMp4FileDataPtr->camcoderVersion - 100 * camcoder_maj)
+        / 10);
+    camcoder_rev =
+        (M4OSA_UChar)(mMp4FileDataPtr->camcoderVersion - 100 * camcoder_maj - 10
+        * camcoder_min);
+
+    CLEANUPonERR(M4MP4W_putByte(' ', mMp4FileDataPtr->fileWriterFunctions,
+        fileWriterContext));
+    CLEANUPonERR(M4MP4W_putByte((M4OSA_UChar)(camcoder_maj + '0'),
+        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+    CLEANUPonERR(M4MP4W_putByte('.', mMp4FileDataPtr->fileWriterFunctions,
+        fileWriterContext));
+    CLEANUPonERR(M4MP4W_putByte((M4OSA_UChar)(camcoder_min + '0'),
+        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+    CLEANUPonERR(M4MP4W_putByte('.', mMp4FileDataPtr->fileWriterFunctions,
+        fileWriterContext));
+    CLEANUPonERR(M4MP4W_putByte((M4OSA_UChar)(camcoder_rev + '0'),
+        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+    /* Write integration tag */
+    CLEANUPonERR(M4MP4W_putBlock((const M4OSA_UChar *)" -- ", 4,
+        mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+
+    if (mMp4FileDataPtr->integrationTag == M4OSA_NULL)
+    {
+        CLEANUPonERR(M4MP4W_putBlock(BlockSignatureSkipDefaultIntegrationTag,
+            sizeof(BlockSignatureSkipDefaultIntegrationTag),
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+    }
+    else
+    {
+        CLEANUPonERR(M4MP4W_putBlock(mMp4FileDataPtr->integrationTag, 60,
+            mMp4FileDataPtr->fileWriterFunctions, fileWriterContext));
+    }
+
+#ifndef _M4MP4W_MOOV_FIRST
+    /*overwrite mdat size*/
+
+    if (mMp4FileDataPtr->ftyp.major_brand != 0)
+        filePos = 16 + mMp4FileDataPtr->ftyp.nbCompatibleBrands * 4;
+    else
+        filePos = 24;
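+    /* filePos is the offset of the mdat box header, which sits right after the ftyp: a
+    customized ftyp occupies 16 + 4 * nbCompatibleBrands bytes, the default ftyp block 24
+    bytes. The mdat size written back below (8-byte header included) is the end-of-data
+    position recorded before the moov, minus this offset. */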
+
+    M4OSA_INT_TO_FILE_POSITION(filePos, mdatPos);
+    M4OSA_FPOS_SUB(moovPos, moovPos, mdatPos);
+    M4OSA_FILE_POSITION_TO_INT(moovPos, mdatSize);
+
+    CLEANUPonERR(mMp4FileDataPtr->fileWriterFunctions->seek(fileWriterContext,
+        M4OSA_kFileSeekBeginning, &mdatPos)); /*seek after ftyp...*/
+    CLEANUPonERR(M4MP4W_putBE32(mdatSize, mMp4FileDataPtr->fileWriterFunctions,
+        fileWriterContext));
+
+#endif                                        /*_M4MP4W_MOOV_FIRST*/
+
+cleanup:
+
+    /**
+    * Close the file even if an error occurred */
+    if (M4OSA_NULL != mMp4FileDataPtr->fileWriterContext)
+    {
+        err2 =
+            mMp4FileDataPtr->fileWriterFunctions->closeWrite(mMp4FileDataPtr->
+            fileWriterContext); /**< close the stream anyway */
+
+        if (M4NO_ERROR != err2)
+        {
+            M4OSA_TRACE1_1(
+                "M4MP4W_closeWrite: fileWriterFunctions->closeWrite returns 0x%x",
+                err2);
+        }
+        mMp4FileDataPtr->fileWriterContext = M4OSA_NULL;
+    }
+
+#ifdef _M4MP4W_RESERVED_MOOV_DISK_SPACE
+    /* Remove the safety file if still present (this is the cleanup in case of error, NOT
+    the normal removal of the safety file to free emergency space for the moov). */
+
+    if (M4OSA_TRUE == mMp4FileDataPtr->cleanSafetyFile)
+    {
+        M4OSA_Context tempContext;
+        err3 = mMp4FileDataPtr->fileWriterFunctions->openWrite(&tempContext,
+            mMp4FileDataPtr->safetyFileUrl,
+            M4OSA_kFileWrite | M4OSA_kFileCreate);
+
+        if (M4NO_ERROR == err2)
+            err2 = err3;
+
+        if (M4NO_ERROR
+            == err3) /* No sense closing if we couldn't open in the first place. */
+        {
+            err3 =
+                mMp4FileDataPtr->fileWriterFunctions->closeWrite(tempContext);
+
+            if (M4NO_ERROR == err2)
+                err2 = err3;
+        }
+        mMp4FileDataPtr->safetyFileUrl = M4OSA_NULL;
+        mMp4FileDataPtr->cleanSafetyFile = M4OSA_FALSE;
+    }
+
+#endif /* _M4MP4W_RESERVED_MOOV_DISK_SPACE */
+
+    /* Delete embedded string */
+
+    if (M4OSA_NULL != mMp4FileDataPtr->embeddedString)
+    {
+        M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->embeddedString);
+        mMp4FileDataPtr->embeddedString = M4OSA_NULL;
+    }
+
+    /* Delete integration tag */
+    if (M4OSA_NULL != mMp4FileDataPtr->integrationTag)
+    {
+        M4OSA_free((M4OSA_MemAddr32)mMp4FileDataPtr->integrationTag);
+        mMp4FileDataPtr->integrationTag = M4OSA_NULL;
+    }
+
+    /**
+    * M4MP4W_freeContext() is now a private method, called only from here*/
+    err3 = M4MP4W_freeContext(context);
+
+    if (M4NO_ERROR != err3)
+    {
+        M4OSA_TRACE1_1("M4MP4W_closeWrite: M4MP4W_freeContext returns 0x%x",
+            err3);
+    }
+
+    /**
+    * Choose which error code to return */
+    if (M4NO_ERROR != err)
+    {
+        /**
+        * We give priority to main error */
+        M4OSA_TRACE1_1("M4MP4W_closeWrite: returning err=0x%x", err);
+        return err;
+    }
+    else if (M4NO_ERROR != err2)
+    {
+        /**
+        * Error from closeWrite is returned if there is no main error */
+        M4OSA_TRACE1_1("M4MP4W_closeWrite: returning err2=0x%x", err2);
+        return err2;
+    }
+    else
+    {
+        /**
+        * Error from M4MP4W_freeContext is returned only if there is no main error and
+          no close error */
+        M4OSA_TRACE1_1("M4MP4W_closeWrite: returning err3=0x%x", err3);
+        return err3;
+    }
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_getOption( M4OSA_Context context, M4OSA_OptionID option,
+                           M4OSA_DataOption *valuePtr )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4SYS_StreamIDValue *streamIDvaluePtr = M4OSA_NULL;
+    M4MP4W_StreamIDsize *streamIDsizePtr = M4OSA_NULL;
+    M4MP4W_memAddr *memAddrPtr = M4OSA_NULL;
+    /*    M4MP4W_WriteCallBack*    callBackPtr = M4OSA_NULL;*/
+
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+    ERR_CHECK(( mMp4FileDataPtr->state == M4MP4W_opened)
+        || (mMp4FileDataPtr->state == M4MP4W_ready), M4ERR_STATE);
+
+    switch( option )
+    {
+        case (M4MP4W_maxAUperChunk):
+            return M4ERR_NOT_IMPLEMENTED;
+
+        case (M4MP4W_maxChunkSize):
+
+            streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
+
+            switch( streamIDvaluePtr->streamID )
+            {
+                case (AudioStreamID):
+                    if (mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
+                        return M4ERR_BAD_STREAM_ID;
+                    else
+                        streamIDvaluePtr->value =
+                        mMp4FileDataPtr->audioTrackPtr->MaxChunkSize;
+                    break;
+
+                case (VideoStreamID):
+                    if (mMp4FileDataPtr->hasVideo == M4OSA_FALSE)
+                        return M4ERR_BAD_STREAM_ID;
+                    else
+                        streamIDvaluePtr->value =
+                        mMp4FileDataPtr->videoTrackPtr->MaxChunkSize;
+                    break;
+
+                case (0): /*all streams*/
+                    streamIDvaluePtr->value = mMp4FileDataPtr->MaxChunkSize;
+                    break;
+
+                default:
+                    return M4ERR_BAD_STREAM_ID;
+        }
+
+        break;
+
+    case (M4MP4W_maxChunkInter):
+
+        streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
+
+        switch( streamIDvaluePtr->streamID )
+        {
+            case (0): /*all streams*/
+                streamIDvaluePtr->value = (M4OSA_UInt32)mMp4FileDataPtr->
+                    InterleaveDur; /*time conversion !*/
+                break;
+
+            default:
+                return M4ERR_BAD_STREAM_ID;
+        }
+        break;
+
+    case (M4MP4W_embeddedString):
+        memAddrPtr = (M4MP4W_memAddr *)(*valuePtr);
+        /*memAddrPtr must have been already allocated by the caller
+        and memAddrPtr->size initialized with the max possible length in bytes*/
+        ERR_CHECK(memAddrPtr->size >= 16, M4ERR_PARAMETER);
+        ERR_CHECK(memAddrPtr->addr != M4OSA_NULL, M4ERR_PARAMETER);
+        /*memAddrPtr->size is updated with the actual size of the string*/
+        memAddrPtr->size = 16;
+        /*if no value was set, return the default string */
+        if (mMp4FileDataPtr->embeddedString != M4OSA_NULL)
+            M4OSA_memcpy((M4OSA_MemAddr8)memAddrPtr->addr,
+            (M4OSA_MemAddr8)mMp4FileDataPtr->embeddedString, 16);
+        else
+            M4OSA_memcpy((M4OSA_MemAddr8)memAddrPtr->addr,
+            (M4OSA_MemAddr8)BlockSignatureSkipDefaultEmbeddedString,
+            16);
+        break;
+
+    case (M4MP4W_integrationTag):
+        memAddrPtr = (M4MP4W_memAddr *)(*valuePtr);
+        /*memAddrPtr must have been already allocated by the caller
+        and memAddrPtr->size initialized with the max possible length in bytes*/
+        ERR_CHECK(memAddrPtr->size >= 60, M4ERR_PARAMETER);
+        ERR_CHECK(memAddrPtr->addr != M4OSA_NULL, M4ERR_PARAMETER);
+        /*memAddrPtr->size is updated with the actual size of the string*/
+        memAddrPtr->size = 60;
+        /* if no value was set, return the default string */
+        if (mMp4FileDataPtr->integrationTag != M4OSA_NULL)
+            M4OSA_memcpy((M4OSA_MemAddr8)memAddrPtr->addr,
+            (M4OSA_MemAddr8)mMp4FileDataPtr->integrationTag, 60);
+        else
+            M4OSA_memcpy((M4OSA_MemAddr8)memAddrPtr->addr,
+            (M4OSA_MemAddr8)BlockSignatureSkipDefaultIntegrationTag,
+            60);
+        break;
+
+    case (M4MP4W_CamcoderVersion):
+
+        streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
+
+        switch( streamIDvaluePtr->streamID )
+        {
+            case (0): /*all streams*/
+                streamIDvaluePtr->value = mMp4FileDataPtr->camcoderVersion;
+                break;
+
+            default:
+                return M4ERR_BAD_STREAM_ID;
+        }
+        break;
+
+    case (M4MP4W_preWriteCallBack):
+        return M4ERR_NOT_IMPLEMENTED;
+        /*callBackPtr = (M4MP4W_WriteCallBack*)(*valuePtr);
+        *callBackPtr = mMp4FileDataPtr->PreWriteCallBack;
+        break;*/
+
+    case (M4MP4W_postWriteCallBack):
+        return M4ERR_NOT_IMPLEMENTED;
+        /*callBackPtr = (M4MP4W_WriteCallBack*)(*valuePtr);
+        *callBackPtr = mMp4FileDataPtr->PostWriteCallBack;
+        break;*/
+
+    case (M4MP4W_maxAUsize):
+
+        streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
+
+        switch( streamIDvaluePtr->streamID )
+        {
+            case (AudioStreamID):
+                if (mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
+                    return M4ERR_BAD_STREAM_ID;
+                else
+                    streamIDvaluePtr->value =
+                    mMp4FileDataPtr->audioTrackPtr->MaxAUSize;
+                break;
+
+            case (VideoStreamID):
+                if (mMp4FileDataPtr->hasVideo == M4OSA_FALSE)
+                    return M4ERR_BAD_STREAM_ID;
+                else
+                    streamIDvaluePtr->value =
+                    mMp4FileDataPtr->videoTrackPtr->MaxAUSize;
+                break;
+
+            case (0): /*all streams*/
+                streamIDvaluePtr->value = mMp4FileDataPtr->MaxAUSize;
+                break;
+
+            default:
+                return M4ERR_BAD_STREAM_ID;
+        }
+
+        break;
+
+    case (M4MP4W_IOD):
+        return M4ERR_NOT_IMPLEMENTED;
+
+    case (M4MP4W_ESD):
+        return M4ERR_NOT_IMPLEMENTED;
+
+    case (M4MP4W_SDP):
+        return M4ERR_NOT_IMPLEMENTED;
+
+    case (M4MP4W_trackSize):
+        streamIDsizePtr = (M4MP4W_StreamIDsize *)(*valuePtr);
+        streamIDsizePtr->width = mMp4FileDataPtr->videoTrackPtr->width;
+        streamIDsizePtr->height = mMp4FileDataPtr->videoTrackPtr->height;
+        break;
+
+    case (M4MP4W_estimateAudioSize):
+        streamIDvaluePtr = (M4SYS_StreamIDValue *)(*valuePtr);
+        streamIDvaluePtr->value =
+            (M4OSA_UInt32)mMp4FileDataPtr->estimateAudioSize;
+        break;
+
+    case (M4MP4W_MOOVfirst):
+        return M4ERR_NOT_IMPLEMENTED;
+
+    case (M4MP4W_V2_MOOF):
+        return M4ERR_NOT_IMPLEMENTED;
+
+    case (M4MP4W_V2_tblCompres):
+        return M4ERR_NOT_IMPLEMENTED;
+
+    default:
+        return M4ERR_BAD_OPTION_ID;
+    }
+
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_setOption( M4OSA_Context context, M4OSA_OptionID option,
+                           M4OSA_DataOption value )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4SYS_StreamIDValue *streamIDvaluePtr = M4OSA_NULL;
+    M4MP4W_StreamIDsize *streamIDsizePtr = M4OSA_NULL;
+    M4MP4W_memAddr *memAddrPtr = M4OSA_NULL;
+    M4SYS_StreamIDmemAddr *streamIDmemAddrPtr;
+
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+    /* Verify state */
+    switch( option )
+    {
+        case M4MP4W_maxFileDuration:
+        case M4MP4W_DSI:
+            /* this param can be set at the end of a recording */
+            ERR_CHECK((mMp4FileDataPtr->state != M4MP4W_closed), M4ERR_STATE);
+            break;
+
+        case M4MP4W_setFtypBox:
+            /* this param can only be set before starting any write */
+            ERR_CHECK(mMp4FileDataPtr->state == M4MP4W_opened, M4ERR_STATE);
+            break;
+
+        default:
+            /* in general params can be set at open or ready stage */
+            ERR_CHECK(( mMp4FileDataPtr->state == M4MP4W_opened)
+                || (mMp4FileDataPtr->state == M4MP4W_ready), M4ERR_STATE);
+    }
+
+    /* Set option */
+    switch( option )
+    {
+        case (M4MP4W_maxAUperChunk):
+            return M4ERR_NOT_IMPLEMENTED;
+
+        case (M4MP4W_maxChunkSize):
+
+            streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
+
+            switch( streamIDvaluePtr->streamID )
+            {
+                case (AudioStreamID):
+                    if (mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
+                        return
+                        M4ERR_BAD_STREAM_ID; /*maybe the stream has not been added yet*/
+                    else
+                    {
+                        mMp4FileDataPtr->audioTrackPtr->MaxChunkSize =
+                            streamIDvaluePtr->value;
+                    }
+
+                    break;
+
+                case (VideoStreamID):
+                    if (mMp4FileDataPtr->hasVideo == M4OSA_FALSE)
+                        return
+                        M4ERR_BAD_STREAM_ID; /*maybe the stream has not been added yet*/
+                    else
+                    {
+                        mMp4FileDataPtr->videoTrackPtr->MaxChunkSize =
+                            streamIDvaluePtr->value;
+                    }
+                    break;
+
+                case (0): /*all streams*/
+
+                    /*In M4MP4W_opened state, no stream is present yet, so only global value
+                    needs to be updated.*/
+                    mMp4FileDataPtr->MaxChunkSize = streamIDvaluePtr->value;
+
+                    if (mMp4FileDataPtr->hasAudio == M4OSA_TRUE)
+                    {
+                        mMp4FileDataPtr->audioTrackPtr->MaxChunkSize =
+                            streamIDvaluePtr->value;
+                    }
+
+                    if (mMp4FileDataPtr->hasVideo == M4OSA_TRUE)
+                    {
+                        mMp4FileDataPtr->videoTrackPtr->MaxChunkSize =
+                            streamIDvaluePtr->value;
+                    }
+                    break;
+
+                default:
+                    return M4ERR_BAD_STREAM_ID;
+            }
+            break;
+
+        case (M4MP4W_maxChunkInter):
+
+            streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
+
+            switch( streamIDvaluePtr->streamID )
+            {
+                case (0):                                       /*all streams*/
+                    mMp4FileDataPtr->InterleaveDur =
+                        (M4MP4W_Time32)streamIDvaluePtr->
+                        value; /*time conversion!*/
+                    break;
+
+                default:
+                    return M4ERR_BAD_STREAM_ID;
+                    /*not meaningful to set this parameter on a streamID basis*/
+            }
+            break;
+
+        case (M4MP4W_maxFileSize):
+            mMp4FileDataPtr->MaxFileSize = *(M4OSA_UInt32 *)value;
+            break;
+
+        case (M4MP4W_embeddedString):
+            memAddrPtr = (M4MP4W_memAddr *)value;
+            /*
+            * If memAddrPtr->size > 16 bytes, then the string will be truncated.
+            * If memAddrPtr->size < 16 bytes, then return M4ERR_PARAMETER
+            */
+            ERR_CHECK(memAddrPtr->size >= 16, M4ERR_PARAMETER);
+
+            if (mMp4FileDataPtr->embeddedString == M4OSA_NULL)
+            {
+                mMp4FileDataPtr->embeddedString =
+                    (M4OSA_UChar *)M4OSA_malloc(16, M4MP4_WRITER,
+                    (M4OSA_Char *)"embeddedString");
+                ERR_CHECK(mMp4FileDataPtr->embeddedString != M4OSA_NULL,
+                    M4ERR_ALLOC);
+            }
+            /*else, just overwrite the previously set string*/
+            M4OSA_memcpy((M4OSA_MemAddr8)mMp4FileDataPtr->embeddedString,
+                (M4OSA_MemAddr8)memAddrPtr->addr, 16);
+            break;
+
+        case (M4MP4W_integrationTag):
+            memAddrPtr = (M4MP4W_memAddr *)value;
+            /*
+            * If memAddrPtr->size > 60 bytes, then the string will be truncated.
+            * If memAddrPtr->size < 60 bytes, then pad with 0
+            */
+            if (mMp4FileDataPtr->integrationTag == M4OSA_NULL)
+            {
+                mMp4FileDataPtr->integrationTag =
+                    (M4OSA_UChar *)M4OSA_malloc(60, M4MP4_WRITER,
+                    (M4OSA_Char *)"integrationTag");
+                ERR_CHECK(mMp4FileDataPtr->integrationTag != M4OSA_NULL,
+                    M4ERR_ALLOC);
+            }
+            /*else, just overwrite the previously set string*/
+            if (memAddrPtr->size < 60)
+            {
+                M4OSA_memcpy((M4OSA_MemAddr8)mMp4FileDataPtr->integrationTag,
+                    (M4OSA_MemAddr8)BlockSignatureSkipDefaultIntegrationTag,
+                    60);
+                M4OSA_memcpy((M4OSA_MemAddr8)mMp4FileDataPtr->integrationTag,
+                    (M4OSA_MemAddr8)memAddrPtr->addr, memAddrPtr->size);
+            }
+            else
+            {
+                M4OSA_memcpy((M4OSA_MemAddr8)mMp4FileDataPtr->integrationTag,
+                    (M4OSA_MemAddr8)memAddrPtr->addr, 60);
+            }
+            break;
+
+        case (M4MP4W_CamcoderVersion):
+
+            streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
+
+            switch( streamIDvaluePtr->streamID )
+            {
+                case (0): /*all streams*/
+                    mMp4FileDataPtr->camcoderVersion = streamIDvaluePtr->value;
+                    break;
+
+                default:
+                    return M4ERR_BAD_STREAM_ID;
+                    /*not meaningful to set this parameter on a streamID basis*/
+            }
+            break;
+
+        case (M4MP4W_preWriteCallBack):
+            return M4ERR_NOT_IMPLEMENTED;
+            /*mMp4FileDataPtr->PreWriteCallBack = *(M4MP4W_WriteCallBack*)value;
+            break;*/
+
+        case (M4MP4W_postWriteCallBack):
+            return M4ERR_NOT_IMPLEMENTED;
+            /*mMp4FileDataPtr->PostWriteCallBack = *(M4MP4W_WriteCallBack*)value;
+            break;*/
+
+        case (M4MP4W_maxAUsize):
+
+            streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
+
+            switch( streamIDvaluePtr->streamID )
+            {
+                case (AudioStreamID):
+
+                    /*if (mMp4FileDataPtr->audioTrackPtr == M4OSA_NULL)*/
+                    if (mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
+                        return M4ERR_BAD_STREAM_ID;
+                    else
+                        mMp4FileDataPtr->audioTrackPtr->MaxAUSize =
+                        streamIDvaluePtr->value;
+                    break;
+
+                case (VideoStreamID):
+
+                    /*if (mMp4FileDataPtr->videoTrackPtr == M4OSA_NULL)*/
+                    if (mMp4FileDataPtr->hasVideo == M4OSA_FALSE)
+                        return M4ERR_BAD_STREAM_ID;
+                    else
+                        mMp4FileDataPtr->videoTrackPtr->MaxAUSize =
+                        streamIDvaluePtr->value;
+                    break;
+
+                case (0): /*all streams*/
+
+                    mMp4FileDataPtr->MaxAUSize = streamIDvaluePtr->value;
+
+                    if (mMp4FileDataPtr->hasAudio == M4OSA_TRUE)
+                        mMp4FileDataPtr->audioTrackPtr->MaxAUSize =
+                        streamIDvaluePtr->value;
+
+                    if (mMp4FileDataPtr->hasVideo == M4OSA_TRUE)
+                        mMp4FileDataPtr->videoTrackPtr->MaxAUSize =
+                        streamIDvaluePtr->value;
+
+                    break;
+
+                default:
+                    return M4ERR_BAD_STREAM_ID;
+            }
+            break;
+
+        case (M4MP4W_IOD):
+            return M4ERR_NOT_IMPLEMENTED;
+
+        case (M4MP4W_ESD):
+            return M4ERR_NOT_IMPLEMENTED;
+
+        case (M4MP4W_SDP):
+            return M4ERR_NOT_IMPLEMENTED;
+
+        case (M4MP4W_trackSize):
+
+            streamIDsizePtr = (M4MP4W_StreamIDsize *)value;
+
+            if ((streamIDsizePtr->streamID != VideoStreamID)
+                || (mMp4FileDataPtr->hasVideo == M4OSA_FALSE))
+                return M4ERR_BAD_STREAM_ID;
+            else
+            {
+                mMp4FileDataPtr->videoTrackPtr->width = streamIDsizePtr->width;
+                mMp4FileDataPtr->videoTrackPtr->height =
+                    streamIDsizePtr->height;
+            }
+            break;
+
+        case (M4MP4W_estimateAudioSize):
+
+            streamIDvaluePtr = (M4SYS_StreamIDValue *)value;
+
+            /*this option shall not be set before both audio and video streams have been added*/
+            /*it only makes sense to set this option when both audio and video are present*/
+            if ((mMp4FileDataPtr->hasAudio == M4OSA_FALSE)
+                || (mMp4FileDataPtr->hasVideo == M4OSA_FALSE))
+                return M4ERR_STATE;
+
+            mMp4FileDataPtr->estimateAudioSize =
+                (M4OSA_Bool)streamIDvaluePtr->value;
+            break;
+
+        case (M4MP4W_MOOVfirst):
+            return M4ERR_NOT_IMPLEMENTED;
+
+        case (M4MP4W_V2_MOOF):
+            return M4ERR_NOT_IMPLEMENTED;
+
+        case (M4MP4W_V2_tblCompres):
+            return M4ERR_NOT_IMPLEMENTED;
+
+        case (M4MP4W_maxFileDuration):
+            mMp4FileDataPtr->MaxFileDuration = *(M4OSA_UInt32 *)value;
+            break;
+
+        case (M4MP4W_setFtypBox):
+            {
+                M4OSA_UInt32 size;
+
+                ERR_CHECK(( (M4MP4C_FtypBox *)value)->major_brand != 0,
+                    M4ERR_PARAMETER);
+
+                /* Copy structure */
+                mMp4FileDataPtr->ftyp = *(M4MP4C_FtypBox *)value;
+
+                /* Update global position variables with the difference between common and
+                 user block */
+                size =
+                    mMp4FileDataPtr->ftyp.nbCompatibleBrands * sizeof(M4OSA_UInt32);
+
+                mMp4FileDataPtr->absoluteCurrentPos = 8/*mdat*/ + 16 + size;
+                mMp4FileDataPtr->filesize = 218/*mdat+moov+skip*/ + 16 + size;
+            }
+            break;
+
+        case (M4MP4W_DSI):
+            {
+                streamIDmemAddrPtr = (M4SYS_StreamIDmemAddr *)value;
+
+                /* Nested switch! Whee! */
+                switch( streamIDmemAddrPtr->streamID )
+                {
+                    case (AudioStreamID):
+                        return M4ERR_NOT_IMPLEMENTED;
+
+                    case (VideoStreamID):
+
+                        /* Protect DSI setting : only once allowed on a given stream */
+
+                        switch( mMp4FileDataPtr->videoTrackPtr->
+                            CommonData.trackType )
+                        {
+                            case M4SYS_kH263:
+                                if ((0 != mMp4FileDataPtr->videoTrackPtr->dsiSize)
+                                    || (M4OSA_NULL
+                                    != mMp4FileDataPtr->videoTrackPtr->DSI))
+                                {
+                                    M4OSA_TRACE1_0(
+                                        "M4MP4W_setOption: dsi already set !");
+                                    return M4ERR_STATE;
+                                }
+
+                                if ((0 == streamIDmemAddrPtr->size)
+                                    || (M4OSA_NULL == streamIDmemAddrPtr->addr))
+                                {
+                                    M4OSA_TRACE1_0(
+                                        "M4MP4W_setOption: Bad H263 dsi!");
+                                    return M4ERR_PARAMETER;
+                                }
+
+                                /*decoder specific info size is supposed to be always 7
+                                 bytes long */
+                                ERR_CHECK(streamIDmemAddrPtr->size == 7,
+                                    M4ERR_PARAMETER);
+                                mMp4FileDataPtr->videoTrackPtr->dsiSize =
+                                    (M4OSA_UInt8)streamIDmemAddrPtr->size;
+                                mMp4FileDataPtr->videoTrackPtr->DSI = (M4OSA_UChar
+                                    *)M4OSA_malloc(streamIDmemAddrPtr->size,
+                                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+                                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
+                                    != M4OSA_NULL, M4ERR_ALLOC);
+                                M4OSA_memcpy(
+                                    (M4OSA_MemAddr8)mMp4FileDataPtr->videoTrackPtr->
+                                    DSI,
+                                    (M4OSA_MemAddr8)streamIDmemAddrPtr->addr,
+                                    streamIDmemAddrPtr->size);
+
+                                break;
+
+                            case M4SYS_kMPEG_4:
+                                if ((0 != mMp4FileDataPtr->videoTrackPtr->dsiSize)
+                                    || (M4OSA_NULL
+                                    != mMp4FileDataPtr->videoTrackPtr->DSI))
+                                {
+                                    M4OSA_TRACE1_0(
+                                        "M4MP4W_setOption: dsi already set !");
+                                    return M4ERR_STATE;
+                                }
+
+                                if ((0 == streamIDmemAddrPtr->size)
+                                    || (M4OSA_NULL == streamIDmemAddrPtr->addr))
+                                {
+                                    M4OSA_TRACE1_0(
+                                        "M4MP4W_setOption: Bad MPEG4 dsi!");
+                                    return M4ERR_PARAMETER;
+                                }
+
+                                /*MP4V specific*/
+                                ERR_CHECK(streamIDmemAddrPtr->size < 105,
+                                    M4ERR_PARAMETER);
+                                mMp4FileDataPtr->videoTrackPtr->dsiSize =
+                                    (M4OSA_UInt8)streamIDmemAddrPtr->size;
+                                mMp4FileDataPtr->videoTrackPtr->DSI = (M4OSA_UChar
+                                    *)M4OSA_malloc(streamIDmemAddrPtr->size,
+                                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+                                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
+                                    != M4OSA_NULL, M4ERR_ALLOC);
+                                M4OSA_memcpy(
+                                    (M4OSA_MemAddr8)mMp4FileDataPtr->videoTrackPtr->
+                                    DSI,
+                                    (M4OSA_MemAddr8)streamIDmemAddrPtr->addr,
+                                    streamIDmemAddrPtr->size);
+                                mMp4FileDataPtr->filesize +=
+                                    streamIDmemAddrPtr->size;
+
+                                break;
+
+                            case M4SYS_kH264:
+                                if ((0 != mMp4FileDataPtr->videoTrackPtr->dsiSize)
+                                    || (M4OSA_NULL
+                                    != mMp4FileDataPtr->videoTrackPtr->DSI))
+                                {
+                                    /* + H.264 trimming */
+                                    if (M4OSA_TRUE == mMp4FileDataPtr->bMULPPSSPS)
+                                    {
+                                        M4OSA_free(
+                                            (M4OSA_MemAddr32)mMp4FileDataPtr->videoTrackPtr->DSI);
+
+                                        // Do not strip the DSI
+                                        /* Store the DSI size */
+                                        mMp4FileDataPtr->videoTrackPtr->dsiSize =
+                                            (M4OSA_UInt8)streamIDmemAddrPtr->size;
+                                             M4OSA_TRACE1_1("M4MP4W_setOption: in set option DSI size =%d"\
+                                            ,mMp4FileDataPtr->videoTrackPtr->dsiSize);
+                                        /* Copy the DSI (SPS + PPS) */
+                                        mMp4FileDataPtr->videoTrackPtr->DSI =
+                                            (M4OSA_UChar*)M4OSA_malloc(
+                                            streamIDmemAddrPtr->size, M4MP4_WRITER,
+                                            (M4OSA_Char *)"videoTrackPtr->DSI");
+                                        ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI !=
+                                             M4OSA_NULL, M4ERR_ALLOC);
+                                        M4OSA_memcpy(
+                                            (M4OSA_MemAddr8)mMp4FileDataPtr->videoTrackPtr->DSI,
+                                            (M4OSA_MemAddr8)streamIDmemAddrPtr->addr,
+                                            streamIDmemAddrPtr->size);
+
+                                        break;
+                                        /* - H.264 trimming */
+                                    }
+                                    else
+                                    {
+                                        M4OSA_TRACE1_0(
+                                            "M4MP4W_setOption: dsi already set !");
+                                        return M4ERR_STATE;
+                                    }
+                                }
+
+                                if (( 0 == streamIDmemAddrPtr->size)
+                                    || (M4OSA_NULL == streamIDmemAddrPtr->addr))
+                                {
+                                    M4OSA_TRACE1_0(
+                                        "M4MP4W_setOption: Bad H264 dsi!");
+                                    return M4ERR_PARAMETER;
+                                }
+
+                                /* Store the DSI size */
+                                mMp4FileDataPtr->videoTrackPtr->dsiSize =
+                                    (M4OSA_UInt8)streamIDmemAddrPtr->size;
+
+                                /* Copy the DSI (SPS + PPS) */
+                                mMp4FileDataPtr->videoTrackPtr->DSI = (M4OSA_UChar
+                                    *)M4OSA_malloc(streamIDmemAddrPtr->size,
+                                    M4MP4_WRITER, (M4OSA_Char *)"videoTrackPtr->DSI");
+                                ERR_CHECK(mMp4FileDataPtr->videoTrackPtr->DSI
+                                    != M4OSA_NULL, M4ERR_ALLOC);
+                                M4OSA_memcpy(
+                                    (M4OSA_MemAddr8)mMp4FileDataPtr->videoTrackPtr->
+                                    DSI,
+                                    (M4OSA_MemAddr8)streamIDmemAddrPtr->addr,
+                                    streamIDmemAddrPtr->size);
+                                break;
+
+                            default:
+                                return M4ERR_BAD_STREAM_ID;
+                        }
+                    break;
+
+                default:
+                    return M4ERR_BAD_STREAM_ID;
+                }
+            }
+            break;
+            /* H.264 Trimming  */
+        case M4MP4W_MUL_PPS_SPS:
+            mMp4FileDataPtr->bMULPPSSPS = *(M4OSA_Int8 *)value;
+            /* H.264 Trimming  */
+            break;
+
+        default:
+            return M4ERR_BAD_OPTION_ID;
+    }
+
+    return err;
+}
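For illustration, a minimal caller-side sketch of this option mechanism follows. The writer context variable and the concrete values are assumptions; only the option IDs and the parameter types come from the code above.

/* Sketch only: 'writerContext' is assumed to come from the writer's open call. */
static M4OSA_ERR exampleConfigureWriter(M4OSA_Context writerContext)
{
    M4SYS_StreamIDValue chunkSize;
    M4OSA_UInt32 maxFileSize = 4 * 1024 * 1024;   /* arbitrary 4 MB cap for the example */
    M4OSA_ERR err;

    chunkSize.streamID = 0;        /* 0 = apply to all streams */
    chunkSize.value    = 8192;     /* example chunk size */

    err = M4MP4W_setOption(writerContext, M4MP4W_maxChunkSize, (M4OSA_DataOption)&chunkSize);
    if (M4NO_ERROR != err)
        return err;

    /* M4MP4W_maxFileSize takes a pointer to a plain M4OSA_UInt32, not a StreamIDValue. */
    return M4MP4W_setOption(writerContext, M4MP4W_maxFileSize, (M4OSA_DataOption)&maxFileSize);
}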
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_getState( M4OSA_Context context, M4MP4W_State *state,
+                          M4SYS_StreamID streamID )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+    switch( streamID )
+    {
+        case (0):
+            *state = mMp4FileDataPtr->state;
+            break;
+
+        case (AudioStreamID):
+            if (mMp4FileDataPtr->hasAudio == M4OSA_TRUE)
+            {
+                *state = mMp4FileDataPtr->audioTrackPtr->microState;
+            }
+            else
+            {
+                return M4ERR_BAD_STREAM_ID;
+            }
+            break;
+
+        case (VideoStreamID):
+            if (mMp4FileDataPtr->hasVideo == M4OSA_TRUE)
+            {
+                *state = mMp4FileDataPtr->videoTrackPtr->microState;
+            }
+            else
+            {
+                return M4ERR_BAD_STREAM_ID;
+            }
+            break;
+
+        default:
+            return M4ERR_BAD_STREAM_ID;
+    }
+
+    return err;
+}
+
+/*******************************************************************************/
+M4OSA_ERR M4MP4W_getCurrentFileSize( M4OSA_Context context,
+                                    M4OSA_UInt32 *pCurrentFileSize )
+/*******************************************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4MP4W_Mp4FileData *mMp4FileDataPtr = (M4MP4W_Mp4FileData *)context;
+    ERR_CHECK(context != M4OSA_NULL, M4ERR_PARAMETER);
+
+    ERR_CHECK(pCurrentFileSize != M4OSA_NULL, M4ERR_PARAMETER);
+    *pCurrentFileSize = mMp4FileDataPtr->filesize;
+
+    return err;
+}
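A hedged companion sketch for the query entry points above; the variable names are hypothetical and the stop condition is just one plausible use of the returned size.

/* Sketch only: polls the writer and checks the size against a caller-chosen limit. */
static M4OSA_Bool exampleShouldStopRecording(M4OSA_Context writerContext,
                                             M4OSA_UInt32 maxFileSize)
{
    M4OSA_UInt32 currentSize = 0;
    M4MP4W_State videoState;

    if (M4NO_ERROR != M4MP4W_getCurrentFileSize(writerContext, &currentSize))
        return M4OSA_TRUE;                       /* treat a query failure as "stop" */

    /* The per-stream micro-state can be inspected the same way. */
    if (M4NO_ERROR != M4MP4W_getState(writerContext, &videoState, VideoStreamID))
        return M4OSA_TRUE;                       /* no video stream: nothing to record */

    return (currentSize >= maxFileSize) ? M4OSA_TRUE : M4OSA_FALSE;
}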
+
+#endif /* _M4MP4W_USE_CST_MEMORY_WRITER */
diff --git a/libvideoeditor/vss/Android.mk b/libvideoeditor/vss/Android.mk
new file mode 100755
index 0000000..1d4ec7f
--- /dev/null
+++ b/libvideoeditor/vss/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
\ No newline at end of file
diff --git a/libvideoeditor/vss/common/inc/From2iToMono_16.h b/libvideoeditor/vss/common/inc/From2iToMono_16.h
new file mode 100755
index 0000000..433bb78
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/From2iToMono_16.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _FROM2ITOMONO_16_H_
+#define _FROM2ITOMONO_16_H_
+
+
+void From2iToMono_16(  const short *src,
+                             short *dst,
+                             short n);
+
+/**********************************************************************************/
+
+#endif  /* _FROM2ITOMONO_16_H_ */
+
+/**********************************************************************************/
+
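The header above only exposes the prototype; as a rough usage sketch, treating src as interleaved L/R pairs and n as the number of output mono samples (neither of which the header states explicitly):

/* Sketch only: buffer sizes and the exact meaning of 'n' are assumptions. */
#define EXAMPLE_NB_SAMPLES 160                    /* e.g. one 20 ms frame at 8 kHz */

static void exampleDownmix(const short stereoIn[2 * EXAMPLE_NB_SAMPLES],
                           short monoOut[EXAMPLE_NB_SAMPLES])
{
    /* stereoIn is assumed to hold interleaved samples L0 R0 L1 R1 ... */
    From2iToMono_16(stereoIn, monoOut, (short)EXAMPLE_NB_SAMPLES);
}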
diff --git a/libvideoeditor/vss/common/inc/LVM_Types.h b/libvideoeditor/vss/common/inc/LVM_Types.h
new file mode 100755
index 0000000..a9eecef
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/LVM_Types.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/****************************************************************************************
+ * @file               LVM_Types.h
+ *
+*****************************************************************************************/
+
+/****************************************************************************************/
+/*                                                                                      */
+/*  Header file defining the standard LifeVibes types for use in the application layer  */
+/*  interface of all LifeVibes modules                                                  */
+/*                                                                                      */
+/****************************************************************************************/
+
+#ifndef LVM_TYPES_H
+#define LVM_TYPES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+/****************************************************************************************/
+/*                                                                                      */
+/*  definitions                                                                         */
+/*                                                                                      */
+/****************************************************************************************/
+
+#define LVM_NULL                0                   /* NULL pointer */
+
+#define LVM_TRUE                1                   /* Booleans */
+#define LVM_FALSE               0
+
+#define LVM_MAXINT_8            127                 /* Maximum positive integer size */
+#define LVM_MAXINT_16           32767
+#define LVM_MAXINT_32           2147483647
+#define LVM_MAXENUM             2147483647
+
+#define LVM_MODULEID_MASK       0xFF00              /* Mask to extract the calling module ID
+                                                        from callbackId */
+#define LVM_EVENTID_MASK        0x00FF              /* Mask to extract the callback event from
+                                                         callbackId */
+
+/* Memory table*/
+#define LVM_MEMREGION_PERSISTENT_SLOW_DATA      0   /* Offset to the instance memory region */
+#define LVM_MEMREGION_PERSISTENT_FAST_DATA      1   /* Offset to the persistent data memory
+                                                        region */
+#define LVM_MEMREGION_PERSISTENT_FAST_COEF      2   /* Offset to the persistent coefficient
+                                                        memory region */
+#define LVM_MEMREGION_TEMPORARY_FAST            3   /* Offset to temporary memory region */
+
+
+/****************************************************************************************/
+/*                                                                                      */
+/*  Basic types                                                                         */
+/*                                                                                      */
+/****************************************************************************************/
+
+typedef     char                LVM_CHAR;           /* ASCII character */
+
+typedef     char                LVM_INT8;           /* Signed 8-bit word */
+typedef     unsigned char       LVM_UINT8;          /* Unsigned 8-bit word */
+
+typedef     short               LVM_INT16;          /* Signed 16-bit word */
+typedef     unsigned short      LVM_UINT16;         /* Unsigned 16-bit word */
+
+typedef     long                LVM_INT32;          /* Signed 32-bit word */
+typedef     unsigned long       LVM_UINT32;         /* Unsigned 32-bit word */
+
+
+/****************************************************************************************/
+/*                                                                                      */
+/*  Standard Enumerated types                                                           */
+/*                                                                                      */
+/****************************************************************************************/
+
+/* Operating mode */
+typedef enum
+{
+    LVM_MODE_OFF    = 0,
+    LVM_MODE_ON     = 1,
+    LVM_MODE_DUMMY  = LVM_MAXENUM
+} LVM_Mode_en;
+
+
+/* Format */
+typedef enum
+{
+    LVM_STEREO          = 0,
+    LVM_MONOINSTEREO    = 1,
+    LVM_MONO            = 2,
+    LVM_SOURCE_DUMMY    = LVM_MAXENUM
+} LVM_Format_en;
+
+
+/* Word length */
+typedef enum
+{
+    LVM_16_BIT      = 0,
+    LVM_32_BIT      = 1,
+    LVM_WORDLENGTH_DUMMY = LVM_MAXENUM
+} LVM_WordLength_en;
+
+
+/* LVM sampling rates */
+typedef enum
+{
+    LVM_FS_8000  = 0,
+    LVM_FS_11025 = 1,
+    LVM_FS_12000 = 2,
+    LVM_FS_16000 = 3,
+    LVM_FS_22050 = 4,
+    LVM_FS_24000 = 5,
+    LVM_FS_32000 = 6,
+    LVM_FS_44100 = 7,
+    LVM_FS_48000 = 8,
+    LVM_FS_INVALID = LVM_MAXENUM-1,
+    LVM_FS_DUMMY = LVM_MAXENUM
+} LVM_Fs_en;
+
+
+/* Memory Types */
+typedef enum
+{
+    LVM_PERSISTENT_SLOW_DATA    = LVM_MEMREGION_PERSISTENT_SLOW_DATA,
+    LVM_PERSISTENT_FAST_DATA    = LVM_MEMREGION_PERSISTENT_FAST_DATA,
+    LVM_PERSISTENT_FAST_COEF    = LVM_MEMREGION_PERSISTENT_FAST_COEF,
+    LVM_TEMPORARY_FAST          = LVM_MEMREGION_TEMPORARY_FAST,
+    LVM_MEMORYTYPE_DUMMY        = LVM_MAXENUM
+} LVM_MemoryTypes_en;
+
+
+/* Memory region definition */
+typedef struct
+{
+    LVM_UINT32                  Size;                   /* Region size in bytes */
+    LVM_MemoryTypes_en          Type;                   /* Region type */
+    void                        *pBaseAddress;          /* Pointer to the region base address */
+} LVM_MemoryRegion_st;
+
+
+/****************************************************************************************/
+/*                                                                                      */
+/*  Standard Function Prototypes                                                        */
+/*                                                                                      */
+/****************************************************************************************/
+typedef LVM_INT32 (*LVM_Callback)(void          *pCallbackData,     /* Pointer to the callback
+                                                                     data structure */
+                                  void          *pGeneralPurpose,   /* General purpose pointer
+                                                                    (e.g. to a data structure
+                                                                    needed in the callback) */
+                                  LVM_INT16     GeneralPurpose );   /* General purpose variable
+                                  (e.g. to be used as callback ID) */
+
+
+/****************************************************************************************/
+/*                                                                                      */
+/*  End of file                                                                         */
+/*                                                                                      */
+/****************************************************************************************/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif  /* LVM_TYPES_H */
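As an illustration of the callback prototype above, a minimal sketch of a handler that splits a callback ID with the masks defined in this header. Treating GeneralPurpose as a callback ID is only the usage suggested by the comments, and the right-shift amount follows from the 0xFF00 mask.

/* Sketch only. */
static LVM_INT32 exampleLvmCallback(void *pCallbackData,
                                    void *pGeneralPurpose,
                                    LVM_INT16 GeneralPurpose)
{
    LVM_UINT16 moduleId = (LVM_UINT16)(((LVM_UINT16)GeneralPurpose & LVM_MODULEID_MASK) >> 8);
    LVM_UINT16 eventId  = (LVM_UINT16)((LVM_UINT16)GeneralPurpose & LVM_EVENTID_MASK);

    (void)pCallbackData;      /* caller-supplied data structure, unused in this sketch */
    (void)pGeneralPurpose;    /* general purpose pointer, unused in this sketch */
    (void)moduleId;
    (void)eventId;

    return 0;
}

A function with this signature can then be stored wherever an LVM_Callback is expected.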
diff --git a/libvideoeditor/vss/common/inc/M4AD_Common.h b/libvideoeditor/vss/common/inc/M4AD_Common.h
new file mode 100755
index 0000000..12314f3
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4AD_Common.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file       M4AD_Common.h
+ * @brief    Audio Shell Decoder common interface declaration
+ * @note    This file declares the common interfaces that audio decoder shells must implement
+ ************************************************************************
+*/
+#ifndef __M4AD_COMMON_H__
+#define __M4AD_COMMON_H__
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_OptionID.h"
+#include "M4OSA_CoreID.h"
+#include "M4DA_Types.h"
+#include "M4TOOL_VersionInfo.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+typedef M4OSA_Void* M4AD_Context;
+
+/**
+ ************************************************************************
+ * enum     M4AD_OptionID
+ * @brief    This enum defines the Audio decoder options.
+ * @note    These options can be read from or written to a decoder via
+ *            M4AD_getOption_fct/M4AD_setOption_fct
+ ************************************************************************
+*/
+typedef enum
+{
+    /**
+     * Set the flag indicating the absence of protection */
+    M4AD_kOptionID_ProtectionAbsent = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4DECODER_AUDIO, 0x01),
+
+    /**
+     * Set the number of frames per block */
+    M4AD_kOptionID_NbFramePerBloc    = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4DECODER_AUDIO, 0x02),
+
+    /**
+     * Set the AAC decoder user parameters */
+    M4AD_kOptionID_UserParam        = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4DECODER_AUDIO, 0x03),
+
+
+    /**
+     * Get the AAC stream type */
+    M4AD_kOptionID_StreamType        = M4OSA_OPTION_ID_CREATE(M4_READ , M4DECODER_AUDIO, 0x10),
+
+    /**
+     * Get the number of used bytes in the latest decode
+     (used only when decoding AAC from ADIF file) */
+    M4AD_kOptionID_UsedBytes        = M4OSA_OPTION_ID_CREATE(M4_READ , M4DECODER_AUDIO, 0x11)
+
+} M4AD_OptionID;
+
+
+
+typedef enum
+{
+    M4_kUnknown = 0,    /* Unknown stream type */
+    M4_kAAC,            /* M4_kAAC_MAIN or M4_kAAC_LC or M4_kAAC_SSR or M4_kAAC_LTP    */
+    M4_kAACplus,        /* Decoder type is AAC plus */
+    M4_keAACplus        /* Decoder type is enhanced AAC plus */
+} M4_AACType;
+
+/**
+ ************************************************************************
+ * enum     M4AD_Type
+ * @brief    This enum defines the audio types used to create decoders
+ * @note    This enum is used internally by the VPS to identify a currently supported
+ *            audio decoder interface. Each decoder is registered with one of this type associated.
+ *            When a decoder instance is needed, this type is used to identify
+ *            and retrieve its interface.
+ ************************************************************************
+*/
+typedef enum
+{
+    M4AD_kTypeAMRNB = 0,
+    M4AD_kTypeAMRWB,
+    M4AD_kTypeAAC,
+    M4AD_kTypeMP3,
+    M4AD_kTypePCM,
+    M4AD_kTypeBBMusicEngine,
+    M4AD_kTypeWMA,
+    M4AD_kTypeRMA,
+    M4AD_kTypeADPCM,
+    M4AD_kType_NB  /* number of decoders, keep it as last enum entry */
+
+} M4AD_Type ;
+
+
+
+/**
+ ************************************************************************
+ * structure    M4AD_Buffer
+ * @brief        Structure to describe a buffer
+ ************************************************************************
+*/
+typedef struct
+{
+    M4OSA_MemAddr8    m_dataAddress;
+    M4OSA_UInt32    m_bufferSize;
+} M4AD_Buffer;
+
+/**
+ ************************************************************************
+ * @brief    Creates an instance of the decoder
+ * @note    Allocates the context
+ *
+ * @param    pContext:        (OUT)    Context of the decoder
+ * @param    pStreamHandler:    (IN)    Pointer to an audio stream description
+ * @param    pUserData:        (IN)    Pointer to User data
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return  M4ERR_STATE             State automaton is not applied
+ * @return    M4ERR_ALLOC                a memory allocation has failed
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+*/
+
+typedef M4OSA_ERR  (M4AD_create_fct)(M4AD_Context *pContext,
+                                     M4_AudioStreamHandler *pStreamHandler, void* pUserData);
+
+
+/**
+ ************************************************************************
+ * @brief    Destroys the instance of the decoder
+ * @note    After this call the context is invalid
+ *
+ * @param    context:    (IN)    Context of the decoder
+ *
+ * @return    M4NO_ERROR             There is no error
+ * @return  M4ERR_PARAMETER     The context is invalid (in DEBUG only)
+ ************************************************************************
+*/
+typedef M4OSA_ERR  (M4AD_destroy_fct)    (M4AD_Context context);
+
+/**
+ ************************************************************************
+ * @brief   Decodes the given audio data
+ * @note    Parses and decodes the next audio frame from the given buffer.
+ *            This function updates the input buffer size according to the amount
+ *            of data actually read.
+ *
+ * @param    context:            (IN)    Context of the decoder
+ * @param    pInputBuffer:       (IN/OUT) Input data buffer. It contains at least one audio frame.
+ *                                       The size of the buffer must be updated inside the
+ *                                       function to reflect the size of the actually decoded data.
+ *                                       (e.g. the first frame in pInputBuffer)
+ * @param   pDecodedPCMBuffer:   (OUT)   Output PCM buffer (decoded data).
+ * @param   jumping:            (IN)    M4OSA_TRUE if a jump was just done, M4OSA_FALSE otherwise.
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ ************************************************************************
+*/
+typedef M4OSA_ERR  (M4AD_step_fct)    (M4AD_Context context, M4AD_Buffer *pInputBuffer,
+                                     M4AD_Buffer *pDecodedPCMBuffer, M4OSA_Bool jumping);
+
+/**
+ ************************************************************************
+ * @brief    Gets the decoder version
+ * @note    The version is given in a M4_VersionInfo structure
+ *
+ * @param    pValue:        (OUT)        Pointer to the version structure
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return  M4ERR_PARAMETER         The given pointer is null (in DEBUG only)
+ ************************************************************************
+*/
+typedef M4OSA_ERR  (M4AD_getVersion_fct)(M4_VersionInfo* pVersionInfo);
+
+
+/**
+ ************************************************************************
+ * @brief    This function creates the AAC core decoder according to
+ *            the stream properties and to the options that may
+ *            have been set using M4AD_setOption_fct
+ * @note    Creates an instance of the AAC decoder
+ * @note    This function is used especially by the AAC decoder
+ *
+ * @param    pContext:        (IN/OUT)    Context of the decoder
+ * @param    pStreamHandler:    (IN)    Pointer to an audio stream description
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return  M4ERR_STATE             State automaton is not applied
+ * @return    M4ERR_ALLOC                a memory allocation has failed
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+*/
+typedef M4OSA_ERR  (M4AD_start_fct)    (M4AD_Context pContext);
+
+/**
+ ************************************************************************
+ * @brief    Reset the instance of the decoder
+ *
+ * @param    context:    (IN)    Context of the decoder
+ *
+ * @return    M4NO_ERROR             There is no error
+ * @return  M4ERR_PARAMETER     The context is invalid (in DEBUG only)
+ ************************************************************************
+*/
+typedef M4OSA_ERR  (M4AD_reset_fct)    (M4AD_Context context);
+
+
+/**
+ ************************************************************************
+ * @brief   Set an option value of the audio decoder
+ *
+ * @param    context:        (IN)    Context of the decoder
+ * @param    optionId:        (IN)    indicates the option to set
+ * @param    pValue:            (IN)    pointer to structure or value (allocated by user)
+ *                                  where option is stored
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4ERR_BAD_OPTION_ID        when the option ID is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4AD_setOption_fct) (M4AD_Context context,
+                                         M4OSA_OptionID optionId, M4OSA_DataOption pValue);
+
+/**
+ ************************************************************************
+ * @brief   Get an option value of the audio decoder
+ *
+ * @param    context:        (IN)    Context of the decoder
+ * @param    optionId:        (IN)    indicates the option to get
+ * @param    pValue:            (OUT)    pointer to structure or value (allocated by user)
+ *                                  where option is stored
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4ERR_BAD_OPTION_ID        when the option ID is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4AD_getOption_fct) (M4AD_Context context, M4OSA_OptionID optionId,
+                                         M4OSA_DataOption pValue);
+/**
+ ************************************************************************
+ * structure    M4AD_Interface
+ * @brief        This structure defines the generic audio decoder interface
+ * @note        This structure stores the pointers to functions of one audio decoder type.
+ *                The decoder type is one of the M4AD_Type
+ ************************************************************************
+*/
+typedef struct _M4AD_Interface
+{
+
+    M4AD_create_fct*        m_pFctCreateAudioDec;
+    M4AD_start_fct*            m_pFctStartAudioDec;
+    M4AD_step_fct*            m_pFctStepAudioDec;
+    M4AD_getVersion_fct*    m_pFctGetVersionAudioDec;
+    M4AD_destroy_fct*        m_pFctDestroyAudioDec;
+    M4AD_reset_fct*            m_pFctResetAudioDec;
+    M4AD_setOption_fct*        m_pFctSetOptionAudioDec;
+    M4AD_getOption_fct*        m_pFctGetOptionAudioDec;
+
+} M4AD_Interface;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*__M4AD_COMMON_H__*/
+
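A hedged sketch of driving a decoder through this generic interface once an M4AD_Interface pointer has been obtained from one of the shells; pDecIf, pStreamHandler and the buffer contents are assumed to be set up by the caller.

/* Sketch only: error handling is reduced to early returns. */
static M4OSA_ERR exampleDecodeOneFrame(M4AD_Interface *pDecIf,
                                       M4_AudioStreamHandler *pStreamHandler,
                                       M4AD_Buffer *pInputBuffer,
                                       M4AD_Buffer *pDecodedPCMBuffer)
{
    M4AD_Context decCtx = M4OSA_NULL;
    M4OSA_ERR err;

    err = pDecIf->m_pFctCreateAudioDec(&decCtx, pStreamHandler, M4OSA_NULL);
    if (M4NO_ERROR != err)
        return err;

    /* pInputBuffer describes one encoded frame; pDecodedPCMBuffer receives the PCM output. */
    err = pDecIf->m_pFctStepAudioDec(decCtx, pInputBuffer, pDecodedPCMBuffer, M4OSA_FALSE);

    pDecIf->m_pFctDestroyAudioDec(decCtx);
    return err;
}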
diff --git a/libvideoeditor/vss/common/inc/M4AD_Null.h b/libvideoeditor/vss/common/inc/M4AD_Null.h
new file mode 100755
index 0000000..d00d6d7
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4AD_Null.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+*************************************************************************
+ * @file    M4AD_Null.h
+ * @brief    Implementation of the decoder public interface that does nothing
+ * @note    This file defines the getInterface function.
+*************************************************************************
+*/
+#ifndef __M4AD_NULL_H__
+#define __M4AD_NULL_H__
+
+#include "M4AD_Common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ ************************************************************************
+ * @brief Retrieves the interface implemented by the decoder
+ * @param pDecoderType        : pointer to an M4AD_Type (allocated by the caller)
+ *                              that will be filled with the decoder type supported by this decoder
+ * @param pDecoderInterface   : address of a pointer that will be set to the interface implemented
+ *                              by this decoder. The interface is a structure allocated by the
+ *                              function and must be freed by the caller.
+ *
+ * @return : M4NO_ERROR  if OK
+ *           M4ERR_ALLOC if allocation failed
+ ************************************************************************
+*/
+M4OSA_ERR M4AD_NULL_getInterface( M4AD_Type *pDecoderType, M4AD_Interface **pDecoderInterface);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__M4AD_NULL_H__*/
+
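For illustration, a minimal sketch of retrieving and releasing the NULL decoder interface; the M4OSA_free cast mirrors the allocation conventions used elsewhere in these files and is an assumption here.

/* Sketch only. */
static M4OSA_ERR exampleGetNullDecoder(void)
{
    M4AD_Type decoderType;
    M4AD_Interface *pDecIf = M4OSA_NULL;
    M4OSA_ERR err;

    err = M4AD_NULL_getInterface(&decoderType, &pDecIf);
    if (M4NO_ERROR != err)
        return err;                     /* typically M4ERR_ALLOC */

    /* ... register pDecIf under 'decoderType' and use it; the caller owns the structure ... */

    M4OSA_free((M4OSA_MemAddr32)pDecIf);
    return M4NO_ERROR;
}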
diff --git a/libvideoeditor/vss/common/inc/M4AIR_API.h b/libvideoeditor/vss/common/inc/M4AIR_API.h
new file mode 100755
index 0000000..954b77a
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4AIR_API.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file   M4AIR_API.h
+ * @brief  Area of Interest Resizer  API
+ * @note
+*************************************************************************
+*/
+#ifndef M4AIR_API_H
+#define M4AIR_API_H
+
+/******************************* INCLUDES *******************************/
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Mutex.h"
+#include "M4OSA_Memory.h"
+#include "M4VIFI_FiltersAPI.h"
+#include "M4Common_types.h"
+
+/************************ M4AIR TYPES DEFINITIONS ***********************/
+
+/**
+ ******************************************************************************
+ * enum        M4AIR_InputFormatType
+ * @brief     The following enumeration lists the different input formats accepted by the AIR.
+ * To be available, the associated compilation flag must be defined, else,
+ * the AIR will return an error (compilation flag : M4AIR_XXXXXX_FORMAT_SUPPORTED).
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4AIR_kYUV420P,
+    M4AIR_kYUV420AP,
+    M4AIR_kJPG
+}M4AIR_InputFormatType ;
+
+
+/**
+ ******************************************************************************
+ * struct         M4AIR_Coordinates
+ * @brief     The following structure is used to retrieve X and Y coordinates in a given picture.
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_UInt32    m_x;    /**< X coordinate */
+    M4OSA_UInt32    m_y;    /**< Y coordinate */
+}M4AIR_Coordinates;
+
+
+/**
+ ******************************************************************************
+ * struct         M4AIR_Size
+ * @brief     The following structure is used to retrieve the dimension of a given picture area.
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_UInt32    m_width;    /**< Width */
+    M4OSA_UInt32    m_height;    /**< Height */
+}M4AIR_Size;
+
+
+/**
+ ******************************************************************************
+ * struct         M4AIR_Params
+ * @brief     The following structure is used to retrieve the parameters needed to get a resized ROI (Region of interest).
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4AIR_Coordinates        m_inputCoord;            /**< X and Y position in the input of the first interesting pixel (top-left) */
+    M4AIR_Size                m_inputSize;            /**< Size of the interesting area inside input (width and height)*/
+    M4AIR_Size                m_outputSize;            /**< Size of the output */
+    M4OSA_Bool                m_bOutputStripe;            /**< Flag to know if we will have to provide output per stripe or not */
+    M4COMMON_Orientation        m_outputOrientation;    /**< Desired orientation of the AIR output */
+}M4AIR_Params;
+
+
+
+
+/*********************** M4AIR ERRORS DEFINITIONS **********************/
+
+/* This error means that the requested video format is not supported. */
+#define M4ERR_AIR_FORMAT_NOT_SUPPORTED    M4OSA_ERR_CREATE(M4_ERR,M4AIR,0x000001)
+
+/* This error means that the input or output size is incorrect */
+#define M4ERR_AIR_ILLEGAL_FRAME_SIZE    M4OSA_ERR_CREATE(M4_ERR,M4AIR,0x000002)
+
+
+
+/********************** M4AIR PUBLIC API DEFINITIONS ********************/
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat);
+ * @brief        This function initializes an instance of the AIR.
+ * @param    pContext:    (IN/OUT) Address of the context to create
+ * @param    inputFormat:    (IN) input format type.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only). Invalid formatType
+ * @return    M4ERR_ALLOC: No more memory is available
+ ******************************************************************************
+*/
+M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat);
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
+ * @brief        This function destroys an instance of the AIR component
+ * @param    pContext:    (IN) Context identifying the instance to destroy
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ * @return    M4ERR_STATE: Internal state is incompatible with this function call.
+ ******************************************************************************
+*/
+M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext);
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
+ * @brief    This function will configure the AIR.
+ * @note    It will set the input and output coordinates and sizes,
+ *            and indicate whether processing will be done in stripe mode or not.
+ *            In case an M4AIR_get in stripe mode was ongoing, it will cancel this previous
+ *            processing and reset the get process.
+ * @param    pContext:                (IN) Context identifying the instance
+ * @param    pParams->m_bOutputStripe:(IN) Stripe mode.
+ * @param    pParams->m_inputCoord:    (IN) X,Y coordinates of the first valid pixel in input.
+ * @param    pParams->m_inputSize:    (IN) input ROI size.
+ * @param    pParams->m_outputSize:    (IN) output size.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_ALLOC: No more memory space to add a new effect.
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ * @return    M4ERR_AIR_FORMAT_NOT_SUPPORTED: the requested input format is not supported.
+ ******************************************************************************
+*/
+M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams);
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
+ * @brief    This function will provide the requested resized area of interest according to
+ *            settings provided in M4AIR_configure.
+ * @note    In case the input format type is JPEG, the input plane(s)
+ *            in pIn are not used. In normal mode, the dimensions specified in the output plane(s)
+ *            structure must be the same as the ones specified in M4AIR_configure. In stripe mode,
+ *            only the width will be the same, the height will be taken as the stripe height
+ *            (typically 16).
+ *            In normal mode, this function is called once to get the full output picture.
+ *            In stripe mode, it is called for each stripe until the whole picture has been
+ *            retrieved, and the position of the output stripe in the output picture is
+ *            internally incremented at each step.
+ *            Any call to M4AIR_configure during the stripe process will reset it to the
+ *            beginning of the output picture.
+ * @param    pContext:    (IN) Context identifying the instance
+ * @param    pIn:            (IN) Plane structure containing input Plane(s).
+ * @param    pOut:        (IN/OUT)  Plane structure containing output Plane(s).
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_ALLOC: No more memory space to add a new effect.
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ ******************************************************************************
+*/
+M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut);
+
+
+
+#endif /* M4AIR_API_H */
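A hedged end-to-end sketch of the AIR call sequence described above; plane setup is omitted and the orientation value is a placeholder, since the concrete M4COMMON_Orientation constants are defined elsewhere.

/* Sketch only: resizes a 320x240 YUV420 region of interest down to 160x120, non-stripe mode. */
static M4OSA_ERR exampleResize(M4VIFI_ImagePlane *pIn, M4VIFI_ImagePlane *pOut)
{
    M4OSA_Context airCtx = M4OSA_NULL;
    M4AIR_Params params;
    M4OSA_ERR err;

    err = M4AIR_create(&airCtx, M4AIR_kYUV420P);
    if (M4NO_ERROR != err)
        return err;

    params.m_inputCoord.m_x      = 0;
    params.m_inputCoord.m_y      = 0;
    params.m_inputSize.m_width   = 320;
    params.m_inputSize.m_height  = 240;
    params.m_outputSize.m_width  = 160;
    params.m_outputSize.m_height = 120;
    params.m_bOutputStripe       = M4OSA_FALSE;
    params.m_outputOrientation   = (M4COMMON_Orientation)0;   /* placeholder orientation value */

    err = M4AIR_configure(airCtx, &params);
    if (M4NO_ERROR == err)
    {
        /* pIn/pOut describe the YUV420 input and output planes; their setup is the caller's job. */
        err = M4AIR_get(airCtx, pIn, pOut);
    }

    M4AIR_cleanUp(airCtx);
    return err;
}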
diff --git a/libvideoeditor/vss/common/inc/M4AMRR_CoreReader.h b/libvideoeditor/vss/common/inc/M4AMRR_CoreReader.h
new file mode 100755
index 0000000..8f0ca62
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4AMRR_CoreReader.h
@@ -0,0 +1,252 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file        M4AMRR_CoreReader.h
+ * @brief        Implementation of AMR parser
+ * @note        This file contains the API def. for AMR Parser.
+ ******************************************************************************
+*/
+#ifndef __M4AMR_COREREADER_H__
+#define __M4AMR_COREREADER_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "M4OSA_Types.h"
+#include "M4OSA_FileReader.h"
+#include "M4SYS_Stream.h"
+#include "M4SYS_AccessUnit.h"
+#include "M4OSA_Time.h"
+#include "M4TOOL_VersionInfo.h"
+
+/**
+ ******************************************************************************
+ * AMR reader Errors & Warnings definition
+ ******************************************************************************
+*/
+#define M4ERR_AMR_INVALID_FRAME_TYPE    M4OSA_ERR_CREATE(M4_ERR,M4AMR_READER, 0x000001)
+#define M4ERR_AMR_NOT_COMPLIANT    M4OSA_ERR_CREATE(M4_ERR,M4AMR_READER, 0x000002)
+
+/**
+ ******************************************************************************
+ * enumeration    M4AMRR_State
+ * @brief        This enum defines the AMR reader states
+ * @note        These states are used internally, but can be retrieved from outside the reader.
+ ******************************************************************************
+*/
+typedef enum{
+    M4AMRR_kOpening    = 0x0100,
+    M4AMRR_kOpened    = 0x0101,
+    M4AMRR_kReading = 0x0200,
+    M4AMRR_kReading_nextAU = 0x0201,
+    M4AMRR_kClosed = 0x300
+}M4AMRR_State;
+
+/**
+*******************************************************************************
+* M4OSA_ERR M4AMRR_openRead (M4OSA_Context* pContext, M4OSA_Void* pFileDescriptor,
+*                               M4OSA_FileReadPointer* pFileFunction);
+* @brief    M4AMRR_openRead parses the metadata of the AMR file and allocates the data structure
+* @note        This function opens the file and creates a context for the AMR parser.
+*            - sets the context to null if an error occurred.
+* @param    pContext(OUT)        : AMR Reader context allocated in the function
+* @param    pFileDescriptor(IN)  : File descriptor of the input file
+* @param    pFileFunction(IN)    : pointer to the file functions used for file access
+*
+* @returns    M4NO_ERROR        : There is no error
+* @returns    M4ERR_PARAMETER    : pContext and/or pFileDescriptor is NULL
+* @returns    M4ERR_ALLOC        : Memory allocation failed
+* @returns    M4ERR_FILE_NOT_FOUND : file cannot be found
+* @returns    M4AMRR_ERR_AMR_NOT_COMPLIANT : The input is not an AMR file
+* @returns    M4OSA_FILE        : See OSAL file Spec. for details.
+*******************************************************************************
+*/
+M4OSA_ERR M4AMRR_openRead (M4OSA_Context* pContext, M4OSA_Void* pFileDescriptor,
+                            M4OSA_FileReadPointer* pFileFunction);
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_getNextStream(M4OSA_Context Context, M4SYS_StreamDescription* pStreamDesc );
+* @brief    Reads the next available stream in the file
+* @note        Get the stream description of the stream.
+*            - This function assumes that there is only one stream in an AMR file.
+* @param    Context(IN/OUT)    : AMR Reader context
+* @param    pStreamDesc(OUT): Description of the next read stream
+*
+* @returns     M4NO_ERROR        : There is no error
+* @returns     M4ERR_PARAMETER    : at least one parameter is NULL
+* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
+* @returns     M4ERR_ALLOC        : Memory allocation failed
+* @returns     M4ERR_STATE        : this function cannot be called in this state.
+* @returns     M4AMRR_WAR_NO_MORE_STREAM : There are no more streams in the file.
+******************************************************************************
+*/
+
+M4OSA_ERR M4AMRR_getNextStream(M4OSA_Context Context, M4SYS_StreamDescription* pStreamDesc );
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_startReading(M4OSA_Context Context, M4SYS_StreamID* pStreamIDs );
+* @brief    Prepares the AMR reading of the specified stream IDs
+* @note        This function changes the state of the reader to reading.
+* @param    Context(IN/OUT)    : AMR Reader context
+* @param    pStreamIDs(IN)    : Array of stream Ids to be prepared.
+*
+* @returns     M4NO_ERROR        : There is no error
+* @returns     M4ERR_PARAMETER    : at least one parameter is NULL
+* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
+* @returns     M4ERR_ALLOC        : Memory allocation failed
+* @returns     M4ERR_STATE        : this function cannot be called in this state.
+* @returns     M4ERR_BAD_STREAM_ID    : At least one of the stream IDs does not exist.
+******************************************************************************
+*/
+M4OSA_ERR M4AMRR_startReading(M4OSA_Context Context, M4SYS_StreamID* pStreamIDs );
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_nextAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu);
+* @brief    Reads the next access unit of the specified stream
+* @note        This function allocates the memory for the dataAddress field and copies the data into it.
+*            -The application should not free the dataAddress pointer.
+* @param    Context(IN/OUT)    : AMR Reader context
+* @param    StreamID(IN)    : Selects the stream
+* @param    pAu(IN/OUT)        : Access Unit
+*
+* @returns    M4NO_ERROR        : There is no error
+* @returns     M4ERR_PARAMETER    : at least one parameter is NULL
+* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
+* @returns     M4ERR_ALLOC        : Memory allocation failed
+* @returns     M4ERR_STATE        : this function cannot be called in this state.
+* @returns     M4ERR_BAD_STREAM_ID    : At least one of the stream IDs does not exist.
+* @returns     M4WAR_NO_DATA_YET    : there is not enough data in the stream for a new access unit
+* @returns     M4WAR_END_OF_STREAM    : There are no more access units in the stream
+* @returns     M4ERR_AMR_INVALID_FRAME_TYPE : the current frame has no valid frame type.
+******************************************************************************
+*/
+M4OSA_ERR M4AMRR_nextAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu);
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_freeAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu);
+* @brief    Notifies the AMR reader that the application will no longer use the "AU"
+* @note        This function frees the memory pointed to by the pAu->dataAddress pointer
+*            -Changes the state of the reader back to reading.
+* @param    Context(IN/OUT)    : AMR Reader context
+* @param    StreamID(IN)    : Selects the stream
+* @param    pAu(IN)            : Access Unit
+*
+* @returns     M4NO_ERROR        : There is no error
+* @returns     M4ERR_PARAMETER    : at least one parameter is NULL
+* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
+* @returns     M4ERR_ALLOC        : Memory allocation failed
+* @returns     M4ERR_STATE        : this function cannot be called in this state.
+* @returns     M4ERR_BAD_STREAM_ID    : At least one of the stream IDs does not exist.
+******************************************************************************
+*/
+M4OSA_ERR M4AMRR_freeAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu);
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_seek(M4OSA_Context Context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
+*                        M4SYS_SeekAccessMode    seekMode, M4OSA_Time* pObtainCTS);
+* @brief    The function seeks to the targeted time in the given stream, selected by stream ID.
+* @note        Each frame has a duration of 20 ms; the function builds the seek table and positions
+*            the file pointer at the start of the required AU.
+* @param    Context(IN/OUT)    : AMR Reader context
+* @param    pStreamID(IN)    : Array of stream IDs.
+* @param    time(IN)        : targeted time
+* @param    seekMode(IN)    : Selects the seek mode
+* @param    pObtainCTS(OUT)    : Returned time nearest to target.
+*
+* @returns     M4NO_ERROR        : There is no error
+* @returns     M4ERR_PARAMETER    : at least one parameter is NULL
+* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
+* @returns     M4ERR_ALLOC        : Memory allocation failed
+* @returns     M4ERR_STATE        : this function cannot be called in this state.
+* @returns     M4ERR_BAD_STREAM_ID    : At least one of the stream IDs does not exist.
+* @returns     M4WAR_INVALID_TIME    : time cannot be reached.
+******************************************************************************
+*/
+M4OSA_ERR M4AMRR_seek(M4OSA_Context Context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
+                         M4SYS_SeekAccessMode    seekMode, M4OSA_Time* pObtainCTS);
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_closeRead(M4OSA_Context Context);
+* @brief    AMR reader closes the file
+* @param    Context(IN/OUT)    : AMR Reader context
+* @returns     M4NO_ERROR        : There is no error
+* @returns     M4ERR_PARAMETER    : at least one parameter is NULL
+* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
+* @returns     M4ERR_ALLOC        : Memory allocation failed
+* @returns     M4ERR_STATE        : this function cannot be called in this state.
+******************************************************************************
+*/
+M4OSA_ERR M4AMRR_closeRead(M4OSA_Context Context);
+
+/**
+******************************************************************************
+* M4OSA_ERR M4AMRR_getState(M4OSA_Context Context, M4AMRR_State* pState, M4SYS_StreamID streamId);
+* @brief    Gets the current state of the AMR reader
+* @param    Context(IN/OUT)    : AMR Reader context
+* @param    pState(OUT)        : Core AMR reader state
+* @param    streamId(IN)    : Selects the stream (0 for all)
+*
+* @returns     M4NO_ERROR            :    There is no error
+* @returns     M4ERR_PARAMETER        :    at least one parameter is NULL
+* @returns     M4ERR_BAD_CONTEXT    :    The provided context is not valid
+* @returns     M4ERR_BAD_STREAM_ID    :    At least one of the stream IDs does not exist.
+******************************************************************************
+*/
+M4OSA_ERR M4AMRR_getState(M4OSA_Context Context, M4AMRR_State* pState, M4SYS_StreamID streamId);
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AMRR_getVersion    (M4_VersionInfo *pVersion)
+ * @brief    Gets the current version of the AMR reader
+ * @param    pVersion(OUT)    : the structure that stores the version numbers
+ *
+ * @returns     M4NO_ERROR            :    There is no error
+ * @returns     M4ERR_PARAMETER        :    pVersion is NULL
+ ******************************************************************************
+*/
+M4OSA_ERR M4AMRR_getVersion    (M4_VersionInfo *pVersion);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AMRR_getmaxAUsize    (M4OSA_Context Context, M4OSA_UInt32 *pMaxAuSize)
+ * @brief    Computes the maximum access unit size of a stream
+ *
+ * @param    Context        (IN)  Context of the reader
+ * @param    pMaxAuSize    (OUT) Maximum Access Unit size in the stream
+ *
+ * @return    M4NO_ERROR: No error
+ * @return    M4ERR_PARAMETER: One of the input pointers is M4OSA_NULL (Debug only)
+ ******************************************************************************
+*/
+M4OSA_ERR M4AMRR_getmaxAUsize(M4OSA_Context Context, M4OSA_UInt32 *pMaxAuSize);
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus*/
+#endif /*__M4AMR_COREREADER_H__*/
+
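For reference, a minimal usage sketch (in C, not part of the original sources) of the AMR core reader declared above. It follows the call sequence implied by M4AMRR_State: openRead, getNextStream, startReading, a nextAU/freeAU loop, then closeRead. The helper name, the streamID field access on M4SYS_StreamDescription, and the pre-filled OSAL file-read table are assumptions of the sketch.

/* Sketch only: assumes M4SYS_StreamDescription exposes a streamID field and that
 * pFileFuncs was filled with the platform OSAL file-read functions beforehand. */
static M4OSA_ERR readAllAmrAccessUnits(M4OSA_Void *pFileDescriptor,
                                       M4OSA_FileReadPointer *pFileFuncs)
{
    M4OSA_Context           ctx = M4OSA_NULL;
    M4SYS_StreamDescription streamDesc;
    M4SYS_AccessUnit        au;
    M4OSA_ERR               err;

    err = M4AMRR_openRead(&ctx, pFileDescriptor, pFileFuncs);
    if (M4NO_ERROR != err)
        return err;

    /* AMR files carry a single stream (see the M4AMRR_getNextStream note above) */
    err = M4AMRR_getNextStream(ctx, &streamDesc);
    if (M4NO_ERROR == err)
        err = M4AMRR_startReading(ctx, &streamDesc.streamID);

    /* Read until the end of the stream; every AU must be released with freeAU */
    while (M4NO_ERROR == err)
    {
        err = M4AMRR_nextAU(ctx, streamDesc.streamID, &au);
        if (M4NO_ERROR != err)
            break;                  /* M4WAR_END_OF_STREAM terminates the loop */
        /* ... consume au.dataAddress / au.size here ... */
        err = M4AMRR_freeAU(ctx, streamDesc.streamID, &au);
    }

    M4AMRR_closeRead(ctx);
    return (M4WAR_END_OF_STREAM == err) ? M4NO_ERROR : err;
}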
diff --git a/libvideoeditor/vss/common/inc/M4CLOCK.h b/libvideoeditor/vss/common/inc/M4CLOCK.h
new file mode 100755
index 0000000..963b135
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4CLOCK.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file   M4CLOCK.h
+ * @brief  Clock and sleep function types
+ *
+*************************************************************************
+*/
+#ifndef __M4CLOCK_H__
+#define __M4CLOCK_H__
+
+#include "M4OSA_Types.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/**
+ * Type of a function that returns time.
+ */
+typedef M4OSA_Double    (*M4CLOCK_getTime_fct) ( M4OSA_Void* pContext ) ;
+
+/**
+ * Type of a function that suspends a task for a certain amount of time.
+ */
+typedef M4OSA_Void        (*M4CLOCK_sleep_fct)    ( M4OSA_Void* pContext,\
+                                                     M4OSA_UInt32 durationInMs ) ;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __M4CLOCK_H__ */
+
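As an illustration of the two callback types above, one possible POSIX implementation; the header does not mandate a time unit, so milliseconds are assumed here, and the pContext argument is simply ignored.

#include <time.h>
#include <unistd.h>

/* Assumption: callers expect milliseconds; pContext is unused in this sketch. */
static M4OSA_Double myGetTimeMs(M4OSA_Void *pContext)
{
    struct timespec ts;
    (void)pContext;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (M4OSA_Double)ts.tv_sec * 1000.0 + (M4OSA_Double)ts.tv_nsec / 1.0e6;
}

static M4OSA_Void mySleepMs(M4OSA_Void *pContext, M4OSA_UInt32 durationInMs)
{
    (void)pContext;
    usleep(durationInMs * 1000);   /* adequate for a sketch; may overflow for very long sleeps */
}

/* Handing the callbacks to a component that consumes them: */
static M4CLOCK_getTime_fct pGetTime = myGetTimeMs;
static M4CLOCK_sleep_fct   pSleep   = mySleepMs;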
diff --git a/libvideoeditor/vss/common/inc/M4Common_types.h b/libvideoeditor/vss/common/inc/M4Common_types.h
new file mode 100755
index 0000000..0ae59ee
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4Common_types.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file   M4Common_types.h
+ * @brief  defines common structures
+ * @note
+ *
+ ************************************************************************
+*/
+#ifndef M4COMMON_TYPES_H
+#define M4COMMON_TYPES_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Memory.h"
+
+/**
+ ************************************************************************
+ * structure M4COMMON_MetadataType
+ ************************************************************************
+*/
+typedef enum
+{
+    M4COMMON_kUnknownMetaDataType,
+    /* Local files */
+    M4COMMON_kTagID3v1,                /**<  Metadata from TAG ID3 V1 */
+    M4COMMON_kTagID3v2,                /**<  Metadata from TAG ID3 V2 */
+    M4COMMON_kASFContentDesc,        /**<  Metadata from ASF content description  */
+
+    M4COMMON_k3GppAssetMovieBox,    /**<  Metadata from a 3gpp file (movie box) */
+    M4COMMON_k3GppAssetTrackBox,    /**<  Metadata from a 3gpp file (track box) */
+
+    /* Streaming */
+    M4COMMON_kMetaDataSdpSession,    /**<  Metadata from an SDP file (Session level) */
+    M4COMMON_kMetaDataSdpAudio,        /**<  Metadata from an SDP file (media audio level) */
+    M4COMMON_kMetaDataSdpVideo,        /**<  Metadata from an SDP file (media video level) */
+
+    M4COMMON_kJpegExif                /**< EXIF in JPEG */
+} M4COMMON_MetadataType;
+
+/**
+ ************************************************************************
+ * enumeration    M4COMMON_EncodingFormat
+ * @brief        Text encoding format
+ ************************************************************************
+*/
+typedef enum
+{
+    M4COMMON_kEncFormatUnknown    = 0,      /**< Unknown format                                 */
+    M4COMMON_kEncFormatASCII    = 1,        /**< ISO-8859-1. Terminated with $00                */
+    M4COMMON_kEncFormatUTF8        = 2,     /**< UTF-8 encoded Unicode. Terminated with $00     */
+    M4COMMON_kEncFormatUTF16    = 3         /**< UTF-16 encoded Unicode. Terminated with $00 00 */
+}  M4COMMON_EncodingFormat;
+
+/**
+ ************************************************************************
+ * structure    M4COMMON_String
+ * @brief        This structure defines a string attribute
+ ************************************************************************
+*/
+typedef struct
+{
+    M4OSA_Void*            m_pString;                /**< Pointer to text        */
+    M4OSA_UInt32        m_uiSize;                /**< Text size in bytes        */
+    M4COMMON_EncodingFormat    m_EncodingFormat;    /**< Text encoding format    */
+
+} M4COMMON_String;
+
+/**
+ ************************************************************************
+ * structure    M4COMMON_Buffer
+ * @brief        This structure defines a generic buffer attribute
+ ************************************************************************
+*/
+typedef struct
+{
+    M4OSA_MemAddr8         m_pBuffer;        /**< Pointer to buffer        */
+    M4OSA_UInt32        m_size;            /**< size of buffer in bytes    */
+} M4COMMON_Buffer;
+
+typedef enum
+{
+    M4COMMON_kMimeType_NONE,
+    M4COMMON_kMimeType_JPG,
+    M4COMMON_kMimeType_PNG,
+    M4COMMON_kMimeType_BMP,   /* bitmap, with header */
+    M4COMMON_kMimeType_RGB24, /* raw RGB 24 bits */
+    M4COMMON_kMimeType_RGB565, /* raw, RGB 16 bits */
+    M4COMMON_kMimeType_YUV420,
+    M4COMMON_kMimeType_MPEG4_IFrame /* RC: to support PV art */
+
+} M4COMMON_MimeType;
+
+/* picture type definition from id3v2 tag*/
+typedef enum
+{
+    M4COMMON_kPicType_Other                = 0x00,
+    M4COMMON_kPicType_32_32_Icon            = 0x01,
+    M4COMMON_kPicType_Other_Icon            = 0x02,
+    M4COMMON_kPicType_FrontCover            = 0x03,
+    M4COMMON_kPicType_BackCover            = 0x04,
+    M4COMMON_kPicType_LeafletPage            = 0x05,
+    M4COMMON_kPicType_Media                = 0x06,
+    M4COMMON_kPicType_LeadArtist            = 0x07,
+    M4COMMON_kPicType_Artist                = 0x08,
+    M4COMMON_kPicType_Conductor            = 0x09,
+    M4COMMON_kPicType_Orchestra            = 0x0A,
+    M4COMMON_kPicType_Composer            = 0x0B,
+    M4COMMON_kPicType_Lyricist            = 0x0C,
+    M4COMMON_kPicType_RecordingLocation    = 0x0D,
+    M4COMMON_kPicType_DuringRecording        = 0x0E,
+    M4COMMON_kPicType_DuringPerformance    = 0x0F,
+    M4COMMON_kPicType_MovieScreenCapture    = 0x10,
+    M4COMMON_kPicType_BrightColouredFish    = 0x11,
+    M4COMMON_kPicType_Illustration        = 0x12,
+    M4COMMON_kPicType_ArtistLogo            = 0x13,
+    M4COMMON_kPicType_StudioLogo            = 0x14
+} M4COMMON_PictureType;
+
+/**
+ ******************************************************************************
+ * enum        M4COMMON_Orientation
+ * @brief        This enum defines the possible orientation of a frame as described
+ *            in the EXIF standard.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4COMMON_kOrientationUnknown = 0,
+    M4COMMON_kOrientationTopLeft,
+    M4COMMON_kOrientationTopRight,
+    M4COMMON_kOrientationBottomRight,
+    M4COMMON_kOrientationBottomLeft,
+    M4COMMON_kOrientationLeftTop,
+    M4COMMON_kOrientationRightTop,
+    M4COMMON_kOrientationRightBottom,
+    M4COMMON_kOrientationLeftBottom
+}M4COMMON_Orientation ;
+
+/**
+ ******************************************************************************
+ * structure    M4EXIFC_Location
+ * @brief        The Image GPS location (example : 48°52.21' )
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_Float    degrees;
+    M4OSA_Float    minsec;
+} M4COMMON_Location;
+
+/**
+ ************************************************************************
+ * structure    M4COMMON_MetaDataAlbumArt
+ * @brief        This structure defines the fields of an album art
+ ************************************************************************
+*/
+typedef struct
+{
+    M4COMMON_MimeType    m_mimeType;
+    M4OSA_UInt32        m_uiSize;
+    M4OSA_Void*            m_pData;
+
+    M4COMMON_String        m_pDescription;
+
+} M4COMMON_MetaDataAlbumArt;
+
+/**
+ ************************************************************************
+ * structure    M4COMMON_MetaDataFields
+ * @brief        This structure defines fields of metadata information
+ ************************************************************************
+*/
+typedef struct
+{
+    M4COMMON_MetadataType    m_MetadataType;
+
+    /* Meta data fields */
+    M4COMMON_String    m_pTitle;            /**< Title for the media  */
+    M4COMMON_String    m_pArtist;            /**< Performer or artist */
+    M4COMMON_String    m_pAlbum;            /**< Album title for the media */
+    M4COMMON_String    m_pAuthor;            /**< Author of the media */
+    M4COMMON_String    m_pGenre;            /**< Genre (category and style) of the media */
+    M4COMMON_String    m_pDescription;        /**< Caption or description for the media */
+    M4COMMON_String    m_pCopyRights;        /**< Notice about organization holding copyright
+                                                     for the media file */
+    M4COMMON_String    m_pRecordingYear;    /**< Recording year for the media */
+    M4COMMON_String    m_pRating;            /**< Media rating */
+
+    M4COMMON_String    m_pClassification;    /**< Classification of the media */
+    M4COMMON_String    m_pKeyWords;        /**< Media keywords */
+    M4COMMON_String    m_pLocation;        /**< Location information */
+    M4COMMON_String    m_pUrl;                /**< Reference of the resource */
+
+    M4OSA_UInt8        m_uiTrackNumber;    /**< Track number for the media*/
+    M4OSA_UInt32    m_uiDuration;        /**< The track duration in milliseconds */
+
+    M4COMMON_MetaDataAlbumArt    m_albumArt;    /**< AlbumArt description */
+    M4COMMON_String                m_pMood;    /**< Mood of the media */
+
+    /**< Modifs ACO 4/12/07 : add Exif specific infos */
+    M4COMMON_String    m_pCreationDateTime;    /**< date and time original image was generated */
+    M4COMMON_String    m_pLastChangeDateTime;    /**< file change date and time */
+    M4COMMON_String    m_pManufacturer;        /**< manufacturer of image input equipment */
+    M4COMMON_String    m_pModel;                /**< model of image input equipment */
+    M4COMMON_String    m_pSoftware;            /**< software used */
+    M4COMMON_Orientation m_Orientation;        /**< Orientation of the picture */
+
+    /**< Modifs FS 29/08/08 : additional Exif info */
+    M4OSA_UInt32    m_width;            /**< image width in pixels */
+    M4OSA_UInt32    m_height;            /**< image height in pixels */
+    M4OSA_UInt32    m_thumbnailSize;    /**< size of the thumbnail */
+    M4COMMON_String    m_pLatitudeRef;        /**< Latitude reference */
+    M4COMMON_Location m_latitude;        /**< Latitude */
+    M4COMMON_String    m_pLongitudeRef;    /**< Longitude reference */
+    M4COMMON_Location m_longitude;        /**< Longitude  */
+
+} M4COMMON_MetaDataFields;
+
+
+#endif /*M4COMMON_TYPES_H*/
+
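A short illustration of how the string type above is meant to be filled: setting the title field of M4COMMON_MetaDataFields to a UTF-8 literal. The helper name and the ownership policy (the caller keeps the buffer) are assumptions of the sketch, not requirements of this header.

#include <string.h>

/* Illustrative helper: the caller retains ownership of the title buffer. */
static void setUtf8Title(M4COMMON_MetaDataFields *pMeta, const char *title)
{
    pMeta->m_pTitle.m_pString        = (M4OSA_Void *)title;
    pMeta->m_pTitle.m_uiSize         = (M4OSA_UInt32)(strlen(title) + 1); /* include the $00 terminator */
    pMeta->m_pTitle.m_EncodingFormat = M4COMMON_kEncFormatUTF8;
}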
diff --git a/libvideoeditor/vss/common/inc/M4DA_Types.h b/libvideoeditor/vss/common/inc/M4DA_Types.h
new file mode 100755
index 0000000..d41f934
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4DA_Types.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file    M4DA_Types.h
+ * @brief    Data access type definition
+ * @note    This file defines media-specific types
+ ************************************************************************
+*/
+
+#ifndef __M4DA_TYPES_H__
+#define __M4DA_TYPES_H__
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Memory.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /*__cplusplus*/
+
+/**
+ ************************************************************************
+ * enumeration    M4_StreamType
+ * @brief        Type used to describe a stream (audio or video data flow).
+ ************************************************************************
+*/
+typedef enum
+{
+    M4DA_StreamTypeUnknown                = -1,    /**< Unknown type */
+    M4DA_StreamTypeVideoMpeg4            = 0,    /**< MPEG-4 video */
+    M4DA_StreamTypeVideoH263            = 1,    /**< H263 video */
+    M4DA_StreamTypeAudioAmrNarrowBand    = 2,    /**< Amr narrow band audio */
+    M4DA_StreamTypeAudioAmrWideBand        = 3,    /**< Amr wide band audio */
+    M4DA_StreamTypeAudioAac                = 4,    /**< AAC audio */
+    M4DA_StreamTypeAudioMp3                = 5,    /**< MP3 audio */
+    M4DA_StreamTypeVideoMJpeg            = 6,    /**< MJPEG video */
+    M4DA_StreamTypeAudioPcm                = 7,    /**< Wav audio */
+    M4DA_StreamTypeAudioMidi            = 8,    /**< Midi audio */
+    M4DA_StreamTypeVideoMpeg4Avc        = 9,    /**< MPEG-4 AVC video (h264) */
+    M4DA_StreamTypeAudioAacADTS            = 10,    /**< AAC ADTS audio */
+    M4DA_StreamTypeAudioAacADIF            = 11,    /**< AAC ADIF audio */
+    M4DA_StreamTypeAudioWma                = 12,    /**< WMA audio */
+    M4DA_StreamTypeVideoWmv                = 13,    /**< WMV video */
+    M4DA_StreamTypeAudioReal            = 14,   /**< REAL audio */
+    M4DA_StreamTypeVideoReal            = 15,   /**< REAL video */
+    M4DA_StreamTypeAudioEvrc            = 16,   /**< Evrc audio */
+    M4DA_StreamTypeTimedText            = 20,    /**< Timed Text */
+    M4DA_StreamTypeAudioBba                = 21,    /**< Beat Brew audio format */
+    M4DA_StreamTypeAudioSmaf            = 22,    /**< SMAF audio */
+    M4DA_StreamTypeAudioImelody            = 23,    /**< IMELODY audio*/
+    M4DA_StreamTypeAudioXmf                = 24,    /**< XMF audio */
+    M4DA_StreamTypeAudioBpc                = 25,    /**< BPC audio */
+
+    /* ADPCM */
+    M4DA_StreamTypeAudioADPcm            = 26    /**< ADPCM */
+
+} M4_StreamType;
+
+/**
+ ************************************************************************
+ * structure    M4_StreamHandler
+ * @brief        Base structure to describe a stream.
+ ************************************************************************
+*/
+typedef struct
+{
+    M4_StreamType    m_streamType;                /**< Stream type */
+    M4OSA_UInt32    m_streamId;                    /**< Stream Id (unique number defining
+                                                        the stream) */
+    M4OSA_Int64        m_duration;                    /**< Duration of the stream in
+                                                            milliseconds */
+    M4OSA_UInt32    m_averageBitRate;            /**< Average bitrate in kb/s */
+    M4OSA_UInt32    m_maxAUSize;                /**< Maximum size of an Access Unit */
+    M4OSA_UInt8*    m_pDecoderSpecificInfo;        /**< Pointer on specific information required
+                                                        to create a decoder */
+    M4OSA_UInt32    m_decoderSpecificInfoSize;    /**< Size of the specific information
+                                                         pointer above */
+    void*            m_pUserData;                /**< Pointer on User Data
+                                                    (initialized by the user) */
+    M4OSA_UInt32    m_structSize;                /**< Size of the structure in bytes */
+    M4OSA_Bool      m_bStreamIsOK;              /**< Flag to know if stream has no errors after
+                                                        parsing is finished */
+    M4OSA_UInt8*    m_pH264DecoderSpecificInfo;        /**< Pointer on specific information
+                                                            required to create a decoder */
+    M4OSA_UInt32    m_H264decoderSpecificInfoSize;    /**< Size of the specific
+                                                            information pointer above */
+    // MPEG4 & AAC decoders require ESDS info
+    M4OSA_UInt8*    m_pESDSInfo;                /**< Pointer on MPEG4 or AAC ESDS box */
+    M4OSA_UInt32    m_ESDSInfoSize;             /**< Size of the MPEG4 or AAC ESDS box */
+} M4_StreamHandler;
+
+/**
+ ************************************************************************
+ * structure    M4_VideoStreamHandler
+ * @brief        Extended structure to describe a video stream.
+ ************************************************************************
+*/
+typedef struct
+{
+    M4_StreamHandler    m_basicProperties;        /**< Audio-Video stream common parameters */
+    M4OSA_UInt32        m_videoWidth;            /**< Width of the video in the stream */
+    M4OSA_UInt32        m_videoHeight;            /**< Height of the video in the stream */
+    M4OSA_Float            m_averageFrameRate;        /**< Average frame rate of the video
+                                                            in the stream */
+    M4OSA_UInt32        m_structSize;            /**< Size of the structure in bytes */
+} M4_VideoStreamHandler;
+
+/**
+ ************************************************************************
+ * structure    M4_AudioStreamHandler
+ * @brief        Extended structure to describe an audio stream.
+ ************************************************************************
+*/
+typedef struct
+{
+    M4_StreamHandler    m_basicProperties;        /**< Audio-Video stream common parameters */
+    M4OSA_UInt32        m_nbChannels;            /**< Number of channels in the audio stream
+                                                        (1-mono, 2-stereo) */
+    M4OSA_UInt32        m_byteFrameLength;        /**< Size of frame samples in bytes */
+    M4OSA_UInt32        m_byteSampleSize;        /**< Number of bytes per sample */
+    M4OSA_UInt32        m_samplingFrequency;    /**< Sample frequency in kHz */
+    M4OSA_UInt32        m_structSize;            /**< Size of the structure in bytes */
+} M4_AudioStreamHandler;
+
+#ifdef M4VPS_SUPPORT_TTEXT
+
+/**
+ ************************************************************************
+ * structure    M4_TextStreamHandler
+ * @brief        Extended structure to describe a text stream.
+ ************************************************************************
+*/
+typedef struct
+{
+    M4_StreamHandler    m_basicProperties;    /**< Audio-Video stream common parameters */
+    M4OSA_UInt32        m_trackWidth;        /**< Width of the video in the stream */
+    M4OSA_UInt32        m_trackHeight;        /**< Height of the video in the stream */
+    M4OSA_UInt32        m_trackXpos;        /**< X position of the text track in video area */
+    M4OSA_UInt32        m_trackYpos;        /**< Y position of the text track in video area */
+    M4OSA_UInt8            back_col_rgba[4];    /**< the background color in RGBA */
+    M4OSA_UInt16        uiLenght;            /**< the string length in bytes */
+    M4OSA_UInt32        disp_flag;            /**< the way text will be displayed */
+    M4OSA_UInt8            horiz_justif;        /**< the horizontal justification of the text */
+    M4OSA_UInt8            verti_justif;        /**< the vertical justification of the text */
+    /* style */
+    M4OSA_UInt16        styl_start_char;    /**< the first character impacted by style */
+    M4OSA_UInt16        styl_end_char;        /**< the last character impacted by style */
+    M4OSA_UInt16        fontID;                /**< ID of the font */
+    M4OSA_UInt8            face_style;            /**< the text face-style: bold, italic,
+                                                         underlined, plain(default) */
+    M4OSA_UInt8            font_size;            /**< size in pixel of font */
+    M4OSA_UInt8            text_col_rgba[4];    /**< the text color in RGBA */
+    /* box */
+    M4OSA_UInt16        box_top;         /**< the top position of text box in the track area */
+    M4OSA_UInt16        box_left;        /**< the left position of text box in the track area */
+    M4OSA_UInt16        box_bottom;      /**< the bottom position of text box in the track area */
+    M4OSA_UInt16        box_right;       /**< the right position of text box in the track area */
+    M4OSA_UInt32        m_structSize;    /**< Size of the structure in bytes */
+} M4_TextStreamHandler;
+
+#endif /*M4VPS_SUPPORT_TTEXT*/
+
+/**
+ ************************************************************************
+ * structure    M4_AccessUnit
+ * @brief        Structure to describe an access unit.
+ ************************************************************************
+*/
+typedef struct
+{
+  M4OSA_UInt32            m_streamID;       /**< Id of the stream to get an AU from */
+  M4OSA_MemAddr8        m_dataAddress;      /**< Pointer to a memory area with the encoded data */
+  M4OSA_UInt32            m_size;           /**< Size of the dataAddress area */
+  M4OSA_Double            m_CTS;            /**< Composition Time Stamp for the Access Unit */
+  M4OSA_Double            m_DTS ;           /**< Decoded Time Stamp for the Access Unit */
+  M4OSA_UInt8            m_attribute;       /**< RAP information & AU corrupted */
+  M4OSA_UInt32            m_maxsize;        /**< Maximum size of the AU */
+  M4OSA_UInt32            m_structSize;     /**< Structure size */
+} M4_AccessUnit;
+
+#ifdef __cplusplus
+}
+#endif /*__cplusplus*/
+
+#endif /* __M4DA_TYPES_H__ */
+
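Since m_basicProperties is the first member of the extended handlers above, a generic M4_StreamHandler pointer is typically down-cast according to m_streamType. A minimal sketch of that pattern (the function itself is illustrative):

/* Sketch only: the down-casts are valid because m_basicProperties is the first member. */
static void inspectStream(const M4_StreamHandler *pStream)
{
    switch (pStream->m_streamType)
    {
        case M4DA_StreamTypeVideoMpeg4:
        case M4DA_StreamTypeVideoH263:
        case M4DA_StreamTypeVideoMpeg4Avc:
        {
            const M4_VideoStreamHandler *pVideo = (const M4_VideoStreamHandler *)pStream;
            (void)pVideo;   /* e.g. pVideo->m_videoWidth x pVideo->m_videoHeight @ m_averageFrameRate */
            break;
        }
        case M4DA_StreamTypeAudioAmrNarrowBand:
        case M4DA_StreamTypeAudioAac:
        case M4DA_StreamTypeAudioMp3:
        {
            const M4_AudioStreamHandler *pAudio = (const M4_AudioStreamHandler *)pStream;
            (void)pAudio;   /* e.g. pAudio->m_nbChannels, pAudio->m_samplingFrequency */
            break;
        }
        default:
            break;
    }
}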
diff --git a/libvideoeditor/vss/common/inc/M4DECODER_Common.h b/libvideoeditor/vss/common/inc/M4DECODER_Common.h
new file mode 100755
index 0000000..889efd5
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4DECODER_Common.h
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file   M4DECODER_Common.h
+ * @brief  Shell Decoder common interface declaration
+ * @note   This file declares the common interfaces that decoder shells must implement
+ *
+ ************************************************************************
+*/
+#ifndef __M4DECODER_COMMON_H__
+#define __M4DECODER_COMMON_H__
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_OptionID.h"
+#include "M4OSA_CoreID.h"
+
+#include "M4READER_Common.h"
+#include "M4VIFI_FiltersAPI.h"
+
+#include "M4_Utils.h"
+
+/* ----- Errors and Warnings ----- */
+
+/**
+ * Warning: there is no new decoded frame to render since the last rendering
+ */
+#define M4WAR_VIDEORENDERER_NO_NEW_FRAME M4OSA_ERR_CREATE(M4_WAR, M4DECODER_COMMON, 0x0001)
+/**
+ * Warning: the deblocking filter is not implemented
+ */
+#define M4WAR_DEBLOCKING_FILTER_NOT_IMPLEMENTED M4OSA_ERR_CREATE(M4_WAR, M4DECODER_COMMON,\
+                                                                     0x000002)
+
+
+/* Error: Stream H263 profiles (other than  0) are not supported */
+#define M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED            M4OSA_ERR_CREATE(M4_ERR,\
+                                                                 M4DECODER_MPEG4, 0x0001)
+/* Error: H263 streams that are not baseline are not supported (supported sizes are CIF, QCIF or SQCIF) */
+#define M4ERR_DECODER_H263_NOT_BASELINE                        M4OSA_ERR_CREATE(M4_ERR,\
+                                                                 M4DECODER_MPEG4, 0x0002)
+
+/**
+ ************************************************************************
+ * enum     M4DECODER_AVCProfileLevel
+ * @brief    This enum defines the AVC decoder profile and level for the current instance
+ * @note    These options can be read from the decoder via M4DECODER_getOption_fct
+ ************************************************************************
+*/
+typedef enum
+{
+    M4DECODER_AVC_kProfile_0_Level_1 = 0,
+    M4DECODER_AVC_kProfile_0_Level_1b,
+    M4DECODER_AVC_kProfile_0_Level_1_1,
+    M4DECODER_AVC_kProfile_0_Level_1_2,
+    M4DECODER_AVC_kProfile_0_Level_1_3,
+    M4DECODER_AVC_kProfile_0_Level_2,
+    M4DECODER_AVC_kProfile_0_Level_2_1,
+    M4DECODER_AVC_kProfile_0_Level_2_2,
+    M4DECODER_AVC_kProfile_0_Level_3,
+    M4DECODER_AVC_kProfile_0_Level_3_1,
+    M4DECODER_AVC_kProfile_0_Level_3_2,
+    M4DECODER_AVC_kProfile_0_Level_4,
+    M4DECODER_AVC_kProfile_0_Level_4_1,
+    M4DECODER_AVC_kProfile_0_Level_4_2,
+    M4DECODER_AVC_kProfile_0_Level_5,
+    M4DECODER_AVC_kProfile_0_Level_5_1,
+    M4DECODER_AVC_kProfile_and_Level_Out_Of_Range = 255
+} M4DECODER_AVCProfileLevel;
+
+/**
+ ************************************************************************
+ * enum     M4DECODER_OptionID
+ * @brief    This enum defines the decoder options
+ * @note    These options can be read from or written to a decoder via
+ *            M4DECODER_getOption_fct / M4DECODER_setOption_fct
+ ************************************************************************
+*/
+typedef enum
+{
+    /**
+    Get the version of the core decoder
+    */
+    M4DECODER_kOptionID_Version        = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x01),
+    /**
+    Get the size of the currently decoded video
+    */
+    M4DECODER_kOptionID_VideoSize    = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x02),
+    /**
+    Set the conversion filter to use at rendering
+    */
+    M4DECODER_kOptionID_OutputFilter = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x03),
+    /**
+    Activate the Deblocking filter
+    */
+    M4DECODER_kOptionID_DeblockingFilter = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON, 0x04),
+    /**
+    Get next rendered frame CTS
+    */
+    M4DECODER_kOptionID_NextRenderedFrameCTS = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_COMMON,\
+                                                                         0x05),
+
+
+    /* common to MPEG4 decoders */
+    /**
+     * Get the DecoderConfigInfo */
+    M4DECODER_MPEG4_kOptionID_DecoderConfigInfo = M4OSA_OPTION_ID_CREATE(M4_READ,\
+                                                         M4DECODER_MPEG4, 0x01),
+
+    /* only for H.264 decoder. */
+    /**
+    Get AVC profile and level.
+    */
+    M4DECODER_kOptionID_AVCProfileAndLevel = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AVC, 0x01),
+
+    /* last decoded cts */
+    M4DECODER_kOptionID_AVCLastDecodedFrameCTS = M4OSA_OPTION_ID_CREATE(M4_READ, M4DECODER_AVC,\
+                                                                             0x02)
+
+} M4DECODER_OptionID;
+
+
+/**
+ ************************************************************************
+ * struct    M4DECODER_MPEG4_DecoderConfigInfo
+ * @brief    Contains info read from the MPEG-4 VideoObjectLayer.
+ ************************************************************************
+*/
+typedef struct
+{
+    M4OSA_UInt8        uiProfile;                /**< profile and level as defined in the Visual
+                                                         Object Sequence header, if present */
+    M4OSA_UInt32    uiTimeScale;            /**< time scale as parsed in VOL header */
+    M4OSA_UInt8        uiUseOfResynchMarker;    /**< Usage of resynchronization marker */
+    M4OSA_Bool        bDataPartition;            /**< If 1 data partitioning is used. */
+    M4OSA_Bool        bUseOfRVLC;                /**< Usage of RVLC for the stream */
+
+} M4DECODER_MPEG4_DecoderConfigInfo;
+
+
+/**
+ ***********************************************************************
+ * structure    M4DECODER_VideoSize
+ * @brief        This structure defines the video size (width and height)
+ * @note        This structure is used to retrieve via the M4DECODER_getOption_fct
+ *                function the size of the current decoded video
+ ************************************************************************
+*/
+typedef struct _M4DECODER_VideoSize
+{
+    M4OSA_UInt32   m_uiWidth;    /**< video width  in pixels */
+    M4OSA_UInt32   m_uiHeight;    /**< video height in pixels */
+
+} M4DECODER_VideoSize;
+
+/**
+ ************************************************************************
+ * structure    M4DECODER_OutputFilter
+ * @brief        This structure defines the conversion filter
+ * @note        This structure is used to retrieve the filter function
+ *                pointer and its user data via the function
+ *                M4DECODER_getOption_fct    with the option
+ *                M4DECODER_kOptionID_OutputFilter
+ ************************************************************************
+*/
+typedef struct _M4DECODER_OutputFilter
+{
+    M4OSA_Void   *m_pFilterFunction;    /**< pointer to the filter function */
+    M4OSA_Void   *m_pFilterUserData;    /**< user data of the filter        */
+
+} M4DECODER_OutputFilter;
+
+/**
+ ************************************************************************
+ * enum     M4DECODER_VideoType
+ * @brief    This enum defines the video types used to create decoders
+ * @note    This enum is used internally by the VPS to identify a currently supported
+ *            video decoder interface. Each decoder is registered with one of these types.
+ *            When a decoder instance is needed, this type is used to identify
+ *            and retrieve its interface.
+ ************************************************************************
+*/
+typedef enum
+{
+    M4DECODER_kVideoTypeMPEG4 = 0,
+    M4DECODER_kVideoTypeMJPEG,
+    M4DECODER_kVideoTypeAVC,
+    M4DECODER_kVideoTypeWMV,
+    M4DECODER_kVideoTypeREAL,
+
+    M4DECODER_kVideoType_NB  /* number of decoders, keep it as last enum entry */
+
+} M4DECODER_VideoType ;
+
+/**
+ ************************************************************************
+ * @brief    creates an instance of the decoder
+ * @note    allocates the context
+ *
+ * @param    pContext:        (OUT)    Context of the decoder
+ * @param    pStreamHandler:    (IN)    Pointer to a video stream description
+ * @param    pSrcInterface:    (IN)    Pointer to the M4READER_DataInterface structure that must
+ *                                       be used by the decoder to read data from the stream
+ * @param    pAccessUnit        (IN)    Pointer to an access unit (allocated by the caller)
+ *                                      where the decoded data are stored
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return  M4ERR_STATE             State automaton is not applied
+ * @return    M4ERR_ALLOC                a memory allocation has failed
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+*/
+typedef M4OSA_ERR  (M4DECODER_create_fct)    (M4OSA_Context *pContext,
+                                                 M4_StreamHandler *pStreamHandler,
+                                                 M4READER_DataInterface *pSrcInterface,
+                                                 M4_AccessUnit *pAccessUnit,
+                                                 M4OSA_Void* pUserData);
+
+/**
+ ************************************************************************
+ * @brief    destroy the instance of the decoder
+ * @note    after this call the context is invalid
+ *
+ * @param    context:    (IN)    Context of the decoder
+ *
+ * @return    M4NO_ERROR             There is no error
+ * @return  M4ERR_PARAMETER     The context is invalid (in DEBUG only)
+ ************************************************************************
+*/
+typedef M4OSA_ERR  (M4DECODER_destroy_fct)    (M4OSA_Context context);
+
+/**
+ ************************************************************************
+ * @brief    get an option value from the decoder
+ * @note    this function follows the set/get option mechanism described in OSAL 3.0
+ *          it allows the caller to retrieve a property value:
+ *          -the version number of the decoder
+ *          -the size (widthxheight) of the image
+ *
+ * @param    context:    (IN)        Context of the decoder
+ * @param    optionId:    (IN)        indicates the option to set
+ * @param    pValue:        (IN/OUT)    pointer to structure or value (allocated by user) where
+ *                                      option is stored
+ * @return    M4NO_ERROR                 there is no error
+ * @return  M4ERR_PARAMETER         The context is invalid (in DEBUG only)
+ * @return    M4ERR_BAD_OPTION_ID        when the option ID is not a valid one
+ * @return  M4ERR_STATE             State automaton is not applied
+ ************************************************************************
+*/
+typedef M4OSA_ERR  (M4DECODER_getOption_fct)(M4OSA_Context context, M4OSA_OptionID optionId,
+                                             M4OSA_DataOption pValue);
+
+/**
+ ************************************************************************
+ * @brief   set an option value of the decoder
+ * @note    this function follows the set/get option mechanism described in OSAL 3.0
+ *          it allows the caller to set a property value:
+ *          -the conversion filter to use at rendering
+ *
+ * @param   context:    (IN)        Context of the decoder
+ * @param   optionId:   (IN)        Identifier indicating the option to set
+ * @param   pValue:     (IN)        Pointer to structure or value (allocated by user)
+ *                                     where option is stored
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_BAD_OPTION_ID     The option ID is not a valid one
+ * @return  M4ERR_STATE             State automaton is not applied
+ * @return  M4ERR_PARAMETER         The option parameter is invalid
+ ************************************************************************
+*/
+typedef M4OSA_ERR  (M4DECODER_setOption_fct)(M4OSA_Context context, M4OSA_OptionID optionId,
+                                                 M4OSA_DataOption pValue);
+
+/**
+ ************************************************************************
+ * @brief   Decode Access Units up to a target time
+ * @note    Parse and decode the stream until it is possible to output a decoded image for which
+ *            the composition time is equal to or greater than the targeted time passed in
+ *          The data are read from the reader data interface
+ *
+ * @param    context:    (IN)        Context of the decoder
+ * @param    pTime:        (IN/OUT)    IN: Time to decode up to (in milliseconds)
+ *                                    OUT:Time of the last decoded frame (in ms)
+ * @param   bJump:      (IN)        0 if no jump occurred just before this call
+ *                                  1 if a jump has just been made
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4WAR_NO_MORE_AU        there is no more access unit to decode (end of stream)
+ ************************************************************************
+*/
+typedef M4OSA_ERR  (M4DECODER_decode_fct)    (M4OSA_Context context, M4_MediaTime* pTime,
+                                                 M4OSA_Bool bJump);
+
+/**
+ ************************************************************************
+ * @brief    Renders the video at the specified time.
+ * @note
+ * @param    context:     (IN)        Context of the decoder
+ * @param   pTime:       (IN/OUT)   IN: Time to render to (in milliseconds)
+ *                                  OUT: Time of the actually rendered frame (in ms)
+ * @param    pOutputPlane:(OUT)        Output plane filled with decoded data (converted)
+ * @param   bForceRender:(IN)       1 if the image must be rendered even if it has already been rendered
+ *                                  0 if not (in which case the function can return
+ *                                       M4WAR_VIDEORENDERER_NO_NEW_FRAME)
+ * @return    M4NO_ERROR                 There is no error
+ * @return    M4ERR_PARAMETER            At least one parameter is not properly set
+ * @return  M4ERR_STATE             State automaton is not applied
+ * @return  M4ERR_ALLOC             There is no more available memory
+ * @return    M4WAR_VIDEORENDERER_NO_NEW_FRAME    If the frame to render has already been rendered
+ ************************************************************************
+*/
+typedef M4OSA_ERR  (M4DECODER_render_fct)    (M4OSA_Context context, M4_MediaTime* pTime,
+                                              M4VIFI_ImagePlane* pOutputPlane,
+                                              M4OSA_Bool bForceRender);
+
+/**
+ ************************************************************************
+ * structure    M4DECODER_VideoInterface
+ * @brief        This structure defines the generic video decoder interface
+ * @note        This structure stores the pointers to functions of one video decoder type.
+ *                The decoder type is one of the M4DECODER_VideoType
+ ************************************************************************
+*/
+typedef struct _M4DECODER_VideoInterface
+{
+    M4DECODER_create_fct*        m_pFctCreate;
+    M4DECODER_destroy_fct*        m_pFctDestroy;
+    M4DECODER_getOption_fct*    m_pFctGetOption;
+    M4DECODER_setOption_fct*    m_pFctSetOption;
+    M4DECODER_decode_fct*        m_pFctDecode;
+    M4DECODER_render_fct*        m_pFctRender;
+} M4DECODER_VideoInterface;
+
+#endif /*__M4DECODER_COMMON_H__*/
+
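A brief sketch of how a caller drives a decoder through the generic interface above. It assumes the interface table was retrieved elsewhere and the decoder context was already created with m_pFctCreate; the helper name and the minimal error handling are illustrative.

/* Sketch only: pDecoder and decCtx are assumed to be valid and already created. */
static M4OSA_ERR decodeAndRenderUpTo(M4DECODER_VideoInterface *pDecoder,
                                     M4OSA_Context decCtx,
                                     M4_MediaTime targetCts,
                                     M4VIFI_ImagePlane *pOutputPlane)
{
    M4_MediaTime time = targetCts;
    M4OSA_ERR err;

    /* Decode access units until a frame at or after targetCts is available */
    err = pDecoder->m_pFctDecode(decCtx, &time, M4OSA_FALSE);
    if ((M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err))
        return err;

    /* Render (and colour-convert) the decoded frame into the output plane */
    time = targetCts;
    return pDecoder->m_pFctRender(decCtx, &time, pOutputPlane, M4OSA_FALSE);
}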
diff --git a/libvideoeditor/vss/common/inc/M4ENCODER_AudioCommon.h b/libvideoeditor/vss/common/inc/M4ENCODER_AudioCommon.h
new file mode 100755
index 0000000..1386c8c
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4ENCODER_AudioCommon.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4ENCODER_AudioCommon.h
+ * @brief    VES audio encoder shell interface.
+ * @note    This file defines the types internally used by the VES to abstract audio encoders
+ ******************************************************************************
+*/
+#ifndef __M4ENCODER_AUDIOCOMMON_H__
+#define __M4ENCODER_AUDIOCOMMON_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "M4OSA_OptionID.h"     /* for M4OSA_OPTION_ID_CREATE() */
+
+#define M4ENCODER_AUDIO_NB_CHANNELS_MAX 2
+/* WARNING: this value must be equal to the number of samples grabbed */
+//#define M4ENCODER_AUDIO_PCM_SAMPLE_NUMBER 960    /* imposed by the AAC encoder. */
+#define M4ENCODER_AUDIO_PCM_SAMPLE_NUMBER 1024    /* imposed by the AAC encoder. */
+
+
+/**
+ ******************************************************************************
+ * enumeration    M4ENCODER_Audio_OptionID
+ * @brief        This enum defines the core AAC shell encoder options
+ ******************************************************************************
+*/
+typedef enum
+{
+ /* Maximum generated AU size */
+    M4ENCODER_Audio_maxAUsize     = M4OSA_OPTION_ID_CREATE(M4_READ,      M4ENCODER_AUDIO, 0x01)
+
+} M4ENCODER_Audio_OptionID;
+
+
+ /**
+ ******************************************************************************
+ * enum        M4ENCODER_SamplingFrequency
+ * @brief    This enum defines the audio sampling frequencies.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_k8000Hz = 8000,
+    M4ENCODER_k11025Hz = 11025,
+    M4ENCODER_k12000Hz = 12000,
+    M4ENCODER_k16000Hz = 16000,
+    M4ENCODER_k22050Hz = 22050,
+    M4ENCODER_k24000Hz = 24000,
+    M4ENCODER_k32000Hz = 32000,
+    M4ENCODER_k44100Hz = 44100,
+    M4ENCODER_k48000Hz = 48000
+} M4ENCODER_SamplingFrequency;
+
+
+/**
+ ******************************************************************************
+ * enum        M4ENCODER_AudioFormat
+ * @brief    This enum defines the audio compression formats.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_kAMRNB = 0,
+    M4ENCODER_kAAC,
+    M4ENCODER_kAudioNULL,    /**< No compression */
+    M4ENCODER_kMP3,
+    M4ENCODER_kAudio_NB        /* number of encoders, keep it as last enum entry */
+
+} M4ENCODER_AudioFormat;
+
+/**
+ ******************************************************************************
+ * enum        M4ENCODER_ChannelNumber
+ * @brief    This enum defines the number of audio channels.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_kMono  = 0,
+    M4ENCODER_kStereo,
+    M4ENCODER_kStereoNoInterleave
+} M4ENCODER_ChannelNumber;
+
+/**
+ ******************************************************************************
+ * enum        M4ENCODER_AudioBitrate
+ * @brief    This enum defines the available bitrates.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_kAudio_4_75_KBPS    = 4750,
+    M4ENCODER_kAudio_5_15_KBPS    = 5150,
+    M4ENCODER_kAudio_5_9_KBPS    = 5900,
+    M4ENCODER_kAudio_6_7_KBPS    = 6700,
+    M4ENCODER_kAudio_7_4_KBPS    = 7400,
+    M4ENCODER_kAudio_7_95_KBPS    = 7950,
+    M4ENCODER_kAudio_8_KBPS        = 8000,
+    M4ENCODER_kAudio_10_2_KBPS    = 10200,
+    M4ENCODER_kAudio_12_2_KBPS    = 12200,
+    M4ENCODER_kAudio_16_KBPS    = 16000,
+    M4ENCODER_kAudio_24_KBPS    = 24000,
+    M4ENCODER_kAudio_32_KBPS    = 32000,
+    M4ENCODER_kAudio_40_KBPS    = 40000,
+    M4ENCODER_kAudio_48_KBPS    = 48000,
+    M4ENCODER_kAudio_56_KBPS    = 56000,
+    M4ENCODER_kAudio_64_KBPS    = 64000,
+    M4ENCODER_kAudio_80_KBPS    = 80000,
+    M4ENCODER_kAudio_96_KBPS    = 96000,
+    M4ENCODER_kAudio_112_KBPS    = 112000,
+    M4ENCODER_kAudio_128_KBPS    = 128000,
+    M4ENCODER_kAudio_144_KBPS    = 144000,
+    M4ENCODER_kAudio_160_KBPS    = 160000,
+    M4ENCODER_kAudio_192_KBPS    = 192000,
+    M4ENCODER_kAudio_224_KBPS    = 224000,
+    M4ENCODER_kAudio_256_KBPS    = 256000,
+    M4ENCODER_kAudio_320_KBPS    = 320000
+} M4ENCODER_AudioBitrate;
+
+
+/**
+ ******************************************************************************
+ * enum            M4ENCODER_AacRegulation
+ * @brief        The current mode of the bitrate regulation.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_kAacRegulNone = 0,    /**< no bitrate regulation */
+    M4ENCODER_kAacBitReservoir        /**< better quality, but more CPU consumed */
+} M4ENCODER_AacRegulation;
+
+/**
+ ******************************************************************************
+ * enum        M4ENCODER_AmrSID
+ * @brief    This enum defines the SID of the AMR encoder.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_kAmrNoSID = 0     /**< no SID */
+} M4ENCODER_AmrSID;
+
+/**
+ ******************************************************************************
+ * struct    M4ENCODER_AacParams
+ * @brief    This structure defines all the settings specific to the AAC encoder.
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4ENCODER_AacRegulation    Regulation;
+    M4OSA_Bool                bHighSpeed;
+    M4OSA_Bool                bTNS;
+    M4OSA_Bool                bPNS;
+    M4OSA_Bool                bIS;
+    M4OSA_Bool                bMS;
+} M4ENCODER_AacParams;
+
+/**
+ ******************************************************************************
+ * struct    M4ENCODER_AudioParams
+ * @brief    This structure defines all the settings available when encoding audio.
+ ******************************************************************************
+*/
+typedef struct s_M4ENCODER_AudioParams
+{
+    M4ENCODER_SamplingFrequency    Frequency;    /**< the sampling frequency */
+    M4ENCODER_ChannelNumber        ChannelNum;    /**< the number of channels (mono, stereo, ...) */
+    M4ENCODER_AudioBitrate        Bitrate;    /**<  bitrate, see enum  */
+    M4ENCODER_AudioFormat        Format;        /**<  audio compression format, AMR, AAC ...  */
+    union {
+        M4ENCODER_AacParams        AacParam;
+        M4ENCODER_AmrSID        AmrSID;
+    } SpecifParam;                            /**< the audio encoder specific parameters */
+} M4ENCODER_AudioParams;
+
+/**
+ ******************************************************************************
+ * struct    M4ENCODER_AudioDecSpecificInfo
+ * @brief    This structure describes the decoder specific info buffer.
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_MemAddr8    pInfo;        /**< the buffer address */
+    M4OSA_UInt32    infoSize;    /**< the buffer size in bytes */
+} M4ENCODER_AudioDecSpecificInfo;
+
+/**
+ ******************************************************************************
+ * struct    M4ENCODER_AudioBuffer
+ * @brief    This structure defines the data buffer.
+ ******************************************************************************
+*/
+typedef struct
+{
+    /**< the table of buffers (unused buffers are set to NULL) */
+    M4OSA_MemAddr8    pTableBuffer[M4ENCODER_AUDIO_NB_CHANNELS_MAX];
+    /**< the table of the size of corresponding buffer at same index */
+    M4OSA_UInt32    pTableBufferSize[M4ENCODER_AUDIO_NB_CHANNELS_MAX];
+} M4ENCODER_AudioBuffer;
+
+typedef M4OSA_ERR (M4AE_init)        (M4OSA_Context* hContext, M4OSA_Void* pUserData);
+typedef M4OSA_ERR (M4AE_cleanUp)    (M4OSA_Context pContext);
+typedef M4OSA_ERR (M4AE_open)        (M4OSA_Context pContext, M4ENCODER_AudioParams *params,
+                                        M4ENCODER_AudioDecSpecificInfo *decSpecInfo,
+                                        M4OSA_Context grabberContext);
+typedef M4OSA_ERR (M4AE_close)        (M4OSA_Context pContext);
+typedef M4OSA_ERR (M4AE_step)         (M4OSA_Context pContext, M4ENCODER_AudioBuffer *inBuffer,
+                                        M4ENCODER_AudioBuffer *outBuffer);
+typedef M4OSA_ERR (M4AE_getOption)    (M4OSA_Context pContext, M4OSA_OptionID    option,
+                                        M4OSA_DataOption *valuePtr);
+/**
+ ******************************************************************************
+ * struct    M4ENCODER_AudioGlobalInterface
+ * @brief    Defines all the functions required for an audio encoder shell.
+ ******************************************************************************
+*/
+typedef struct _M4ENCODER_AudioGlobalInterface
+{
+    M4AE_init*        pFctInit;
+    M4AE_cleanUp*    pFctCleanUp;
+    M4AE_open*        pFctOpen;
+    M4AE_close*        pFctClose;
+    M4AE_step*        pFctStep;
+    M4AE_getOption*    pFctGetOption;
+} M4ENCODER_AudioGlobalInterface;
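+
+/**
+ * Illustrative call sequence for an audio encoder shell (a minimal sketch only:
+ * pShell is assumed to be a M4ENCODER_AudioGlobalInterface* obtained from the
+ * shell-specific getInterface function, and error handling is omitted):
+ *
+ *   M4OSA_Context                   ctx = M4OSA_NULL;
+ *   M4ENCODER_AudioParams           params;         -- filled by the caller
+ *   M4ENCODER_AudioDecSpecificInfo  dsi;            -- returned by the encoder
+ *   M4ENCODER_AudioBuffer           in, out;        -- PCM in, encoded AU out
+ *
+ *   pShell->pFctInit(&ctx, M4OSA_NULL);
+ *   pShell->pFctOpen(ctx, &params, &dsi, M4OSA_NULL);
+ *   pShell->pFctStep(ctx, &in, &out);
+ *   pShell->pFctClose(ctx);
+ *   pShell->pFctCleanUp(ctx);
+ */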
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__M4ENCODER_AUDIOCOMMON_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4ENCODER_common.h b/libvideoeditor/vss/common/inc/M4ENCODER_common.h
new file mode 100755
index 0000000..75d1c31
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4ENCODER_common.h
@@ -0,0 +1,468 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4ENCODER_common.h
+ * @note    This file defines the types internally used by the VES to abstract encoders
+
+ ******************************************************************************
+*/
+#ifndef __M4ENCODER_COMMON_H__
+#define __M4ENCODER_COMMON_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+/**
+ * Video preprocessing common interface */
+#include "M4VPP_API.h"
+
+/**
+ * Writer common interface */
+#include "M4WRITER_common.h"
+
+/* IMAGE STAB */
+/* percentage of image suppressed (computed from the standard dimension).*/
+#define M4ENCODER_STAB_FILTER_CROP_PERCENTAGE 10
+        /* WARNING: take the inferior even dimension, ex: 10% for QCIF output => 192x158 */
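+
+/* A minimal sketch of the computation implied above (the helper name is
+   hypothetical, not part of this API): add the crop percentage to the output
+   dimension, then take the inferior even value.
+
+     M4OSA_UInt32 grabDim(M4OSA_UInt32 outDim)
+     {
+         M4OSA_UInt32 dim = outDim + (outDim * M4ENCODER_STAB_FILTER_CROP_PERCENTAGE) / 100;
+         return dim & ~1u;
+     }
+
+   For QCIF output (176x144) this gives 192x158, as in the example above. */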
+
+/**
+ ******************************************************************************
+ * enum        M4ENCODER_OpenMode
+ * @brief    Definition of open mode for the encoder.
+ * @note    DEFAULT  : pointer to M4ENCODER_open() which uses default parameters
+ *          ADVANCED : pointer to M4ENCODER_open_advanced() which allows various
+ *                     encoding parameters to be customized
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_OPEN_DEFAULT,
+    M4ENCODER_OPEN_ADVANCED
+} M4ENCODER_OpenMode;
+
+ /**
+ ******************************************************************************
+ * enum        M4ENCODER_FrameRate
+ * @brief    This enum defines the encoded video framerates.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_k5_FPS,
+    M4ENCODER_k7_5_FPS,
+    M4ENCODER_k10_FPS,
+    M4ENCODER_k12_5_FPS,
+    M4ENCODER_k15_FPS,
+    M4ENCODER_k20_FPS,
+    M4ENCODER_k25_FPS,
+    M4ENCODER_k30_FPS,
+    M4ENCODER_kVARIABLE_FPS,            /**< Variable video framerate */
+    M4ENCODER_kUSE_TIMESCALE            /**< Advanced encoding, use timescale indication rather
+                                                than framerate */
+} M4ENCODER_FrameRate;
+
+/**
+ ******************************************************************************
+ * enum        M4ENCODER_InputFormat
+ * @brief    This enum defines the video input (grabbing) format.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_kIYUV420=0,   /**< YUV 4:2:0 planar (standard input for mpeg-4 video) */
+    M4ENCODER_kIYUV422,        /**< YUV422 planar */
+    M4ENCODER_kIYUYV,        /**< YUV422 interlaced, luma first */
+    M4ENCODER_kIUYVY,        /**< YUV422 interlaced, chroma first */
+    M4ENCODER_kIJPEG,        /**< JPEG compressed frames */
+    M4ENCODER_kIRGB444,        /**< RGB 12 bits 4:4:4 */
+    M4ENCODER_kIRGB555,        /**< RGB 15 bits 5:5:5 */
+    M4ENCODER_kIRGB565,        /**< RGB 16 bits 5:6:5 */
+    M4ENCODER_kIRGB24,        /**< RGB 24 bits 8:8:8 */
+    M4ENCODER_kIRGB32,        /**< RGB 32 bits  */
+    M4ENCODER_kIBGR444,        /**< BGR 12 bits 4:4:4 */
+    M4ENCODER_kIBGR555,        /**< BGR 15 bits 5:5:5 */
+    M4ENCODER_kIBGR565,        /**< BGR 16 bits 5:6:5 */
+    M4ENCODER_kIBGR24,        /**< BGR 24 bits 8:8:8 */
+    M4ENCODER_kIBGR32        /**< BGR 32 bits  */
+} M4ENCODER_InputFormat;
+
+/**
+ ******************************************************************************
+ * enum        M4ENCODER_Format
+ * @brief    This enum defines the video compression formats.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_kMPEG4 = 0,
+    M4ENCODER_kH263,
+    M4ENCODER_kH264,
+    M4ENCODER_kJPEG,
+    M4ENCODER_kMJPEG,
+    M4ENCODER_kNULL,
+    M4ENCODER_kYUV420,            /**< No compression */
+    M4ENCODER_kYUV422,            /**< No compression */
+
+    M4ENCODER_kVideo_NB /* number of encoders, keep it as last enum entry */
+} M4ENCODER_Format;
+
+/**
+ ******************************************************************************
+ * enum        M4ENCODER_FrameWidth
+ * @brief    This enum defines the available frame widths.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_SQCIF_Width = 128,    /**< SQCIF 128x96 */
+    M4ENCODER_QQVGA_Width = 160,    /**< QQVGA 160x120 */
+    M4ENCODER_QCIF_Width  = 176,    /**< QCIF 176x144 */
+    M4ENCODER_QVGA_Width  = 320,    /**< QVGA 320x240 */
+    M4ENCODER_CIF_Width   = 352,    /**< CIF 352x288 */
+    M4ENCODER_VGA_Width   = 640,    /**< VGA 640x480 */
+    M4ENCODER_SVGA_Width  = 800,    /**< SVGA 800x600 */
+    M4ENCODER_XGA_Width   = 1024,    /**< XGA 1024x768 */
+    M4ENCODER_XVGA_Width  = 1280,    /**< XVGA 1280x1024 */
+/* +PR LV5807 */
+    M4ENCODER_WVGA_Width  = 800,    /**< WVGA 800 x 480 */
+    M4ENCODER_NTSC_Width  = 720,    /**< NTSC 720 x 480 */
+/* -PR LV5807 */
+
+/* +CR Google */
+    M4ENCODER_640_360_Width       = 640,        /**< 640x360 */
+    // StageFright encoders require %16 resolution
+    M4ENCODER_854_480_Width     = 848, /**< 848x480 */
+    M4ENCODER_HD1280_Width         = 1280,            /**< 720p 1280x720 */
+    // StageFright encoders require %16 resolution
+    M4ENCODER_HD1080_Width      = 1088, /**< 720p 1088x720 */
+    M4ENCODER_HD960_Width          = 960            /**< 720p 960x720 */
+
+/* -CR Google */
+
+} M4ENCODER_FrameWidth;
+
+/**
+ ******************************************************************************
+ * enum        M4ENCODER_FrameHeight
+ * @brief    This enum defines the available frame heights.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_SQCIF_Height = 96,    /**< SQCIF 128x96 */
+    M4ENCODER_QQVGA_Height = 120,    /**< QQVGA 160x120 */
+    M4ENCODER_QCIF_Height  = 144,    /**< QCIF 176x144 */
+    M4ENCODER_QVGA_Height  = 240,    /**< QVGA 320x240 */
+    M4ENCODER_CIF_Height   = 288,    /**< CIF 352x288 */
+    M4ENCODER_VGA_Height   = 480,    /**< VGA 640x480 */
+    M4ENCODER_SVGA_Height  = 600,    /**< SVGA 800x600 */
+    M4ENCODER_XGA_Height   = 768,    /**< XGA 1024x768 */
+    M4ENCODER_XVGA_Height  = 1024,    /**< XVGA 1280x1024 */
+/* +PR LV5807 */
+    M4ENCODER_WVGA_Height  = 480,    /**< WVGA 800 x 480 */
+    M4ENCODER_NTSC_Height  = 480,    /**< NTSC 720 x 480 */
+/* -PR LV5807 */
+
+/* +CR Google */
+    M4ENCODER_640_360_Height       = 360,        /**< 640x360 */
+    M4ENCODER_854_480_Height       = 480,        /**< 854x480 */
+    M4ENCODER_HD1280_Height     = 720,        /**< 720p 1280x720 */
+    M4ENCODER_HD1080_Height     = 720,        /**< 720p 1080x720 */
+    M4ENCODER_HD960_Height      = 720        /**< 720p 960x720 */
+
+/* -CR Google */
+} M4ENCODER_FrameHeight;
+
+/**
+ ******************************************************************************
+ * enum        M4ENCODER_Bitrate
+ * @brief    This enum defines the available bitrates.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_k28_KBPS    = 28000,
+    M4ENCODER_k40_KBPS    = 40000,
+    M4ENCODER_k64_KBPS    = 64000,
+    M4ENCODER_k96_KBPS    = 96000,
+    M4ENCODER_k128_KBPS = 128000,
+    M4ENCODER_k192_KBPS = 192000,
+    M4ENCODER_k256_KBPS = 256000,
+    M4ENCODER_k384_KBPS = 384000,
+    M4ENCODER_k512_KBPS = 512000,
+    M4ENCODER_k800_KBPS = 800000
+
+} M4ENCODER_Bitrate;
+
+/* IMAGE STAB */
+
+/**
+ ******************************************************************************
+ * enum            M4ENCODER_StabMode
+ * @brief        The current mode of the stabilization filter.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_kStabOff = 0,        /**< stabilization filter is disabled */
+    M4ENCODER_kStabCentered,    /**< stabilization filter is enabled. */
+                                /**< Video input and output must have the same dimensions. Output
+                                    image will have black borders */
+    M4ENCODER_kStabGrabMore        /**< stabilization filter is enabled. */
+                                /**< Video input dimensions must be bigger than output. The ratio
+                                        is indicated by M4ENCODER_STAB_FILTER_CROP_PERCENTAGE */
+
+} M4ENCODER_StabMode;
+
+/**
+ ******************************************************************************
+ * enum            M4ENCODER_FrameMode
+ * @brief        Values to drive the encoder behaviour (type of frames produced)
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4ENCODER_kNormalFrame = 0,   /**< let the encoder decide which type of frame to encode */
+    M4ENCODER_kLastFrame   = 1,   /**< force the encoder to flush all its buffers because
+                                         this is the last frame  */
+    M4ENCODER_kIFrame      = 2    /**< force encoder to generate an I frame */
+
+} M4ENCODER_FrameMode;
+
+/**
+ ******************************************************************************
+ * struct    M4ENCODER_Params
+ * @brief    This structure defines all the settings available when encoding.
+ ******************************************************************************
+*/
+typedef struct
+{
+    /* Input */
+    M4ENCODER_InputFormat    InputFormat;        /**< Input video format (grabbing) */
+    M4ENCODER_FrameWidth    InputFrameWidth;    /**< Input Frame width (grabbing) */
+    M4ENCODER_FrameHeight    InputFrameHeight;    /**< Input Frame height (grabbing) */
+
+    /* Output */
+    M4ENCODER_FrameWidth    FrameWidth;            /**< Frame width  */
+    M4ENCODER_FrameHeight    FrameHeight;        /**< Frame height  */
+    M4ENCODER_Bitrate        Bitrate;            /**< Bitrate, see enum  */
+    M4ENCODER_FrameRate        FrameRate;            /**< Framerate, see enum  */
+    M4ENCODER_Format        Format;                /**< Video compression format, H263, MPEG4,
+                                                         MJPEG ...  */
+
+} M4ENCODER_Params;
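+
+/**
+ * Minimal sketch of filling M4ENCODER_Params for a QCIF H263 encode
+ * (the values below are purely illustrative):
+ *
+ *   M4ENCODER_Params params;
+ *   params.InputFormat      = M4ENCODER_kIYUV420;
+ *   params.InputFrameWidth  = M4ENCODER_QCIF_Width;
+ *   params.InputFrameHeight = M4ENCODER_QCIF_Height;
+ *   params.FrameWidth       = M4ENCODER_QCIF_Width;
+ *   params.FrameHeight      = M4ENCODER_QCIF_Height;
+ *   params.Bitrate          = M4ENCODER_k128_KBPS;
+ *   params.FrameRate        = M4ENCODER_k15_FPS;
+ *   params.Format           = M4ENCODER_kH263;
+ */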
+
+/**
+ ******************************************************************************
+ * struct    M4ENCODER_AdvancedParams
+ * @brief    This structure defines the advanced settings available for MPEG-4 encoding.
+ ******************************************************************************
+*/
+typedef struct
+{
+    /**
+     * Input parameters (grabber coupled with encoder): */
+    M4ENCODER_InputFormat    InputFormat;                /**< Input video format */
+    M4ENCODER_FrameWidth    InputFrameWidth;            /**< Input Frame width */
+    M4ENCODER_FrameHeight    InputFrameHeight;            /**< Input Frame height */
+
+    /**
+     * Common settings for H263 and MPEG-4: */
+    M4ENCODER_FrameWidth    FrameWidth;                    /**< Frame width  */
+    M4ENCODER_FrameHeight    FrameHeight;                /**< Frame height  */
+    M4OSA_UInt32            Bitrate;                    /**< Free value for the bitrate */
+    /**< Framerate (if set to M4ENCODER_kUSE_TIMESCALE use uiRateFactor & uiTimeScale instead) */
+    M4ENCODER_FrameRate        FrameRate;
+    /**< Video compression format: H263 or MPEG4 */
+    M4ENCODER_Format        Format;
+    M4OSA_UInt32            uiHorizontalSearchRange; /**< Set to 0 to use the default value (15) */
+    M4OSA_UInt32            uiVerticalSearchRange;   /**< Set to 0 to use the default value (15) */
+    /**< Set to 0 to use the default value (0x7FFF, i.e. let the engine decide when to put an I) */
+    M4OSA_UInt32            uiStartingQuantizerValue;
+    /**< Enable if priority is quality, Disable if priority is framerate */
+    M4OSA_Bool                bInternalRegulation;
+    /**< Ratio between the encoder frame rate and the actual frame rate */
+    M4OSA_UInt8                uiRateFactor;
+    /**< I-frame periodicity; set to 0 to use the default value */
+    M4OSA_UInt32            uiIVopPeriod;
+    /**< Motion estimation [default=0 (all tools), disable=8 (no tool)] */
+    M4OSA_UInt8             uiMotionEstimationTools;
+
+    /**
+     * Settings for MPEG-4 only: */
+    M4OSA_UInt32            uiTimeScale;                /**< Free value for the timescale */
+    M4OSA_Bool                bErrorResilience;           /**< Disabled by default */
+    /**< Disabled by default (if enabled, bErrorResilience should be enabled too!) */
+    M4OSA_Bool                bDataPartitioning;
+    M4OSA_Bool              bAcPrediction;           /**< AC prediction [default=1, disable=0] */
+
+} M4ENCODER_AdvancedParams;
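+
+/**
+ * Sketch of the M4ENCODER_kUSE_TIMESCALE case for MPEG-4 (illustrative values,
+ * remaining fields omitted): when FrameRate is M4ENCODER_kUSE_TIMESCALE, the
+ * framerate is driven by uiTimeScale and uiRateFactor rather than the enum.
+ *
+ *   M4ENCODER_AdvancedParams adv;
+ *   adv.Format       = M4ENCODER_kMPEG4;
+ *   adv.FrameRate    = M4ENCODER_kUSE_TIMESCALE;
+ *   adv.uiTimeScale  = 30000;      -- free value for the MPEG-4 timescale
+ *   adv.uiRateFactor = 1;          -- encoder frame rate equals the actual frame rate
+ *   adv.Bitrate      = 512000;     -- free value, in bits per second
+ *   adv.uiIVopPeriod = 0;          -- 0 selects the default I frame periodicity
+ */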
+
+/**
+ ******************************************************************************
+ * struct    M4ENCODER_StillPictureParams
+ * @brief    This structure defines all the settings available when encoding a still
+ *            picture.
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4ENCODER_FrameWidth    FrameWidth;            /**< Frame width  */
+    M4ENCODER_FrameHeight    FrameHeight;        /**< Frame height  */
+    M4OSA_UInt32            Quality;            /**< Encoding quality  */
+    M4ENCODER_Format        InputFormat;        /**< YUV 420 or 422  */
+    M4ENCODER_Format        Format;                /**< Video compression format, H263, MPEG4,
+                                                         MJPEG ...  */
+    M4OSA_Bool                PreProcessNeeded;    /**< Is the call to the VPP necessary */
+    M4OSA_Bool                EncodingPerStripes;    /**< Is the encoding done stripe by stripe */
+
+} M4ENCODER_StillPictureParams;
+
+/**
+ ******************************************************************************
+ * struct    M4ENCODER_Header
+ * @brief    This structure defines the buffer where the sequence header is put.
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_MemAddr8    pBuf;        /**< Buffer for the header */
+    M4OSA_UInt32    Size;        /**< Size of the data */
+
+} M4ENCODER_Header;
+
+/**
+ ******************************************************************************
+ * enum    M4ENCODER_OptionID
+ * @brief This enum defines all available options.
+ ******************************************************************************
+*/
+typedef enum
+{
+    /**< set the fragment size, option value is M4OSA_UInt32 type */
+    M4ENCODER_kOptionID_VideoFragmentSize    = M4OSA_OPTION_ID_CREATE (M4_WRITE,\
+                                                     M4ENCODER_COMMON, 0x01),
+
+    /**< set the stabilization filtering, option value is M4ENCODER_StabMode type */
+    M4ENCODER_kOptionID_ImageStabilization    = M4OSA_OPTION_ID_CREATE (M4_WRITE,\
+                                                          M4ENCODER_COMMON, 0x02),
+
+    /**< prevent writing of any AU, option value is M4OSA_Bool type */
+    M4ENCODER_kOptionID_InstantStop            = M4OSA_OPTION_ID_CREATE (M4_WRITE,\
+                                                         M4ENCODER_COMMON, 0x03),
+
+    /**< get the DSI (encoder header) generated by the encoder */
+    M4ENCODER_kOptionID_EncoderHeader        = M4OSA_OPTION_ID_CREATE (M4_READ ,\
+                                                             M4ENCODER_COMMON, 0x04),
+/*+ CR LV6775 -H.264 Trimming  */
+
+    M4ENCODER_kOptionID_SetH264ProcessNALUfctsPtr= M4OSA_OPTION_ID_CREATE (M4_READ ,\
+                                                             M4ENCODER_COMMON, 0x05),
+    M4ENCODER_kOptionID_H264ProcessNALUContext        = M4OSA_OPTION_ID_CREATE (M4_READ ,\
+                                                             M4ENCODER_COMMON, 0x06)
+/*-CR LV6775 -H.264 Trimming  */
+} M4ENCODER_OptionID;
+
+/*+ CR LV6775 -H.264 Trimming  */
+typedef M4OSA_ERR (H264MCS_ProcessEncodedNALU_fct)(M4OSA_Void*ainstance,M4OSA_UInt8* inbuff,
+                               M4OSA_Int32  inbuf_size,
+                               M4OSA_UInt8 *outbuff, M4OSA_Int32 *outbuf_size);
+/*- CR LV6775 -H.264 Trimming  */
+
+typedef M4OSA_Void* M4ENCODER_Context;
+
+typedef M4OSA_ERR (M4ENCODER_init) (
+        M4ENCODER_Context* pContext,
+        M4WRITER_DataInterface* pWriterDataInterface,
+        M4VPP_apply_fct* pVPPfct,
+        M4VPP_Context pVPPctxt,
+        M4OSA_Void* pExternalAPI,
+        M4OSA_Void* pUserData
+);
+
+typedef M4OSA_ERR (M4ENCODER_open) (
+        M4ENCODER_Context pContext,
+        M4SYS_AccessUnit* pAU,
+        M4OSA_Void* pParams     /* Can be M4ENCODER_Params, M4ENCODER_AdvancedParams or
+                                    M4ENCODER_StillPictureParams */
+);
+
+typedef M4OSA_ERR (M4ENCODER_start) (M4ENCODER_Context pContext);
+typedef M4OSA_ERR (M4ENCODER_stop) (M4ENCODER_Context pContext);
+typedef M4OSA_ERR (M4ENCODER_pause) (M4ENCODER_Context pContext);
+typedef M4OSA_ERR (M4ENCODER_resume) (M4ENCODER_Context pContext);
+typedef M4OSA_ERR (M4ENCODER_close) (M4ENCODER_Context pContext);
+typedef M4OSA_ERR (M4ENCODER_cleanup) (M4ENCODER_Context pContext);
+typedef M4OSA_ERR (M4ENCODER_regulBitRate) (M4ENCODER_Context pContext);
+
+typedef M4OSA_ERR (M4ENCODER_encode) (
+        M4ENCODER_Context pContext,
+        M4VIFI_ImagePlane* pInPlane,
+        M4OSA_Double Cts,
+        M4ENCODER_FrameMode FrameMode
+);
+
+typedef M4OSA_ERR (M4ENCODER_setOption)    (
+        M4ENCODER_Context pContext,
+        M4OSA_UInt32 optionID,
+        M4OSA_DataOption optionValue
+);
+
+typedef M4OSA_ERR (M4ENCODER_getOption)    (
+        M4ENCODER_Context pContext,
+        M4OSA_UInt32 optionID,
+        M4OSA_DataOption optionValue
+);
+
+/**
+ ******************************************************************************
+ * struct    M4ENCODER_GlobalInterface
+ * @brief    Defines all the functions required for an encoder shell.
+ ******************************************************************************
+*/
+
+typedef struct _M4ENCODER_GlobalInterface
+{
+    M4ENCODER_init*                pFctInit;
+    M4ENCODER_open*                pFctOpen;
+
+    M4ENCODER_start*            pFctStart;          /* Grabber mode */
+    M4ENCODER_stop*                pFctStop;           /* Grabber mode */
+
+    M4ENCODER_pause*            pFctPause;          /* Grabber mode */
+    M4ENCODER_resume*            pFctResume;         /* Grabber mode */
+
+    M4ENCODER_close*            pFctClose;
+    M4ENCODER_cleanup*            pFctCleanup;
+
+    M4ENCODER_regulBitRate*     pFctRegulBitRate;
+    M4ENCODER_encode*            pFctEncode;         /* Standalone mode */
+
+    M4ENCODER_setOption*        pFctSetOption;
+    M4ENCODER_getOption*        pFctGetOption;
+} M4ENCODER_GlobalInterface;
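+
+/**
+ * Illustrative standalone-mode call sequence through this interface (a sketch
+ * only: pShell, the writer data interface, the VPP callback/context, the
+ * access unit and the input YUV plane are assumed to be provided by the
+ * caller, and error handling is omitted):
+ *
+ *   M4ENCODER_Context ctx = M4OSA_NULL;
+ *   M4ENCODER_Params  params;                      -- filled as sketched above
+ *
+ *   pShell->pFctInit(&ctx, pWriterDataItf, pVppApply, pVppCtx, M4OSA_NULL, M4OSA_NULL);
+ *   pShell->pFctOpen(ctx, pAU, &params);
+ *   pShell->pFctEncode(ctx, pInPlane, cts, M4ENCODER_kNormalFrame);
+ *   pShell->pFctEncode(ctx, pInPlane, cts, M4ENCODER_kLastFrame);   -- flush
+ *   pShell->pFctClose(ctx);
+ *   pShell->pFctCleanup(ctx);
+ */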
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__M4ENCODER_COMMON_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4MDP_API.h b/libvideoeditor/vss/common/inc/M4MDP_API.h
new file mode 100755
index 0000000..1000cd8
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4MDP_API.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file   M4MDP_API.h
+ * @brief  Parser of metadata
+ *
+*************************************************************************
+*/
+
+#ifndef __M4MDP_API_H__
+#define __M4MDP_API_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MD4MDP_close M4MDP_close
+
+#include "M4READER_Common.h"
+#include "M4TOOL_VersionInfo.h"
+#include "M4OSA_FileReader.h"
+#include "M4OSA_FileWriter.h"
+
+/*define the buffer size for content detection*/
+#define M4MDP_INPUT_BUFFER_SIZE    8192
+
+/**
+ ************************************************************************
+ * Public type of the M4MDP_osaFilePtrSt
+ ************************************************************************
+*/
+typedef struct
+{
+    M4OSA_FileReadPointer*     m_pFileReaderFcts;
+    M4OSA_FileWriterPointer* m_pFileWriterFcts;
+} M4MDP_osaFilePtrSt;
+
+/**
+ ************************************************************************
+ * Public type of the MDP execution context
+ ************************************************************************
+*/
+typedef M4OSA_Void* M4MDP_Context;
+
+/**
+ ************************************************************************
+ * Metadata Parser Errors & Warnings definition
+ ************************************************************************
+*/
+#define M4WAR_MDP_MEDIATYPE_NOT_DETECTED        M4OSA_ERR_CREATE(M4_WAR, M4MDP, 0x000001)
+
+#define    M4ERR_MDP_FATAL                            M4OSA_ERR_CREATE(M4_ERR, M4MDP, 0x000000)
+#define    M4ERR_MDP_UNSUPPORTED_TAG_VERSION        M4OSA_ERR_CREATE(M4_ERR, M4MDP, 0x000001)
+#define    M4ERR_MDP_UNSUPPORTED_ENCODING_TYPE        M4OSA_ERR_CREATE(M4_ERR, M4MDP, 0x000002)
+#define    M4ERR_MDP_INIT_FAILED                    M4OSA_ERR_CREATE(M4_ERR, M4MDP, 0x000003)
+#define    M4ERR_MDP_ASSET_PARSING_ERROR            M4OSA_ERR_CREATE(M4_ERR, M4MDP, 0x000004)
+#define M4ERR_MDP_FILE_NOT_FOUND                M4OSA_ERR_CREATE(M4_ERR, M4MDP, 0x000005)
+#define M4ERR_MDP_INVALID_PATH                    M4OSA_ERR_CREATE(M4_ERR, M4MDP, 0x000006)
+
+/**
+ ************************************************************************
+ * Metadata parser FUNCTIONS
+ ************************************************************************
+*/
+
+/**
+ ************************************************************************
+ * @brief    Getting the version of the metadata parser
+ *            This function allows getting the version of the MDP library.
+ *
+ * @param    pVersionInfo    (OUT) Pointer on an allocated version info structure
+ *                            After M4MDP_getVersion() successfully returns, this
+ *                            structure is filled with the version numbers.
+ *                            The structure must be allocated and further de-allocated
+ *                            by the application.
+ *
+ * @return    M4NO_ERROR            No error
+ * @return    M4ERR_PARAMETER        pVersionInfo is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR  M4MDP_getVersion(M4_VersionInfo* pVersionInfo);
+
+
+/**
+ ************************************************************************
+ * @brief    Initializing the MDP
+ *            This function initializes the MDP, allocates the MDP execution
+ *            context and parses the metadata.
+ * @note    This function allocates the memory needed to store metadata in
+ *            TAG ID3 V1&V2, ASF or 3gpp asset structure with the OSAL allocation
+ *            function.
+ *            This memory will be freed in M4MDP_cleanUp function
+ *
+ * @note    This function is synchronous.
+ *
+ * @param    pContext        (OUT)    Execution Context
+ * @param    pFilePath        (IN)    Pointer to the multimedia file path
+ * @param    pFileReaderFcts    (IN)    Pointer to a structure containing OSAL file reader
+ *                                       function pointers
+ *
+ * @return    M4NO_ERROR                        No error
+ * @return    M4ERR_PARAMETER                    At least, one parameter is null (in DEBUG only)
+ * @return    M4ERR_ALLOC                        There is no more memory available
+ * @return    M4WAR_READER_NO_METADATA        The input file doesn't contain metadata
+ * @return    M4ERR_UNSUPPORTED_MEDIA_TYPE    The input file is not recognized
+ ************************************************************************
+*/
+M4OSA_ERR M4MDP_init(M4MDP_Context* pContext, M4OSA_Char* pFilePath,
+                      M4OSA_FileReadPointer*    pFileReaderFcts);
+
+/**
+ ************************************************************************
+ * @brief    This function frees the MDP execution context and all metadata
+ *            structures already allocated by M4MDP_init
+ *
+ * @note    This function is synchronous.
+ *
+ * @param    pContext                (IN) Execution Context
+ *
+ * @return    M4NO_ERROR            No error
+ * @return    M4ERR_PARAMETER        pContext is NULL. (in DEBUG only)
+************************************************************************
+*/
+M4OSA_ERR M4MDP_cleanUp(M4MDP_Context pContext);
+
+/**
+ ************************************************************************
+ * @brief    This function initializes the metadata parser only once, so that several
+ *            files can be checked one after another.
+ *
+ * @note    This function is synchronous.
+ *
+ * @param    pContext            (IN)    Execution Context
+ * @param    pFileReaderFcts        (IN)    Pointer to a structure containing OSAL file reader
+ *                                          function pointers
+ *
+ * @return    M4NO_ERROR            No error
+ * @return    M4ERR_PARAMETER        pContext is NULL. (in DEBUG only)
+************************************************************************
+*/
+M4OSA_ERR M4MDP_globalInit(M4MDP_Context* pContext, M4OSA_FileReadPointer*    pFileReaderFcts);
+
+/**
+ ************************************************************************
+ * @brief    This function opens a file in the meta data parser
+ *
+ * @note    This function is synchronous.
+ *
+ * @param    pContext                (IN) Execution Context
+ * @param    pFilePath        (IN)    Pointer to the multimedia file path
+  *
+ * @return    M4NO_ERROR            No error
+ * @return    M4ERR_PARAMETER        pContext is NULL. (in DEBUG only)
+************************************************************************
+*/
+M4OSA_ERR M4MDP_open(M4MDP_Context* pContext, M4OSA_Char* pFilePath);
+
+/**
+ ************************************************************************
+ * @brief    This function closes a file in the meta data parser
+ *
+ * @note    This function is synchronous.
+ *
+ * @param    pContext                (IN) Execution Context
+  *
+ * @return    M4NO_ERROR            No error
+ * @return    M4ERR_PARAMETER        pContext is NULL. (in DEBUG only)
+************************************************************************
+*/
+M4OSA_ERR M4MDP_close(M4MDP_Context* pContext);
+
+
+/**
+ ************************************************************************
+ * @brief    The function allows the retrieval of all fields of the
+ *            M4_MetaDataFields structure
+ *            It basically sets M4_MetaDataFields structure fields pointers to
+ *            the corresponding already retrieved metadata
+ *
+ * @note    If metadata is retrieved from an MP3 or an AAC file and both
+ *            ID3 V1 and V2 tags are present, the ID3 V2 metadata takes priority
+ *
+ * @note    This function is synchronous.
+ * @note    This function is used specifically by the music manager project
+ *
+ * @param    pContext        (IN) Execution Context
+ * @param    pMetadata        (OUT) Pointer to M4_MetaDataFields structure
+ *
+ * @return    M4NO_ERROR                        No error
+ * @return    M4ERR_PARAMETER                    pContext or pMetadata is NULL. (in DEBUG only)
+ * @return    M4WAR_READER_NO_METADATA        The input file doesn't contain metadata
+ ************************************************************************
+*/
+M4OSA_ERR M4MDP_getMetadata(M4MDP_Context pContext, M4_MetaDataFields* pMetadata);
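+
+/**
+ * Typical metadata retrieval flow (a sketch only: error handling is omitted,
+ * pOsalFileRead is the caller's OSAL file reader function table and the file
+ * path is purely illustrative):
+ *
+ *   M4MDP_Context     ctx = M4OSA_NULL;
+ *   M4_MetaDataFields metadata;
+ *
+ *   M4MDP_InitMetaDataFields(&metadata);
+ *   M4MDP_init(&ctx, (M4OSA_Char *)"/sdcard/clip.3gp", pOsalFileRead);
+ *   M4MDP_getMetadata(ctx, &metadata);
+ *   ... use the metadata fields ...
+ *   M4MDP_cleanUp(ctx);
+ *   M4MDP_FreeMetaDataFields(&metadata);
+ */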
+
+/**
+ ************************************************************************
+ * @brief    This function returns the audio and video media type
+ *
+ * @note    This function is synchronous.
+ * @note    This function is used specifically by the music manager project
+ *
+ * @param    pContext        (IN)    Execution Context
+ * @param    pAudio            (OUT)    Audio media type pointer
+ * @param    pVideo            (OUT)    Video media type pointer
+ *
+ * @return    M4NO_ERROR            No error
+ * @return    M4ERR_PARAMETER        At least one parameter is NULL. (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4MDP_getStreamsType(M4MDP_Context pContext,M4_StreamType* pAudio,M4_StreamType* pVideo);
+
+
+/**
+ ************************************************************************
+ * @brief    This function returns the mediaType
+ *
+ * @note    This function is synchronous.
+ * @note    This function is used specifically by the music manager project
+ *
+ * @param    pContext        (IN)    Execution Context
+ * @param    pMediaType        (OUT)    MediaType pointer
+ *
+ * @return    M4NO_ERROR            No error
+ * @return    M4ERR_PARAMETER        At least one parameter is NULL. (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4MDP_getMediaType(M4MDP_Context pContext,M4READER_MediaType* pMediaType);
+
+/******************************************************************************
+* @brief        returns mediaType found in a file
+* @note
+* @param        pFileDescriptor (IN) : pointer to file descriptor
+* @param        pFileFunction (IN)   : pointer to file function
+* @param        pMediaType (OUT)     : mediaType if found
+* @return       M4NO_ERROR / M4ERR_ALLOC
+******************************************************************************/
+M4OSA_ERR M4MDP_getMediaTypeFromFile(M4OSA_Void *pFileDescriptor,
+                                       M4OSA_FileReadPointer *pFileFunction,
+                                       M4READER_MediaType *pMediaType);
+
+/******************************************************************************
+* @brief        return media type by extension and content detections
+* @note
+* @param        pFileDescriptor (IN) : pointer to file descriptor
+* @param        dataBuffer (IN)  : memory buffer
+* @param        bufferSize (IN)  : buffer size
+* @param        pMediaType (OUT) : mediaType if found
+* @return       M4NO_ERROR / M4ERR_ALLOC
+******************************************************************************/
+M4OSA_ERR    M4MDP_getMediaTypeFromExtensionAndContent(M4OSA_Void *pFileDescriptor,
+                                                        M4OSA_UInt8 *dataBuffer,
+                                                        M4OSA_UInt32 bufferSize,
+                                                        M4READER_MediaType *pMediaType);
+
+/******************************************************************************
+* @brief        return media type by content detection
+* @note
+* @param        dataBuffer (IN)  : memory buffer
+* @param        bufferSize (IN)  : buffer size
+* @param        pMediaType (OUT) : mediaType if found
+* @return       M4NO_ERROR / M4ERR_ALLOC
+******************************************************************************/
+M4OSA_ERR    M4MDP_getMediaTypeFromContent(M4OSA_UInt8 *dataBuffer, M4OSA_UInt32 bufferSize,
+                                             M4READER_MediaType *pMediaType);
+
+/**
+ ************************************************************************
+ * @brief    The function parses the buffer pAsfBuffer, extracts metadata,
+ *            allocates memory for pMetaData and fills it in.
+ *
+ * @note    pAsfBuffer is owned by the application (caller).
+ *            The application frees pAsfBuffer and pMetaData
+ *
+ * @note    This function is synchronous.
+ *
+ * @param    pAsfBuffer            (IN)    input buffer
+ * @param    pMetaData            (OUT)    Pointer to the metadata structure
+ *
+ * @return    M4NO_ERROR                        No error
+ * @return    M4ERR_PARAMETER                    pContext or pAsfBuffer is NULL. (in DEBUG only)
+ * @return    M4ERR_ALLOC                        There is no more memory available
+ * @return    M4WAR_READER_NO_METADATA        The M4READER_Buffer doesn't contain metadata
+ * @return    M4ERR_UNSUPPORTED_MEDIA_TYPE    The input file is not recognized
+ ************************************************************************
+*/
+M4OSA_ERR M4MDP_parseASFContentDesc(M4READER_Buffer* pAsfBuffer, M4_MetaDataFields *pMetaData);
+
+
+/**
+ ************************************************************************
+ * @brief    The function allocates memory for pMetaData and copies the
+ *            pAssetFields fields into it
+ *
+ * @note    The application which calls M4MDP_parse3GppAssetField MUST free pMetaData.
+ *
+ * @note    This function is synchronous.
+ *
+ * @param    pAssetFields    (IN)    Asset fields structure filled by the 3gpp reader
+ * @param    pMetaData        (OUT)    Metadata structure to be filled in
+ *
+ * @return    M4NO_ERROR                        No error
+ * @return    M4ERR_PARAMETER                    pContext or pAssetFields is NULL. (in DEBUG only)
+ * @return    M4ERR_ALLOC                        There is no more memory available
+ * @return    M4ERR_UNSUPPORTED_MEDIA_TYPE    The input file is not recognized
+ ************************************************************************
+*/
+M4OSA_ERR M4MDP_parse3GppAssetField(M4_MetaDataFields* pAssetFields, M4_MetaDataFields *pMetaData);
+
+
+/**
+ ************************************************************************
+ * @brief    The function allocates memory for pMetaData and copies the
+ *            pExifFields fields into it
+ *
+ * @note    The application which calls M4MDP_parseExifField MUST free pMetaData.
+ *
+ * @note    This function is synchronous.
+ *
+ * @param    pExifFields    (IN)    Exif fields structure filled by the exif reader
+ * @param    pMetaData    (OUT)    Metadata structure to be filled in
+ *
+ * @return    M4NO_ERROR                        No error
+ * @return    M4ERR_PARAMETER                    pContext or pAssetFields is NULL. (in DEBUG only)
+ * @return    M4ERR_ALLOC                        There is no more memory available
+ * @return    M4ERR_UNSUPPORTED_MEDIA_TYPE    The input file is not recognized
+ ************************************************************************
+*/
+M4OSA_ERR M4MDP_parseExifField(M4_MetaDataFields *pExifFields, M4_MetaDataFields *pMetaData);
+
+
+/**
+ ************************************************************************
+ * @brief    The function allocates and fills the pMetaDataStruct by parsing
+ *            a buffer
+ *
+ * @note    pMetaDataStruct is owned by the application (caller).
+ *            It is the responsibility of the application (caller) to free it
+ *
+ * @note    This function is synchronous.
+ *
+ * @param        pBuffer            (IN)    input buffer
+ * @param        mediaType        (IN)    media type of the buffer
+ * @param        pMetaDataStruct    (OUT)    Pointer to an array of metadata
+ * @param        pSize            (OUT)    pMetaDataStruct size
+ *
+ * @return    M4NO_ERROR                    No error
+ * @return    M4ERR_PARAMETER                pContext or pBuffer or pMetaDataStruct is NULL.
+ *                                          (in DEBUG only)
+ * @return    M4ERR_ALLOC                    There is no more memory available
+ * @return    M4ERR_UNSUPPORTED_MEDIA_TYPE The media type is not supported
+ * @return    M4WAR_READER_NO_METADATA    No metadata detected
+ ************************************************************************
+*/
+M4OSA_ERR M4MDP_getMetaDataFromBuffer(M4_MetadataBuffer*    pBuffer,
+                                      M4READER_MediaType    mediaType,
+                                      M4_MetaDataFields**    pMetaDataStruct,
+                                      M4OSA_UInt32*            pSize);
+
+/**
+ ************************************************************************
+ * @brief    The function initializes the metadata  structure
+ *
+ * @param    pMetadata        (OUT) Pointer to M4_MetaDataFields structure
+ *
+ * @return    M4NO_ERROR            No error
+ * @return    M4ERR_PARAMETER        pContext or pMetadata is NULL. (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4MDP_InitMetaDataFields(M4_MetaDataFields *pMetaDataTab);
+
+/**
+ ************************************************************************
+ * @brief    The function frees the metadata  structure
+ *
+ * @param    pMetadata        (IN) Pointer to M4_MetaDataFields structure
+ *
+ * @return    M4NO_ERROR            No error
+ * @return    M4ERR_PARAMETER        pContext or pMetadata is NULL. (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4MDP_FreeMetaDataFields(M4_MetaDataFields *pMetaDataTab);
+
+/******************************************************************************
+* @brief        returns mediaType found in a file
+* @note
+* @param        pContext (IN)        : MDP execution context
+* @param        pFileDescriptor (IN) : pointer to file descriptor
+* @param        pFileFunction (IN)   : pointer to file function
+* @param        pMediaType (OUT)     : mediaType if found
+* @return       M4NO_ERROR / M4ERR_ALLOC
+******************************************************************************/
+M4OSA_ERR M4MDP_getMediaTypeFromFileExtended(    M4MDP_Context pContext,
+                                                M4OSA_Void *pFileDescriptor,
+                                                M4OSA_FileReadPointer *pFileFunction,
+                                                M4READER_MediaType *pMediaType);
+
+/**
+ ************************************************************************
+ * @brief    This function gets the file size
+ *
+ * @param    pContext        (IN) Pointer to M4MDP Context structure
+ * @param    pSize            (OUT) Pointer to the file size
+ *
+ * @return    M4NO_ERROR            No error
+ * @return    M4ERR_PARAMETER        pContext or pSize is NULL. (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4MDP_getMetaDataFileSize(M4MDP_Context pContext, M4OSA_UInt32 *pSize);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __M4MDP_API_H__ */
diff --git a/libvideoeditor/vss/common/inc/M4OSA_CoreID.h b/libvideoeditor/vss/common/inc/M4OSA_CoreID.h
new file mode 100755
index 0000000..425c8ec
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4OSA_CoreID.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file   M4OSA_CoreID.h
+ * @brief  defines the unique component identifiers used for memory management
+ *         and optionID mechanism
+ * @note
+ *
+ ************************************************************************
+*/
+#ifndef __M4OSA_COREID_H__
+#define __M4OSA_COREID_H__
+
+/* CoreId are defined on 14 bits */
+/* we start from 0x0100, lower values are reserved for osal core components */
+
+/* reader shells*/
+#define M4READER_COMMON     0x0100
+#define M4READER_AVI        0x0101
+#define M4READER_AMR        0x0102
+#define M4READER_3GP        0x0103
+#define M4READER_NET        0x0104
+#define M4READER_3GP_HTTP   0x0105
+#define M4READER_MP3        0x0106
+#define M4READER_WAV        0x0107
+#define M4READER_MIDI       0x0108
+#define M4READER_ASF        0x0109
+#define M4READER_REAL        0x010A
+#define M4READER_AAC        0x010B
+#define M4READER_FLEX        0x010C
+#define M4READER_BBA        0x010D
+#define M4READER_SYNTHESIS_AUDIO    0x010E
+#define M4READER_JPEG        0x010F
+
+
+/* writer shells*/
+#define M4WRITER_COMMON     0x0110
+#define M4WRITER_AVI        0x0111
+#define M4WRITER_AMR        0x0112
+#define M4WRITER_3GP        0x0113
+#define M4WRITER_JPEG        0x0116
+#define M4WRITER_MP3        0x0117
+
+/* decoder shells */
+#define M4DECODER_COMMON    0x0120
+#define M4DECODER_JPEG      0x0121
+#define M4DECODER_MPEG4     0x0122
+#define M4DECODER_AUDIO     0x0123
+#define M4DECODER_AVC       0x0124
+#define M4DECODER_MIDI      0x0125
+#define M4DECODER_WMA        0x0126
+#define M4DECODER_WMV        0x0127
+#define M4DECODER_RMV        0x0128
+#define M4DECODER_RMA        0x0129
+#define M4DECODER_AAC       0x012A
+#define M4DECODER_BEATBREW  0x012B
+#define M4DECODER_EXTERNAL  0x012C
+
+/* encoder shells */
+#define M4ENCODER_COMMON    0x0130
+#define M4ENCODER_JPEG      0x0131
+#define M4ENCODER_MPEG4     0x0132
+#define M4ENCODER_AUDIO     0x0133
+#define M4ENCODER_VID_NULL  0x0134
+#define M4ENCODER_MJPEG        0x0135
+#define M4ENCODER_MP3        0x0136
+#define M4ENCODER_H264        0x0137
+#define M4ENCODER_AAC        0x0138
+#define M4ENCODER_AMRNB        0x0139
+#define M4ENCODER_AUD_NULL  0x013A
+#define M4ENCODER_EXTERNAL  0x013B
+
+/* cores */
+#define M4JPG_DECODER       0x0140
+#define M4JPG_ENCODER       0x0141
+
+#define M4MP4_DECODER       0x0142
+#define M4MP4_ENCODER       0x0143
+
+#define M4AVI_COMMON        0x0144
+#define M4AVI_READER        0x0145
+#define M4AVI_WRITER        0x0146
+
+#define M4HTTP_ENGINE       0x0147
+
+#define M4OSA_TMPFILE       0x0148
+#define M4TOOL_TIMER        0x0149
+
+#define M4AMR_READER        0x014A
+
+#define M4MP3_READER        0x014B
+
+#define M4WAV_READER        0x014C
+#define M4WAV_WRITER        0x014D
+#define M4WAV_COMMON        0x014E
+
+#define M4ADTS_READER        0x014F
+#define M4ADIF_READER        0x016A
+
+#define M4SPS               0x0150
+#define M4EXIF_DECODER      0x0151
+#define M4EXIF_ENCODER      0x0152
+#define M4GIF_DECODER       0x0153
+#define M4GIF_ENCODER       0x0154
+#define M4PNG_DECODER       0x0155
+#define M4PNG_ENCODER       0x0156
+#define M4WBMP_DECODER      0x0157
+#define M4WBMP_ENCODER      0x0158
+
+#define M4AMR_WRITER        0x0159    /**< no room to put it along M4AMR_READER */
+
+
+#define M4AVC_DECODER       0x015A
+#define M4AVC_ENCODER       0x015B
+
+#define M4ASF_READER        0x015C
+#define M4WMDRM_AGENT        0x015D
+#define M4MIDI_READER        0x0162    /**< no room before the presenters */
+#define M4RM_READER         0x163
+#define M4RMV_DECODER        0x164
+#define M4RMA_DECODER        0x165
+
+#define M4TOOL_XML            0x0166
+#define M4TOOL_EFR            0x0167    /**< Decryption module for Video Artist */
+#define M4IAL_FTN            0x0168    /* FTN implementation of the IAL */
+#define M4FTN                0x0169    /* FTN library */
+
+/* presenter */
+#define M4PRESENTER_AUDIO   0x0160
+#define M4PRESENTER_VIDEO   0x0161
+
+/* high level interfaces (vps, etc..)*/
+#define M4VPS               0x0170
+#define M4VTS               0x0171
+#define M4VXS               0x0172
+#define M4CALLBACK          0x0173
+#define M4VES               0x0174
+#define M4PREPROCESS_VIDEO  0x0175
+#define M4GRAB_AUDIO        0x0176
+#define M4GRAB_VIDEO        0x0177
+#define M4VSSAVI            0x0178
+#define M4VSS3GPP           0x0179
+#define M4PTO3GPP           0x017A
+#define M4PVX_PARSER        0x017B
+#define M4VCS                0x017C
+#define M4MCS                0x017D
+#define M4MNMC                0x0180    /**< mnm controller */
+#define M4TTEXT_PARSER      0x0181    /**< timed text */
+#define M4MM                0x0182    /**< Music manager */
+#define M4MDP                0x0183    /**< Metadata parser */
+#define M4MMSQLCORE            0x0184
+#define M4VPSIL                0x0185
+#define M4FILEIL            0x0186 /* IL file Interface */
+#define M4MU                0x0187
+#define M4VEE                0x0188  /**< Video effect engine */
+#define M4VA                0x0189 /* VideoArtist */
+#define M4JTS                0x018A
+#define M4JTSIL                0x018B
+#define M4AIR                0x018C  /**< AIR */
+#define M4SPE                0x018D  /**< Still picture editor */
+#define M4VS                0x018E    /**< Video Studio (xVSS) */
+#define M4VESIL                0x018F    /**< VES il */
+#define M4ID3                0x0190    /**< ID3 Tag Module */
+#define M4SC                0x0191    /**< Media Scanner */
+#define M4TG                0x0192  /**< Thumbnail Generator*/
+#define M4TS                0x0193    /**< Thumbnail storage */
+#define M4MB                0x0194    /**< Media browser */
+
+/* high level application (test or client app) */
+#define M4APPLI             0x0200
+#define M4VA_APPLI            0x0201    /**< Video Artist test application */
+
+/* external components (HW video codecs, etc.) */
+#define M4VD_EXTERNAL        0x0300
+#define M4VE_EXTERNAL        0x0301
+
+
+/* priority to combine with module ids */
+#define M4HIGH_PRIORITY     0xC000
+#define M4MEDIUM_PRIORITY   0x8000
+#define M4LOW_PRIORITY      0x4000
+#define M4DEFAULT_PRIORITY  0x0000
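+
+/* A minimal sketch of combining a priority with a core id (the core ids fit on
+   14 bits, so the priority occupies the two most significant bits of a 16-bit
+   value; the variable names are illustrative):
+
+     M4OSA_UInt16 id   = (M4OSA_UInt16)(M4HIGH_PRIORITY | M4READER_3GP);    -- 0xC103
+     M4OSA_UInt16 core = (M4OSA_UInt16)(id & 0x3FFF);                       -- 0x0103
+*/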
+
+
+#endif /*__M4OSA_COREID_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4PCMR_CoreReader.h b/libvideoeditor/vss/common/inc/M4PCMR_CoreReader.h
new file mode 100755
index 0000000..78fe910
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4PCMR_CoreReader.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file    M4PCMR_CoreReader.h
+ * @brief   PCM/WAV Reader declarations
+ * @note    This file declares the functions of the PCM/WAV reader
+ ************************************************************************
+*/
+
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Types.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_FileReader.h"
+#include "M4SYS_AccessUnit.h"
+#include "M4TOOL_VersionInfo.h"
+
+
+#define M4PCMC_ERR_PCM_NOT_COMPLIANT    M4OSA_ERR_CREATE(M4_ERR, M4WAV_COMMON,0x000001)
+#define M4PCMC_ERR_PCM_NO_SPACE_AVAIL   M4OSA_ERR_CREATE(M4_ERR, M4WAV_COMMON,0x000002)
+#define M4PCMC_ERR_PCM_NOT_SUPPORTED    M4OSA_ERR_CREATE(M4_ERR, M4WAV_COMMON,0x000003)
+
+#define M4PCMC_WAR_END_OF_STREAM        M4OSA_ERR_CREATE(M4_WAR, M4WAV_COMMON ,0x000001)
+
+/**
+ ************************************************************************
+ * structure    M4PCMC_DecoderSpecificInfo
+ * @brief       This structure defines the decoder specific information
+ * @note        This structure is used by the WAV reader to store all
+ *              decoder specific information:
+ *              - Sample Frequency
+ *              - Average Bytes per second
+ *              - Number of channels (1 or 2)
+ *              - Number of bits per sample (8 or 16)
+ ************************************************************************
+*/
+typedef struct {
+    M4OSA_UInt32    SampleFrequency;
+    M4OSA_UInt32    AvgBytesPerSec;
+    M4OSA_UInt32    DataLength;
+    M4OSA_UInt16    nbChannels;
+    M4OSA_UInt16    BitsPerSample;
+} M4PCMC_DecoderSpecificInfo;
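+
+/* Illustrative relationship between the fields above (standard PCM/WAV
+   arithmetic, values purely as an example): for 16-bit stereo at 16 kHz,
+
+     AvgBytesPerSec = SampleFrequency * nbChannels * (BitsPerSample / 8)
+                    = 16000 * 2 * (16 / 8) = 64000 bytes per second
+*/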
+
+/**
+ ************************************************************************
+ * enum     M4PCMR_State
+ * @brief   This enum defines the WAV Reader states
+ * @note    The state automaton is documented separately;
+ *          consult the design specification for details
+ ************************************************************************
+*/
+typedef enum {
+    M4PCMR_kInit    = 0x0000,
+    M4PCMR_kOpening = 0x0100,
+    M4PCMR_kOpening_streamRetrieved = 0x0101,
+    M4PCMR_kReading = 0x0200,
+    M4PCMR_kReading_nextAU  = 0x0201,
+    M4PCMR_kClosed  = 0x0300
+} M4PCMR_State;
+
+/**
+ ************************************************************************
+ * enum     M4PCMR_OptionID
+ * @brief   This enum defines the WAV Reader options
+ * @note    Only one option is available:
+ *          - M4PCMR_kPCMblockSize: sets the size of the PCM block to read
+ *            from the WAV file
+ ************************************************************************
+*/
+typedef enum {
+    M4PCMR_kPCMblockSize    = M4OSA_OPTION_ID_CREATE(M4_READ, M4WAV_READER, 0x01)
+} M4PCMR_OptionID;
+
+/**
+ ************************************************************************
+ * structure    M4PCMR_Context
+ * @brief       This structure defines the WAV Reader context
+ * @note        This structure is used for all WAV Reader calls to store
+ *              the context
+ ************************************************************************
+*/
+typedef struct {
+    M4OSA_MemAddr32             m_pDecoderSpecInfo;/**< Pointer to the decoder specific info
+                                                        structure contained in pStreamDesc
+                                                        (only used to free...) */
+    M4OSA_FileReadPointer*      m_pFileReadFunc;/**< The OSAL set of pointer to function for
+                                                         file management */
+    M4OSA_Context               m_fileContext;  /**< The context needed by OSAL to manage File */
+    M4PCMC_DecoderSpecificInfo  m_decoderConfig;/**< Specific configuration for decoder */
+    M4PCMR_State                m_state;        /**< state of the wav reader */
+    M4PCMR_State                m_microState;   /**< state of the read wav stream */
+    M4OSA_UInt32                m_blockSize;    /**< Size of the read block */
+    M4OSA_UInt32                m_offset;       /**< Offset of the PCM read (i.e m_offset of the
+                                                        file without wav header) */
+    M4OSA_MemAddr32             m_pAuBuffer;    /**< Re-used buffer for AU content storage */
+    M4OSA_FilePosition          m_dataStartOffset;/**< offset of the pcm data beginning into
+                                                         the file */
+} M4PCMR_Context;
+
+/*************************************************************************
+ *
+ *  Prototypes of all WAV reader functions
+ *
+ ************************************************************************/
+M4OSA_ERR M4PCMR_openRead(M4OSA_Context* pContext, M4OSA_Void* pUrl,
+                             M4OSA_FileReadPointer* pFileFunction);
+M4OSA_ERR M4PCMR_getNextStream(M4OSA_Context context, M4SYS_StreamDescription* pStreamDesc);
+M4OSA_ERR M4PCMR_startReading(M4OSA_Context context, M4SYS_StreamID* pStreamIDs);
+M4OSA_ERR M4PCMR_nextAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU);
+M4OSA_ERR M4PCMR_freeAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU);
+M4OSA_ERR M4PCMR_seek(M4OSA_Context context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
+                         M4SYS_SeekAccessMode seekAccessMode, M4OSA_Time* pObtainCTS);
+M4OSA_ERR M4PCMR_closeRead(M4OSA_Context context);
+M4OSA_ERR M4PCMR_getOption(M4OSA_Context context, M4PCMR_OptionID optionID,
+                              M4OSA_DataOption* pValue);
+M4OSA_ERR M4PCMR_setOption(M4OSA_Context context, M4PCMR_OptionID optionID,
+                              M4OSA_DataOption Value);
+M4OSA_ERR M4PCMR_getVersion(M4_VersionInfo *pVersion);
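+
+/*
+ * Typical read sequence (a sketch only: error handling is omitted,
+ * pOsalFileRead is the caller's OSAL file reader function table, the file
+ * path is illustrative, and the streamID field of M4SYS_StreamDescription
+ * is assumed from M4SYS_Stream.h):
+ *
+ *   M4OSA_Context            ctx;
+ *   M4SYS_StreamDescription  stream;
+ *   M4SYS_AccessUnit         au;
+ *
+ *   M4PCMR_openRead(&ctx, (M4OSA_Void *)"/sdcard/clip.wav", pOsalFileRead);
+ *   M4PCMR_getNextStream(ctx, &stream);
+ *   M4PCMR_startReading(ctx, &stream.streamID);
+ *   while (M4NO_ERROR == M4PCMR_nextAU(ctx, stream.streamID, &au))
+ *   {
+ *       ... consume the AU ...
+ *       M4PCMR_freeAU(ctx, stream.streamID, &au);
+ *   }
+ *   M4PCMR_closeRead(ctx);
+ */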
diff --git a/libvideoeditor/vss/common/inc/M4READER_3gpCom.h b/libvideoeditor/vss/common/inc/M4READER_3gpCom.h
new file mode 100755
index 0000000..ab07d50
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4READER_3gpCom.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file    M4READER_3gpCom.h
+ * @brief    Generic encapsulation of the core 3gp reader
+ * @note    This file declares the generic shell interface retrieving function
+ *            of the 3GP reader
+ ************************************************************************
+*/
+
+#ifndef __M4READER_3GPCOM_H__
+#define __M4READER_3GPCOM_H__
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4READER_Common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Error: Function M4READER_Com3GP_getNextStreamHandler must be called before.
+ */
+#define M4ERR_NO_VIDEO_STREAM_RETRIEVED_YET        M4OSA_ERR_CREATE(M4_ERR, M4READER_3GP, 0x000001)
+
+/**
+ * Error: No video stream H263 in file.
+ */
+#define M4ERR_VIDEO_NOT_H263                    M4OSA_ERR_CREATE(M4_ERR, M4READER_3GP, 0x000002)
+
+/**
+ * There has been a problem with the decoder configuration information; it seems to be invalid */
+#define M4ERR_READER3GP_DECODER_CONFIG_ERROR    M4OSA_ERR_CREATE(M4_ERR, M4READER_3GP, 0x000003)
+
+#define M4READER_COM3GP_MAXVIDEOSTREAM  5
+#define M4READER_COM3GP_MAXAUDIOSTREAM  5
+#define M4READER_COM3GP_MAXTEXTSTREAM   5
+
+typedef struct
+{
+    M4OSA_Context                m_pFFContext;    /**< core file format context */
+
+    M4_StreamHandler*            m_AudioStreams[M4READER_COM3GP_MAXAUDIOSTREAM];
+    M4_StreamHandler*            m_pAudioStream;    /**< pointer to the current allocated audio
+                                                            stream handler */
+
+    M4_StreamHandler*            m_VideoStreams[M4READER_COM3GP_MAXVIDEOSTREAM];
+    M4_StreamHandler*            m_pVideoStream;    /**< pointer to the current allocated video
+                                                            stream handler */
+
+#ifdef M4VPS_SUPPORT_TTEXT
+    M4_StreamHandler*            m_TextStreams[M4READER_COM3GP_MAXTEXTSTREAM];
+    M4_StreamHandler*            m_pTextStream;    /**< pointer to the current allocated text
+                                                            stream handler */
+#endif /*M4VPS_SUPPORT_TTEXT*/
+
+} M4READER_Com3GP_Context;
+
+/**
+ ************************************************************************
+ * structure M4READER_3GP_Buffer (but nothing specific to 3GP, nor to a reader !)
+ * @brief     This structure defines a buffer that can be used to exchange data (should be in OSAL)
+ ************************************************************************
+*/
+typedef struct
+{
+    M4OSA_UInt32    size;            /**< the size in bytes of the buffer */
+    M4OSA_MemAddr8    dataAddress;    /**< the pointer to the buffer */
+} M4READER_3GP_Buffer;
+
+/**
+ ************************************************************************
+ * enum     M4READER_3GP_OptionID
+ * @brief    This enum defines the reader options specific to the 3GP format.
+ * @note    These options can be read from or written to a 3GP reader via M4READER_3GP_getOption.
+ ************************************************************************
+*/
+typedef enum
+{
+    /**
+     * Get the DecoderConfigInfo for H263,
+     * option value must be a pointer to M4READER_3GP_H263Properties allocated by caller */
+    M4READER_3GP_kOptionID_H263Properties = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_3GP, 0x01),
+
+    /**
+     * Get the Purple Labs drm information */
+    M4READER_3GP_kOptionID_PurpleLabsDrm = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_3GP, 0x02),
+
+    /**
+     * Set the Fast open mode (Only the first AU of each stream will be parsed -> less CPU,
+                                 less RAM). */
+    M4READER_3GP_kOptionID_FastOpenMode = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_3GP, 0x03),
+
+    /**
+     * Set the Audio only mode (the video stream won't be opened) */
+    M4READER_3GP_kOptionID_AudioOnly = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_3GP, 0x04),
+
+    /**
+     * Set the Video only mode (the audio stream won't be opened) */
+    M4READER_3GP_kOptionID_VideoOnly = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_3GP, 0x05),
+
+    /**
+     * Get the next video CTS */
+    M4READER_3GP_kOptionID_getNextVideoCTS = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_3GP, 0x06)
+
+} M4READER_3GP_OptionID;
+
+
+/**
+ ************************************************************************
+ * struct    M4READER_3GP_H263Properties
+ * @brief    Contains info about H263 stream read from the 3GP file.
+ ************************************************************************
+*/
+typedef struct
+{
+    /**< the profile as defined in the Visual Object Sequence header, if present */
+    M4OSA_UInt8        uiProfile;
+    /**< the level as defined in the Visual Object Sequence header, if present */
+    M4OSA_UInt8        uiLevel;
+
+} M4READER_3GP_H263Properties;
+
+/**
+ ************************************************************************
+ * @brief    Get the next stream found in the 3gp file
+ * @note
+ * @param    pContext:        (IN)    Context of the reader
+ * @param    pMediaFamily:    (OUT)    Pointer to a user allocated M4READER_MediaFamily that will
+ *                                      be filled with the media family of the found stream
+ * @param    pStreamHandler:    (OUT)    Pointer to a stream handler that will be allocated and
+ *                                          filled with the found stream description
+ * @return    M4NO_ERROR                 There is no error
+ * @return    M4ERR_PARAMETER            At least one parameter is not properly set
+ * @return    M4WAR_NO_MORE_STREAM    No more available stream in the media (all streams found)
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_Com3GP_getNextStreamHandler(M4OSA_Context context,
+                                                 M4READER_MediaFamily *pMediaFamily,
+                                                 M4_StreamHandler **pStreamHandler);
+
+/**
+ ************************************************************************
+ * @brief    Fill the access unit (AU) structure with initialization values
+ * @note    An AU is the smallest possible amount of data to be decoded by a decoder.
+ * @param    pContext:        (IN)        Context of the reader
+ * @param    pStreamHandler    (IN)        The stream handler the access unit is associated with
+ * @param    pAccessUnit        (IN/OUT)    Pointer to an access unit to fill with read data
+ *                                          (the au structure is allocated by the user, and must
+ *                                          be initialized by calling M4READER_fillAuStruct_fct
+ *                                          after creation)
+ * @return    M4NO_ERROR                     There is no error
+ * @return    M4ERR_PARAMETER                At least one parameter is not properly set
+ * @returns    M4ERR_ALLOC                    Memory allocation failed
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_Com3GP_fillAuStruct(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                                         M4_AccessUnit *pAccessUnit);
+
+/**
+ ************************************************************************
+ * @brief    Cleans up the stream handler
+ * @param    pStreamHandler: (IN/OUT) Stream handler
+ * @return    M4ERR_PARAMETER:    The stream handler is null
+ * @return    M4NO_ERROR:            No error
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_Com3GP_cleanUpHandler(M4_StreamHandler* pStreamHandler);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4READER_3GPCOM_H__ */
+
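For illustration only (not part of this change), a shell client typically drives M4READER_Com3GP_getNextStreamHandler in a loop until it returns M4WAR_NO_MORE_STREAM. The sketch below assumes M4NO_ERROR, M4WAR_NO_MORE_STREAM and M4OSA_NULL are provided by the common reader/OSAL headers already included by M4READER_3gpCom.h, and the function name is illustrative.

#include "M4READER_3gpCom.h"

/* Illustrative sketch: enumerate every stream of an already-opened reader context. */
static M4OSA_ERR listStreams(M4OSA_Context pContext)
{
    M4READER_MediaFamily family;
    M4_StreamHandler    *pStream = M4OSA_NULL;
    M4OSA_ERR            err;

    for (;;) {
        err = M4READER_Com3GP_getNextStreamHandler(pContext, &family, &pStream);
        if (M4WAR_NO_MORE_STREAM == err) {
            return M4NO_ERROR;      /* all streams have been reported */
        }
        if (M4NO_ERROR != err) {
            return err;             /* e.g. M4ERR_PARAMETER */
        }
        /* family is M4READER_kMediaFamilyVideo / Audio / Text; pStream was
         * allocated by the reader and describes the newly found stream. */
    }
}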
diff --git a/libvideoeditor/vss/common/inc/M4READER_Amr.h b/libvideoeditor/vss/common/inc/M4READER_Amr.h
new file mode 100755
index 0000000..630f657
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4READER_Amr.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ************************************************************************
+ * @file   M4READER_Amr.h
+ * @brief  Generic encapsulation of the core amr reader
+ * @note   This file declares the generic shell interface retrieving function
+ *         of the AMR reader
+ ************************************************************************
+*/
+#ifndef __M4READER_AMR_H__
+#define __M4READER_AMR_H__
+
+#include "M4READER_Common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+*************************************************************************
+* @brief Retrieves the generic interfaces implemented by the reader
+*
+* @param pMediaType             : Pointer to a M4READER_MediaType (allocated by the caller)
+*                              that will be filled with the media type supported by this reader
+* @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface
+*                              implemented by this reader. The interface is a structure allocated
+*                              by the function and must be deallocated by the caller.
+* @param pRdrDataInterface   : Address of a pointer that will be set to the data interface
+*                              implemented by this reader. The interface is a structure allocated
+*                              by the function and must be deallocated by the caller.
+*
+* @returns : M4NO_ERROR     if OK
+*             ERR_ALLOC      if an allocation failed
+*            ERR_PARAMETER  at least one parameter is not properly set (in DEBUG only)
+*************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_getInterfaces(M4READER_MediaType *pMediaType,
+                                      M4READER_GlobalInterface **pRdrGlobalInterface,
+                                      M4READER_DataInterface **pRdrDataInterface);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__M4READER_AMR_H__*/
+
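As a hedged usage sketch (the function name openAmrReader is illustrative, and M4NO_ERROR is assumed to come from the OSAL error header), retrieving the AMR reader and creating an instance through the returned global interface could look like this:

#include "M4READER_Amr.h"

/* Illustrative sketch: fetch the AMR reader interfaces and create a reader
 * instance through the returned global interface table. */
static M4OSA_ERR openAmrReader(M4OSA_Context             *pReaderCtx,
                               M4READER_GlobalInterface **pGlobalItf,
                               M4READER_DataInterface   **pDataItf)
{
    M4READER_MediaType mediaType = M4READER_kMediaTypeUnknown;
    M4OSA_ERR err;

    err = M4READER_AMR_getInterfaces(&mediaType, pGlobalItf, pDataItf);
    if (M4NO_ERROR != err) {
        return err;                       /* allocation or parameter error */
    }
    /* mediaType is expected to be M4READER_kMediaTypeAMR here. */

    return (*pGlobalItf)->m_pFctCreate(pReaderCtx);
}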
diff --git a/libvideoeditor/vss/common/inc/M4READER_Common.h b/libvideoeditor/vss/common/inc/M4READER_Common.h
new file mode 100755
index 0000000..cf310a5
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4READER_Common.h
@@ -0,0 +1,718 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file   M4READER_Common.h
+ * @brief  Shell Reader common interface declaration
+ * @note   This file declares the common interfaces that reader shells must implement
+ *
+ ************************************************************************
+*/
+#ifndef __M4READER_COMMON_H__
+#define __M4READER_COMMON_H__
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_FileReader.h"
+#include "M4OSA_CoreID.h"
+#include "M4DA_Types.h"
+#include "M4Common_types.h"
+
+/* ERRORS */
+#define M4ERR_READER_UNKNOWN_STREAM_TYPE        M4OSA_ERR_CREATE(M4_ERR, M4READER_COMMON, 0x0001)
+
+/* WARNINGS */
+#define M4WAR_READER_NO_METADATA                M4OSA_ERR_CREATE(M4_WAR, M4READER_COMMON, 0x0001)
+#define M4WAR_READER_INFORMATION_NOT_PRESENT    M4OSA_ERR_CREATE(M4_WAR, M4READER_COMMON, 0x0002)
+
+
+/**
+ ************************************************************************
+ * enum        M4READER_MediaType
+ * @brief    This enum defines the Media types used to create media readers
+ * @note    This enum is used internally by the VPS to identify a currently supported
+ *          media reader interface. Each reader is registered with one of these types.
+ *          When a reader instance is needed, this type is used to identify
+ *          and retrieve its interface.
+ ************************************************************************
+*/
+typedef enum
+{
+    M4READER_kMediaTypeUnknown        = -1,    /**< Unknown media type */
+    M4READER_kMediaType3GPP            = 0,    /**< 3GPP file media type */
+    M4READER_kMediaTypeAVI            = 1,    /**< AVI file media type */
+    M4READER_kMediaTypeAMR            = 2,    /**< AMR file media type */
+    M4READER_kMediaTypeMP3            = 3,    /**< MP3 file media type */
+    M4READER_kMediaTypeRTSP            = 4,    /**< RTSP network accessed media type */
+    M4READER_kMediaType3GPPHTTP        = 5,    /**< Progressively downloaded 3GPP file media type */
+    M4READER_kMediaTypePVHTTP        = 6,    /**< Packet Video HTTP proprietary type */
+    M4READER_kMediaTypeWAV            = 7,    /**< WAV file media type */
+    M4READER_kMediaType3GPEXTHTTP    = 8,    /**< An external progressively downloaded 3GPP file
+                                                     media type */
+    M4READER_kMediaTypeAAC            = 9,    /**< ADTS and ADIF AAC support */
+    M4READER_kMediaTypeREAL            = 10,    /**< REAL Media type */
+    M4READER_kMediaTypeASF            = 11,    /**< ASF Media type */
+    M4READER_kMediaTypeFLEXTIME        = 12,    /**< FlexTime Media type */
+    M4READER_kMediaTypeBBA            = 13,    /**< Beatbrew audio Media type */
+    M4READER_kMediaTypeSYNTHAUDIO    = 14,    /**< Synthesis audio Media type */
+    M4READER_kMediaTypePCM            = 15,    /**< PCM Media type */
+    M4READER_kMediaTypeJPEG            = 16,    /**< JPEG Media type */
+    M4READER_kMediaTypeGIF            = 17,    /**< GIF Media type */
+    M4READER_kMediaTypeADIF            = 18,    /**< AAC-ADIF Media type */
+    M4READER_kMediaTypeADTS            = 19,    /**< AAC-ADTS Media type */
+
+    M4READER_kMediaType_NB  /* number of readers, keep it as last enum entry */
+
+} M4READER_MediaType;
+
+/**
+ ************************************************************************
+ * enum        M4READER_MediaFamily
+ * @brief    This enum defines the Media family of a stream
+ * @note    This enum is used internally by the VPS to identify what kind of stream
+ *          has been retrieved via getNextStream() function.
+ ************************************************************************
+*/
+typedef enum
+{
+    M4READER_kMediaFamilyUnknown   = -1,
+    M4READER_kMediaFamilyVideo     = 0,
+    M4READER_kMediaFamilyAudio     = 1,
+    M4READER_kMediaFamilyText      = 2
+} M4READER_MediaFamily;
+
+
+
+/**
+ ************************************************************************
+ * enum        M4READER_OptionID
+ * @brief    This enum defines the reader options
+ * @note    These options can be read from a reader via M4READER_getOption_fct
+ ************************************************************************
+*/
+typedef enum
+{
+    /**
+    Get the duration of the movie (in ms)
+    */
+    M4READER_kOptionID_Duration = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0),
+
+    /**
+    Get the version of the core reader
+    */
+    M4READER_kOptionID_Version  = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 1),
+
+    /**
+    Get the copyright from the media (if present)
+    (currently implemented for 3GPP only: copyright read from the cprt atom in the udta box, if present)
+    */
+    M4READER_kOptionID_Copyright= M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 2),
+
+
+    /**
+    Set the OSAL file reader functions to the reader (type of value: M4OSA_FileReadPointer*)
+    */
+    M4READER_kOptionID_SetOsaFileReaderFctsPtr = M4OSA_OPTION_ID_CREATE(M4_READ,\
+                                                     M4READER_COMMON, 3),
+
+    /**
+    Set the OSAL file writer functions to the reader (type of value: M4OSA_FileWriterPointer*)
+    */
+    M4READER_kOptionID_SetOsaFileWriterFctsPtr = M4OSA_OPTION_ID_CREATE(M4_READ,\
+                                                     M4READER_COMMON, 4),
+
+    /**
+    Set the OSAL network functions to the reader (type of value: M4OSA_NetFunction*)
+    */
+    M4READER_kOptionID_SetOsaNetFctsPtr = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 5),
+
+    /**
+    Creation time in sec. since midnight, Jan. 1, 1970 (type of value: M4OSA_UInt32*)
+    (available only for 3GPP content, including PGD)
+    */
+    M4READER_kOptionID_CreationTime = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 6),
+
+    /**
+    Bitrate in bps (type of value: M4OSA_Double*)
+    */
+    M4READER_kOptionID_Bitrate = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 7),
+
+    /**
+    Tag ID3v1 of MP3 source (type of value: M4MP3R_ID3Tag*)
+    */
+    M4READER_kOptionID_Mp3Id3v1Tag = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 8),
+
+    /**
+    Tag ID3v2 of MP3 source (type of value: M4MP3R_ID3Tag*)
+    */
+    M4READER_kOptionID_Mp3Id3v2Tag = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 9),
+
+    /**
+    Number of Access Unit in the Audio stream (type of value: M4OSA_UInt32*)
+    */
+    M4READER_kOptionID_GetNumberOfAudioAu = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0xA),
+
+    /**
+    Number of frames per block
+    */
+    M4READER_kOptionID_GetNbframePerBloc    = M4OSA_OPTION_ID_CREATE(M4_READ,\
+                                                             M4READER_COMMON, 0xB),
+
+    /**
+    Flag for protection presence
+    */
+    M4READER_kOptionID_GetProtectPresence    = M4OSA_OPTION_ID_CREATE(M4_READ,\
+                                                             M4READER_COMMON, 0xC),
+
+    /**
+    Set DRM Context
+    */
+    M4READER_kOptionID_SetDRMContext    = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0xD),
+
+    /**
+    Get ASF Content Description Object
+    */
+    M4READER_kOptionID_ContentDescription = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0xE),
+
+    /**
+    Get ASF Extended Content Description Object
+    */
+    M4READER_kOptionID_ExtendedContentDescription = M4OSA_OPTION_ID_CREATE(M4_READ,\
+                                                             M4READER_COMMON, 0xF),
+
+    /**
+    Get Asset 3gpp Fields
+    */
+    M4READER_kOptionID_3gpAssetFields = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x10),
+
+    /**
+    Set the max metadata size supported in the reader
+    Only relevant for the 3gp parser so far, but can be used by other readers
+    */
+    M4READER_kOptionID_MaxMetadataSize = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_COMMON, 0x11),
+
+    M4READER_kOptionID_GetMetadata = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x12),
+    /**
+    Get 3gpp 'ftyp' atom
+    */
+    M4READER_kOptionID_3gpFtypBox  = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x13),
+
+
+    /* value is M4OSA_Bool* */
+    /* return the drm protection status of the file*/
+    M4READER_kOptionID_isProtected = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x14),
+
+    /* value is a void* */
+    /* return the aggregate rights of the file*/
+    /* The buffer must be allocated by the application and must be big enough*/
+    /* By default, the size for WMDRM is 76 bytes */
+    M4READER_kOptionID_getAggregateRights = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x15),
+    /**
+    Get ASF Content Description Object
+    */
+    M4READER_kOptionID_ExtendedContentEncryption = M4OSA_OPTION_ID_CREATE(M4_READ,\
+                                                         M4READER_COMMON, 0x16),
+
+    /**
+    Number of Access Unit in the Video stream (type of value: M4OSA_UInt32*)
+    */
+    M4READER_kOptionID_GetNumberOfVideoAu = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x17),
+
+    /**
+    Chunk mode activation size for the JPG reader */
+    M4READER_kOptionID_JpegChunckSize = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x18),
+
+    /**
+    Check if ASF file contains video */
+    M4READER_kOptionID_hasVideo = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x19),
+
+    /**
+     Set specific read mode for Random Access JPEG */
+    M4READER_kOptionID_JpegRAMode = M4OSA_OPTION_ID_CREATE(M4_WRITE, M4READER_COMMON, 0x20),
+
+    /**
+    Get Thumbnail buffer in case of JPG reader */
+    M4READER_kOptionID_JpegThumbnail = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x21),
+
+    /**
+    Get FPDATA buffer in case of JPG reader */
+    M4READER_kOptionID_JpegFPData = M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x22),
+
+    /**
+    Get JPEG info (progressive, subsampling) */
+    M4READER_kOptionID_JpegInfo= M4OSA_OPTION_ID_CREATE(M4_READ, M4READER_COMMON, 0x23)
+
+
+/*****************************************/
+} M4READER_OptionID;
+/*****************************************/
+
+/**
+ ************************************************************************
+ * structure    M4READER_CopyRight
+ * @brief        This structure defines a copyRight description
+ * @note        This structure is used to retrieve the copyRight of the media
+ *              (if present) via the getOption() function
+ ************************************************************************
+*/
+typedef struct _M4READER_CopyRight
+{
+    /**
+    Pointer to copyright data (allocated by user)
+    */
+    M4OSA_UInt8*   m_pCopyRight;
+
+    /**
+    Copyright size. m_uiCopyRightSize must
+    be initialized with the size available in the m_pCopyRight buffer
+    */
+    M4OSA_UInt32   m_uiCopyRightSize;
+
+} M4READER_CopyRight;
+
+
+
+/**
+ ************************************************************************
+ * structure    M4READER_StreamDataOption
+ * @brief        This structure defines a generic stream data option
+ * @note        It is used to set or get stream-specific data defined
+ *              by a relevant reader option ID.
+ ************************************************************************
+*/
+typedef struct _M4READER_StreamDataOption
+{
+    M4_StreamHandler*     m_pStreamHandler; /**< identifier of the stream */
+    M4OSA_Void*           m_pOptionValue;   /**< value of the data option to get or to set */
+
+} M4READER_StreamDataOption;
+
+/**
+ ************************************************************************
+ * enumeration    M4_EncodingFormat
+ * @brief        Text encoding format
+ ************************************************************************
+*/
+// typedef enum
+// {
+//     M4_kEncFormatUnknown    = 0,    /**< Unknown format                                    */
+//     M4_kEncFormatASCII        = 1,  /**< ISO-8859-1. Terminated with $00                   */
+//     M4_kEncFormatUTF8        = 2,   /**< UTF-8 encoded Unicode . Terminated with $00       */
+//     M4_kEncFormatUTF16        = 3   /**< UTF-16 encoded Unicode. Terminated with $00 00    */
+/*}  M4_EncodingFormat;*/
+
+/**
+ ************************************************************************
+ * structure    M4_StringAttributes
+ * @brief        This structure defines string attribute
+ ************************************************************************
+*/
+// typedef struct
+// {
+//     M4OSA_Void*            m_pString;            /**< Pointer to text        */
+//     M4OSA_UInt32        m_uiSize;            /**< Size of text            */
+//     M4_EncodingFormat    m_EncodingFormat;    /**< Text encoding format    */
+// } M4_StringAttributes;
+
+
+/**
+ ************************************************************************
+ * structure    M4READER_Buffer
+ * @brief        This structure defines a buffer in all readers
+ ************************************************************************
+*/
+typedef struct
+{
+    M4OSA_UInt8*   m_pData;
+    M4OSA_UInt32   m_uiBufferSize;
+} M4READER_Buffer;
+
+typedef struct
+{
+     M4OSA_UInt32            m_uiSessionId;
+    M4OSA_UInt32            m_uiMediaId;
+    M4OSA_UInt32            m_uiNbInstance;
+    M4OSA_Char**            m_pInstance;
+} M4_SdpAssetInstance;
+/*
+typedef enum
+{
+     M4READER_kUnknownFormat    = 0,
+     M4READER_kTagID3V1,
+     M4READER_kTagID3V2,
+    M4READER_kASFContentDesc,
+    M4READER_k3GppAssetBoxFromUDTA,
+    M4READER_k3GppAssetBoxFromSDP,
+    M4READER_kJpegExif
+} M4READER_MetaDataType;*/
+
+
+/**
+ ************************************************************************
+ * structure    M4_3gpAssetFields
+ * @brief        This structure defines fields of a 3gpp asset information
+ ************************************************************************
+*/
+typedef struct
+{
+    M4COMMON_MetaDataFields    m_metadata;
+
+    M4OSA_UInt32            m_uiSessionID;    /* For SDP */
+    M4OSA_UInt32            m_uiMediaID;    /* For SDP */
+
+
+    /* Note: The two following fields were added for internal use
+        (For Music manager project..) !! */
+    M4_StreamType       m_VideoStreamType;    /**< Video stream type */
+    M4_StreamType       m_AudioStreamType;    /**< Audio stream type */
+
+} M4_MetaDataFields;
+
+
+#define M4_METADATA_STR_NB    22 /* one string in album art structure*/
+
+typedef struct
+{
+    M4OSA_UInt32            m_uiNbBuffer;
+    M4_SdpAssetInstance*    m_pAssetInfoInst;    /* Set of 3gpp asset boxes */
+    M4COMMON_MetaDataAlbumArt        m_albumArt;            /* RC: PV specific album art:added
+                                                               here because this type is used by
+                                                               union below in streaming */
+
+} M4READER_netInfos;
+
+
+typedef union
+{
+    M4READER_Buffer        m_pTagID3Buffer[2];        /* Tag ID3 V1, V2 */
+    struct
+    {
+        M4READER_Buffer        m_pAsfDescContent;    /* ASF description content buffer */
+        M4READER_Buffer        m_pAsfExtDescContent; /* ASF extended description content buffer */
+    } m_asf;
+    M4_MetaDataFields    m_pMetadataFields;      /* Already parsed and filled 3gpp asset fields */
+    M4READER_netInfos    m_pAssetInfoInstance;   /* Set of 3gpp asset boxes in the sdp file */
+
+} M4_MetadataBuffer;
+
+
+
+
+/*********** READER GLOBAL Interface ************************************/
+
+/**
+ ************************************************************************
+ * @brief    create an instance of the reader
+ * @note    create the context
+ * @param    pContext:            (OUT)    pointer on a reader context
+ * @return    M4NO_ERROR                     there is no error
+ * @return    M4ERR_PARAMETER                at least one parameter is not properly set
+ * @return    M4ERR_ALLOC                    a memory allocation has failed
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_create_fct)          (M4OSA_Context* pContext);
+
+/**
+ ************************************************************************
+ * @brief    destroy the instance of the reader
+ * @note    after this call the context is invalid
+ * @param    context:            (IN)    Context of the reader
+ * @return    M4NO_ERROR                     there is no error
+ * @return    M4ERR_PARAMETER                at least one parameter is not properly set
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_destroy_fct)         (M4OSA_Context context);
+
+
+/**
+ ************************************************************************
+ * @brief    open the reader and initializes its created instance
+ * @note    this function, for the network reader, sends the DESCRIBE
+ * @param    context:            (IN)    Context of the reader
+ * @param    pFileDescriptor:    (IN)    Pointer to proprietary data identifying the media to open
+ * @return    M4NO_ERROR                     there is no error
+ * @return    M4ERR_PARAMETER                the context is NULL
+ * @return    M4ERR_BAD_CONTEXT            provided context is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_open_fct)    (M4OSA_Context context, M4OSA_Void* pFileDescriptor);
+
+
+/**
+ ************************************************************************
+ * @brief    close the reader
+ * @note
+ * @param    context:        (IN)    Context of the reader
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            the context is NULL
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR   (M4READER_close_fct)    (M4OSA_Context context);
+
+
+
+/**
+ ************************************************************************
+ * @brief    Get the next stream found in the media
+ * @note
+ * @param    context:        (IN)    Context of the reader
+ * @param    pMediaFamily:    (OUT)    pointer to a user allocated M4READER_MediaFamily that will
+ *                                     be filled with the media family of the found stream
+ * @param    pStreamHandler:    (OUT)    pointer to a stream handler that will be allocated and
+ *                                       filled with the found stream description
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4WAR_NO_MORE_STREAM    no more available stream in the media (all streams found)
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_getNextStream_fct)   (M4OSA_Context context,
+                                                     M4READER_MediaFamily *pMediaFamily,
+                                                     M4_StreamHandler **pStreamHandler);
+
+
+/**
+ ************************************************************************
+ * @brief    fill the access unit structure with initialization values
+ * @note
+ * @param    context:        (IN)     Context of the reader
+ * @param    pStreamHandler:    (IN)     pointer to the stream handler to which the access unit
+ *                                           will be associated
+ * @param    pAccessUnit:    (IN/OUT) pointer to the access unit (allocated by the caller)
+ *                                           to initialize
+ * @return    M4NO_ERROR                  there is no error
+ * @return    M4ERR_BAD_CONTEXT         provided context is not a valid one
+ * @return    M4ERR_PARAMETER             at least one parameter is not properly set
+ * @return    M4ERR_ALLOC                 there is no more memory available
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_fillAuStruct_fct)    (M4OSA_Context context,
+                                                   M4_StreamHandler *pStreamHandler,
+                                                   M4_AccessUnit *pAccessUnit);
+
+/**
+ ************************************************************************
+ * @brief    starts the instance of the reader
+ * @note    only needed for network until now...
+ * @param    context:        (IN)    Context of the reader
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            the context is NULL
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_start_fct)   (M4OSA_Context context);
+
+/**
+ ************************************************************************
+ * @brief    stop reading
+ * @note    only needed for network until now... (makes a pause)
+ * @param    context:        (IN)    Context of the reader
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            the context is NULL
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_stop_fct)   (M4OSA_Context context);
+
+
+/**
+ ************************************************************************
+ * @brief    get an option value from the reader
+ * @note    this function follows the set/get option mechanism described in OSAL 3.0
+ *          it allows the caller to retrieve a property value:
+ *          -the duration of the longest stream of the media
+ *          -the version number of the reader
+ *
+ * @param    context:        (IN)    Context of the reader
+ * @param    optionId:        (IN)    indicates the option to get
+ * @param    pValue:            (OUT)    pointer to structure or value (allocated by user)
+ *                                          where option is stored
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4ERR_BAD_OPTION_ID        when the option ID is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_getOption_fct)       (M4OSA_Context context, M4OSA_OptionID optionId,
+                                                     M4OSA_DataOption pValue);
+
+
+/**
+ ************************************************************************
+ * @brief   set an option value of the reader
+ * @note    this function follows the set/get option mechanism described in OSAL 3.0
+ *          it allows the caller to set a property value:
+ *          - nothing for the moment
+ *
+ * @param    context:        (IN)    Context of the reader
+ * @param    optionId:        (IN)    indicates the option to set
+ * @param    pValue:            (IN)    pointer to structure or value (allocated by user) where
+ *                                          option is stored
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4ERR_BAD_OPTION_ID        when the option ID is not a valid one
+ ************************************************************************
+*/
+typedef M4OSA_ERR (M4READER_setOption_fct)       (M4OSA_Context context, M4OSA_OptionID optionId,
+                                                     M4OSA_DataOption pValue);
+
+
+/**
+ ************************************************************************
+ * @brief    jump into the stream at the specified time
+ * @note
+ * @param    context:        (IN)     Context of the reader
+ * @param    pStreamHandler    (IN)     the stream handler of the stream to make jump
+ * @param    pTime            (IN/OUT) IN:  the time to jump to (in ms)
+ *                                     OUT: the time to which the stream really jumped
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4ERR_ALLOC                there is no more memory available
+ * @return    M4ERR_BAD_STREAM_ID        the streamID does not exist
+ ************************************************************************
+*/
+typedef M4OSA_ERR   (M4READER_jump_fct)     (M4OSA_Context context,
+                                                M4_StreamHandler *pStreamHandler,
+                                                M4OSA_Int32* pTime);
+
+
+/**
+ ************************************************************************
+ * @brief    reset the stream, that is seek it to beginning and make it ready to be read
+ * @note
+ * @param    context:        (IN)    Context of the reader
+ * @param    pStreamHandler    (IN)    The stream handler of the stream to reset
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4ERR_ALLOC                there is no more memory available
+ * @return    M4ERR_BAD_STREAM_ID        the streamID does not exist
+ ************************************************************************
+*/
+typedef M4OSA_ERR   (M4READER_reset_fct)    (M4OSA_Context context,
+                                                M4_StreamHandler *pStreamHandler);
+
+
+/**
+ ************************************************************************
+ * @brief    get the time of the closest RAP access unit before the given time
+ * @note
+ * @param    context:        (IN)     Context of the reader
+ * @param    pStreamHandler    (IN)     the stream handler of the stream to search
+ * @param    pTime            (IN/OUT) IN:  the time to search from (in ms)
+ *                                     OUT: the time (cts) of the preceding RAP AU.
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4ERR_BAD_STREAM_ID        the streamID does not exist
+ ************************************************************************
+*/
+typedef M4OSA_ERR   (M4READER_getPrevRapTime_fct) (M4OSA_Context context,
+                                                    M4_StreamHandler *pStreamHandler,
+                                                    M4OSA_Int32* pTime);
+
+
+/**
+ ************************************************************************
+ * structure    M4READER_GlobalInterface
+ * @brief        This structure defines the generic media reader GLOBAL interface
+ * @note        This structure stores the pointers to functions concerning
+ *                creation and control of one reader type.
+ *                The reader type is one of the M4READER_MediaType
+ ************************************************************************
+*/
+typedef struct _M4READER_GlobalInterface
+/*****************************************/
+{
+    M4READER_create_fct*            m_pFctCreate;
+    M4READER_destroy_fct*           m_pFctDestroy;
+    M4READER_open_fct*              m_pFctOpen;
+    M4READER_close_fct*             m_pFctClose;
+    M4READER_getOption_fct*         m_pFctGetOption;
+    M4READER_setOption_fct*         m_pFctSetOption;
+    M4READER_getNextStream_fct*     m_pFctGetNextStream;
+    M4READER_fillAuStruct_fct*      m_pFctFillAuStruct;
+    M4READER_start_fct*             m_pFctStart;
+    M4READER_stop_fct*              m_pFctStop;
+    M4READER_jump_fct*              m_pFctJump;
+    M4READER_reset_fct*             m_pFctReset;
+    M4READER_getPrevRapTime_fct*    m_pFctGetPrevRapTime;
+
+} M4READER_GlobalInterface;
+
+
+/************* READER DATA Interface ************************************/
+
+
+
+/**
+ ************************************************************************
+ * @brief    Gets an access unit (AU) from the stream handler source.
+ * @note    An AU is the smallest possible amount of data to be decoded by a decoder (audio/video).
+ *
+ * @param    context:        (IN)        Context of the reader
+ * @param    pStreamHandler    (IN)        The stream handler of the stream to make jump
+ * @param    pAccessUnit        (IN/OUT)   Pointer to an access unit to fill with read data
+ *                                         (the au structure is allocated by the user, and must be
+ *                                         initialized by calling M4READER_fillAuStruct_fct after
+ *                                         creation)
+ * @return    M4NO_ERROR                     there is no error
+ * @return    M4ERR_BAD_CONTEXT            provided context is not a valid one
+ * @return    M4ERR_PARAMETER                at least one parameter is not properly set
+ * @returns    M4ERR_ALLOC                    memory allocation failed
+ * @returns    M4ERR_BAD_STREAM_ID            at least one of the stream Id. does not exist.
+ * @returns    M4WAR_NO_DATA_YET            there is not enough data on the stream for a new
+ *                                          access unit
+ * @returns    M4WAR_NO_MORE_AU            there are no more access unit in the stream
+ *                                          (end of stream)
+ ************************************************************************
+*/
+typedef M4OSA_ERR   (M4READER_getNextAu_fct)(M4OSA_Context context,
+                                             M4_StreamHandler *pStreamHandler,
+                                             M4_AccessUnit *pAccessUnit);
+
+
+/**
+ ************************************************************************
+ * structure    M4READER_DataInterface
+ * @brief        This structure defines the generic media reader DATA interface
+ * @note        This structure stores the pointers to functions concerning
+ *                data access for one reader type.(those functions are typically called from
+ *                a decoder) The reader type is one of the M4READER_MediaType
+ ************************************************************************
+*/
+typedef struct _M4READER_DataInterface
+{
+    M4READER_getNextAu_fct*   m_pFctGetNextAu;
+
+    /**
+    stores the context created by the M4READER_create_fct() function
+    so it is accessible from the data functions without going through the decoder
+    */
+    M4OSA_Context m_readerContext;
+/*****************************************/
+} M4READER_DataInterface;
+/*****************************************/
+
+
+#endif /*__M4READER_COMMON_H__*/
+
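To make the two interface tables above concrete, here is a hedged end-to-end sketch of the lifecycle they imply (create, open, stream discovery, AU loop, close, destroy). It is not part of this change; M4NO_ERROR, M4WAR_NO_MORE_AU and M4OSA_NULL are assumed to be defined by the OSAL/common headers, and error handling is kept minimal.

#include "M4READER_Common.h"

/* Illustrative sketch: read every access unit of the first audio stream found
 * in a media, using a reader's global and data interfaces. */
static M4OSA_ERR readFirstAudioStream(M4READER_GlobalInterface *pGlobal,
                                      M4READER_DataInterface   *pData,
                                      M4OSA_Void               *pFileDescriptor)
{
    M4OSA_Context        ctx     = M4OSA_NULL;
    M4_StreamHandler    *pStream = M4OSA_NULL;
    M4READER_MediaFamily family  = M4READER_kMediaFamilyUnknown;
    M4_AccessUnit        au;
    M4OSA_ERR            err;

    err = pGlobal->m_pFctCreate(&ctx);
    if (M4NO_ERROR != err) {
        return err;
    }
    err = pGlobal->m_pFctOpen(ctx, pFileDescriptor);
    if (M4NO_ERROR != err) {
        pGlobal->m_pFctDestroy(ctx);
        return err;
    }

    /* Enumerate streams until the first audio one is returned. */
    do {
        err = pGlobal->m_pFctGetNextStream(ctx, &family, &pStream);
    } while ((M4NO_ERROR == err) && (M4READER_kMediaFamilyAudio != family));

    if (M4NO_ERROR == err) {
        /* Initialize the AU once, then pull AUs until the end of the stream. */
        err = pGlobal->m_pFctFillAuStruct(ctx, pStream, &au);
        while (M4NO_ERROR == err) {
            err = pData->m_pFctGetNextAu(ctx, pStream, &au);
            if (M4NO_ERROR == err) {
                /* process the AU (payload fields are declared in M4DA_Types.h) */
            }
        }
        if (M4WAR_NO_MORE_AU == err) {
            err = M4NO_ERROR;      /* end of stream is not an error */
        }
    }

    pGlobal->m_pFctClose(ctx);
    pGlobal->m_pFctDestroy(ctx);
    return err;
}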
diff --git a/libvideoeditor/vss/common/inc/M4READER_Pcm.h b/libvideoeditor/vss/common/inc/M4READER_Pcm.h
new file mode 100755
index 0000000..a600b34
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4READER_Pcm.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file    M4READER_Pcm.h
+ * @brief    Generic encapsulation of the core wav reader
+ * @note    This file declares the generic shell interface retrieving function
+ *            of the wav reader
+*************************************************************************
+*/
+#ifndef __M4READER_PCM_H__
+#define __M4READER_PCM_H__
+
+#include "M4READER_Common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+*************************************************************************
+* @brief Retrieves the generic interfaces implemented by the reader
+*
+* @param pMediaType             : Pointer to a M4READER_MediaType (allocated by the caller)
+*                              that will be filled with the media type supported by this reader
+* @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface
+*                              implemented by this reader. The interface is a structure allocated
+*                              by the function and must be deallocated by the caller.
+* @param pRdrDataInterface   : Address of a pointer that will be set to the data interface
+*                              implemented by this reader. The interface is a structure allocated
+*                              by the function and must be deallocated by the caller.
+*
+* @returns : M4NO_ERROR     if OK
+*             ERR_ALLOC      if an allocation failed
+*            ERR_PARAMETER  at least one parameter is not properly set (in DEBUG only)
+*************************************************************************
+*/
+M4OSA_ERR M4READER_PCM_getInterfaces(M4READER_MediaType *pMediaType,
+                                        M4READER_GlobalInterface **pRdrGlobalInterface,
+                                        M4READER_DataInterface **pRdrDataInterface);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__M4READER_PCM_H__*/
diff --git a/libvideoeditor/vss/common/inc/M4SYS_AccessUnit.h b/libvideoeditor/vss/common/inc/M4SYS_AccessUnit.h
new file mode 100755
index 0000000..08f8002
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4SYS_AccessUnit.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file         M4SYS_AccessUnit.h
+ * @brief        Access unit manipulation
+ * @note         This file defines the access unit structure,
+ *               and declares functions to manipulate it.
+ ************************************************************************
+*/
+
+#ifndef M4SYS_ACCESSUNIT_H
+#define M4SYS_ACCESSUNIT_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Time.h"
+#include "M4SYS_Stream.h"
+
+/** The attribute of a fragment*/
+typedef enum {
+  M4SYS_kFragAttrOk        = 01, /**< The fragment is correct, there is no error
+                                         (size cannot be 0)*/
+  M4SYS_kFragAttrCorrupted = 02, /**< The fragment is corrupted (there is at least a bit or byte
+                                        error somewhere in the fragment (size cannot be 0)*/
+  M4SYS_kFragAttrLost      = 03  /**< The fragment is lost, so the size must be 0.*/
+} M4SYS_FragAttr;
+
+
+/** A Fragment is a piece of access unit. It can be decoded without decoding the others*/
+typedef struct {
+  M4OSA_MemAddr8  fragAddress;   /**< The data pointer. All fragments of the same access unit
+                                        must be contiguous in memory*/
+  M4OSA_UInt32    size;          /**< The size of the fragment. It must be 0 if fragment is
+                                        flagged 'lost'*/
+  M4SYS_FragAttr  isCorrupted;   /**< The attribute of this fragment*/
+} M4SYS_Frag;
+
+/**< The attribute of an access unit*/
+typedef M4OSA_UInt8 M4SYS_AU_Attr;
+
+#define AU_Corrupted   0x01 /**< At least one fragment of the access unit is flagged corrupted.*/
+#define AU_B_Frame     0x02 /**< The access unit is a B_frame*/
+#define AU_RAP         0x04 /**< The access unit is a random access point.*/
+
+
+/** An access unit is the smallest piece of data with timing information.*/
+typedef struct {
+  M4SYS_StreamDescription*    stream ;
+  M4OSA_MemAddr32             dataAddress; /**< The data pointer. The size of this block
+                                            (allocated size) must be a 32-bits integer multiple*/
+  M4OSA_UInt32                size;        /**< The size in bytes of the dataAddress. The size may
+                                                 not match a 32-bits word boundary.*/
+  M4OSA_Time                  CTS;         /**< The Composition Time Stamp*/
+  M4OSA_Time                  DTS;         /**< The Decoded Time Stamp*/
+  M4SYS_AU_Attr               attribute;   /**< The attribute of the access unit*/
+  M4OSA_UInt8                 nbFrag;      /**< The number of fragments. It can be 0 if there is
+                                                no fragment.*/
+  M4SYS_Frag**                frag;        /**< An array of 'nbFrag' fragments. It stores the
+                                                fragments structure. The original definition
+                                              < of frag has been changed from M4SYS_Frag* frag[]
+                                                to M4SYS_Frag** frag since the support
+                                              < of such syntax is only a Microsoft extension of
+                                                the C compiler. */
+} M4SYS_AccessUnit;
+
+/* Error codes */
+#define M4ERR_AU_NO_MORE_FRAG      M4OSA_ERR_CREATE(M4_ERR,M4SYS_CMAPI,0x000001)
+#define M4ERR_AU_BUFFER_OVERFLOW   M4OSA_ERR_CREATE(M4_ERR,M4SYS_CMAPI,0x000002)
+#define M4ERR_AU_BAD_INDEX         M4OSA_ERR_CREATE(M4_ERR,M4SYS_CMAPI,0x000003)
+#define M4ERR_NOT_ENOUGH_FRAG      M4OSA_ERR_CREATE(M4_ERR,M4SYS_CMAPI,0x000004)
+
+
+
+#endif /*M4SYS_ACCESSUNIT_H*/
+
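A minimal sketch of how the fragment attributes above are meant to be consumed (illustrative only; the function and variable names are not part of the header):

#include "M4SYS_AccessUnit.h"

/* Illustrative sketch: walk the optional fragment list of an access unit and
 * report how many payload bytes are corrupted and how many fragments are lost. */
static void M4SYS_checkAuFragments(const M4SYS_AccessUnit *pAu,
                                   M4OSA_UInt32 *pCorruptedBytes,
                                   M4OSA_UInt32 *pLostFragments)
{
    M4OSA_UInt8 i;

    *pCorruptedBytes = 0;
    *pLostFragments  = 0;

    for (i = 0; i < pAu->nbFrag; i++) {
        const M4SYS_Frag *pFrag = pAu->frag[i];

        if (M4SYS_kFragAttrLost == pFrag->isCorrupted) {
            (*pLostFragments)++;           /* a lost fragment must have size 0 */
        } else if (M4SYS_kFragAttrCorrupted == pFrag->isCorrupted) {
            *pCorruptedBytes += pFrag->size;
        }
        /* M4SYS_kFragAttrOk fragments need no special handling here. */
    }
}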
diff --git a/libvideoeditor/vss/common/inc/M4SYS_Stream.h b/libvideoeditor/vss/common/inc/M4SYS_Stream.h
new file mode 100755
index 0000000..5ed9f82
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4SYS_Stream.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+
+ ************************************************************************
+ * @file         M4SYS_Stream.h
+ * @brief        Stream manipulation
+ * @note         This file defines the stream structure.
+ ************************************************************************
+*/
+
+#ifndef M4SYS_STREAM_H
+#define M4SYS_STREAM_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Time.h"
+
+typedef M4OSA_UInt32 M4SYS_StreamID;
+
+/** The streamType type provides a way to distinguish all streams (AAC, AMR, YUV420, MPEG-4 Video,
+     H263). Stream types can be sorted in 2 ways:
+@arg   Some of them are raw data, others are encoded
+@arg   Some of them are related to an audio media, a video media...
+@n So a specific naming convention has been designed to allow a quick parsing of the streamType
+    value to return the above categories. StreamType is an unsigned 16-bit integer.
+@arg   The first byte (MSB) defines the codec type. It can be either Audio, Video, Picture,
+         Text or Scene.
+@arg   The second byte (LSB) defines the sub-codec type (i.e. YUV420, PCM 16 bits, AMR...).
+        Moreover, if this value is greater than 0x80, the stream is raw; otherwise it
+        is encoded.
+@n   0x0000 is a forbidden value, it describes an unknown stream */
+
+typedef enum {
+   M4SYS_kUnknown       = 0x0000,
+   /* Stream type definition
+       0xYYZZ   : YY is the codec type (Audio, Video, Picture, Scene ...)
+                  ZZ is the sub-codec type (AAC, AMR , ...)
+                     if ZZ is greater than 0x80 it is a raw format*/
+
+   /* Audio ones   : Range from [0x0100-0x01FF]*/
+   M4SYS_kAudioUnknown  = 0x0100,
+   M4SYS_kAAC           = 0x0101,
+   M4SYS_kCELP          = 0x0102,
+   M4SYS_kAMR           = 0x0103,
+   M4SYS_kAMR_WB        = 0x0104,
+   M4SYS_kMP3           = 0x0105,
+   M4SYS_kMIDI          = 0x0106,
+   M4SYS_kWMA           = 0x0107,
+   M4SYS_kREALAUDIO     = 0x0108,
+   M4SYS_kEVRC            = 0x0109,
+   M4SYS_kPCM_16bitsS   = 0x0181, /* PCM 16 bits Signed */
+   M4SYS_kPCM_16bitsU   = 0x0182, /* PCM 16 bits Un-signed */
+   M4SYS_kPCM_8bitsU    = 0x0183, /* PCM  8 bits Un-signed */
+/* FixAA 2008/03/03 types: M4SYS_kPCM_16bitsS, M4SYS_kPCM_16bitsU and M4SYS_kPCM_8bitsU
+   are now only used by AudioMixer and ReaderAVI => An update is necessary in the future for use
+   type M4SYS_kPCM */
+   M4SYS_kXMF            = 0x0184,
+   M4SYS_kSMAF          = 0x0185,
+   M4SYS_kIMEL          = 0x0186,
+   M4SYS_kBBA            = 0x0187,
+   M4SYS_kBPC            = 0x0188,
+   M4SYS_kADPCM         = 0x0189,  /* ADPCM added */
+   M4SYS_kPCM           = 0x0190,  /* stream type added: PCM;  PR2569 fixAA */
+   M4SYS_kAudioAll        = 0x01FF,  /* all audio streams */
+
+   /* Video ones   : Range [0x0200-0x02FF]*/
+   M4SYS_kVideoUnknown  = 0x0200,
+   M4SYS_kMPEG_4        = 0x0201,
+   M4SYS_kH263          = 0x0202,
+   M4SYS_kH263pp        = 0x0203,
+   M4SYS_kH264          = 0x0204,
+   M4SYS_kREALVIDEO     = 0x0205,
+   M4SYS_kYUV420        = 0x0281,
+   M4SYS_kRGB32         = 0x0282,
+   M4SYS_kBGR32         = 0x0283,
+   M4SYS_kRGB24         = 0x0284,
+   M4SYS_kBGR24         = 0x0285,
+   M4SYS_kVideoAll        = 0x02FF,  /* all video streams */
+
+  /* Picture ones : Range [0x0300-0x03FF]*/
+   M4SYS_kPictureUnknown = 0x0300,
+   M4SYS_kJPEG           = 0x0301,
+   M4SYS_kGIF            = 0x0302,
+   M4SYS_kBMP            = 0x0383,
+   M4SYS_kStillAll         = 0x03FF,  /* all still picture streams */
+
+   /* Text ones    : Range [0x0400-0x04FF]*/
+   M4SYS_kTextUnknown  = 0x0400,
+   M4SYS_kTimedText    = 0x0401,
+   M4SYS_kUTF8         = 0x0481,
+   M4SYS_kUTF16        = 0x0482,
+   M4SYS_kUCS2         = 0x0483,
+   M4SYS_kTextAll       = 0x04FF,  /* all text streams */
+
+   /* Scene & Graphics ones   : Range [0x0500-0x05FF]*/
+   M4SYS_kSceneUnknown  = 0x0500,
+   M4SYS_kSMIL          = 0x0501,
+   M4SYS_kBIFS          = 0x0502,
+   M4SYS_kSceneAll        = 0x05FF,  /* all scene streams */
+
+   /* hinted ones   : Range [0x0600-0x06FF]*/
+   M4SYS_kHintedUnknown = 0x0600,
+   M4SYS_kRTP           = 0x0601,
+   M4SYS_kMPEG2_TS      = 0x0602,
+   M4SYS_kHintedAll        = 0x06FF,  /* all packetized streams */
+
+   /* MPEG-4 system ones : Range [0x0700-0x07FF]*/
+   M4SYS_kSysUnknown    = 0x0700,
+   M4SYS_kODS           = 0x0701,
+   M4SYS_kIPMP          = 0x0702,
+   M4SYS_kOCI           = 0x0703,
+   M4SYS_kSysAll        = 0x07FF /* all system streams*/
+} M4SYS_StreamType ;
+
+typedef struct {
+   M4SYS_StreamID     streamID ;
+   M4OSA_UInt32      value ;
+} M4SYS_StreamIDValue ;
+
+typedef struct {
+   M4SYS_StreamID    streamID ;
+   M4OSA_UInt32      size ;
+   M4OSA_MemAddr32   addr ;
+} M4SYS_StreamIDmemAddr ;
+
+/** This structure defines the set of properties associated with a stream*/
+typedef struct {
+  M4SYS_StreamID   streamID;    /**< The ID of the stream. It must be unique within a media
+                                (i.e. in an MP4 file, two tracks cannot share the same ID).
+                                 0 is forbidden.*/
+  M4SYS_StreamType streamType;    /**< The stream type of the stream*/
+  M4OSA_UInt8      profileLevel;  /**< The profile & level of a stream. It is related to the
+                                       stream type & the definition comes from the standard bodies
+                                       (i.e. MPEG-4 Video & MPEG-4 Audio). Some values are
+                                       pre-defined: 0xFE=userPrivate 0xFF=no Profile &
+                                       Level specified*/
+  M4OSA_UInt32     decoderSpecificInfoSize;  /**< The size (in bytes) of the decoder
+                                                   specific info.*/
+  M4OSA_MemAddr32  decoderSpecificInfo; /**< The decoder configuration; these bytes are needed
+                                              to initialise a decoder.*/
+  M4OSA_UInt32     timeScale;     /**< The time scale of the stream. It means that all timing
+                                        duration of this stream are computed in this timescale
+                                        (ie timeScale = 8000, means there are 8000 ticks in
+                                        one second)*/
+  M4OSA_Time       duration;        /**< The stream duration of this stream. The time unit is the
+                                        time scale. The value can be set to M4SYS_UnknownTime if
+                                        the duration is not known.*/
+  M4OSA_Int32      averageBitrate;  /**< The average bitrate (in bit per second) of this stream.
+                                         The average bitrate is computed on the stream duration.
+                                         -1 value means either there is no average bitrate or no
+                                         average bitrate is provided.*/
+  M4OSA_Int32      maxBitrate;      /**< The maximum bitrate (in bit per second) of this stream.
+                                         The maximum bitrate is computed on a sliding window of 1
+                                         second. -1 value means either there is no max. bitrate or
+                                         no max. bitrate is provided.*/
+} M4SYS_StreamDescription;
+
+typedef enum {
+   M4SYS_kPreviousRAP      = 0x01 ,
+   M4SYS_kNextRAP          = 0x02 ,
+   M4SYS_kClosestRAP       = 0x03 ,
+   M4SYS_kNoRAPprevious    = 0x11 ,
+   M4SYS_kNoRAPnext        = 0x12 ,
+   M4SYS_kNoRAPclosest     = 0x13 ,
+   M4SYS_kBeginning        = 0x20
+} M4SYS_SeekAccessMode ;
+
+#endif /*M4SYS_STREAM_H*/
+
+
+
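The streamType convention documented above (high byte = codec family, low byte = sub-codec, low byte above 0x80 = raw) can be decoded with a few helper macros. The macro names below are purely illustrative and not part of the header:

#include "M4SYS_Stream.h"

/* Illustrative helpers for the 0xYYZZ streamType convention described above. */
#define M4SYS_STREAM_FAMILY(type)    (((M4OSA_UInt32)(type) >> 8) & 0xFF) /* YY: Audio=0x01, Video=0x02, ... */
#define M4SYS_STREAM_SUBCODEC(type)  ((M4OSA_UInt32)(type) & 0xFF)        /* ZZ: sub-codec id */
#define M4SYS_STREAM_IS_RAW(type)    (M4SYS_STREAM_SUBCODEC(type) > 0x80) /* ZZ above 0x80 => raw stream */

/* Examples: M4SYS_kYUV420 (0x0281) -> family 0x02 (video), raw;
 *           M4SYS_kAAC    (0x0101) -> family 0x01 (audio), encoded. */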
diff --git a/libvideoeditor/vss/common/inc/M4TOOL_VersionInfo.h b/libvideoeditor/vss/common/inc/M4TOOL_VersionInfo.h
new file mode 100755
index 0000000..2661016
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4TOOL_VersionInfo.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file   M4TOOL_VersionInfo.h
+ * @brief  defines a common version information structure
+ * @note
+ *
+ ************************************************************************
+*/
+#ifndef __M4TOOL_VERSIONINFO_H__
+#define __M4TOOL_VERSIONINFO_H__
+
+#include "M4OSA_Types.h"
+
+/**
+ * structure    M4_VersionInfo
+ * @brief        This structure describes the version of a core component
+ * @note        This structure is typically used to retrieve the version information
+ *                of a component via its getOption function
+ */
+typedef struct _M4_VersionInfo
+{
+    M4OSA_UInt32 m_major;        /*major version of the component*/
+    M4OSA_UInt32 m_minor;        /*minor version of the component*/
+    M4OSA_UInt32 m_revision;    /*revision version of the component*/
+
+    /* Structure size */
+    M4OSA_UInt32 m_structSize;
+
+} M4_VersionInfo;
+
+
+#endif /*__M4TOOL_VERSIONINFO_H__*/
+
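
As the note above says, M4_VersionInfo is usually returned through a component's getOption call.
Below is a minimal sketch of the component side (not part of the patch; the version numbers are
hypothetical).

#include "M4TOOL_VersionInfo.h"

/* Fill a version structure as a core component would before returning it via getOption. */
static void fillVersionInfo(M4_VersionInfo* pVersion)
{
    pVersion->m_major      = 3;                       /* hypothetical major version         */
    pVersion->m_minor      = 1;                       /* hypothetical minor version         */
    pVersion->m_revision   = 0;                       /* hypothetical revision              */
    pVersion->m_structSize = sizeof(M4_VersionInfo);  /* lets callers sanity-check the size */
}
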
diff --git a/libvideoeditor/vss/common/inc/M4VD_EXTERNAL_Interface.h b/libvideoeditor/vss/common/inc/M4VD_EXTERNAL_Interface.h
new file mode 100755
index 0000000..170620c
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VD_EXTERNAL_Interface.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4VD_EXTERNAL_INTERFACE_H__
+#define __M4VD_EXTERNAL_INTERFACE_H__
+
+#include "M4DECODER_Common.h"
+
+#include "M4VD_HW_API.h"/* M4VD_Interface */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct
+{
+    M4VD_Interface*    externalFuncs;
+    M4OSA_Void*        externalUserData;
+}* M4DECODER_EXTERNAL_UserDataType;
+
+/* ----- Interface retrieval ----- */
+
+M4OSA_ERR M4DECODER_EXTERNAL_getInterface(M4DECODER_VideoInterface** pDecoderInterface);
+
+/* ----- DSI bitstream parser ----- */
+
+/* This function is available to clients of the shell to allow them to analyse clips
+(useful for video editing) without having to instantiate a decoder, which is useful precisely
+when HW decoders are a possibility. */
+
+M4OSA_ERR M4DECODER_EXTERNAL_ParseVideoDSI(M4OSA_UInt8* pVol, M4OSA_Int32 aVolSize,
+                                             M4DECODER_MPEG4_DecoderConfigInfo* pDci,
+                                             M4DECODER_VideoSize* pVideoSize);
+
+M4OSA_ERR M4DECODER_EXTERNAL_ParseAVCDSI(M4OSA_UInt8* pDSI, M4OSA_Int32 DSISize,
+                                            M4DECODER_AVCProfileLevel *profile);
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4VD_EXTERNAL_INTERFACE_H__ */
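
The comment above describes the intended use of the DSI parser: inspect a clip without
instantiating a decoder. A minimal sketch of that call sequence follows (not part of the patch;
the M4DECODER_VideoSize field names m_uiWidth/m_uiHeight are assumed from M4DECODER_Common.h).

#include "M4VD_EXTERNAL_Interface.h"

/* Parse an MPEG-4 Video DSI to recover the clip resolution, without opening a decoder. */
static M4OSA_ERR getClipResolution(M4OSA_UInt8* pDsi, M4OSA_Int32 dsiSize,
                                   M4OSA_UInt32* pWidth, M4OSA_UInt32* pHeight)
{
    M4DECODER_MPEG4_DecoderConfigInfo dci;
    M4DECODER_VideoSize               size;
    M4OSA_ERR                         err;

    err = M4DECODER_EXTERNAL_ParseVideoDSI(pDsi, dsiSize, &dci, &size);
    if (M4NO_ERROR != err)
    {
        return err;              /* the DSI could not be parsed */
    }
    *pWidth  = size.m_uiWidth;   /* field names assumed from M4DECODER_Common.h */
    *pHeight = size.m_uiHeight;
    return M4NO_ERROR;
}
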
diff --git a/libvideoeditor/vss/common/inc/M4VD_EXTERNAL_Internal.h b/libvideoeditor/vss/common/inc/M4VD_EXTERNAL_Internal.h
new file mode 100755
index 0000000..f2dacb2
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VD_EXTERNAL_Internal.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4VD_EXTERNAL_INTERNAL_H__
+#define __M4VD_EXTERNAL_INTERNAL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "NXPSW_CompilerSwitches.h"
+
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+#include "M4OSA_Semaphore.h"
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+
+/*typedef enum
+{
+    M4VS_THREAD_IS_IDLE = 0,
+    M4VS_THREAD_IS_RUNNING = 1,
+    M4VS_THREAD_IS_STOPPING = 2
+
+} M4VS_ThreadState_t;*/
+
+
+/* ----- internal VS context ----- */
+
+typedef struct
+{
+    /* READER */
+    /**< Reference to the reader data interface used to read access units */
+    M4READER_DataInterface*           m_pReader;
+    /**< Reference to the access unit used to read and decode one frame (the AU could be passed by
+    the user instead of reading it from inside the decoder) */
+    M4_AccessUnit*                    m_pNextAccessUnitToDecode;
+    /**< Flag to know if we decode just after a (read) jump */
+    M4OSA_Bool                        m_bJump;
+    M4_MediaTime                      m_nextAUCts;                /**< CTS of the AU above */
+
+    /* DECODER */
+
+    M4_MediaTime             m_DecodeUpToCts;        /**< Target Cts for the decode up to loop */
+    M4_MediaTime             m_CurrentDecodeCts;     /**< Cts of the latest frame decoded */
+    M4_MediaTime             m_PreviousDecodeCts;    /**< Cts of the previous frame decoded */
+    M4OSA_UInt32             m_NbDecodedFrames;      /**< Number of frames decoded in the decode
+                                                          up to loop (can be 0) */
+    M4OSA_ERR                m_uiDecodeError;        /**< Error or warning code (from the VD
+                                                          reader or decoder) returned to the
+                                                          shell */
+    M4OSA_Bool               m_bDataDecodePending;   /**< There is some data to decode */
+    M4OSA_Bool               m_bIsWaitNextDecode;    /**< Do we need to wait for the anticipated
+                                                          decoding to finish ? */
+
+    /* RENDER */
+
+    M4_MediaTime                 m_TargetRenderCts;        /**< Cts for the rendering step */
+    M4_MediaTime                 m_CurrentRenderCts;       /**< Cts of the latest frame decoded */
+    M4OSA_ERR                    m_uiRenderError;          /**< Error or warning code (from the
+                                                                VD render) returned to the shell */
+    M4OSA_Bool                   m_bForceRender;           /**< Force rendering even if 0 frames
+                                                                are decoded (i.e. already
+                                                                previously decoded) */
+    M4OSA_Bool                   m_bDataRenderPending;     /**< There is some data to render */
+
+    /* STREAM PARAMS */
+
+    M4_VideoStreamHandler*            m_pVideoStreamhandler;    /**< reference to the video
+                                                                     stream description passed by
+                                                                     the user */
+    M4VD_StreamInfo*                  m_pStreamInfo;
+    M4DECODER_VideoSize                  m_VideoSize;
+    M4DECODER_MPEG4_DecoderConfigInfo m_Dci;                  /**< Information collected from
+                                                                   DSI parsing */
+    M4VIFI_ImagePlane*                m_pOutputPlane;         /**< Pointer to YUV output planes */
+
+    /* VD API */
+
+    M4VD_Interface*                   m_VD_Interface;           /**< pointers to HW functions */
+    M4VD_SignalingInterface           m_VD_SignalingInterface;  /**< pointers to Shell signaling
+                                                                     functions */
+    M4VD_Context                      m_VD_Context;             /**< pointer to the real hardware
+                                                                     context */
+
+    /* THREAD STUFF  */
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+    M4OSA_Context                      m_SemSync;
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+} M4VS_VideoDecoder_Context;
+
+
+/* ----- bitstream parser ----- */
+/*
+typedef struct
+{
+    M4OSA_UInt32 stream_byte;
+    M4OSA_UInt32 stream_index;
+    M4OSA_MemAddr8 in;
+
+} M4VS_Bitstream_ctxt;
+*/
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4VD_EXTERNAL_INTERNAL_H__ */
diff --git a/libvideoeditor/vss/common/inc/M4VD_HW_API.h b/libvideoeditor/vss/common/inc/M4VD_HW_API.h
new file mode 100755
index 0000000..ba33d14
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VD_HW_API.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4VD_HW_API_H__
+#define __M4VD_HW_API_H__
+
+#include "M4OSA_Types.h"
+#include "M4OSA_OptionID.h"
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h" /* M4OSA_MemAddrN */
+
+#include "M4VIFI_FiltersAPI.h"
+
+/**
+ ************************************************************************
+ * @file   M4VD_HW_API.h
+ * @brief  Hardware video decoder external interface (function set and callbacks)
+ * @note
+ ************************************************************************
+*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* ----- Hardware decoder errors and warnings ----- */
+
+#define M4ERR_VD_FATAL        M4OSA_ERR_CREATE(M4_ERR, M4VD_EXTERNAL, 0x0001)
+
+
+/* ----- enum definitions ----- */
+
+typedef enum
+{
+    M4VD_kOptionId_Dummy = 0
+
+} M4VD_OptionID;
+
+typedef enum
+{
+    M4VD_kMpeg4VideoDec,
+    M4VD_kH263VideoDec,
+    M4VD_kH264VideoDec,
+    M4VD_kVideoType_NB /* must remain last */
+} M4VD_VideoType;
+
+typedef enum
+{
+    M4VD_kNone,
+    M4VD_kYUV420,
+    M4VD_kYUV422,
+    M4VD_kYUYV422,
+    M4VD_kRGB565,
+    M4VD_kBGR565
+
+} M4VD_OutputFormat;
+
+
+/* ----- structure definitions ----- */
+
+typedef struct
+{
+    M4OSA_MemAddr32 pBuffer;              /**< pointer to video buffer - 32 bits aligned    */
+    M4OSA_UInt32  bufferSize;             /**< the size in bytes of the buffer            */
+
+} M4VD_VideoBuffer;
+
+typedef struct
+{
+    M4OSA_UInt32 aWidth;                        /**< Width of the Image        */
+    M4OSA_UInt32 aHeight;                        /**< Height of the Image    */
+
+} M4VD_ImageSize;
+
+typedef struct
+{
+    M4OSA_MemAddr8 pBuffer;                        /**< Pointer to the decoder configuration */
+    M4OSA_UInt32 aSize;                            /**< Size of the buffer */
+
+} M4VD_DecoderConfig;
+
+typedef struct
+{
+    M4VD_ImageSize        anImageSize;            /**<Size of the image*/
+    M4VD_DecoderConfig    decoderConfiguration;    /**<configuration of the decoder*/
+
+} M4VD_StreamInfo;
+
+
+/* ----- callbacks prototypes ----- */
+
+typedef M4OSA_ERR (M4VD_CB_signalDecoderOver_fct)( M4OSA_Void* signalTarget,
+                                                    M4OSA_Double frameTime, M4OSA_ERR err);
+typedef M4OSA_ERR (M4VD_CB_signalRenderOver_fct) ( M4OSA_Void* signalTarget,
+                                                    M4OSA_Double frameTime, M4OSA_ERR err);
+
+typedef struct
+{
+    M4OSA_Void*                        m_pSignalTarget;
+
+    /* decoder callbacks that need to be raised by HW decoder functions */
+    M4VD_CB_signalDecoderOver_fct*    m_pFctSignalDecoderOver;
+    M4VD_CB_signalRenderOver_fct*     m_pFctSignalRenderOver;
+
+} M4VD_SignalingInterface;
+
+
+/* ----- Hardware decoder functions set ----- */
+
+typedef void* M4VD_Context; /* Video Decoder context (for M4VD_HW_xxxx functions) */
+
+
+/* common */
+typedef M4OSA_ERR (M4VD_init_fct)          ( M4VD_Context*, M4VD_SignalingInterface* );
+typedef M4OSA_ERR (M4VD_setOption_fct)     ( M4VD_Context, M4VD_OptionID, M4OSA_DataOption );
+typedef M4OSA_ERR (M4VD_getOption_fct)     ( M4VD_Context, M4VD_OptionID, M4OSA_DataOption* );
+typedef M4OSA_ERR (M4VD_openDecoder_fct) ( M4VD_Context, M4VD_VideoType, M4VD_StreamInfo*,
+                                            M4VD_OutputFormat*, M4OSA_Void* );
+typedef M4OSA_ERR (M4VD_stepDecode_fct)    ( M4VD_Context, M4VD_VideoBuffer*, M4OSA_Double );
+typedef M4OSA_ERR (M4VD_stepRender_fct)    ( M4VD_Context, M4VIFI_ImagePlane*, M4OSA_Double );
+typedef M4OSA_ERR (M4VD_closeDecoder_fct)( M4VD_Context );
+typedef M4OSA_ERR (M4VD_cleanUp_fct)       ( M4VD_Context );
+typedef M4OSA_ERR (M4VD_setOutputFilter_fct)( M4VD_Context, M4VIFI_PlanConverterFunctionType*,
+                                                M4OSA_Void*);
+
+typedef struct
+{
+    M4VD_init_fct*               m_pFctInitVideoDecoder;
+    M4VD_setOption_fct*          m_pFctSetOption;
+    M4VD_getOption_fct*          m_pFctGetOption;
+    M4VD_openDecoder_fct*        m_pFctOpenDecoder;
+    M4VD_stepDecode_fct*         m_pFctStepDecode;
+    M4VD_stepRender_fct*         m_pFctStepRender;
+    M4VD_closeDecoder_fct*       m_pFctClose;
+    M4VD_cleanUp_fct*            m_pFctCleanUp;
+    M4VD_setOutputFilter_fct*    m_pFctSetOutputFilter;
+} M4VD_Interface;
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4VD_HW_API_H__ */
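
M4VD_Interface is a plain function table, so a hardware decoder integration only has to expose
implementations matching the typedefs above and wire them in. A sketch of that wiring follows
(not part of the patch; all MyHwDec_* symbols are hypothetical).

#include "M4VD_HW_API.h"

/* Hypothetical implementations provided by the HW decoder integrator; each one must match the
   corresponding function typedef declared in M4VD_HW_API.h. */
extern M4VD_init_fct            MyHwDec_init;
extern M4VD_setOption_fct       MyHwDec_setOption;
extern M4VD_getOption_fct       MyHwDec_getOption;
extern M4VD_openDecoder_fct     MyHwDec_open;
extern M4VD_stepDecode_fct      MyHwDec_stepDecode;
extern M4VD_stepRender_fct      MyHwDec_stepRender;
extern M4VD_closeDecoder_fct    MyHwDec_close;
extern M4VD_cleanUp_fct         MyHwDec_cleanUp;
extern M4VD_setOutputFilter_fct MyHwDec_setOutputFilter;

/* Function table handed to the shell, e.g. through the externalFuncs field of the structure
   behind M4DECODER_EXTERNAL_UserDataType. */
static const M4VD_Interface gMyHwDecInterface =
{
    &MyHwDec_init,               /* m_pFctInitVideoDecoder */
    &MyHwDec_setOption,          /* m_pFctSetOption        */
    &MyHwDec_getOption,          /* m_pFctGetOption        */
    &MyHwDec_open,               /* m_pFctOpenDecoder      */
    &MyHwDec_stepDecode,         /* m_pFctStepDecode       */
    &MyHwDec_stepRender,         /* m_pFctStepRender       */
    &MyHwDec_close,              /* m_pFctClose            */
    &MyHwDec_cleanUp,            /* m_pFctCleanUp          */
    &MyHwDec_setOutputFilter     /* m_pFctSetOutputFilter  */
};
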
diff --git a/libvideoeditor/vss/common/inc/M4VD_Tools.h b/libvideoeditor/vss/common/inc/M4VD_Tools.h
new file mode 100644
index 0000000..83e036a
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VD_Tools.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4VD_TOOLS_H__
+#define __M4VD_TOOLS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "NXPSW_CompilerSwitches.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Types.h"
+/* ----- bitstream parser ----- */
+
+typedef struct
+{
+    M4OSA_UInt32 stream_byte;
+    M4OSA_UInt32 stream_index;
+    M4OSA_MemAddr8 in;
+
+} M4VS_Bitstream_ctxt;
+
+M4OSA_UInt32 M4VD_Tools_GetBitsFromMemory(M4VS_Bitstream_ctxt* parsingCtxt,
+                                            M4OSA_UInt32 nb_bits);
+M4OSA_ERR M4VD_Tools_WriteBitsToMemory(M4OSA_UInt32 bitsToWrite,
+                                         M4OSA_MemAddr32 dest_bits,
+                                         M4OSA_UInt8 offset, M4OSA_UInt8 nb_bits);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4VD_TOOLS_H__ */
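
A minimal sketch of how the bitstream helper above can walk a DSI buffer (not part of the patch).
The initial values of stream_byte and stream_index (0 and 8, so that the first call fetches a
fresh byte) are an assumption about the parser's convention, not something this header guarantees.

#include "M4VD_Tools.h"

/* Read the 24-bit start-code prefix at the beginning of a DSI buffer, 8 bits at a time. */
static M4OSA_UInt32 readStartCodePrefix(M4OSA_MemAddr8 pDsi)
{
    M4VS_Bitstream_ctxt ctxt;
    M4OSA_UInt32        prefix;

    ctxt.stream_byte  = 0;     /* no byte cached yet (assumed convention)            */
    ctxt.stream_index = 8;     /* forces the first call to load a new byte (assumed) */
    ctxt.in           = pDsi;  /* start of the decoder specific info                 */

    prefix = M4VD_Tools_GetBitsFromMemory(&ctxt, 8);
    prefix = (prefix << 8) | M4VD_Tools_GetBitsFromMemory(&ctxt, 8);
    prefix = (prefix << 8) | M4VD_Tools_GetBitsFromMemory(&ctxt, 8);

    return prefix;             /* 0x000001 when the DSI starts with an MPEG-4 start code */
}
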
diff --git a/libvideoeditor/vss/common/inc/M4VE_API.h b/libvideoeditor/vss/common/inc/M4VE_API.h
new file mode 100755
index 0000000..5c27003
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VE_API.h
@@ -0,0 +1,824 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file   M4VE_API.h
+ * @note   This file declares the generic shell interface retrieving function
+ *         of any external encoder.
+******************************************************************************
+*/
+
+#ifndef __M4VE_API_H__
+#define __M4VE_API_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ *    OSAL types definition */
+#include "M4OSA_Types.h"
+#include "M4OSA_Time.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_CoreID.h"
+#include "M4OSA_OptionID.h"
+
+/**
+ *    Include Video filters interface definition (for the M4VIFI_ImagePlane type) */
+#include "M4VIFI_FiltersAPI.h"
+
+
+/**
+ ************************************************************************
+ * VE Errors & Warnings definition
+ ************************************************************************
+*/
+#define    M4ERR_VE_FATAL        ((M4OSA_ERR)M4OSA_ERR_CREATE(M4_ERR, M4VE_EXTERNAL, 0x000000))
+
+
+/**
+ *********************************************************************************************
+ * enum        M4VE_EncoderMode
+ * @brief    This enum defines in which mode the external encoder will be used
+ *            ("Standalone encoder" or "Encoder + Grabber").
+ *********************************************************************************************
+ */
+typedef enum
+{
+    M4VE_kSEMode,        /**< "Standalone Encoder" mode */
+    M4VE_kEGMode        /**< "Encoder + Grabber" mode */
+} M4VE_EncoderMode;
+
+
+/**
+ *********************************************************************************************
+ * enum        M4VE_EncoderType
+ * @brief    This enum defines the supported encoder types.
+ *********************************************************************************************
+ */
+typedef enum
+{
+    M4VE_kMpeg4VideoEnc,     /**< MPEG-4 */
+    M4VE_kH263VideoEnc,      /**< H263 */
+    M4VE_kH264VideoEnc,      /**< H264 */
+    M4VE_kMJPEGEnc,            /**< MJPEG */
+    M4VE_kEncoderType_NB
+} M4VE_EncoderType;
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_ImageSize
+ * @brief    This structure defines video frame size (for both grabbing and encoding).
+ *********************************************************************************************
+ */
+typedef struct
+{
+    M4OSA_UInt32     width;     /**< Width of the Image */
+    M4OSA_UInt32     height;    /**< Height of the Image */
+} M4VE_ImageSize;
+
+
+/**
+ *********************************************************************************************
+ * enum        M4VE_FormatConfig
+ * @brief    This enum defines the input frame format for the grabbing
+ *            part of the external encoder.
+ *********************************************************************************************
+*/
+typedef enum
+{
+    M4VE_kYUV420=0,    /**< YUV 4:2:0 planar (standard input for mpeg-4 video) */
+    M4VE_kYUV422,    /**< YUV422 planar */
+    M4VE_kYUYV,        /**< YUV422 interlaced, luma first */
+    M4VE_kUYVY,        /**< YUV422 interlaced, chroma first */
+    M4VE_kJPEG,        /**< JPEG compressed frames */
+    M4VE_kRGB444,    /**< RGB 12 bits 4:4:4 */
+    M4VE_kRGB555,    /**< RGB 15 bits 5:5:5 */
+    M4VE_kRGB565,    /**< RGB 16 bits 5:6:5 */
+    M4VE_kRGB24,    /**< RGB 24 bits 8:8:8 */
+    M4VE_kRGB32,    /**< RGB 32 bits  */
+    M4VE_kBGR444,    /**< BGR 12 bits 4:4:4 */
+    M4VE_kBGR555,    /**< BGR 15 bits 5:5:5 */
+    M4VE_kBGR565,    /**< BGR 16 bits 5:6:5 */
+    M4VE_kBGR24,    /**< BGR 24 bits 8:8:8 */
+    M4VE_kBGR32        /**< BGR 32 bits  */
+} M4VE_FormatConfig;
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_Framerate
+ * @brief    This structure defines the maximum framerate the encoder will have
+ *            at input and will generate at output (in frames per second).
+ *********************************************************************************************
+*/
+typedef struct
+{
+    M4OSA_UInt32     framerateNum;    /**< Framerate numerator */
+    M4OSA_UInt32     framerateDen;    /**< Framerate denominator */
+} M4VE_Framerate;
+/**<     For example, a framerate of 29.97 fps for H263 encoding will be expressed as:
+    framerateNum = 30000
+    framerateDen = 1001 */
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_GrabbingParameters
+ * @brief    This structure defines the grabbing parameters set at open step.
+ *********************************************************************************************
+*/
+typedef struct
+{
+    M4VE_ImageSize        size;        /**< Size of grabbed frames */
+    M4VE_FormatConfig    format;        /**< Format of the grabbed frames (YUV420, RGB565,etc.) */
+} M4VE_GrabbingParameters;
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_EncodingParameters
+ * @brief    This structure defines the encoding parameters set at open step.
+ *********************************************************************************************
+*/
+typedef struct
+{
+    M4VE_EncoderType  type;             /**< coding type (H263/H264/MPEG-4)*/
+    M4VE_ImageSize    size;             /**< Size of frames to encode */
+    M4OSA_Bool          bRateControlEnable; /**< Flag to enable/disable rate control */
+    M4OSA_Bool          bLowDelay;        /**< force encoder in "low delay" mode */
+    M4OSA_UInt32      bitrate;             /**< Average targeted bitrate in bit per sec */
+    M4VE_Framerate    framerate;        /**< Maximum input framerate */
+    M4OSA_UInt32      timescale;       /**< timescale of the video bitstream */
+    M4OSA_Context     pUserSettings;   /**< Additional user settings passed by the
+                                            application to the service at codec registration */
+} M4VE_EncodingParameters;
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_VideoBuffer
+ * @brief    This structure defines the output buffer where the encoded data
+ *            are stored by the encoder.
+ *********************************************************************************************
+*/
+typedef struct
+{
+    M4OSA_MemAddr32 pBuffer;    /**< pointer to video buffer 32 bits aligned */
+    M4OSA_UInt32    bufferSize; /**< the size in bytes of the buffer */
+} M4VE_VideoBuffer;
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_ParameterSet
+ * @brief    Parameter set structure used for H264 headers.
+ *********************************************************************************************
+*/
+typedef struct
+{
+    M4OSA_UInt16    length;                /**< Length in bytes of the parameter set unit*/
+    M4OSA_UInt8*    pParameterSetUnit;  /**< Parameter set unit bytes*/
+} M4VE_ParameterSet;
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_H264HeaderBuffer
+ * @brief    This structure defines the buffer where the stream header is stored
+ *            by the encoder, in case of H264
+ *********************************************************************************************
+*/
+typedef struct
+{
+    M4OSA_UInt8            NALUnitLength;             /**< length in bytes of a NAL access Unit */
+    M4OSA_UInt8            nOfSequenceParametersSets; /**< Number of sequence parameter sets*/
+    M4OSA_UInt8            nOfPictureParametersSets;  /**< Number of picture parameter sets*/
+    M4VE_ParameterSet    *pSequenceParameterSets;    /**< Sequence parameter set array */
+    M4VE_ParameterSet    *pPictureParameterSets;        /**< Picture parameter set array */
+} M4VE_H264HeaderBuffer;
+
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_HeaderBuffer
+ * @brief    This structure defines the buffer where the stream header is stored
+ *            by the encoder.
+ *********************************************************************************************
+*/
+typedef struct
+{
+    union
+    {
+        M4VE_VideoBuffer         header;     /**< MPEG-4, H263, MJPEG */
+        M4VE_H264HeaderBuffer     H264Header; /**< H264 */
+    }M4VE_SpecificHeader;
+} M4VE_HeaderBuffer;
+
+
+/**
+ *********************************************************************************************
+ * enum        M4VE_OptionID
+ * @brief    This defines the supported options handled by the video encoder interface.
+ *********************************************************************************************
+*/
+typedef enum
+{
+    dummy=0
+} M4VE_OptionID;
+
+/**
+ *********************************************************************************************
+ * M4OSA_Int32 (*M4VE_SignalOpenEncoderDone)(M4OSA_Context pUserData, M4OSA_ERR errCode);
+ * @brief    This function signals to the service that the external encoder is opened.
+ * @note    The external encoder returns one of the following codes in the errCode parameter:
+ *            M4NO_ERROR    There is no error
+ *            M4ERR_VE_FATAL    a fatal error occurred
+ * @param    pUserData:                (IN) User data provided by the service at init step.
+ * @param    errCode :                (IN) Error code returned to the service internal layers.
+ * @return    M4NO_ERROR:                there is no error.
+ * @return    M4ERR_VE_FATAL:        a fatal error occurred.
+ *********************************************************************************************
+*/
+typedef M4OSA_Int32 (*M4VE_SignalOpenEncoderDone)(M4OSA_Context pUserData, M4OSA_ERR errCode);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_Int32 (*M4VE_SignalHeaderDone)(M4OSA_Context pUserData, M4OSA_ERR errCode,
+ *                                       M4VE_HeaderBuffer *pBuffer);
+ * @brief    This function signals to the service that the stream header is ready.
+ * @note    The external encoder returns one of the following codes in the errCode parameter:
+ *            M4NO_ERROR    There is no error
+ *            M4ERR_VE_FATAL    a fatal error occurred
+ * @param    pUserData:                (IN) User data provided by the service at init step.
+ * @param    errCode :                (IN) Error code returned to the service internal layers.
+ * @param    pBuffer :                (IN) Stream header.
+ * @return    M4NO_ERROR:                there is no error.
+ * @return    M4ERR_PARAMETER            pBuffer field is null or invalid.
+ * @return    M4ERR_VE_FATAL:        a fatal error occurred.
+ *********************************************************************************************
+*/
+typedef M4OSA_Int32 (*M4VE_SignalHeaderDone)(M4OSA_Context pUserData, M4OSA_ERR errCode,
+                     M4VE_HeaderBuffer *pBuffer);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_Int32 (*M4VE_SignalStartGrabberDone)(M4OSA_Context pUserData, M4OSA_ERR errCode);
+ * @brief    This function signals to the service that the grabbing part is started.
+ *            This callback is unused in the "standalone encoder" mode.
+ * @note    The external encoder returns one of the following codes in the errCode parameter:
+ *            M4NO_ERROR    There is no error
+ *            M4ERR_VE_FATAL    a fatal error occurred
+ * @param    pUserData:                (IN) User data provided by the service at init step.
+ * @param    errCode :                (IN) Error code returned to the service internal layers.
+ * @return    M4NO_ERROR:                there is no error.
+ * @return    M4ERR_VE_FATAL:        a fatal error occurred.
+ *********************************************************************************************
+*/
+typedef M4OSA_Int32 (*M4VE_SignalStartGrabberDone)(M4OSA_Context pUserData, M4OSA_ERR errCode);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_Int32 (*M4VE_SignalStartEncoderDone)(M4OSA_Context pUserData, M4OSA_ERR errCode);
+ * @brief    This function signals to the service that the external encoder is started.
+ *            This callback is unused in the "standalone encoder" mode.
+ * @note    The external encoder returns one of the following codes in the errCode parameter:
+ *            M4NO_ERROR    There is no error
+ *            M4ERR_VE_FATAL    a fatal error occurred
+ * @param    pUserData:                (IN) User data provided by the service at init step.
+ * @param    errCode :                (IN) Error code returned to the service internal layers.
+ * @return    M4NO_ERROR:                there is no error.
+ * @return    M4ERR_VE_FATAL:        a fatal error occurred.
+ *********************************************************************************************
+*/
+typedef M4OSA_Int32 (*M4VE_SignalStartEncoderDone)(M4OSA_Context pUserData, M4OSA_ERR errCode);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_Int32 (*M4VE_SignalEncodeDone)(M4OSA_Context pUserData, M4OSA_ERR errCode,
+ *              M4OSA_Time cts, M4VE_VideoBuffer* pBuffer);
+ * @brief    This function signals to the service that the encoding of a frame is done.
+ *            The integrator must call this function when the encoding of the video
+ *            frame is completed (for example in an interrupt callback).
+ * @note    The external encoder returns one of the following codes in the errCode parameter:
+ *            M4NO_ERROR    There is no error
+ *            M4ERR_VE_FATAL    a fatal error occurred
+ * @param    pUserData:                (IN) User data provided by the service at init step.
+ * @param    errCode :                (IN) Error code returned to the service internal layers.
+ * @param    cts :                    (IN) Time of the encoded frame (from stepEncode).
+ * @param    pBuffer :                (IN) Encoded data Buffer.
+ * @return    M4NO_ERROR:                there is no error.
+ * @return    M4ERR_PARAMETER            At least one parameter is null or invalid.
+ * @return    M4ERR_VE_FATAL:        a fatal error occurred.
+ *********************************************************************************************
+*/
+typedef M4OSA_Int32 (*M4VE_SignalEncodeDone)(M4OSA_Context pUserData, M4OSA_ERR    errCode,
+                         M4OSA_Time cts, M4VE_VideoBuffer* pBuffer);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_Int32 (*M4VE_SignalStopGrabberDone)(M4OSA_Context pUserData, M4OSA_ERR errCode);
+ * @brief    This function signals to the service that the grabbing part is stopped.
+ *            This callback is unused in the "standalone encoder" mode.
+ * @note    The external encoder returns one of the following codes in the errCode parameter:
+ *            M4NO_ERROR    There is no error
+ *            M4ERR_VE_FATAL    a fatal error occurred
+ * @param    pUserData:                (IN) User data provided by the service at init step.
+ * @param    errCode :                (IN) Error code returned to the service internal layers.
+ * @return    M4NO_ERROR:                there is no error.
+ * @return    M4ERR_VE_FATAL:        a fatal error occurred.
+ *********************************************************************************************
+*/
+typedef M4OSA_Int32 (*M4VE_SignalStopGrabberDone)(M4OSA_Context pUserData, M4OSA_ERR errCode);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_Int32 (*M4VE_SignalStopEncoderDone)(M4OSA_Context    pUserData, M4OSA_ERR errCode);
+ * @brief    This function signals to the service that the external encoder is stopped.
+ *            This callback is unused in the "standalone encoder" mode.
+ * @note    The external encoder returns one of the following codes in the errCode parameter:
+ *            M4NO_ERROR    There is no error
+ *            M4ERR_VE_FATAL    a fatal error occurred
+ * @param    pUserData:                (IN) User data provided by the service at init step.
+ * @param    errCode :                (IN) Error code returned to the service internal layers.
+ * @return    M4NO_ERROR:                there is no error.
+ * @return    M4ERR_VE_FATAL:        a fatal error occurred.
+ *********************************************************************************************
+*/
+typedef M4OSA_Int32 (*M4VE_SignalStopEncoderDone)(M4OSA_Context    pUserData, M4OSA_ERR errCode);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_Int32 (*M4VE_SignalCloseEncoderDone)(M4OSA_Context pUserData, M4OSA_ERR errCode);
+ * @brief    This function signals to the service that the external encoder is closed.
+ * @note    The external encoder returns one of the following codes in the errCode parameter:
+ *            M4NO_ERROR    There is no error
+ *            M4ERR_VE_FATAL    a fatal error occurred
+ * @param    pUserData:                (IN) User data provided by the service at init step.
+ * @param    errCode :                (IN) Error code returned to the service internal layers.
+ * @return    M4NO_ERROR:                there is no error.
+ * @return    M4ERR_VE_FATAL:        a fatal error occurred.
+ *********************************************************************************************
+*/
+typedef M4OSA_Int32 (*M4VE_SignalCloseEncoderDone)(M4OSA_Context pUserData, M4OSA_ERR errCode);
+
+
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_GenericCallback
+ * @brief    This structure is used to pass the generic callbacks, i.e. the ones that are used
+ *            in both "Standalone Encoder" and "Encoder + Grabber" modes.
+ *********************************************************************************************
+*/
+typedef struct
+{
+    M4VE_SignalOpenEncoderDone        pOpenEncoderDone; /**< Callback to use at open completion */
+    M4VE_SignalHeaderDone             pHeaderDone;         /**< Callback to use when the stream
+                                                                 header is ready */
+    M4VE_SignalEncodeDone             pEncodeDone;         /**< Callback to use for any frame
+                                                                    encoding completion */
+    M4VE_SignalCloseEncoderDone       pCloseEncoderDone;/**< Callback to use at close completion */
+} M4VE_GenericCallback;    /**< Callbacks used in all encoder modes */
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_EGModeCallback
+ * @brief    This structure is used to pass the callbacks used in the "Encoder + Grabber" mode
+ *********************************************************************************************
+*/
+typedef struct
+{
+    M4VE_SignalStartGrabberDone     pStartGrabberDone;/**< Callback to use at start
+                                                            completion of the grabber part*/
+    M4VE_SignalStartEncoderDone     pStartEncoderDone;/**< Callback to use at start
+                                                            completion of the encoder part*/
+    M4VE_SignalStopGrabberDone      pStopGrabberDone; /**< Callback to use at stop
+                                                            completion of the grabber part*/
+    M4VE_SignalStopEncoderDone      pStopEncoderDone; /**< Callback to use at stop
+                                                            completion of the encoder part*/
+} M4VE_EGModeCallback; /**< Callbacks used in "Encoder + Grabber" mode */
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_SEModeCallback
+ * @brief    This structure is used to pass the callbacks used in the "Standalone Encoder" mode
+ * @note    There is no specific callback for the "Standalone Encoder" mode, but a dummy one
+ *          must be declared so that the structure is not empty, as required by some compilers.
+ *********************************************************************************************
+*/
+typedef M4OSA_Int32 (*M4VE_SEDummyCB)  (M4OSA_Context pUserData, M4OSA_ERR errCode);
+
+typedef struct
+{
+    M4VE_SEDummyCB                  pDummySECB; /**< No specific callback for
+                                                        Standalone encoder mode */
+} M4VE_SEModeCallback; /**< Callbacks used in "Standalone Encoder" mode */
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_CallbackInterface
+ * @brief    This structure is the container for the whole set of callback used by external encoder
+  *********************************************************************************************
+*/
+
+typedef struct
+{
+    M4VE_GenericCallback    genericCallback;/**< Callbacks used in all modes */
+    union
+    {
+        M4VE_EGModeCallback    EGModeCallback; /**< Callbacks used in "Encoder + Grabber" mode */
+        M4VE_SEModeCallback    SEModeCallback; /**< Callbacks used in "Standalone Encoder" mode */
+    } M4VE_SpecificModeCallBack;
+    M4OSA_Context            pUserData;      /**< Internal user data to be retrieved in each
+                                                    callback above */
+} M4VE_CallbackInterface;
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_ERR (*M4VE_initEncoder_fct)(M4OSA_Context* pContext,
+ *                                       M4VE_CallbackInterface* pCallbackInterface);
+ * @brief    This function initializes the external video encoder API.
+ * @note    This function typically allocates the user context that will be provided
+ *            to the other functions as their first argument. The second argument is
+ *            the callback interface given by the service. Encoder implementation is supposed
+ *            to use these callbacks in response to each asynchronous API function.
+ *            All these callbacks must be called with the pUserData field specified
+ *            by the service inside the M4VE_CallbackInterface structure.
+ * @param    pContext:            (OUT) Execution context of the encoder.
+ * @param    pCallbackInterface:    (IN) Callback interface.
+ * @return    M4NO_ERROR:            there is no error.
+ * @return    M4ERR_PARAMETER:    At least one parameter is not correct (NULL or invalid).
+ * @return    M4ERR_ALLOC:        there is no more available memory.
+ *********************************************************************************************
+*/
+typedef    M4OSA_ERR (*M4VE_initEncoder_fct)(M4OSA_Context* pContext,
+                     M4VE_CallbackInterface*    pCallbackInterface);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_ERR (*M4VE_setOption_fct)(M4OSA_Context, M4VE_OptionID, M4OSA_DataOption);
+ * @brief    This function is used to set an option in the video encoder interface.
+ * @note    none
+ * @param    pContext:        (IN) Execution context of the encoder.
+ * @param    optionId:        (IN) Id of the option to set.
+ * @param    pValue:            (IN) Pointer of the option data to set.
+ * @return    M4NO_ERROR:            there is no error.
+ * @return    M4ERR_PARAMETER:    At least one parameter is not correct (NULL or invalid).
+ * @return    M4ERR_BAD_OPTION_ID:The requested option Id is invalid.
+ *********************************************************************************************
+*/
+typedef M4OSA_ERR (*M4VE_setOption_fct)(M4OSA_Context pContext,    M4VE_OptionID optionId,
+                                        M4OSA_DataOption pValue);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_ERR (*M4VE_getOption_fct)(M4OSA_Context, M4VE_OptionID, M4OSA_DataOption*);
+ * @brief    This function is used to retrieve an option in the video interface.
+ * @note    none
+ * @param    pContext:        (IN) Execution context of the encoder.
+ * @param    optionId:        (IN) Id of the option to retrieve.
+ * @param    pValue:            (OUT) Pointer to the location where the requested option will
+ *                                      be stored.
+ * @return    M4NO_ERROR:        there is no error.
+ * @return    M4ERR_PARAMETER:    At least one parameter is not correct (NULL or invalid).
+ * @return    M4ERR_BAD_OPTION_ID:The requested option Id is invalid.
+ *********************************************************************************************
+*/
+typedef M4OSA_ERR (*M4VE_getOption_fct)(M4OSA_Context pContext, M4VE_OptionID optionId,
+                             M4OSA_DataOption* pValue);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_ERR (*M4VE_openEncoder_fct)(M4OSA_Context pContext,
+ *                                     M4VE_GrabbingParameters *pGrabbingParams,
+ *                                     M4VE_EncodingParameters *pEncodingParams);
+ * @brief    This function opens an instance of the video encoder.
+ *            Both encoding and grabbing parameters are specified here.
+ * @note    This function is asynchronous, thus the external encoder must call the corresponding
+ *            M4VE_SignalOpenEncoderDone callback function when the opening step is internally
+ *            completed.
+ *            Please note that both grabber and encoder components are opened at this step in
+ *            the "encoder + grabber" mode. In response to this open, the encoder must also return
+ *            the stream header (including VOS, VO & VOL) using the M4VE_SignalHeaderDone callback
+ *            function. Usually the service waits for this callback between the
+ *            M4VE_SignalOpenEncoderDone
+ *            callback and the M4VE_SignalCloseEncoderDone callback in order to handle it.
+ * @param    pContext:            (IN) Execution context of the encoder.
+ * @param    pGrabbingParams:    (IN) Grabbing parameters (optional; when unused, it
+ *                                    must be NULL).
+ * @param    pEncodingParams:    (IN) Encoding parameters.
+ * @return    M4NO_ERROR:            there is no error.
+ * @return    M4ERR_PARAMETER:    At least one parameter is not correct (NULL or invalid).
+ * @return    M4ERR_ALLOC:        there is no more available memory.
+ * @return    M4ERR_STATE:        This call is not allowed in the current encoder state.
+ * @return    M4ERR_VE_FATAL:    The encoder could not be opened
+ *********************************************************************************************
+*/
+typedef M4OSA_ERR (*M4VE_openEncoder_fct)(M4OSA_Context pContext,
+                         M4VE_GrabbingParameters *pGrabbingParams,
+                          M4VE_EncodingParameters *pEncodingParams);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_ERR (*M4VE_forceIFrame_fct)(M4OSA_Context pContext);
+ * @brief    This function is used by the service to signal the external encoder that an Intra
+ *           refresh frame must be encoded. This function is used in both "Standalone Encoder" and
+ *            "Encoder + grabber" modes and can be called at any time during the encoding session.
+ * @note    For the "Encoder + Grabber" mode, this function can be called between the reception
+ *            of the M4VE_SignalStartEncoderDone callback and the call to M4VE_stopEncoder_fct.
+ *            For the "Standalone Encoder" mode, this function can be called between the reception
+ *            of the M4VE_SignalOpenEncoderDone callback and the call to M4VE_closeEncoder_fct.
+ *            The expected behavior is that the external encoder encodes an intra refresh frame
+ *            for one of the frames coming next to the call of M4VE_forceIFrame_fct.
+ * @param    pContext:            (IN) Execution context of the encoder.
+ * @return    M4NO_ERROR:            there is no error.
+ * @return    M4ERR_PARAMETER:    pContext field is not valid
+ * @return    M4ERR_STATE:        This call is not allowed in the current encoder state.
+ * @return    M4ERR_VE_FATAL:    The encoder could not handle this call
+ *********************************************************************************************
+*/
+typedef M4OSA_ERR (*M4VE_forceIFrame_fct)(M4OSA_Context pContext);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_ERR (*M4VE_releaseOutputBuffer_fct)(M4OSA_Context pContext, M4VE_VideoBuffer *pBuffer);
+ * @brief    This function is called by the service to signal that a particular output buffer,
+ *           provided in the M4VE_SignalEncodeDone callback by the external encoder, is no more
+ *           needed by the service and can be considered as free for any remaining data processing.
+ * @note    none.
+ * @param    pContext:            (IN) Execution context of the encoder.
+ * @param    pBuffer:            (IN) Encoded data Buffer.
+ * @return    M4NO_ERROR:            there is no error.
+ * @return    M4ERR_PARAMETER:    At least one parameter is not correct (NULL or invalid).
+ * @return    M4ERR_STATE:        This call is not allowed in the current encoder state.
+ * @return    M4ERR_VE_FATAL:    The encoder could not acknowledge the buffer release for any
+ *                                other reason.
+ *********************************************************************************************
+*/
+typedef M4OSA_ERR (*M4VE_releaseOutputBuffer_fct)(M4OSA_Context pContext,
+                                                    M4VE_VideoBuffer *pBuffer);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_ERR (*M4VE_closeEncoder_fct)(M4OSA_Context pContext);
+ * @brief    This function closes the encoding session.
+ * @note    This function is asynchronous, thus the external encoder must call the corresponding
+ *            M4VE_SignalCloseEncoderDone callback function when the closing step is internally
+ *            completed.
+ * @param    pContext:            (IN) Execution context of the encoder.
+ * @return    M4NO_ERROR:            there is no error.
+ * @return    M4ERR_PARAMETER:    pContext pointer is null or invalid.
+ * @return    M4ERR_STATE:        This call is not allowed in the current encoder state.
+ * @return    M4ERR_VE_FATAL:    The encoder could not be closed for any other reason.
+ *********************************************************************************************
+*/
+typedef M4OSA_ERR (*M4VE_closeEncoder_fct)(M4OSA_Context pContext);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_ERR (*M4VE_cleanUpEncoder_fct)(M4OSA_Context pContext);
+ * @brief    The function cleans up the encoder context.
+ * @note    none
+ * @param    pContext:            (IN) Execution context of the encoder.
+ * @return    M4NO_ERROR:            there is no error.
+ * @return    M4ERR_PARAMETER:    pContext pointer is null or invalid.
+ * @return    M4ERR_STATE:        This call is not allowed in the current encoder state.
+ * @return    M4ERR_VE_FATAL:    The encoder could not be closed for any other reason.
+ *********************************************************************************************
+*/
+typedef M4OSA_ERR (*M4VE_cleanUpEncoder_fct)(M4OSA_Context pContext);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_ERR (*M4VE_stepEncode_fct)(M4OSA_Context pContext,M4VIFI_ImagePlane *pInputPlane,
+ *                                  M4OSA_Time cts);
+ * @brief    The function gives a video frame to the external encoder in the "Standalone encoder"
+ *            mode. The input buffer consists of a raw YUV420 planar frame,
+ *            allocated by the service.
+ *            The time (cts) is the composition time stamp of the frame to encode and is unique
+ *            for each frame. This time is expressed in milliseconds.
+ * @note    This function is asynchronous and its completion is signaled by the
+ *            M4VE_SignalEncodeDone callback. This implies that the input buffer is kept valid
+ *            by the service until that callback is called. The encoded data are retrieved in
+ *            this callback function in a dedicated structure, allocated by the external encoder.
+ *            The input buffer (YUV raw frame) is considered by the service as free for any
+ *             remaining data processing after receiving the M4VE_SignalEncodeDone callback.
+ * @param    pContext:            (IN) Execution context of the encoder.
+ * @param    pInputPlane:        (IN) Input buffer where video frame is stored.
+ * @param    cts:                (IN) Composition time stamp in milliseconds.
+ * @return    M4NO_ERROR:            there is no error.
+ * @return    M4ERR_PARAMETER:    pContext field is not valid
+ * @return    M4ERR_ALLOC:        there is no more available memory.
+ * @return    M4ERR_STATE:        This call is not allowed in the current encoder state.
+ * @return    M4ERR_VE_FATAL:    The encoder could not encode the frame for any other reason.
+ *********************************************************************************************
+*/
+typedef M4OSA_ERR (*M4VE_stepEncode_fct)(M4OSA_Context pContext,M4VIFI_ImagePlane *pInputPlane,
+                                            M4OSA_Time cts);
+
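
To make the contract above concrete, here is an editorial sketch (not part of the patch) of the
shape a synchronous M4VE_stepEncode_fct implementation could take while still honouring the
asynchronous protocol: the result is reported only through M4VE_SignalEncodeDone, with the same
cts. MyEncoderContext and myEncodeYUV420() are hypothetical.

#include "M4VE_API.h"   /* for the types used below */

/* Hypothetical integrator-side context and compression routine. */
typedef struct
{
    M4VE_CallbackInterface* pCallbacks;   /* saved at M4VE_initEncoder_fct time */
    M4VE_VideoBuffer        outputBuffer; /* owned by the external encoder      */
} MyEncoderContext;

M4OSA_ERR myEncodeYUV420(MyEncoderContext* pCtx, M4VIFI_ImagePlane* pPlanes,
                         M4VE_VideoBuffer* pOut); /* hypothetical, provided elsewhere */

static M4OSA_ERR MyEncoder_stepEncode(M4OSA_Context pContext,
                                      M4VIFI_ImagePlane* pInputPlane, M4OSA_Time cts)
{
    MyEncoderContext* pCtx = (MyEncoderContext*)pContext;
    M4OSA_ERR         err;

    if ((M4OSA_NULL == pCtx) || (M4OSA_NULL == pInputPlane))
    {
        return M4ERR_PARAMETER;
    }

    /* Compress the YUV420 planes into the output buffer (hypothetical routine). */
    err = myEncodeYUV420(pCtx, pInputPlane, &pCtx->outputBuffer);

    /* Completion is always reported through the callback, with the frame's cts. After this
       call the service may reuse the input planes; the output buffer stays busy until the
       service calls M4VE_releaseOutputBuffer_fct. */
    pCtx->pCallbacks->genericCallback.pEncodeDone(pCtx->pCallbacks->pUserData, err, cts,
                                                  &pCtx->outputBuffer);

    return M4NO_ERROR;
}
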
+
+/**
+ *********************************************************************************************
+ * M4OSA_ERR (*M4VE_startGrabber_fct)(M4OSA_Context pContext);
+ * @brief    This function starts the grabber sub-component of the external encoder, in the
+ *            "encoder + grabber" mode. This function is asynchronous, thus the external
+ *            encoder must call the corresponding M4VE_SignalStartGrabberDone callback function
+ *            when this start is internally effective.
+ * @note    During this step, the service waits for the grabber to launch any video preview if
+ *            needed.
+ * @param    pContext:            (IN) Execution context of the encoder.
+ * @return    M4NO_ERROR:            there is no error.
+ * @return    M4ERR_PARAMETER:    pContext field is not valid
+ * @return    M4ERR_ALLOC:        there is no more available memory.
+ * @return    M4ERR_STATE:        This call is not allowed in the current encoder state.
+ * @return    M4ERR_VE_FATAL:    the encoder could not be started for any other reason.
+ *********************************************************************************************
+*/
+typedef M4OSA_ERR (*M4VE_startGrabber_fct)(M4OSA_Context pContext);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_ERR (*M4VE_startEncoder_fct)(M4OSA_Context pContext);
+ * @brief    This function starts the video encoder in the "encoder + grabber" mode.
+ * @note    This function is asynchronous, thus the external encoder must call the corresponding
+ *            M4VE_SignalStartEncoderDone callback function when this start is internally
+ *            effective.
+ *            After the completion of this asynchronous function, the service waits for the
+ *            external encoder to periodically call the M4VE_SignalEncodeDone callback each time
+ *            a new frame has been encoded. The external encoder must expect to have several
+ *            M4VE_startEncoder_fct calls before being closed. See the description of
+ *            M4VE_stopEncoder_fct function for the expected behaviour.
+ * @param    pContext:            (IN) Execution context of the encoder.
+ * @return    M4NO_ERROR:            there is no error.
+ * @return    M4ERR_PARAMETER:    pContext field is not valid
+ * @return    M4ERR_ALLOC:        there is no more available memory.
+ * @return    M4ERR_STATE:        This call is not allowed in the current encoder state.
+ * @return    M4ERR_VE_FATAL:    the encoder could not be started for any other reason.
+ *********************************************************************************************
+*/
+typedef M4OSA_ERR (*M4VE_startEncoder_fct)(M4OSA_Context pContext);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_ERR M4OSA_ERR (*M4VE_stopGrabber_fct)(M4OSA_Context pContext);
+ * @brief    This function stops the video grabber in the "encoder + grabber" mode.
+ * @note    This function is asynchronous, thus the external encoder must call the corresponding
+ *          M4VE_SignalStopGrabberDone callback function when this stop is internally effective.
+ *          During this step, the service waits for the grabber to stop the video preview
+ *          if needed.
+ * @param    pContext:            (IN) Execution context of the encoder.
+ * @return    M4NO_ERROR:            there is no error.
+ * @return    M4ERR_PARAMETER:    pContext field is not valid
+ * @return    M4ERR_STATE:        This call is not allowed in the current encoder state.
+ * @return    M4ERR_VE_FATAL:    the encoder could not be stopped for any other reason.
+ *********************************************************************************************
+*/
+typedef M4OSA_ERR (*M4VE_stopGrabber_fct)(M4OSA_Context pContext);
+
+
+/**
+ *********************************************************************************************
+ * M4OSA_ERR (*M4VE_stopEncoder_fct)(M4OSA_Context pContext);
+ * @brief    This function stops the video encoder in the "encoder + grabber" mode.
+ * @note    This function is asynchronous, thus the external encoder must call the corresponding
+ *            M4VE_SignalStopEncoderDone callback function when this stop is internally effective.
+ *            After the reception of this callback, the service considers that no new frame will be
+ *            retrieved via the M4VE_SignalEncodeDone callback.
+ *            The external encoder must expect to have a possible call to M4VE_startEncoder_fct
+ *            after M4VE_stopEncoder_fct. In this case, the external encoder must consider that it
+ *             has been paused/resumed. The expected behaviour is the following:
+ *            - The result of these two encoding sessions is a single standalone stream; no header
+ *            is generated for the new session. The external encoder is free to encode a refresh
+ *            frame (like an I VOP) for the new session.
+ *            - The time stamps of the new session must directly follow the time stamps of the
+ *            previous one (i.e. no time gap coming from the delay between the stop of the first
+ *            session and the start of the new one).
+ * @param    pContext:            (IN) Execution context of the encoder.
+ * @return    M4NO_ERROR:            there is no error.
+ * @return    M4ERR_PARAMETER:    pContext field is not valid
+ * @return    M4ERR_STATE:        This call is not allowed in the current encoder state.
+ * @return    M4ERR_VE_FATAL:    the encoder could not be stopped for any other reason.
+ *********************************************************************************************
+*/
+typedef M4OSA_ERR (*M4VE_stopEncoder_fct)(M4OSA_Context pContext);
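+/* Illustrative sketch of the pause/resume rule described above: a hypothetical
+   encoder can remember where the first session ended and rebase the time stamps of
+   the resumed session so no time hole appears between the two sessions. All names
+   below are examples only.
+
+   // when M4VE_stopEncoder_fct is called:
+   pCtx->ctsOffset = pCtx->lastDeliveredCts + pCtx->frameDuration;
+
+   // for each frame of the session started by the next M4VE_startEncoder_fct:
+   deliveredCts = pCtx->ctsOffset + (rawCts - firstRawCtsOfNewSession);
+*/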
+
+
+
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_GenericInterface
+ * @brief    The M4VE_GenericInterface structure defines the set of functions used in
+ *               both encoder modes.
+ *********************************************************************************************
+*/
+typedef struct
+{
+    M4VE_initEncoder_fct            m_pFctInitEncoder;
+    M4VE_setOption_fct                m_pFctSetOption;
+    M4VE_getOption_fct                m_pFctGetOption;
+    M4VE_openEncoder_fct            m_pFctOpenEncoder;
+    M4VE_forceIFrame_fct            m_pFctForceIFrame;
+    M4VE_releaseOutputBuffer_fct    m_pFctReleaseOutputBuffer;
+    M4VE_closeEncoder_fct            m_pFctCloseEncoder;
+    M4VE_cleanUpEncoder_fct            m_pFctCleanUpEncoder;
+} M4VE_GenericInterface;            /**< Functions used in both "Standalone Encoder" and
+                                        "Encoder + Grabber" modes */
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_SEModeInterface
+ * @brief    The M4VE_SEModeInterface structure defines the set of functions used in
+ *              "Standalone Encoder" mode.
+ *********************************************************************************************
+*/
+typedef struct
+{
+    M4VE_stepEncode_fct            m_pFctStepEncode;
+} M4VE_SEModeInterface;            /**< Functions used only in "Standalone Encoder" mode */
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_EGModeInterface
+ * @brief    The M4VE_EGModeInterface structure defines the set of functions used in
+ *              "Encoder + Grabber" mode.
+ *********************************************************************************************
+*/
+typedef struct
+{
+    M4VE_startGrabber_fct        m_pFctStartGrabber;
+    M4VE_startEncoder_fct        m_pFctStartEncoder;
+    M4VE_stopGrabber_fct        m_pFctStopGrabber;
+    M4VE_stopEncoder_fct        m_pFctStopEncoder;
+} M4VE_EGModeInterface;            /**< Functions used only in "Encoder + Grabber" mode */
+
+
+
+/**
+ *********************************************************************************************
+ * struct    M4VE_Interface
+ * @brief    The M4VE_Interface structure stores pointers to the video encoder functions.
+ *********************************************************************************************
+*/
+typedef struct
+{
+    M4VE_GenericInterface        genericInterface;    /**< Functions used in both modes */
+    M4VE_EncoderMode            encoderMode;        /**< "Standalone Encoder"
+                                                    or "Encoder + Grabber" */
+    union
+    {
+        M4VE_SEModeInterface    SEModeInterface;    /**< Functions used only in
+                                                    "Standalone Encoder" mode */
+        M4VE_EGModeInterface    EGModeInterface;    /**< Functions used only in
+                                                    "Encoder + Grabber" mode */
+    } M4VE_SpecificInterface;
+} M4VE_Interface;
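+/* Illustrative sketch: filling the interface for the "Encoder + Grabber" mode. The
+   callback names are examples only, and the encoderMode value is shown as a
+   placeholder since the M4VE_EncoderMode enumerators are defined earlier in this
+   header. Only some of the generic callbacks are shown.
+
+   M4VE_Interface itf;
+   itf.genericInterface.m_pFctInitEncoder    = myInitEncoder;
+   itf.genericInterface.m_pFctOpenEncoder    = myOpenEncoder;
+   itf.genericInterface.m_pFctCloseEncoder   = myCloseEncoder;
+   itf.genericInterface.m_pFctCleanUpEncoder = myCleanUpEncoder;
+   itf.encoderMode = <"Encoder + Grabber" enumerator>;
+   itf.M4VE_SpecificInterface.EGModeInterface.m_pFctStartGrabber = myStartGrabber;
+   itf.M4VE_SpecificInterface.EGModeInterface.m_pFctStartEncoder = myStartEncoder;
+   itf.M4VE_SpecificInterface.EGModeInterface.m_pFctStopGrabber  = myStopGrabber;
+   itf.M4VE_SpecificInterface.EGModeInterface.m_pFctStopEncoder  = myStopEncoder;
+*/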
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+
+#endif /*__M4VE_API_H__*/
diff --git a/libvideoeditor/vss/common/inc/M4VFL_transition.h b/libvideoeditor/vss/common/inc/M4VFL_transition.h
new file mode 100755
index 0000000..294de6f
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VFL_transition.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ /**
+ ******************************************************************************
+ * @file        M4VFL_transition.h
+ * @brief       Declaration of the video effect and transition functions (luma
+ *              modification, curtains, blending) operating on YUV420 image planes
+ * @note
+ ******************************************************************************
+*/
+
+#ifndef __M4VFL_TRANSITION_H__
+#define __M4VFL_TRANSITION_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+typedef unsigned char UInt8;
+typedef unsigned long UInt32;
+
+typedef    struct S_M4ViComImagePlane
+{
+    UInt32        u_width;            /* active width, in pixels */
+    UInt32        u_height;            /* active height, in lines */
+    UInt32        u_topleft;            /* index of 1st active pixel */
+    UInt32        u_stride;            /* line stride, in bytes */
+    UInt8        *pac_data;            /* buffer address */
+}    M4ViComImagePlane;
+
+typedef struct S_M4VFL_modifLumParam
+{
+    unsigned short lum_factor;
+    unsigned short copy_chroma;
+} M4VFL_ModifLumParam;
+
+typedef struct S_M4VFL_CurtainParam
+{
+    unsigned short nb_black_lines;
+    unsigned char top_is_black;
+} M4VFL_CurtainParam;
+
+#define     M4VIFI_OK                       0
+#define     M4VIFI_ILLEGAL_FRAME_HEIGHT     8
+#define     M4VIFI_ILLEGAL_FRAME_WIDTH      9
+
+unsigned char M4VFL_modifyLumaByStep(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+                                         M4VFL_ModifLumParam *lum_param, void *user_data);
+
+unsigned char M4VFL_modifyLumaWithScale(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+                                         unsigned long lum_factor, void *user_data);
+
+unsigned char M4VFL_applyClosingCurtain(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+                                         unsigned short curtain_factor, void *user_data);
+
+unsigned char M4VFL_applyOpeningCurtain(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+                                         unsigned short curtain_factor, void *user_data);
+
+unsigned char M4VFL_applyFallingCurtain(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+                                         unsigned short curtain_factor, void *user_data);
+
+
+/**
+ ******************************************************************************
+ * unsigned char M4VFL_applyCurtain(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+ *                                   M4VFL_CurtainParam *curtain_factor, void *user_data)
+ * @brief    This function applies a black curtain onto a YUV420 image.
+ * @note    This function writes black lines either at the top of the image or at
+ *            the bottom of the image. The other lines are copied from the source image.
+ *            First the number of black lines is computed and rounded to an even integer.
+ * @param    plane_in: (IN) pointer to the 3 image planes of the source image
+ * @param    plane_out: (OUT) pointer to the 3 image planes of the destination image
+ * @param    user_data: (IN) pointer to some user_data
+ * @param    curtain_factor: (IN) structure with the parameters of the curtain (nb of black lines
+ *                                and if at the top/bottom of the image)
+ * @return    0: there is no error
+ ******************************************************************************
+*/
+unsigned char M4VFL_applyCurtain(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+                                    M4VFL_CurtainParam *curtain_factor, void *user_data);
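+/* Illustrative call sketch, assuming plane_in and plane_out already describe two
+   YUV420 images of identical size (user_data is simply passed through):
+
+   M4VFL_CurtainParam curtain;
+   unsigned char err;
+   curtain.nb_black_lines = (unsigned short)(plane_in[0].u_height / 4);
+   curtain.top_is_black   = 1;              // blacken the top quarter of the image
+   err = M4VFL_applyCurtain(plane_in, plane_out, &curtain, 0);
+*/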
+
+
+/**
+ *************************************************************************************************
+ * unsigned char M4VIFI_ImageBlendingonYUV420 (void *pUserData,
+ *                                                  M4ViComImagePlane *pPlaneIn1,
+ *                                                  M4ViComImagePlane *pPlaneIn2,
+ *                                                  M4ViComImagePlane *pPlaneOut,
+ *                                                  UInt32 Progress)
+ * @brief   Blends two YUV 4:2:0 Planar images.
+ * @note    Blends YUV420 planar images,
+ *          Map the value of progress from (0 - 1000) to (0 - 1024)
+ *          Set the range of blendingfactor,
+ *                  1. from 0 to (Progress << 1)            ;for Progress <= 512
+ *                  2. from (( Progress - 512)<< 1) to 1024 ;otherwise
+ *          Set the increment of blendingfactor for each element in the image row by the factor,
+ *                  =  (Range-1) / (image width-1)  ;for width >= range
+ *                  =  (Range) / (image width)      ;otherwise
+ *          Loop on each(= i) row of output Y plane (steps of 2)
+ *              Loop on each(= j) column of output Y plane (steps of 2)
+ *                  Get four Y samples and one U & V sample from two input YUV4:2:0 images and
+ *                  Compute four Y samples and one U & V sample for the output YUV4:2:0 image
+ *                      using the following:
+ *                  Out(i,j) = blendingfactor(i,j) * In1(i,j) + (1 - blendingfactor(i,j)) * In2(i,j)
+ *              end loop column
+ *          end loop row.
+ * @param   pUserData: (IN)  User Specific Parameter
+ * @param   pPlaneIn1: (IN)  Pointer to an array of image plane structures maintained for Y, U
+ *                            and V planes.
+ * @param   pPlaneIn2: (IN)  Pointer to an array of image plane structures maintained for Y, U
+ *                            and V planes.
+ * @param   pPlaneOut: (OUT) Pointer to an array of image plane structures maintained for Y, U
+ *                            and V planes.
+ * @param   Progress:  (IN)  Progress value (varies between 0 and 1000)
+ * @return  M4VIFI_OK: No error
+ * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
+ * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  Error in width
+ ***********************************************************************************************/
+unsigned char M4VIFI_ImageBlendingonYUV420 (void *pUserData, M4ViComImagePlane *pPlaneIn1,
+                                                M4ViComImagePlane *pPlaneIn2,
+                                                M4ViComImagePlane *pPlaneOut, UInt32 Progress);
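+/* Worked example of the progress mapping described above: Progress = 250 gives a
+   blending factor range of [0, 500) (250 << 1), while Progress = 750 gives
+   [476, 1024) ((750 - 512) << 1 = 476). Read as 10-bit fixed point (1024 = 1.0),
+   the per-sample formula then corresponds to
+   Out = (factor * In1 + (1024 - factor) * In2) >> 10; this fixed-point reading is
+   an interpretation of the note above, not an additional guarantee of the API. */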
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif // __M4VFL_TRANSITION_H__
diff --git a/libvideoeditor/vss/common/inc/M4VIFI_Clip.h b/libvideoeditor/vss/common/inc/M4VIFI_Clip.h
new file mode 100755
index 0000000..13997ac
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VIFI_Clip.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file        M4VIFI_Clip.h
+ * @brief        Global Table definition
+ * @note        This file defines the Clipping and Division table address
+ ******************************************************************************
+*/
+
+#ifndef    _M4VIFI_CLIP_H_
+#define    _M4VIFI_CLIP_H_
+
+/* Clipping matrix for RGB values */
+EXTERN CNST M4VIFI_UInt8    *M4VIFI_ClipTable_zero;
+/* Division table for (65535/x); x = 0 to 512 */
+EXTERN CNST M4VIFI_UInt16    *M4VIFI_DivTable_zero;
+
+#endif /* _M4VIFI_CLIP_H_ */
+
+/* End of file M4VIFI_Clip.h */
+
diff --git a/libvideoeditor/vss/common/inc/M4VIFI_Defines.h b/libvideoeditor/vss/common/inc/M4VIFI_Defines.h
new file mode 100755
index 0000000..d670791
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VIFI_Defines.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file        M4VIFI_Defines.h
+ * @brief        Macro Definition
+ * @note        This file defines all the macro used in the filter library
+ ******************************************************************************
+*/
+
+#ifndef _M4VIFI_DEFINES_H_
+#define _M4VIFI_DEFINES_H_
+
+/**
+ *****************************************************************************
+ *                    Macros used for color transform RGB565 to YUV
+ *****************************************************************************
+*/
+#define CST_RGB_16_SIZE 2
+#define Y16(r, g, b) CLIP(  ( ( (80593 * r)+(77855 * g)+(30728 * b)) >> 15))
+#define U16(r, g, b) CLIP(128+ ( ( -(45483 * r)-(43936 * g)+(134771 * b)) >> 15 ))
+#define V16(r, g, b) CLIP(128+ ( ( (134771 * r)-(55532 * g)-(21917 * b)) >> 15  ))
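+/* For reference, feeding the maximum RGB565 components (r = 31, g = 63, b = 31,
+   i.e. pure white) into the macros above yields Y16 = 254 (~255) and U16, V16
+   close to 128, which shows that r, g and b here are the raw 5/6/5-bit RGB565
+   components rather than 8-bit samples. */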
+
+
+/**
+ *****************************************************************************
+ *    Macros used for color transform YUV to RGB
+ *    B = 1.164(Y - 16)                  + 2.018(U - 128)
+ *  G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128)
+ *  R = 1.164(Y - 16) + 1.596(V - 128)
+ *  Above Conversion Formula is implemented for fixed point operation
+ *****************************************************************************
+*/
+
+#define CST_RGB_24_SIZE 3
+
+#ifdef __RGB_V1__
+#define DEMATRIX(Rx,Gx,Bx,Yx37,Ux,Vx) \
+    Rx = CLIP(((Yx37 + (Vx * 51) + 16) >> 5) - 223); \
+    Gx = CLIP(((Yx37 - ((Ux+(Vx<<1)) * 13) +16) >> 5) + 135); \
+    Bx = CLIP(((Yx37 + (Ux * 65) + 16) >> 5) - 277)
+#else
+#define DEMATRIX(Rx,Gx,Bx,Yx2568,Ux,Vx) \
+    Rx = CLIP(((Yx2568 +                 (Vx * 0x3343) + (M4VIFI_Int32)0xffe40800) >> 13)); \
+    Gx = CLIP(((Yx2568 - (Ux * 0x0c92) - (Vx * 0x1a1e) + (M4VIFI_Int32)0x00110180) >> 13)); \
+    Bx = CLIP(((Yx2568 + (Ux * 0x40cf)                    + (M4VIFI_Int32)0xffdd4200) >> 13));
+#endif /* __RGB_V1__ */
+
+/**
+ *****************************************************************************
+ *    Packing and Unpacking is different for little and big endian
+ *  r, g, b, Rx, Gx, Bx are 8-bit color values
+ *    a, data are 16-bit pixel values
+ *****************************************************************************
+ */
+
+/* Pack computations common for little endian and big endian modes */
+#define    PACK_BGR24(rgb_ptr,Rx,Gx,Bx) {rgb_ptr[0] = (M4VIFI_UInt8)Bx; rgb_ptr[1] =\
+                         (M4VIFI_UInt8)Gx; rgb_ptr[2] = (M4VIFI_UInt8)Rx;}
+#define    PACK_RGB24(rgb_ptr,Rx,Gx,Bx) {rgb_ptr[0] = (M4VIFI_UInt8)Rx; rgb_ptr[1] =\
+                         (M4VIFI_UInt8)Gx; rgb_ptr[2] = (M4VIFI_UInt8)Bx;}
+
+#ifdef BIG_ENDIAN
+#define    PACK_RGB565(a, Rx, Gx, Bx) (((Rx >> 3) << (11 + (a)))\
+                 | ((Gx >> 2) << (5 + (a))) | ((Bx >> 3) << (a)))
+#define    PACK_BGR565(a, Rx, Gx, Bx) (((Bx >> 3) << (11 + (a)))\
+                 | ((Gx >> 2) << (5 + (a))) | ((Rx >> 3) << (a)))
+#define GET_RGB565(r, g, b, data) {b = ((data) & 31); g =\
+                     ((data >> 5) & 63); r = ((data >> 11) & 31);}
+#define GET_BGR565(b, g, r, data) \
+    r = ((data) & 31); \
+    g = ((data >> 5) & 63); \
+    b = ((data >> 11) & 31 );
+#else /* LITTLE endian: 0x12345678 -> 78 56 34 12 */
+#define    PACK_RGB565(a, Rx, Gx, Bx) (((Bx >> 3) << (8 + (a))) \
+                  | (((Gx >> 2)&0x7) << (13 + (a))) | ((Gx >> 5) << (a)) | ((Rx >> 3) << (3 + a)))
+#define    PACK_BGR565(a, Rx, Gx, Bx) (((Rx >> 3) << (11 + (a))) \
+                  | ((Gx >> 2) << (5 + (a))) | ((Bx >> 3) << (a)))
+#define GET_RGB565(r, g, b, data) { b = (M4VIFI_UInt8)(((data) & 0x1F00) >> 8); g =\
+             (M4VIFI_UInt8)((((data) & 0x7) << 3) | (((data) & 0xE000) >> 13)); r =\
+             (M4VIFI_UInt8)(((data) & 0xF8) >> 3);}
+#define GET_BGR565(b, g, r, data) \
+    b = ((data) & 31); \
+    g = ((data >> 5) & 63); \
+    r = ((data >> 11) & 31 );
+#endif /* BIG_ENDIAN */
+
+
+#define CST_RGB_24_SIZE 3
+#define Y24(r,g,b) CLIP(( ( (19595 * r) + (38470 * g) + (9437 * b) ) >>16))
+#define U24(r,g,b) CLIP(128 + ( ( -(11059 * r) - (21709 * g) + (32768 * b)) >>16))
+#define V24(r,g,b) CLIP(128 + ( ( (32768 * r) - (27426 * g) - (5329 * b))  >>16))
+#define GET_RGB24(r,g,b,s,o) r = s[o]; g = s[o + 1]; b = s[o + 2];
+
+/**
+ ***********************************************************************************
+ *                    Macro for clipping using the clipping matrix for RGB values
+ ***********************************************************************************
+*/
+/** The CLIP macro clamps values to the range 0 to 255 */
+#define        CLIP(x)    *(M4VIFI_ClipTable_zero + (x))
+#define        CLIP_OVF        500
+#define     CLIP_LUT_SIZE     (256 + 2 * CLIP_OVF)
+/** Division table for RGB565 to HLS conversion */
+#define        DIVCLIP(x)    *(M4VIFI_DivTable_zero + (x))
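+/* The CLIP macro relies on a lookup table with CLIP_OVF guard entries on each side,
+   so intermediate values slightly below 0 or above 255 still index valid memory.
+   A table of that shape could be built as follows, e.g. in an init function
+   (illustrative sketch only; the library provides its own table through
+   M4VIFI_ClipTable_zero):
+
+   static M4VIFI_UInt8 clipLut[CLIP_LUT_SIZE];
+   M4VIFI_Int32 i, n;
+   for (i = 0; i < CLIP_LUT_SIZE; i++) {
+       n = i - CLIP_OVF;
+       clipLut[i] = (M4VIFI_UInt8)((n < 0) ? 0 : ((n > 255) ? 255 : n));
+   }
+   // with M4VIFI_ClipTable_zero = clipLut + CLIP_OVF, CLIP(x) == clipLut[x + CLIP_OVF]
+*/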
+
+/**
+ *****************************************************************************
+ *                    Endianness (default configuration is Little Endian)
+ *****************************************************************************
+*/
+#if (!defined(LITTLE_ENDIAN) && !defined(BIG_ENDIAN))
+/** Default endian setting */
+#define LITTLE_ENDIAN
+#endif
+
+/**
+ *****************************************************************************
+ *                    Other macros and define
+ *****************************************************************************
+*/
+/** YUV plane index */
+#define PLANES    3
+#define YPlane    0
+#define UPlane    1
+#define VPlane    2
+
+/** Check for value is EVEN */
+#ifndef IS_EVEN
+#define IS_EVEN(a)    (!(a & 0x01))
+#endif
+
+/* Used for fixed point implementation */
+#ifndef MAX_SHORT
+#define MAX_SHORT    0x10000
+#endif
+
+#endif /* _M4VIFI_DEFINES_H_ */
+
+/* End of file M4VIFI_Defines.h */
+
diff --git a/libvideoeditor/vss/common/inc/M4VIFI_FiltersAPI.h b/libvideoeditor/vss/common/inc/M4VIFI_FiltersAPI.h
new file mode 100755
index 0000000..390c6e4
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VIFI_FiltersAPI.h
@@ -0,0 +1,786 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file        M4VIFI_FiltersAPI.h
+ * @brief        External API and Data definitions for the video filter library
+ * @note        This file defines and declares data common to the video filter library:
+ *                    -# data types
+ *                    -# error codes
+ *                    -# external API's
+ *                    -# API level structure definition
+ ******************************************************************************
+*/
+
+#ifndef _M4VIFI_FILTERSAPI_H_
+
+#define _M4VIFI_FILTERSAPI_H_
+
+#ifdef __cplusplus
+
+extern "C" {
+
+#endif /* __cplusplus */
+
+    /**
+     ***********************************************************
+     *                    Data types definition
+     ***********************************************************
+    */
+
+    typedef unsigned char M4VIFI_UInt8;
+    typedef char M4VIFI_Int8;
+    typedef unsigned short M4VIFI_UInt16;
+    typedef unsigned long M4VIFI_UInt32;
+    typedef short M4VIFI_Int16;
+    typedef long M4VIFI_Int32;
+    typedef float M4VIFI_Float;
+    typedef double M4VIFI_Double;
+    typedef unsigned char M4VIFI_ErrorCode;
+
+/**
+ ***********************************************************
+ *                    Error codes definition
+ ***********************************************************
+*/
+#define M4VIFI_OK                        0
+#define M4VIFI_INVALID_PARAM            7
+#define M4VIFI_ILLEGAL_FRAME_HEIGHT        8
+#define M4VIFI_ILLEGAL_FRAME_WIDTH        9
+
+/**
+ ***********************************************************
+ *                    Other basic definitions
+ ***********************************************************
+*/
+#define CNST    const
+#define EXTERN    extern
+
+#ifndef NULL
+#define NULL    0
+
+#endif
+#ifndef FALSE
+#define FALSE    0
+#define TRUE    !FALSE
+
+#endif
+
+/**
+ ***********************************************************
+ *                    Structures definition
+ ***********************************************************
+*/
+
+/**
+ ******************************************************************************
+ * structure    M4VIFI_ImagePlane
+ * @brief        Texture (YUV) planes structure
+ * @note        This structure describes one image plane: its size in pixels, the
+ *                index of the first active pixel, the stride and the data pointer.
+ *                A YUV texture is described by an array of three such planes
+ *                holding the Y, U and V data in planar format.
+ ******************************************************************************
+*/
+
+    typedef struct
+        {
+        M4VIFI_UInt32 u_width;   /**< Width of luma in pixel unit */
+        M4VIFI_UInt32 u_height;  /**< Height of luma in pixel unit */
+        M4VIFI_UInt32 u_topleft; /**< Pointer to first texture active pixel */
+        M4VIFI_UInt32 u_stride;  /**< Stride value */
+        M4VIFI_UInt8 *pac_data;  /**< Pointer to the data */
+        } M4VIFI_ImagePlane;
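+/* Illustrative sketch: describing one YUV 4:2:0 planar frame with an array of three
+   planes (chroma planes are half the luma size in both directions). pYuvBuffer,
+   width and height are assumptions of this example.
+
+   M4VIFI_ImagePlane planes[3];
+   planes[0].u_width = width;      planes[0].u_height = height;
+   planes[0].u_stride = width;     planes[0].u_topleft = 0;
+   planes[0].pac_data = pYuvBuffer;
+   planes[1].u_width = width / 2;  planes[1].u_height = height / 2;
+   planes[1].u_stride = width / 2; planes[1].u_topleft = 0;
+   planes[1].pac_data = pYuvBuffer + width * height;
+   planes[2] = planes[1];
+   planes[2].pac_data = pYuvBuffer + width * height + (width * height) / 4;
+*/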
+
+/**
+ ******************************************************************************
+ * structure    M4VIFI_FramingData
+ * @brief        Data necessary to add an overlay on an image
+ * @note        This structure details the position and the data of the overlay
+ ******************************************************************************
+*/
+    typedef struct
+        {
+        M4VIFI_UInt32
+            m_xPosStep; /**< X positioning of the overlay vs the main picture.
+                                  X positioning is expressed as a percentage of the
+                                   main picture width.
+                                  m_xPosStep must be expressed in steps of 1%, between
+                                  -50% and +50%.
+                                  0% means the overlay is centered on the main picture
+                                   along the X axis. */
+        M4VIFI_UInt32
+            m_yPosStep; /**< Y positioning of the overlay vs the main picture.
+                                  Y positioning is expressed as a percentage of the
+                                   main picture height.
+                                  m_yPosStep must be expressed in steps of 1%, between
+                                  -50% and +50%.
+                                  0% means the overlay is centered on the main picture
+                                   along the Y axis. */
+
+        M4VIFI_ImagePlane
+            *
+                m_imagePlane; /**< Pointer to the framing image with alpha channel */
+        } M4VIFI_FramingData;
+
+/**
+ ******************************************************************************
+ * structure    M4VIFI_HLSoffset
+ * @brief        HLS offset structure
+ * @note        This structure holds the hue, saturation and lightness values
+ *                used for quality enhancement. The values must lie in the ranges
+ *                hue = -360 to 360, sat = 0 to 100 and light = 0 to 100.
+ ******************************************************************************
+*/
+    typedef struct
+        {
+        M4VIFI_Int16 hue;   /**< Hue offset */
+        M4VIFI_Int16 sat;   /**< Saturation offset */
+        M4VIFI_Int16 light; /**< Light offset */
+        } M4VIFI_HLSoffset;
+
+/**
+ ******************************************************************************
+ * structure    M4VIFI_Tranformation
+ * @brief        Image Tranformation Structure
+ * @note        Image Tranformation Request
+ *                rotation : 1 -> +90deg Rotation
+ *                          -1 -> -90deg Rotation
+ *                           0 ->  No Rotation
+ ******************************************************************************
+*/
+    typedef struct
+        {
+        M4VIFI_Int32 i32_rotation; /**< Rotation Flag        */
+        } M4VIFI_Tranformation;
+
+/**
+ ******************************************************************************
+ * structure    M4VIFI_pContext
+ * @brief        Filter context structure
+ * @note        Wraps the M4VIFI_HLSoffset structure used by the HLS filter functions
+ ******************************************************************************
+*/
+    typedef struct
+        {
+        M4VIFI_HLSoffset hlsOffset; /**< HLS offset structure */
+        } M4VIFI_pContext;
+
+    /*
+     *****************************************************
+     *                    External API functions
+     *****************************************************
+    */
+
+    /**< Effect filters */
+    M4VIFI_UInt8 M4VIFI_SepiaYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_GrayscaleYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_ContrastYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_NegativeYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_FlipYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_MirrorYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_Rotate180YUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_Rotate90RightYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_Rotate90LeftYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_ColorRYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_ColorGYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_ColorBYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_FramingRGB565toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_FramingYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_SetHueInYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_ColdYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    M4VIFI_UInt8 M4VIFI_WarmYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+/*                ADS Compiler                */
+
+/*        Generic ARM assembly functions        */
+#if defined ADS_ARM
+
+    /** Apply grayscale effect RGB565toRGB565 */
+
+    M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear RGB888toRGB888 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear RGB565toRGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear YUV420toYUV420 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** RGB565 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_RGB565toYUV420AdsArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** BGR565 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_BGR565toYUV420AdsArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** YUV422 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_UYVYtoYUV420AdsArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** YUV420 to RGB565 */
+    M4VIFI_UInt8 M4VIFI_YUV420toRGB565AdsArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** YUV420 to BGR565 */
+    M4VIFI_UInt8 M4VIFI_YUV420toBGR565AdsArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** Resize Bilinear YUV420toRGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565AdsArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Resize Bilinear YUV420toBGR565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565AdsArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightAdsArm(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftAdsArm(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightAdsArm(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftAdsArm(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Modify HLS in RGB565 */
+    M4VIFI_UInt8 M4VIFI_SetHLSinRGB565AdsArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Modify HLS in BGR565 */
+    M4VIFI_UInt8 M4VIFI_SetHLSinBGR565AdsArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+#define M4VIFI_RGB565toYUV420                                M4VIFI_RGB565toYUV420AdsArm
+#define M4VIFI_BGR565toYUV420                                M4VIFI_BGR565toYUV420AdsArm
+#define M4VIFI_UYVYtoYUV420                                    M4VIFI_UYVYtoYUV420AdsArm
+#define M4VIFI_YUV420toRGB565                                M4VIFI_YUV420toRGB565AdsArm
+#define M4VIFI_YUV420toBGR565                                M4VIFI_YUV420toBGR565AdsArm
+#define M4VIFI_ResizeBilinearYUV420toRGB565             \
+                           M4VIFI_ResizeBilinearYUV420toRGB565AdsArm
+
+#define M4VIFI_ResizeBilinearYUV420toBGR565             \
+                           M4VIFI_ResizeBilinearYUV420toBGR565AdsArm
+
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
+                           M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightAdsArm
+
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft  \
+                           M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftAdsArm
+
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight \
+                           M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightAdsArm
+
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft  \
+                           M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftAdsArm
+
+#define M4VIFI_SetHLSinRGB565                                M4VIFI_SetHLSinRGB565AdsArm
+#define M4VIFI_SetHLSinBGR565                                M4VIFI_SetHLSinBGR565AdsArm
+
+/*        ARM9E assembly functions        */
+#elif defined ADS_ARM9E
+
+    /** Apply grayscale effect RGB565toRGB565 */
+
+    M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear RGB888toRGB888 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear RGB565toRGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    /** Resize Bilinear YUV420toYUV420 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** RGB565 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_RGB565toYUV420AdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** BGR565 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_BGR565toYUV420AdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** YUV422 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_UYVYtoYUV420AdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** YUV420 to RGB565 */
+    M4VIFI_UInt8 M4VIFI_YUV420toRGB565AdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** YUV420 to BGR565 */
+    M4VIFI_UInt8 M4VIFI_YUV420toBGR565AdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** Resize Bilinear YUV420toRGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565AdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Resize Bilinear YUV420toBGR565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565AdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightAdsArm9E(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftAdsArm9E(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightAdsArm9E(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftAdsArm9E(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Modify HLS in RGB565 */
+    M4VIFI_UInt8 M4VIFI_SetHLSinRGB565AdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Modify HLS in BGR565 */
+    M4VIFI_UInt8 M4VIFI_SetHLSinBGR565AdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** Resize YUV420toYUV420 from QCIF to QVGA*/
+    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoYUV420QVGAAdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /**Resize YUV420toRGB565 from QCIF to QVGA*/
+    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGAAdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    /**Resize YUV420toRGB565 from QCIF to QVGA with rotation +90*/
+    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RRAdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    /**Resize YUV420toRGB565 from QCIF to QVGA with rotation -90*/
+    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RLAdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+/** Resizes YUV420 Planar Image and stores in YUV420 Linear format with/without +or-90 rotation*/
+    M4VIFI_UInt8 M4VIFI_YUV420PlanartoYUV420LinearAdsArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+#define M4VIFI_RGB565toYUV420                                M4VIFI_RGB565toYUV420AdsArm9E
+#define M4VIFI_BGR565toYUV420                                M4VIFI_BGR565toYUV420AdsArm9E
+#define M4VIFI_UYVYtoYUV420                                    M4VIFI_UYVYtoYUV420AdsArm9E
+#define M4VIFI_YUV420toRGB565                                M4VIFI_YUV420toRGB565AdsArm9E
+#define M4VIFI_YUV420toBGR565                                M4VIFI_YUV420toBGR565AdsArm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565 \
+                           M4VIFI_ResizeBilinearYUV420toRGB565AdsArm9E
+#define M4VIFI_ResizeBilinearYUV420toBGR565 \
+                           M4VIFI_ResizeBilinearYUV420toBGR565AdsArm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
+                           M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightAdsArm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft \
+                           M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftAdsArm9E
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight \
+                           M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightAdsArm9E
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft \
+                           M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftAdsArm9E
+#define M4VIFI_SetHLSinRGB565                            M4VIFI_SetHLSinRGB565AdsArm9E
+#define M4VIFI_SetHLSinBGR565                            M4VIFI_SetHLSinBGR565AdsArm9E
+#define M4VIFI_YUV420QCIFtoYUV420QVGA                    M4VIFI_YUV420QCIFtoYUV420QVGAAdsArm9E
+#define M4VIFI_YUV420QCIFtoRGB565QVGA                    M4VIFI_YUV420QCIFtoRGB565QVGAAdsArm9E
+#define M4VIFI_YUV420QCIFtoRGB565QVGA_RR                 M4VIFI_YUV420QCIFtoRGB565QVGA_RRAdsArm9E
+#define M4VIFI_YUV420QCIFtoRGB565QVGA_RL                 M4VIFI_YUV420QCIFtoRGB565QVGA_RLAdsArm9E
+#define M4VIFI_YUV420PlanartoYUV420Linear                M4VIFI_YUV420PlanartoYUV420LinearAdsArm9E
+/*                GCC Compiler                */
+/*        Generic ARM assembly functions        */
+
+#elif defined GCC_ARM
+
+    /** Apply grayscale effect RGB565toRGB565 */
+
+    M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear RGB888toRGB888 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear RGB565toRGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear YUV420toYUV420 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** RGB565 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_RGB565toYUV420GccArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** BGR565 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_BGR565toYUV420GccArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** YUV420 to RGB565 */
+    M4VIFI_UInt8 M4VIFI_YUV420toRGB565GccArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** YUV420 to BGR565 */
+    M4VIFI_UInt8 M4VIFI_YUV420toBGR565GccArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** Resize Bilinear YUV420toRGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565GccArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** Resize Bilinear YUV420toBGR565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565GccArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightGccArm(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftGccArm(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightGccArm(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftGccArm(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** Modify HLS in RGB565 */
+    M4VIFI_UInt8 M4VIFI_SetHLSinRGB565GccArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** Modify HLS in BGR565 */
+    M4VIFI_UInt8 M4VIFI_SetHLSinBGR565GccArm(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+#define M4VIFI_RGB565toYUV420                                M4VIFI_RGB565toYUV420GccArm
+#define M4VIFI_BGR565toYUV420                                M4VIFI_BGR565toYUV420GccArm
+#define M4VIFI_YUV420toRGB565                                M4VIFI_YUV420toRGB565GccArm
+#define M4VIFI_YUV420toBGR565                                M4VIFI_YUV420toBGR565GccArm
+#define M4VIFI_ResizeBilinearYUV420toRGB565 \
+                               M4VIFI_ResizeBilinearYUV420toRGB565GccArm
+#define M4VIFI_ResizeBilinearYUV420toBGR565 \
+                               M4VIFI_ResizeBilinearYUV420toBGR565GccArm
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
+                               M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightGccArm
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft \
+                               M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftGccArm
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight \
+                               M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightGccArm
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft \
+                               M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftGccArm
+#define M4VIFI_SetHLSinRGB565                                M4VIFI_SetHLSinRGB565GccArm
+#define M4VIFI_SetHLSinBGR565                                M4VIFI_SetHLSinBGR565GccArm
+
+/*        ARM9E assembly functions        */
+#elif defined GCC_ARM9E
+
+    /** Apply grayscale effect RGB565toRGB565 */
+
+    M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear RGB888toRGB888 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear RGB565toRGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear YUV420toYUV420 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** RGB565 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_RGB565toYUV420GccArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** BGR565 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_BGR565toYUV420GccArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** YUV420 to RGB565 */
+    M4VIFI_UInt8 M4VIFI_YUV420toRGB565GccArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** YUV420 to BGR565 */
+    M4VIFI_UInt8 M4VIFI_YUV420toBGR565GccArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** Resize Bilinear YUV420toRGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565GccArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Resize Bilinear YUV420toBGR565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565GccArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightGccArm9E(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftGccArm9E(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightGccArm9E(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftGccArm9E(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Modify HLS in RGB565 */
+    M4VIFI_UInt8 M4VIFI_SetHLSinRGB565GccArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Modify HLS in BGR565 */
+    M4VIFI_UInt8 M4VIFI_SetHLSinBGR565GccArm9E(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+#define M4VIFI_RGB565toYUV420                                M4VIFI_RGB565toYUV420GccArm9E
+#define M4VIFI_BGR565toYUV420                                M4VIFI_BGR565toYUV420GccArm9E
+#define M4VIFI_YUV420toRGB565                                M4VIFI_YUV420toRGB565GccArm9E
+#define M4VIFI_YUV420toBGR565                                M4VIFI_YUV420toBGR565GccArm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565 \
+                                   M4VIFI_ResizeBilinearYUV420toRGB565GccArm9E
+#define M4VIFI_ResizeBilinearYUV420toBGR565 \
+                                   M4VIFI_ResizeBilinearYUV420toBGR565GccArm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
+                                   M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightGccArm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft \
+                                   M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftGccArm9E
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight \
+                                   M4VIFI_ResizeBilinearYUV420toBGR565RotatedRightGccArm9E
+#define M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft \
+                                   M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeftGccArm9E
+#define M4VIFI_SetHLSinBGR565                                M4VIFI_SetHLSinBGR565GccArm9E
+#define M4VIFI_SetHLSinRGB565                                M4VIFI_SetHLSinRGB565GccArm9E
+
+/* TI CCS assembly files */
+#elif defined TI411_ARM9E
+
+    /** Apply grayscale effect RGB565toRGB565 */
+
+    M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear RGB888toRGB888 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear RGB565toRGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear YUV420toYUV420 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** YUV420 (Planar) to RGB565 */
+    M4VIFI_UInt8 M4VIFI_YUV420toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** YUV420 (Planar) to Resized RGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** YUV420 (Planar) to Resized RGB888 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB888(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** YUV420(Planar) to Resized and Rotated (-90) RGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** YUV420(Planar) to Resized and Rotated (+90) RGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** YUV420(Planar) to Resized YUV420(Planar) */
+    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoYUV420QVGA(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** Resize YUV420(Planar) of QCIF to RGB565 of QVGA resolution */
+    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+/** Resize YUV420(Planar) of QCIF to RGB565 of QVGA resolution with rotation(-90) */
+    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RL(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+/** Resize YUV420(Planar) of QCIF to RGB565 of QVGA resolution with rotation(+90) */
+    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RR(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+#define M4VIFI_YUV420toRGB565                             M4VIFI_YUV420toRGB565Ti411Arm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565 \
+                                M4VIFI_ResizeBilinearYUV420toRGB565Ti411Arm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft \
+                               M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeftTi411Arm9E
+#define M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight \
+                               M4VIFI_ResizeBilinearYUV420toRGB565RotatedRightTi411Arm9E
+
+#define M4VIFI_YUV420QCIFtoYUV420QVGA       M4VIFI_YUV420QCIFtoYUV420QVGATi411Arm9E
+#define M4VIFI_YUV420QCIFtoRGB565QVGA       M4VIFI_YUV420QCIFtoRGB565QVGATi411Arm9E
+#define M4VIFI_YUV420QCIFtoRGB565QVGA_RL  M4VIFI_YUV420QCIFtoRGB565QVGA_RLTi411Arm9E
+#define M4VIFI_YUV420QCIFtoRGB565QVGA_RR  M4VIFI_YUV420QCIFtoRGB565QVGA_RRTi411Arm9E
+
+/*        ANSI C Functions        */
+#else
+
+    /** Apply grayscale effect RGB565toRGB565 */
+
+    M4VIFI_UInt8 M4VIFI_GrayscaleRGB565toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear RGB888toRGB888 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear RGB565toRGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB565toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resize Bilinear YUV420toYUV420 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** RGB565 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_RGB565toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** BGR565 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_BGR565toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** BGR888 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_BGR888toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane PlaneOut[3]);
+    /** RGB888 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_RGB888toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane PlaneOut[3]);
+
+    /** YUV422 to YUV420 */
+    M4VIFI_UInt8 M4VIFI_UYVYtoYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+
+    /** YUV420 to RGB565 */
+    M4VIFI_UInt8 M4VIFI_YUV420toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** YUV420 to BGR565 */
+    M4VIFI_UInt8 M4VIFI_YUV420toBGR565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    /** YUV420 to BGR565 with rotation -90 */
+    M4VIFI_UInt8 M4VIFI_YUV420toBGR565RotatedLeft(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    /** YUV420 to BGR565 with rotation +90 */
+    M4VIFI_UInt8 M4VIFI_YUV420toBGR565RotatedRight(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    /** YUV420 to BGR24 */
+    M4VIFI_UInt8 M4VIFI_YUV420toBGR24(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    /** YUV420 to RGB24 */
+    M4VIFI_UInt8 M4VIFI_YUV420toRGB24(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    /** Resize Bilinear YUV420toYUV420 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toYUV420(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    /** Resize Bilinear YUV420toRGB565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB888(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Resize Bilinear YUV420toBGR565 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Resize Bilinear YUV420toRGB565 with rotation +90 or -90 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedRight(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565RotatedLeft(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Resize Bilinear YUV420toBGR565 with rotation +90 or -90 */
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedRight(
+        void *pUserData,
+            M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565RotatedLeft(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Modify HLS in RGB565 */
+    M4VIFI_UInt8 M4VIFI_SetHLSinRGB565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /** Modify HLS in BGR565 */
+    M4VIFI_UInt8 M4VIFI_SetHLSinBGR565(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    /**Resize YUV420toYUV420 from QCIF to QVGA*/
+    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoYUV420QVGA(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    /**Resize YUV420toRGB565 from QCIF to QVGA*/
+    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    /**Resize YUV420toRGB565 from QCIF to QVGA with rotation +90*/
+    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RR(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    /**Resize YUV420toRGB565 from QCIF to QVGA with rotation -90*/
+    M4VIFI_UInt8 M4VIFI_YUV420QCIFtoRGB565QVGA_RL(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+    /** Resizes YUV420 planar image and stores it in YUV420 linear format, with/without +90 or -90 rotation */
+    M4VIFI_UInt8 M4VIFI_YUV420PlanartoYUV420Linear(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+
+    /** Resizes YUV420 planar image and stores it in YUV422 interleaved format,
+        with/without +90 or -90 rotation */
+    M4VIFI_UInt8 M4VIFI_YUV420PlanartoYUV422Interleaved(void *pUserData,
+        M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut);
+#endif
+
+    /** definition of the converter function types */
+
+    typedef M4VIFI_UInt8 M4VIFI_PlanConverterFunctionType(void
+        *pContext, M4VIFI_ImagePlane* in, M4VIFI_ImagePlane* out);
+
+    /** definition of the preprocessing function types */
+    typedef M4VIFI_UInt8 M4VIFI_PreprocessFunctionType(void
+        *pContext, M4VIFI_ImagePlane* in, M4VIFI_ImagePlane* out);
+
+    M4VIFI_UInt8 M4VIFI_YUV420toYUV420(void *user_data,
+        M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    M4VIFI_UInt8 M4VIFI_YUV420PlanarToYUV420Semiplanar(void *user_data,
+        M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut);
+    M4VIFI_UInt8 M4VIFI_SemiplanarYUV420toYUV420(void *user_data,
+        M4VIFI_ImagePlane *PlaneIn, M4VIFI_ImagePlane *PlaneOut);
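+
+    /* Illustrative sketch (not part of the API): any of the colour conversion
+       functions above can be stored in an M4VIFI_PlanConverterFunctionType
+       pointer and invoked generically. The plane arrays and the M4OSA_NULL
+       user data below are placeholders supplied by the caller.
+
+           M4VIFI_ImagePlane planesIn[3], planesOut[3];
+           M4VIFI_PlanConverterFunctionType* pConvert = M4VIFI_YUV420toYUV420;
+           M4VIFI_UInt8 ret = pConvert(M4OSA_NULL, planesIn, planesOut);
+    */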
+#ifdef __cplusplus
+
+}
+
+#endif /* __cplusplus */
+
+#endif /* _M4VIFI_FILTERSAPI_H_ */
+
+/* End of file M4VIFI_FiltersAPI.h */
diff --git a/libvideoeditor/vss/common/inc/M4VPP_API.h b/libvideoeditor/vss/common/inc/M4VPP_API.h
new file mode 100755
index 0000000..a231d34
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4VPP_API.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VPP_API.h
+ * @brief    Video preprocessing API public functions prototypes.
+ * @note
+ ******************************************************************************
+*/
+
+#ifndef M4VPP_API_H
+#define M4VPP_API_H
+
+#include "M4OSA_Types.h"            /**< Include for common OSAL types */
+#include "M4OSA_Error.h"            /**< Include for common OSAL errors */
+
+/**
+ *    Include Video filters interface definition (for the M4VIFI_ImagePlane type) */
+#include "M4VIFI_FiltersAPI.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+/**
+ ******************************************************************************
+ * Public type of the Video Preprocessing execution context
+ ******************************************************************************
+*/
+typedef M4OSA_Void*    M4VPP_Context;
+
+typedef enum
+{
+    M4VPP_kIYUV420=0,    /**< YUV 4:2:0 planar (standard input for mpeg-4 video) */
+    M4VPP_kIYUV422,        /**< YUV422 planar */
+    M4VPP_kIYUYV,        /**< YUV422 interleaved, luma first */
+    M4VPP_kIUYVY,        /**< YUV422 interleaved, chroma first */
+    M4VPP_kIJPEG,        /**< JPEG compressed frames */
+    M4VPP_kIRGB444,        /**< RGB 12 bits 4:4:4 */
+    M4VPP_kIRGB555,        /**< RGB 15 bits 5:5:5 */
+    M4VPP_kIRGB565,        /**< RGB 16 bits 5:6:5 */
+    M4VPP_kIRGB24,        /**< RGB 24 bits 8:8:8 */
+    M4VPP_kIRGB32,        /**< RGB 32 bits  */
+    M4VPP_kIBGR444,        /**< BGR 12 bits 4:4:4 */
+    M4VPP_kIBGR555,        /**< BGR 15 bits 5:5:5 */
+    M4VPP_kIBGR565,        /**< BGR 16 bits 5:6:5 */
+    M4VPP_kIBGR24,        /**< BGR 24 bits 8:8:8 */
+    M4VPP_kIBGR32        /**< BGR 32 bits  */
+} M4VPP_InputVideoFormat;
+
+
+/**
+ ******************************************************************************
+ * @brief    Prototype of the main video preprocessing function
+ * @note    Preprocess one frame
+ * @param    pContext:    (IN) Execution context of the VPP.
+ * @param    pPlaneIn:    (INOUT)    Input Image
+ * @param    pPlaneOut:    (INOUT)    Output Image
+ ******************************************************************************
+*/
+typedef M4OSA_ERR (M4VPP_apply_fct) (M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+                                     M4VIFI_ImagePlane* pPlaneOut);
+
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VPP_initVideoPreprocessing(M4VPP_Context* pContext)
+ * @brief    This function allocates a new execution context for the Video Preprocessing component.
+ * @note
+ * @param    pContext:    (OUT) Execution context allocated by the function.
+ * @return    M4NO_ERROR: there is no error.
+ * @return    M4ERR_ALLOC: there is no more available memory.
+ * @return    M4ERR_PARAMETER: pContext is NULL (debug only).
+ ******************************************************************************
+*/
+M4OSA_ERR M4VPP_initVideoPreprocessing(M4VPP_Context* pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VPP_applyVideoPreprocessing(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ *                                           M4VIFI_ImagePlane* pPlaneOut)
+ * @brief    Preprocess one frame.
+ * @note
+ * @param    pContext:    (IN) Execution context.
+ * @param    pPlaneIn:    (INOUT)    Input Image
+ * @param    pPlaneOut:    (INOUT)    Output Image
+ * @return    M4NO_ERROR: there is no error.
+ * @return    M4ERR_PARAMETER: pContext or pPlaneIn or pPlaneOut is NULL (debug only).
+ * @return    M4ERR_STATE: Video Preprocessing is not in an appropriate state for this function
+ *                           to be called
+ ******************************************************************************
+*/
+M4OSA_ERR M4VPP_applyVideoPreprocessing(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+                                         M4VIFI_ImagePlane* pPlaneOut);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VPP_cleanUpVideoPreprocessing(M4VPP_Context pContext)
+ * @brief    This method frees the execution context for the Video Preprocessing component.
+ *            Any further usage of the context will lead to unpredictable result.
+ * @note
+ * @param    pContext:    (IN) Execution context.
+ * @return    M4NO_ERROR: there is no error.
+ * @return    M4ERR_PARAMETER: pContext is NULL (debug only).
+ ******************************************************************************
+*/
+M4OSA_ERR M4VPP_cleanUpVideoPreprocessing(M4VPP_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VPP_setVideoPreprocessingMode(M4VPP_Context pContext, M4VPP_InputVideoFormat format)
+ * @brief    This method sets the video preprocessing mode, i.e. the format of the input planes
+ *           that will be passed to M4VPP_applyVideoPreprocessing.
+ * @param    pContext:    (IN) Execution context.
+ * @param    format  :    (IN) Format of input plane (rgb, yuv, ...)
+ * @return    M4NO_ERROR: there is no error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VPP_setVideoPreprocessingMode(M4VPP_Context pContext, M4VPP_InputVideoFormat format);
+
+/**
+ ******************************************************************************
+ * @brief    Definition of the errors specific to this module.
+ ******************************************************************************
+*/
+
+/**< Input and output planes have incompatible properties */
+#define M4VPP_ERR_IMCOMPATIBLE_IN_AND_OUT_PLANES    M4OSA_ERR_CREATE( M4_ERR,\
+     M4PREPROCESS_VIDEO, 0x000001);
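+
+/* Typical calling sequence (illustrative sketch only, error handling shortened;
+   the plane buffers are assumed to be allocated and described by the caller):
+
+       M4VPP_Context ctx = M4OSA_NULL;
+       M4VIFI_ImagePlane in[3], out[3];
+       M4OSA_ERR err;
+
+       err = M4VPP_initVideoPreprocessing(&ctx);
+       if (M4NO_ERROR == err)
+           err = M4VPP_setVideoPreprocessingMode(ctx, M4VPP_kIYUV420);
+       if (M4NO_ERROR == err)
+           err = M4VPP_applyVideoPreprocessing(ctx, in, out);
+       M4VPP_cleanUpVideoPreprocessing(ctx);
+*/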
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* M4VPP_API_H */
+
diff --git a/libvideoeditor/vss/common/inc/M4WRITER_common.h b/libvideoeditor/vss/common/inc/M4WRITER_common.h
new file mode 100755
index 0000000..779968b
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4WRITER_common.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ /**
+ ******************************************************************************
+ * @file    M4WRITER_common.h
+ * @brief    VES writers shell interface.
+ * @note    This file defines the types internally used by the VES to abstract writers
+ ******************************************************************************
+*/
+#ifndef __M4WRITER_COMMON_H__
+#define __M4WRITER_COMMON_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "M4OSA_Types.h"
+#include "M4OSA_FileWriter.h"   /* for M4OSA_FileWriterPointer */
+#include "M4OSA_FileReader.h"   /* for M4OSA_FileWriterPointer */
+#include "M4OSA_OptionID.h"     /* for M4OSA_OPTION_ID_CREATE() */
+#include "M4OSA_CoreID.h"       /* for M4WRITER_COMMON */
+
+#include "M4SYS_Stream.h"       /* for M4SYS_StreamID */
+#include "M4SYS_AccessUnit.h"   /* for M4SYS_AccessUnit */
+
+/**
+ ******************************************************************************
+ * MP4W Errors & Warnings definition
+ ******************************************************************************
+*/
+#define M4WAR_WRITER_STOP_REQ        M4OSA_ERR_CREATE(M4_WAR, M4WRITER_COMMON ,0x000001)
+
+/**
+ ******************************************************************************
+ * enum        M4WRITER_OutputFileType
+ * @brief    This enum defines the available output file formats.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4WRITER_kUnknown=-1,
+    M4WRITER_k3GPP=0,            /**< 3GPP compliant file */
+    M4WRITER_kAVI=1,            /**< AVI file */
+    M4WRITER_kAMR=2,            /**< AMR file */
+    M4WRITER_kNETWORK3GPP=3,    /**< 3GPP via TCP */
+    M4WRITER_kPCM=4,            /**< PCM file */
+    M4WRITER_kJPEG=5,            /**< JPEG EXIF writer */
+    M4WRITER_kMP3=6,            /**< MP3 writer */
+
+    M4WRITER_kType_NB  /* number of writers, keep it as last enum entry */
+
+} M4WRITER_OutputFileType;
+
+/**
+ ******************************************************************************
+ * enum    M4WRITER_OptionID
+ * @brief    This enum defines all available options. All returned values are of
+ *           M4OSA_UInt32 type.
+ ******************************************************************************
+*/
+typedef enum {
+    M4WRITER_kMaxAUSize        = M4OSA_OPTION_ID_CREATE (M4_READ|M4_WRITE, M4WRITER_COMMON, 0x01),
+    M4WRITER_kMaxChunckSize    = M4OSA_OPTION_ID_CREATE (M4_READ|M4_WRITE, M4WRITER_COMMON, 0x02),
+    M4WRITER_kFileSize          = M4OSA_OPTION_ID_CREATE (M4_READ            , \
+        M4WRITER_COMMON, 0x03),  /**< File size obtained if writing ended at the time the
+                                      option is read */
+    M4WRITER_kFileSizeAudioEstimated= M4OSA_OPTION_ID_CREATE (M4_READ    ,\
+         M4WRITER_COMMON, 0x04),    /**< Same as kFileSize, with the audio part estimated */
+    M4WRITER_kEmbeddedString  = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
+         M4WRITER_COMMON, 0x05),    /**< String embedded at the end of the file (SW - VES) */
+    M4WRITER_kEmbeddedVersion = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
+         M4WRITER_COMMON, 0x06),    /**< Version embedded at the end of the file */
+    M4WRITER_kIntegrationTag  = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
+         M4WRITER_COMMON, 0x07),    /**< String embedded at the end of the file (char[60]
+                                         for integration purpose) */
+    M4WRITER_kMaxFileSize      = M4OSA_OPTION_ID_CREATE (M4_WRITE        , \
+        M4WRITER_COMMON, 0x08),    /**< Maximum file size limitation */
+    M4WRITER_kMaxFileDuration = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
+         M4WRITER_COMMON, 0x09),    /**< Maximum file duration limitation */
+    M4WRITER_kSetFtypBox      = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
+         M4WRITER_COMMON, 0x0A),    /**< Set 'ftyp' atom */
+    M4WRITER_kMetaData          = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
+         M4WRITER_COMMON, 0x0B),    /**< Additional information to set in the file */
+    M4WRITER_kDSI          = M4OSA_OPTION_ID_CREATE (M4_WRITE        , \
+        M4WRITER_COMMON, 0x0C),    /**< To set the DSI of the file (Decoder Specific Info) */
+    M4WRITER_kJpegReserveFPData     = M4OSA_OPTION_ID_CREATE (M4_WRITE        ,\
+         M4WRITER_COMMON, 0x0D),    /**< Reserve some space in the file for JPEG fast
+                                        processing data */
+    M4WRITER_kJpegSetFPData     = M4OSA_OPTION_ID_CREATE (M4_WRITE        , \
+        M4WRITER_COMMON, 0x0E),    /**< Write Fast Processing Data in the file*/
+    /* + CRLV6775 -H.264 trimming */
+    M4WRITER_kMUL_PPS_SPS       = M4OSA_OPTION_ID_CREATE (M4_WRITE        , M4WRITER_COMMON, 0x0F)
+    /* - CRLV6775 -H.264 trimming */
+} M4WRITER_OptionID;
+
+
+/**
+ ******************************************************************************
+ * struct    M4WRITER_Header
+ * @brief    This structure defines the buffer where a header is stored.
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_MemAddr8    pBuf;        /**< Buffer for the header */
+    M4OSA_UInt32    Size;        /**< Size of the data */
+} M4WRITER_Header;
+
+
+/**
+ ******************************************************************************
+ * struct    M4WRITER_StreamVideoInfos
+ * @brief    This structure defines the specific video stream infos, extension to
+ *           M4SYS_StreamDescription.
+ ******************************************************************************
+*/
+typedef struct {
+    M4OSA_UInt32    height;                /**< Frame height */
+    M4OSA_UInt32    width;                /**< Frame Width */
+    M4OSA_Double    fps;                /**< Targeted frame rate of the video */
+    M4WRITER_Header    Header;                /**< Sequence header of the video stream,
+                                        member set to NULL if no header present */
+} M4WRITER_StreamVideoInfos;
+
+
+/**
+ ******************************************************************************
+ * struct    M4WRITER_StreamAudioInfos
+ * @brief    This structure defines the specific audio stream infos, extension to
+             M4SYS_StreamDescription.
+ ******************************************************************************
+*/
+typedef struct {
+    M4OSA_UInt32    nbSamplesPerSec;    /**< Number of Samples per second */
+    M4OSA_UInt16    nbBitsPerSample;    /**< Number of Bits in 1 sample */
+    M4OSA_UInt16    nbChannels;            /**< Number of channels */
+    M4WRITER_Header    Header;                /**< Decoder Specific Info of the audio stream,
+                                             member set to NULL if no DSI present */
+} M4WRITER_StreamAudioInfos;
+
+
+/**
+ ******************************************************************************
+ * enum        M4WRITER_Orientation
+ * @brief    This enum defines the possible orientation of a frame as described
+ *            in the EXIF standard.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4WRITER_OrientationUnknown = 0,
+    M4WRITER_OrientationTopLeft,
+    M4WRITER_OrientationTopRight,
+    M4WRITER_OrientationBottomRight,
+    M4WRITER_OrientationBottomLeft,
+    M4WRITER_OrientationLeftTop,
+    M4WRITER_OrientationRightTop,
+    M4WRITER_OrientationRightBottom,
+    M4WRITER_OrientationLeftBottom
+}M4WRITER_Orientation ;
+
+/**
+ ******************************************************************************
+ * struct    M4WRITER_MetaData
+ * @brief    This structure defines all the meta data to store in the encoded file.
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_Char*                Description ;
+    M4OSA_Char*                PhoneManufacturer ;
+    M4OSA_Char*                PhoneModel ;
+    M4OSA_Char*                Artist ;
+    M4OSA_Char*                Copyright ;
+    M4OSA_Char*                Software ;
+    M4OSA_Char*                CreationDate;
+    M4WRITER_Orientation    Orientation ;
+
+    M4OSA_UInt32            Width ;
+    M4OSA_UInt32            Height ;
+
+    M4OSA_UInt32            ThumbnailWidth ;
+    M4OSA_UInt32            ThumbnailHeight ;
+    M4OSA_Bool                ThumbnailPresence ;
+}M4WRITER_MetaData;
+
+
+typedef void* M4WRITER_Context;
+
+typedef M4OSA_ERR (M4WRITER_openWrite)        (M4WRITER_Context* hContext,\
+                                             void* outputFileDescriptor,\
+                                             M4OSA_FileWriterPointer* pFileWriterPointer,\
+                                             void* tempFileDescriptor, \
+                                             M4OSA_FileReadPointer* pFileReaderPointer);
+typedef M4OSA_ERR (M4WRITER_addStream)        (M4WRITER_Context  pContext,\
+                                            M4SYS_StreamDescription*streamDescription);
+typedef M4OSA_ERR (M4WRITER_startWriting)    (M4WRITER_Context  pContext);
+typedef M4OSA_ERR (M4WRITER_closeWrite)        (M4WRITER_Context  pContext);
+typedef M4OSA_ERR (M4WRITER_setOption)        (M4WRITER_Context  pContext, \
+                                            M4OSA_UInt32 optionID, \
+                                            M4OSA_DataOption optionValue);
+typedef M4OSA_ERR (M4WRITER_getOption)        (M4WRITER_Context  pContext, \
+                                            M4OSA_UInt32 optionID, \
+                                            M4OSA_DataOption optionValue);
+
+
+/**
+ ******************************************************************************
+ * struct    M4WRITER_GlobalInterface
+ * @brief    Defines all the functions required for a writer shell.
+ ******************************************************************************
+*/
+typedef struct _M4WRITER_GlobalInterface
+{
+    M4WRITER_openWrite*             pFctOpen;
+    M4WRITER_addStream*                pFctAddStream;
+    M4WRITER_startWriting*          pFctStartWriting;
+    M4WRITER_closeWrite*            pFctCloseWrite;
+    M4WRITER_setOption*                pFctSetOption;
+    M4WRITER_getOption*                pFctGetOption;
+} M4WRITER_GlobalInterface;
+
+typedef M4OSA_ERR  M4WRITER_startAU(M4WRITER_Context pContext, M4SYS_StreamID streamID,\
+                                     M4SYS_AccessUnit* pAU);
+typedef M4OSA_ERR  M4WRITER_processAU(M4WRITER_Context pContext, M4SYS_StreamID streamID,\
+                                     M4SYS_AccessUnit* pAU);
+
+/**
+ ******************************************************************************
+ * struct    M4WRITER_DataInterface
+ * @brief    Defines all the functions required to write data with a writer shell.
+ ******************************************************************************
+*/
+typedef struct _M4WRITER_DataInterface
+{
+    M4WRITER_startAU*    pStartAU;
+    M4WRITER_processAU* pProcessAU;
+
+    M4WRITER_Context    pWriterContext;
+
+} M4WRITER_DataInterface;
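+
+/* Illustrative sketch of how a writer shell is driven (not part of this header;
+   pGlobalFcts and pDataFcts are assumed to be obtained from a concrete writer
+   such as the 3GP writer, and the file descriptors, stream description, stream
+   ID, access unit and maxFileSize below are placeholders set up by the caller):
+
+       M4WRITER_Context ctx = M4OSA_NULL;
+       M4OSA_ERR err;
+
+       err = pGlobalFcts->pFctOpen(&ctx, pOutputFileDescriptor,
+                                   pFileWriterFcts, M4OSA_NULL, pFileReaderFcts);
+       err = pGlobalFcts->pFctAddStream(ctx, &streamDescription);
+       err = pGlobalFcts->pFctSetOption(ctx, M4WRITER_kMaxFileSize,
+                                        (M4OSA_DataOption)&maxFileSize);
+       err = pGlobalFcts->pFctStartWriting(ctx);
+
+       err = pDataFcts->pStartAU(ctx, streamID, &accessUnit);
+       ... copy one encoded frame into the access unit ...
+       err = pDataFcts->pProcessAU(ctx, streamID, &accessUnit);
+
+       err = pGlobalFcts->pFctCloseWrite(ctx);
+*/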
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /*__M4WRITER_COMMON_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4_BitStreamParser.h b/libvideoeditor/vss/common/inc/M4_BitStreamParser.h
new file mode 100755
index 0000000..0e7dfb0
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4_BitStreamParser.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file   M4_BitStreamParser.h
+ * @brief  MPEG-4 File Format bit stream utility
+ * @note   This file contains utility functions used to parse MPEG specific
+ *         data structures.
+ ************************************************************************
+*/
+#ifndef __M4_BITSTREAMPARSER_H__
+#define __M4_BITSTREAMPARSER_H__
+
+#include "M4OSA_Types.h"
+
+/**
+* M4_BitStreamParser_Init.
+*
+* Allocates the context and initializes internal data
+*
+* @param pContext   : A pointer to the context internally used by the package - ALLOCATED BY THE
+*                    FUNCTION (M4OSA_NULL if allocation fails)
+* @param pBitStream : A pointer to the bitstream - must be 32-bit aligned, as accesses are made 32 bits at a time
+* @param size        : The size of the bitstream in bytes
+*
+*/
+void M4_BitStreamParser_Init(void** pContext, void* pBitStream, M4OSA_Int32 size);
+
+/**
+ ************************************************************************
+ * @brief    Clean up context
+ * @param    pContext    (IN/OUT)  M4_BitStreamParser context.
+ ************************************************************************
+*/
+void M4_BitStreamParser_CleanUp(void* pContext);
+
+/**
+ ************************************************************************
+ * @brief    Read the next <length> bits in the bitstream.
+ * @note    The function does not update the bitstream pointer.
+ * @param    pContext    (IN/OUT) M4_BitStreamParser context.
+ * @param    length        (IN) The number of bits to extract from the bitstream
+ * @return    the read bits
+ ************************************************************************
+*/
+M4OSA_UInt32 M4_BitStreamParser_ShowBits(void* pContext, M4OSA_Int32 length);
+
+/**
+ ************************************************************************
+ * @brief    Increment the bitstream pointer of <length> bits.
+ * @param    pContext    (IN/OUT) M4_BitStreamParser context.
+ * @param    length        (IN) The number of bits by which to advance the bitstream pointer
+ ************************************************************************
+*/
+void M4_BitStreamParser_FlushBits(void* pContext, M4OSA_Int32 length);
+
+/**
+ ************************************************************************
+ * @brief    Read <length> bits starting at bit offset <bitPos> in the bitstream.
+ * @note     The bitstream pointer is not updated.
+ *
+ * @param pContext   : A pointer to the context internally used by the package
+ * @param bitPos     : The bit offset at which reading starts
+ * @param length     : The number of bits to extract from the bitstream
+ *
+ * @returns the read bits
+*/
+M4OSA_UInt32 M4_BitStreamParser_GetBits(void* pContext,M4OSA_Int32 bitPos, M4OSA_Int32 length);
+
+/**
+* M4_BitStreamParser_Restart resets the bitstream indexes.
+*
+* @param pContext   : A pointer to the context internally used by the package
+*
+*/
+void M4_BitStreamParser_Restart(void* pContext);
+
+/**
+ ************************************************************************
+ * @brief    Get a pointer to the current byte pointed by the bitstream pointer.
+ * @returns pointer to the current location in the bitstream
+ * @note    It should be used carefully as the pointer is in the bitstream itself
+ *            and no copy is made.
+ * @param    pContext    (IN/OUT)  M4_BitStreamParser context.
+*/
+M4OSA_UInt8*  M4_BitStreamParser_GetCurrentbitStreamPointer(void* pContext);
+
+/**
+* M4_BitStreamParser_GetSize gets the size of the bitstream in bytes
+*
+* @param pContext   : A pointer to the context internally used by the package
+*
+* @returns the size of the bitstream in bytes
+*/
+M4OSA_Int32 M4_BitStreamParser_GetSize(void* pContext);
+
+void M4_MPEG4BitStreamParser_Init(void** pContext, void* pBitStream, M4OSA_Int32 size);
+
+/**
+* M4_MPEG4BitStreamParser_GetMpegLengthFromInteger decodes an MPEG-4 Systems
+* descriptor size from its SDL-encoded value.
+*
+* @param pContext   : A pointer to the context internally used by the package
+* @param val        : The encoded size value
+*
+* @returns the decoded size
+*/
+
+M4OSA_Int32 M4_MPEG4BitStreamParser_GetMpegLengthFromInteger(void* pContext, M4OSA_UInt32 val);
+
+
+/**
+ ************************************************************************
+ * @brief    Decode an MPEG4 Systems descriptor size from an encoded SDL size data.
+ * @note    The value is read from the current bitstream location.
+ * @param    pContext    (IN/OUT)  M4_BitStreamParser context.
+ * @return    Size in a human readable form
+ ************************************************************************
+*/
+M4OSA_Int32 M4_MPEG4BitStreamParser_GetMpegLengthFromStream(void* pContext);
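+
+/* Illustrative parsing sketch (not part of the API; pBuffer and bufferSize are
+   placeholders for a caller-provided, 32-bit aligned bitstream buffer):
+
+       void*        pParser = M4OSA_NULL;
+       M4OSA_UInt32 fourBytes;
+
+       M4_BitStreamParser_Init(&pParser, pBuffer, bufferSize);
+       fourBytes = M4_BitStreamParser_ShowBits(pParser, 32);
+       M4_BitStreamParser_FlushBits(pParser, 32);
+       M4_BitStreamParser_CleanUp(pParser);
+*/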
+
+#endif /*__M4_BITSTREAMPARSER_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4_Common.h b/libvideoeditor/vss/common/inc/M4_Common.h
new file mode 100755
index 0000000..6e1d42f
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4_Common.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file   M4_Common.h
+ * @brief  Common data structure between shells
+ * @note
+*************************************************************************
+*/
+#ifndef __M4_COMMON_H__
+#define __M4_COMMON_H__
+
+#include "M4OSA_Types.h"
+
+/**
+ ************************************************************************
+ * structure    _parameterSet
+ * @brief        This structure defines the structure of parameters for the avc
+ *               decoder specific info
+ * @note
+ ************************************************************************
+*/
+typedef struct _parameterSet
+{
+    M4OSA_UInt16 m_length;                /* Number of items*/
+    M4OSA_UInt8* m_pParameterSetUnit;   /* Array of items*/
+} ParameterSet ;
+
+/**
+ ************************************************************************
+ * structure    _avcSpecificInfo
+ * @brief        This structure defines the structure of specific info for the avc decoder
+ * @note
+ ************************************************************************
+*/
+typedef struct _avcSpecificInfo
+{
+    M4OSA_UInt8        m_nalUnitLength;                /* length in bytes of the NALUnitLength
+                                                            field in a AVC sample */
+    M4OSA_UInt8        m_numOfSequenceParameterSets;   /* Number of sequence parameter sets*/
+    M4OSA_UInt8        m_numOfPictureParameterSets;    /* Number of picture parameter sets*/
+    ParameterSet    *m_pSequenceParameterSet;        /* Sequence parameter sets array*/
+    ParameterSet    *m_pPictureParameterSet;        /* Picture parameter sets array*/
+} AvcSpecificInfo ;
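+
+/* Illustrative traversal sketch (field semantics assumed from the names: each
+   ParameterSet is taken to describe m_length bytes starting at
+   m_pParameterSetUnit; pAvcInfo is a placeholder pointer to an AvcSpecificInfo):
+
+       M4OSA_UInt8 i;
+       for (i = 0; i < pAvcInfo->m_numOfSequenceParameterSets; i++) {
+           ParameterSet* pSps = &pAvcInfo->m_pSequenceParameterSet[i];
+           ... pSps->m_length bytes are available at pSps->m_pParameterSetUnit ...
+       }
+*/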
+
+/**
+ ************************************************************************
+ * structure    M4_SynthesisAudioInfo
+ * @brief        This structure contains specific pointers used for synthesis audio format
+ ************************************************************************
+*/
+typedef struct _synthesisAudioInfo
+{
+    M4OSA_Void*        m_pInputBuf;
+    M4OSA_Void*        m_pInputInfo;
+    M4OSA_UInt16    m_uiNbSubFramePerStep;
+    M4OSA_UInt32    m_uiUsedBytes;
+} M4_SynthesisAudioInfo;
+
+
+/*
+ ************************************************************************
+ * enum     M4_AACDownsamplingMode
+ * @brief   This enum states modes for Down sampling
+ ************************************************************************
+*/
+typedef enum
+{
+    AAC_kDS_OFF    = 0,        /**< No Down sampling */
+    AAC_kDS_BY_2   = 1,        /**< Down sampling by 2
+                                 Profile = AAC :
+                                            output sampling rate = aac_samp_freq/2
+                                 Profile = HE_AAC and input is AAC:
+                                            Output sampling rate = aac_samp_freq (no downsampling).
+                                 Profile = HE_AAC and input is HE_AAC:
+                                            Output sampling rate = aac_samp_freq (Downsampling
+                                            occurs in SBR tool).
+                                 case profile = HE_AAC_v2 :
+                                            Not Supported */
+    AAC_kDS_BY_3   = 2,        /**< Down sampling by 3  - only for AAC profile */
+    AAC_kDS_BY_4   = 3,        /**< Down sampling by 4  - only for AAC profile */
+    AAC_kDS_BY_8   = 4        /**< Down sampling by 8  - only for AAC profile */
+
+} M4_AACDownsamplingMode;
+
+
+/*
+ ************************************************************************
+ * enum     M4_AACOutputMode
+ * @brief   This enum defines the output mode
+ ************************************************************************
+*/
+typedef enum
+{
+    AAC_kMono      = 0,    /**< Output is Mono  */
+    AAC_kStereo    = 1     /**< Output is Stereo */
+} M4_AACOutputMode;
+
+
+/*
+ ************************************************************************
+ * enum     M4_AACDecProfile
+ * @brief   This enum defines the AAC decoder profile
+ ************************************************************************
+*/
+typedef enum
+{
+    AAC_kAAC       = 0,        /**< AAC profile (only AAC LC object are supported) */
+    AAC_kHE_AAC    = 1,        /**< HE AAC or AAC+ profile (SBR in LP Mode)  */
+    AAC_kHE_AAC_v2 = 2        /**< HE AAC v2 or Enhanced AAC+ profile (SBR Tool in HQ Mode) */
+} M4_AACDecProfile;
+
+
+/**
+ ************************************************************************
+ * structure    M4_AacDecoderConfig
+ * @brief        This structure defines specific settings according to
+ *                the user requirements
+ ************************************************************************
+*/
+typedef struct
+{
+    M4_AACDecProfile        m_AACDecoderProfile;
+    M4_AACDownsamplingMode    m_DownSamplingMode;
+    M4_AACOutputMode        m_OutputMode;
+
+} M4_AacDecoderConfig;
+
+
+/**
+ ************************************************************************
+ * structure M4READER_AudioSbrUserdata
+ * @brief    This structure defines the user's data needed to decode the
+ *            AACplus stream
+ * @note    The field m_pFirstAU is used in the local file case and
+ *            the field m_bIsSbrEnabled is used in the streaming case.
+ ************************************************************************
+*/
+typedef struct
+{
+  M4OSA_Void*            m_pFirstAU;                /**< The first AU from where SBR data are
+                                                         extracted (local file case)*/
+  M4OSA_Bool            m_bIsSbrEnabled;        /**< A boolean that indicates if the stream is
+                                                    AACplus (streaming case)*/
+  M4_AacDecoderConfig*    m_pAacDecoderUserConfig;/**< Decoder specific user setting */
+
+} M4READER_AudioSbrUserdata;
+
+#endif /* __M4_COMMON_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4_Logo.h b/libvideoeditor/vss/common/inc/M4_Logo.h
new file mode 100755
index 0000000..79a2e1c
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4_Logo.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef M4_Logo_h
+#define M4_Logo_h
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+extern const unsigned char logo[];
+extern const int logo_width;
+extern const int logo_height;
+#ifdef __cplusplus
+}
+#endif
+#endif
+
diff --git a/libvideoeditor/vss/common/inc/M4_Utils.h b/libvideoeditor/vss/common/inc/M4_Utils.h
new file mode 100755
index 0000000..ec4bfb7
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4_Utils.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file    M4_Utils.h
+ * @brief    Utilities
+ * @note    This file defines utility macros
+*************************************************************************
+*/
+#ifndef __M4_UTILS_H__
+#define __M4_UTILS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*    M4_MediaTime definition
+    This type is used internally by some shell components */
+#include "M4OSA_Types.h"
+typedef M4OSA_Double    M4_MediaTime;
+
+/*    GET_MEMORY32 macro definition
+    This macro is used by the 3GP reader*/
+#ifdef __BIG_ENDIAN
+#define GET_MEMORY32(x) (x)
+#else
+#define GET_MEMORY32(x) ( (((x)&0xff)<<24) | (((x)&0xff00)<<8) |\
+     (((x)&0xff0000)>>8) | (((x)&0xff000000)>>24) )
+#endif /*__BIG_ENDIAN*/
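+
+/*    Worked example (little-endian build, illustrative only): the four bytes
+    'f','t','y','p' read as one 32-bit word give 0x70797466, and
+    GET_MEMORY32(0x70797466) == 0x66747970, i.e. the big-endian 'ftyp' value. */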
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __M4_UTILS_H__*/
+
diff --git a/libvideoeditor/vss/common/inc/M4_VideoEditingCommon.h b/libvideoeditor/vss/common/inc/M4_VideoEditingCommon.h
new file mode 100755
index 0000000..3bac61f
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/M4_VideoEditingCommon.h
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4_VideoEditingCommon.h
+ * @brief    Video Editing (VSS3GPP, MCS, PTO3GPP) common definitions
+ * @note
+ ******************************************************************************
+*/
+
+#ifndef __M4_VIDEOEDITINGCOMMON_H__
+#define __M4_VIDEOEDITINGCOMMON_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ *    Version */
+/* CHANGE_VERSION_HERE */
+#define M4VIDEOEDITING_VERSION_MAJOR    3
+#define M4VIDEOEDITING_VERSION_MINOR    1
+#define M4VIDEOEDITING_VERSION_REVISION    0
+
+
+/**
+ ******************************************************************************
+ * enum        M4VIDEOEDITING_FileType
+ * @brief    This enum defines the file format type to be used
+ ******************************************************************************
+*/
+typedef enum {
+    M4VIDEOEDITING_kFileType_3GPP        = 0,    /**< 3GPP file media type : input & output */
+    M4VIDEOEDITING_kFileType_MP4         = 1,    /**< MP4  file media type : input          */
+    M4VIDEOEDITING_kFileType_AMR         = 2,      /**< AMR  file media type : input & output */
+    M4VIDEOEDITING_kFileType_MP3         = 3,      /**< MP3  file media type : input          */
+    M4VIDEOEDITING_kFileType_PCM         = 4,      /**< PCM RAW file media type : input    RC */
+    M4VIDEOEDITING_kFileType_JPG         = 5,      /**< STILL PICTURE FEATURE: JPG file media
+                                                        type : input AND OUTPUT */
+    M4VIDEOEDITING_kFileType_BMP         = 6,      /**< STILL PICTURE FEATURE: BMP file media
+                                                        type : input only */
+    M4VIDEOEDITING_kFileType_GIF         = 7,      /**< STILL PICTURE FEATURE: GIF file media
+                                                        type : input only */
+    M4VIDEOEDITING_kFileType_PNG         = 8,      /**< STILL PICTURE FEATURE: PNG file media
+                                                        type : input only */
+    M4VIDEOEDITING_kFileType_ARGB8888       = 9,      /**< STILL PICTURE FEATURE: ARGB8888 file
+                                                            media type : input only */
+
+    M4VIDEOEDITING_kFileType_Unsupported = 255   /**< Unsupported file media type */
+} M4VIDEOEDITING_FileType;
+
+
+/**
+ ******************************************************************************
+ * enum        M4VIDEOEDITING_VideoFormat
+ * @brief    This enum defines the available video compression formats.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4VIDEOEDITING_kNoneVideo            = 0,    /**< Video not present */
+    M4VIDEOEDITING_kH263                = 1,    /**< H263 video */
+    M4VIDEOEDITING_kMPEG4                = 2,    /**< MPEG-4 video */
+    M4VIDEOEDITING_kMPEG4_EMP            = 3,    /**< MPEG-4 video with support for EMP
+                                                    (hsr=15, vsr=15, err=0, Iperiod=15,
+                                                     NO_M4V, NO_AC_PRED) */
+    M4VIDEOEDITING_kH264                = 4,    /**< H264 video */
+    M4VIDEOEDITING_kNullVideo           = 254,  /**< Do not care video type, use NULL encoder */
+    M4VIDEOEDITING_kUnsupportedVideo    = 255    /**< Unsupported video stream type */
+} M4VIDEOEDITING_VideoFormat;
+
+/**
+ ******************************************************************************
+ * enum        M4VIDEOEDITING_AudioFormat
+ * @brief    This enum defines the available audio formats.
+ * @note    HE_AAC, HE_AAC_v2 and MP3 cannot be used for the output audio format
+ ******************************************************************************
+*/
+typedef enum {
+    M4VIDEOEDITING_kNoneAudio            = 0,    /**< Audio not present */
+    M4VIDEOEDITING_kAMR_NB              = 1,    /**< AMR Narrow Band audio */
+    M4VIDEOEDITING_kAAC                    = 2,    /**< AAC audio */
+    M4VIDEOEDITING_kAACplus                = 3,    /**< AAC+ audio */
+    M4VIDEOEDITING_keAACplus             = 4,    /**< Enhanced AAC+ audio */
+    M4VIDEOEDITING_kMP3                 = 5,    /**< MP3 audio */
+    M4VIDEOEDITING_kEVRC                = 6,    /**< EVRC audio */
+    M4VIDEOEDITING_kPCM                 = 7,    /**< PCM audio */
+    M4VIDEOEDITING_kNullAudio           = 254,  /**< Do not care audio type, use NULL encoder */
+    M4VIDEOEDITING_kUnsupportedAudio    = 255    /**< Unsupported audio stream type */
+} M4VIDEOEDITING_AudioFormat;
+
+
+/**
+ ******************************************************************************
+ * enum        M4VIDEOEDITING_VideoProfileAndLevel
+ * @brief    This enum defines the video profile and level for MPEG-4 and H263 streams.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4VIDEOEDITING_kMPEG4_SP_Level_0               = 0,
+    M4VIDEOEDITING_kMPEG4_SP_Level_0b              = 1,
+    M4VIDEOEDITING_kMPEG4_SP_Level_1               = 2,
+    M4VIDEOEDITING_kMPEG4_SP_Level_2               = 3,
+    M4VIDEOEDITING_kMPEG4_SP_Level_3               = 4,
+    M4VIDEOEDITING_kH263_Profile_0_Level_10        = 5,
+    M4VIDEOEDITING_kH263_Profile_0_Level_20        = 6,
+    M4VIDEOEDITING_kH263_Profile_0_Level_30        = 7,
+    M4VIDEOEDITING_kH263_Profile_0_Level_40        = 8,
+    M4VIDEOEDITING_kH263_Profile_0_Level_45        = 9,
+    M4VIDEOEDITING_kMPEG4_SP_Level_4a              = 10,
+    M4VIDEOEDITING_kMPEG4_SP_Level_5               = 11,
+    M4VIDEOEDITING_kH264_Profile_0_Level_1         = 12,
+    M4VIDEOEDITING_kH264_Profile_0_Level_1b        = 13,
+    M4VIDEOEDITING_kH264_Profile_0_Level_1_1       = 14,
+    M4VIDEOEDITING_kH264_Profile_0_Level_1_2       = 15,
+    M4VIDEOEDITING_kH264_Profile_0_Level_1_3       = 16,
+    M4VIDEOEDITING_kH264_Profile_0_Level_2         = 17,
+    M4VIDEOEDITING_kH264_Profile_0_Level_2_1       = 18,
+    M4VIDEOEDITING_kH264_Profile_0_Level_2_2       = 19,
+    M4VIDEOEDITING_kH264_Profile_0_Level_3         = 20,
+    M4VIDEOEDITING_kH264_Profile_0_Level_3_1       = 21,
+    M4VIDEOEDITING_kH264_Profile_0_Level_3_2       = 22,
+    M4VIDEOEDITING_kH264_Profile_0_Level_4         = 23,
+    M4VIDEOEDITING_kH264_Profile_0_Level_4_1       = 24,
+    M4VIDEOEDITING_kH264_Profile_0_Level_4_2       = 25,
+    M4VIDEOEDITING_kH264_Profile_0_Level_5         = 26,
+    M4VIDEOEDITING_kH264_Profile_0_Level_5_1       = 27,
+    M4VIDEOEDITING_kProfile_and_Level_Out_Of_Range = 255
+} M4VIDEOEDITING_VideoProfileAndLevel;
+
+
+/**
+ ******************************************************************************
+ * enum        M4VIDEOEDITING_VideoFrameSize
+ * @brief    This enum defines the available output frame sizes.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4VIDEOEDITING_kSQCIF=0,        /**< SQCIF 128x96  */
+    M4VIDEOEDITING_kQQVGA,            /**< QQVGA 160x120 */
+    M4VIDEOEDITING_kQCIF,            /**< QCIF  176x144 */
+    M4VIDEOEDITING_kQVGA,            /**< QVGA  320x240 */
+    M4VIDEOEDITING_kCIF,            /**< CIF   352x288 */
+    M4VIDEOEDITING_kVGA,            /**< VGA   640x480 */
+/* +PR LV5807 */
+    M4VIDEOEDITING_kWVGA,            /**< WVGA  800x480 */
+    M4VIDEOEDITING_kNTSC,            /**< NTSC  720x480 */
+/* -PR LV5807 */
+
+/* +CR Google */
+    M4VIDEOEDITING_k640_360,            /**< 640x360 */
+    M4VIDEOEDITING_k854_480,            /**< 854x480 */
+    M4VIDEOEDITING_kHD1280,                /**< 720p 1280x720 */
+    M4VIDEOEDITING_kHD1080,                /**< 720p 1080x720 */
+    M4VIDEOEDITING_kHD960                /**< 720p 960x720 */
+
+/* -CR Google */
+
+} M4VIDEOEDITING_VideoFrameSize;
+
+
+/**
+ ******************************************************************************
+ * enum        M4VIDEOEDITING_Videoframerate
+ * @brief    This enum defines the available video framerates.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4VIDEOEDITING_k5_FPS = 0,
+    M4VIDEOEDITING_k7_5_FPS,
+    M4VIDEOEDITING_k10_FPS,
+    M4VIDEOEDITING_k12_5_FPS,
+    M4VIDEOEDITING_k15_FPS,
+    M4VIDEOEDITING_k20_FPS,
+    M4VIDEOEDITING_k25_FPS,
+    M4VIDEOEDITING_k30_FPS
+} M4VIDEOEDITING_VideoFramerate;
+
+
+/**
+ ******************************************************************************
+ * enum        M4VIDEOEDITING_AudioSamplingFrequency
+ * @brief    This enum defines the available output audio sampling frequencies
+ * @note    8 kHz is the only supported frequency for AMR-NB output
+ * @note    16 kHz is the only supported frequency for AAC output
+ * @note    The recommended practice is to use the Default value when setting the encoding parameters
+ ******************************************************************************
+*/
+typedef enum {
+    M4VIDEOEDITING_kDefault_ASF    = 0,    /**< Default Audio Sampling Frequency for selected
+                                                 Audio output format */
+    M4VIDEOEDITING_k8000_ASF    = 8000,    /**< Note: Default audio Sampling Frequency for
+                                                    AMR-NB output */
+    M4VIDEOEDITING_k11025_ASF    = 11025,
+    M4VIDEOEDITING_k12000_ASF    = 12000,
+    M4VIDEOEDITING_k16000_ASF    = 16000,    /**< Note: Default audio Sampling Frequency
+                                                     for AAC output */
+    M4VIDEOEDITING_k22050_ASF    = 22050,
+    M4VIDEOEDITING_k24000_ASF    = 24000,
+    M4VIDEOEDITING_k32000_ASF    = 32000,
+    M4VIDEOEDITING_k44100_ASF    = 44100,
+    M4VIDEOEDITING_k48000_ASF    = 48000
+
+} M4VIDEOEDITING_AudioSamplingFrequency;
+
+
+/**
+ ******************************************************************************
+ * enum        M4VIDEOEDITING_Bitrate
+ * @brief    This enum defines the available audio or video bitrates.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4VIDEOEDITING_kVARIABLE_KBPS = -1,     /* no regulation */
+    M4VIDEOEDITING_kUndefinedBitrate = 0,   /* undefined */
+    M4VIDEOEDITING_k8_KBPS = 8000,
+    M4VIDEOEDITING_k9_2_KBPS = 9200,        /* evrc only */
+    M4VIDEOEDITING_k12_2_KBPS = 12200,      /* amr only */
+    M4VIDEOEDITING_k16_KBPS = 16000,
+    M4VIDEOEDITING_k24_KBPS = 24000,
+    M4VIDEOEDITING_k32_KBPS = 32000,
+    M4VIDEOEDITING_k40_KBPS = 40000,
+    M4VIDEOEDITING_k48_KBPS = 48000,
+    M4VIDEOEDITING_k56_KBPS = 56000,
+    M4VIDEOEDITING_k64_KBPS = 64000,
+    M4VIDEOEDITING_k80_KBPS = 80000,
+    M4VIDEOEDITING_k96_KBPS = 96000,
+    M4VIDEOEDITING_k112_KBPS = 112000,
+    M4VIDEOEDITING_k128_KBPS = 128000,
+    M4VIDEOEDITING_k160_KBPS = 160000,
+    M4VIDEOEDITING_k192_KBPS = 192000,
+    M4VIDEOEDITING_k224_KBPS = 224000,
+    M4VIDEOEDITING_k256_KBPS = 256000,
+    M4VIDEOEDITING_k288_KBPS = 288000,
+    M4VIDEOEDITING_k320_KBPS = 320000,
+    M4VIDEOEDITING_k384_KBPS = 384000,
+    M4VIDEOEDITING_k512_KBPS = 512000,
+    M4VIDEOEDITING_k800_KBPS = 800000,
+/*+ New Encoder bitrates */
+    M4VIDEOEDITING_k2_MBPS = 2000000,
+    M4VIDEOEDITING_k5_MBPS = 5000000,
+    M4VIDEOEDITING_k8_MBPS = 8000000,
+/*- New Encoder bitrates */
+} M4VIDEOEDITING_Bitrate;
+
+
+/**
+ ******************************************************************************
+ * structure    M4VIDEOEDITING_FtypBox
+ * @brief        Information to build the 'ftyp' atom
+ ******************************************************************************
+*/
+#define M4VIDEOEDITING_MAX_COMPATIBLE_BRANDS 10
+typedef struct
+{
+    /* All brand fields are actually char[4] stored in big-endian integer format */
+
+    M4OSA_UInt32    major_brand;           /* generally '3gp4'            */
+    M4OSA_UInt32    minor_version;         /* generally '0000' or 'x.x '  */
+    M4OSA_UInt32    nbCompatibleBrands;    /* number of compatible brands */
+    M4OSA_UInt32    compatible_brands[M4VIDEOEDITING_MAX_COMPATIBLE_BRANDS]; /* array of
+                                                                         max compatible brands */
+
+} M4VIDEOEDITING_FtypBox;
+
+/* Some useful brands */
+#define M4VIDEOEDITING_BRAND_0000  0x00000000
+#define M4VIDEOEDITING_BRAND_3G2A  0x33673261
+#define M4VIDEOEDITING_BRAND_3GP4  0x33677034
+#define M4VIDEOEDITING_BRAND_3GP5  0x33677035
+#define M4VIDEOEDITING_BRAND_3GP6  0x33677036
+#define M4VIDEOEDITING_BRAND_AVC1  0x61766331
+#define M4VIDEOEDITING_BRAND_EMP   0x656D7020
+#define M4VIDEOEDITING_BRAND_ISOM  0x69736F6D
+#define M4VIDEOEDITING_BRAND_MP41  0x6D703431
+#define M4VIDEOEDITING_BRAND_MP42  0x6D703432
+#define M4VIDEOEDITING_BRAND_VFJ1  0x76666A31
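+
+/* Illustrative sketch (not part of the API): filling an 'ftyp' description with
+   the brands above; such a structure can then be handed to the writer through
+   its M4WRITER_kSetFtypBox option.
+
+       M4VIDEOEDITING_FtypBox ftyp;
+       ftyp.major_brand          = M4VIDEOEDITING_BRAND_3GP4;
+       ftyp.minor_version        = M4VIDEOEDITING_BRAND_0000;
+       ftyp.nbCompatibleBrands   = 2;
+       ftyp.compatible_brands[0] = M4VIDEOEDITING_BRAND_3GP4;
+       ftyp.compatible_brands[1] = M4VIDEOEDITING_BRAND_ISOM;
+*/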
+
+/**
+ ******************************************************************************
+ * structure    M4VIDEOEDITING_ClipProperties
+ * @brief   This structure gathers the information related to an input file
+ ******************************************************************************
+*/
+typedef struct {
+
+    /**
+     * Common */
+    M4OSA_Bool                          bAnalysed;           /**< Flag to know if the file has
+                                                                  been already analysed or not */
+    M4OSA_UInt8                         Version[3];          /**< Version of the libraries used to
+                                                                  perform the clip analysis */
+    M4OSA_UInt32                        uiClipDuration;      /**< Clip duration (in ms) */
+    M4VIDEOEDITING_FileType             FileType;            /**< .3gp, .amr, .mp3 */
+    M4VIDEOEDITING_FtypBox              ftyp;                /**< 3gp 'ftyp' atom, major_brand =
+                                                                    0 if not used */
+
+    /**
+     * Video */
+    M4VIDEOEDITING_VideoFormat          VideoStreamType;     /**< Format of the video stream */
+    M4OSA_UInt32                        uiClipVideoDuration; /**< Video track duration (in ms) */
+    M4OSA_UInt32                        uiVideoBitrate;      /**< Video average bitrate (in bps)*/
+    M4OSA_UInt32                        uiVideoMaxAuSize;    /**< Maximum Access Unit size of the
+                                                                  video stream */
+    M4OSA_UInt32                        uiVideoWidth;        /**< Video frame width */
+    M4OSA_UInt32                        uiVideoHeight;       /**< Video frame height */
+    M4OSA_UInt32                        uiVideoTimeScale;    /**< Video time scale */
+    M4OSA_Float                         fAverageFrameRate;   /**< Average frame rate of the video
+                                                                  stream */
+    M4VIDEOEDITING_VideoProfileAndLevel    ProfileAndLevel;     /**< Supported MPEG4 and H263
+                                                                     profiles and levels */
+    M4OSA_UInt8                         uiH263level;         /**< H263 level (from core decoder)*/
+    M4OSA_UInt8                         uiVideoProfile;      /**< H263 or MPEG-4 profile
+                                                                (from core decoder) */
+    M4OSA_Bool                          bMPEG4dataPartition; /**< MPEG-4 uses data partitioning */
+    M4OSA_Bool                          bMPEG4rvlc;          /**< MPEG-4 uses RVLC tool */
+    M4OSA_Bool                          bMPEG4resynchMarker; /**< MPEG-4 stream uses Resynch
+                                                                   Marker */
+
+    /**
+     * Audio */
+    M4VIDEOEDITING_AudioFormat          AudioStreamType;     /**< Format of the audio stream */
+    M4OSA_UInt32                        uiClipAudioDuration; /**< Audio track duration (in ms) */
+    M4OSA_UInt32                        uiAudioBitrate;      /**< Audio average bitrate (in bps) */
+    M4OSA_UInt32                        uiAudioMaxAuSize;    /**< Maximum Access Unit size of the
+                                                                    audio stream */
+    M4OSA_UInt32                        uiNbChannels;        /**< Number of channels
+                                                                    (1=mono, 2=stereo) */
+    M4OSA_UInt32                        uiSamplingFrequency; /**< Sampling audio frequency
+                                                           (8000 for amr, 16000 or more for aac) */
+    M4OSA_UInt32                        uiExtendedSamplingFrequency; /**< Extended frequency for
+                                                                         AAC+, eAAC+ streams */
+    M4OSA_UInt32                        uiDecodedPcmSize;    /**< Size of the decoded PCM data */
+
+    /**
+     * Video editing compatibility chart */
+    M4OSA_Bool      bVideoIsEditable;                        /**< Video stream can be decoded and
+                                                                 re-encoded */
+    M4OSA_Bool      bAudioIsEditable;                        /**< Audio stream can be decoded and
+                                                                  re-encoded */
+    M4OSA_Bool      bVideoIsCompatibleWithMasterClip;        /**< Video properties match reference
+                                                                  clip properties */
+    M4OSA_Bool      bAudioIsCompatibleWithMasterClip;        /**< Audio properties match reference
+                                                                   clip properties */
+
+    /**
+     * Still Picture */
+    M4OSA_UInt32                        uiStillPicWidth;        /**< Image width */
+    M4OSA_UInt32                        uiStillPicHeight;       /**< Image height */
+    M4OSA_UInt32                        uiClipAudioVolumePercentage;
+
+} M4VIDEOEDITING_ClipProperties;
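+
+/* Illustrative sketch (not part of the API; pProps is a placeholder pointer to
+   an analysed clip): the compatibility chart tells the caller whether the clip
+   can be used as is or must be transcoded first.
+
+       if ((M4OSA_FALSE == pProps->bVideoIsCompatibleWithMasterClip) ||
+           (M4OSA_FALSE == pProps->bAudioIsCompatibleWithMasterClip)) {
+           ... the clip needs to be transcoded (e.g. by the MCS) before editing ...
+       }
+*/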
+
+
+#ifdef __cplusplus
+    }
+#endif
+
+#endif /* __M4_VIDEOEDITINGCOMMON_H__ */
+
diff --git a/libvideoeditor/vss/common/inc/MonoTo2I_16.h b/libvideoeditor/vss/common/inc/MonoTo2I_16.h
new file mode 100755
index 0000000..386b353
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/MonoTo2I_16.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _MONOTO2I_16_H_
+#define _MONOTO2I_16_H_
+
+
+void MonoTo2I_16(  const short *src,
+                         short *dst,
+                         short n);
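+
+/* Illustrative sketch (assumption based on the name: each mono sample is
+   duplicated into the left and right channels of an interleaved stereo output,
+   so dst must hold 2*n samples):
+
+       short mono[160];
+       short stereo[320];
+       MonoTo2I_16(mono, stereo, 160);
+*/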
+
+/**********************************************************************************/
+
+#endif  /* _MONOTO2I_16_H_ */
+
+/**********************************************************************************/
+
diff --git a/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches.h b/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches.h
new file mode 100755
index 0000000..46a7efc
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef NXPSW_COMPILERSWITCHES_H
+#define NXPSW_COMPILERSWITCHES_H
+
+/* ----- Main features ----- */
+#include "NXPSW_CompilerSwitches_MCS.h" /* Transcoder */
+
+/* ----- Add-ons ----- */
+
+#endif /* NXPSW_COMPILERSWITCHES_H */
+
diff --git a/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches_MCS.h b/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches_MCS.h
new file mode 100755
index 0000000..028cd27
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/NXPSW_CompilerSwitches_MCS.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef NXPSW_COMPILERSWITCHES_MCS_H
+#define NXPSW_COMPILERSWITCHES_MCS_H
+
+                            /***********/
+                            /* READERS */
+                            /***********/
+
+/* -----  AMR reader support ----- */
+#define M4VSS_SUPPORT_READER_AMR        /**< [default] Support .amr files */
+
+/* ----- 3GPP  reader support ----- */
+#define M4VSS_SUPPORT_READER_3GP        /**< [default] Support .mp4, .3gp files */
+
+
+/* ----- MP3 reader support ----- */
+#define M4VSS_SUPPORT_READER_MP3        /**< [default] Support .mp3 files */
+
+/* ----- RAW reader support ----- */
+#define M4VSS_SUPPORT_READER_PCM        /**< [default] Support .pcm files */
+
+
+                            /************/
+                            /* DECODERS */
+                            /************/
+
+/* -----  AMR NB decoder support ----- */
+#define M4VSS_SUPPORT_AUDEC_AMRNB       /**< [default] Support AMR NB streams */
+
+/* ----- AAC decoder support ----- */
+#define M4VSS_SUPPORT_AUDEC_AAC            /**< [default] Support AAC, AAC+ and eAAC+ streams */
+
+/* ----- MP4/H263 video decoder support ----- */
+#define M4VSS_SUPPORT_VIDEC_3GP         /**< [default] Support mpeg4 and H263 decoders */
+
+#ifdef M4VSS_SUPPORT_VIDEC_3GP
+#define GET_DECODER_CONFIG_INFO
+#endif
+
+#define M4VSS_SUPPORT_VIDEO_AVC            /**< [default] Support H264 decoders */
+
+/* ----- MP3 decoder support----- */
+#define M4VSS_SUPPORT_AUDEC_MP3         /**< [default] Support MP3 decoders */
+
+
+/* ----- NULL decoder support----- */
+#define M4VSS_SUPPORT_AUDEC_NULL        /**< [default] Support PCM reading */
+
+
+                            /***********/
+                            /* WRITERS */
+                            /***********/
+
+/* ----- 3gp writer ----- */
+#define M4VSS_SUPPORT_WRITER_3GPP       /**< [default] support encapsulating in 3gp format
+                                             {amr,aac} x {mpeg4,h263} */
+
+
+
+
+
+                            /************/
+                            /* ENCODERS */
+                            /************/
+
+/* ----- mpeg4 & h263 encoder ----- */
+#define M4VSS_SUPPORT_ENCODER_MPEG4     /**< [default] support encoding in mpeg4 and
+                                             h263 format {yuv,rgb} */
+
+/* ----- h264 encoder ----- */
+#define M4VSS_SUPPORT_ENCODER_AVC
+
+/* ----- amr encoder ----- */
+#define M4VSS_SUPPORT_ENCODER_AMR  /**< [default] support encoding in amr 12.2 format {amr,wav} */
+
+/* ----- aac encoder ----- */
+#define M4VSS_SUPPORT_ENCODER_AAC       /**< [default] support encoding in aac format {amr,wav} */
+
+
+/* ----- mp3 encoder ----- */
+#define M4VSS_SUPPORT_ENCODER_MP3       /**< [default] support encoding in mp3 format {mp3} */
+
+                            /************/
+                            /* FEATURES */
+                            /************/
+
+/* ----- VSS3GPP & xVSS ----- */
+#define M4VSS_SUPPORT_EXTENDED_FEATURES /**< [default] if defined, implementation is xVSS else
+                                            it is VSS3GPP */
+
+/* ----- SPS ----- */
+#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
+
+//#define M4SPS_GIF_NOT_SUPPORTED  /**< [option] do not support GIF format in still picture api */
+//#define M4SPS_JPEG_NOT_SUPPORTED /**< [option] do not support JPEG format in still picture api */
+//#define M4SPS_PNG_NOT_SUPPORTED  /**< [option] do not support PNG format in still picture api */
+#define M4SPS_WBMP_NOT_SUPPORTED   /**< [option] do not support WBMP format in still picture api */
+#define M4SPS_BGR565_COLOR_OUTPUT  /**< [option] output in still picture api is BGR565
+                                        (default = BGR24) */
+
+#else
+
+#define M4SPS_GIF_NOT_SUPPORTED    /**< [option] do not support GIF format in still picture api */
+//#define M4SPS_JPEG_NOT_SUPPORTED /**< [option] do not support JPEG format in still picture api */
+#define M4SPS_PNG_NOT_SUPPORTED    /**< [option] do not support PNG format in still picture api */
+#define M4SPS_WBMP_NOT_SUPPORTED   /**< [option] do not support WBMP format in still picture api */
+//#define M4SPS_BGR565_COLOR_OUTPUT /**< [option] output in still picture api is BGR565
+//                                          (default = BGR24) */
+
+#endif
+
+#define M4VSS_ENABLE_EXTERNAL_DECODERS
+
+#define M4VSS_SUPPORT_OMX_CODECS
+
+#endif /* NXPSW_COMPILERSWITCHES_MCS_H */
+
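
A short illustration of how client code is expected to key off these compile switches; the register*Reader() helpers below are hypothetical stubs, not part of this patch:

    /* Illustrative only: readers are compiled in or out by the switches above. */
    #include "NXPSW_CompilerSwitches.h"

    static void registerAmrReader(void) { /* hypothetical stub */ }
    static void registerMp3Reader(void) { /* hypothetical stub */ }

    static void registerConfiguredReaders(void)
    {
    #ifdef M4VSS_SUPPORT_READER_AMR
        registerAmrReader();    /* present because M4VSS_SUPPORT_READER_AMR is defined */
    #endif
    #ifdef M4VSS_SUPPORT_READER_MP3
        registerMp3Reader();    /* present because M4VSS_SUPPORT_READER_MP3 is defined */
    #endif
    }
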
diff --git a/libvideoeditor/vss/common/inc/SSRC.h b/libvideoeditor/vss/common/inc/SSRC.h
new file mode 100755
index 0000000..16ca3f6
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/SSRC.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/****************************************************************************************/
+/*                                                                                      */
+/*     Project::                                                                        */
+/*     %name:          SSRC.h % */
+/*                                                                                      */
+/****************************************************************************************/
+
+/*
+    The input and output blocks of the SRC are by default blocks of 40 ms.  This means that
+    the following default block sizes are used:
+
+          Fs     Default Block size
+        -----        ----------
+         8000           320
+        11025           441
+        12000           480
+        16000           640
+        22050           882
+        24000           960
+        32000          1280
+        44100          1764
+        48000          1920
+
+    An API is provided to change the default block size into any multiple of the minimal
+    block size.
+
+    All the sampling rates above are supported both as input and as output sampling rates.
+*/
+
+#ifndef __SSRC_H__
+#define __SSRC_H__
+
+/****************************************************************************************
+   INCLUDES
+*****************************************************************************************/
+
+#include "LVM_Types.h"
+
+/****************************************************************************************
+   DEFINITIONS
+*****************************************************************************************/
+
+#define SSRC_INSTANCE_SIZE          548
+#define SSRC_INSTANCE_ALIGNMENT     4
+#define SSRC_SCRATCH_ALIGNMENT      4
+
+/****************************************************************************************
+   TYPE DEFINITIONS
+*****************************************************************************************/
+
+/* Status return values */
+typedef enum
+{
+    SSRC_OK                     = 0,                /* Successful return from a routine */
+    SSRC_INVALID_FS             = 1,                /* The input or the output sampling rate is
+                                                        invalid */
+    SSRC_INVALID_NR_CHANNELS    = 2,                /* The channel format is neither mono
+                                                         nor stereo */
+    SSRC_NULL_POINTER           = 3,                /* One of the input pointers is NULL */
+    SSRC_WRONG_NR_SAMPLES       = 4,                /* Invalid number of samples */
+    SSRC_ALLINGMENT_ERROR       = 5,                /* The instance memory or the scratch memory
+                                                        is not aligned */
+    SSRC_INVALID_MODE           = 6,                /* A wrong value has been used for the mode
+                                                        parameter */
+    SSRC_INVALID_VALUE          = 7,                /* An invalid (out of range) value has been
+                                                     used for one of the parameters */
+    LVXXX_RETURNSTATUS_DUMMY = LVM_MAXENUM
+} SSRC_ReturnStatus_en;
+
+/* Instance memory */
+typedef struct
+{
+    LVM_INT32 Storage [ SSRC_INSTANCE_SIZE/4 ];
+} SSRC_Instance_t;
+
+/* Scratch memory */
+typedef LVM_INT32 SSRC_Scratch_t;
+
+/* Number of samples mode */
+typedef enum
+{
+    SSRC_NR_SAMPLES_DEFAULT     = 0,
+    SSRC_NR_SAMPLES_MIN         = 1,
+    SSRC_NR_SAMPLES_DUMMY       = LVM_MAXENUM
+} SSRC_NR_SAMPLES_MODE_en;
+
+/* Instance parameters */
+typedef struct
+{
+    LVM_Fs_en           SSRC_Fs_In;
+    LVM_Fs_en           SSRC_Fs_Out;
+    LVM_Format_en       SSRC_NrOfChannels;
+    LVM_INT16           NrSamplesIn;
+    LVM_INT16           NrSamplesOut;
+} SSRC_Params_t;
+
+
+/****************************************************************************************
+   FUNCTION PROTOTYPES
+*****************************************************************************************/
+
+
+/****************************************************************************************/
+/*                                                                                      */
+/* FUNCTION:                SSRC_GetNrSamples                                           */
+/*                                                                                      */
+/* DESCRIPTION:                                                                         */
+/*  This function retrieves the number of samples (or sample pairs for stereo) to be    */
+/*  used as input and as output of the SSRC module.                                     */
+/*                                                                                      */
+/* PARAMETERS:                                                                          */
+/*  Mode                    There are two modes:                                        */
+/*                              - SSRC_NR_SAMPLES_DEFAULT.  In this mode, the function  */
+/*                                will return the number of samples for 40 ms blocks    */
+/*                              - SSRC_NR_SAMPLES_MIN will return the minimal number    */
+/*                                of samples that is supported for this conversion      */
+/*                                ratio.  Each integer multiple of this number will     */
+/*                                be accepted by the SSRC_Init function                 */
+/*                                                                                      */
+/*  pSSRC_Params            pointer to the instance parameters                          */
+/*                                                                                      */
+/* RETURNS:                                                                             */
+/*  SSRC_OK                 Succeeded                                                   */
+/*  SSRC_INVALID_FS         When the requested input or output sampling rates           */
+/*                          are invalid.                                                */
+/*  SSRC_INVALID_NR_CHANNELS When the channel format is not equal to LVM_MONO           */
+/*                          or LVM_STEREO                                               */
+/*  SSRC_NULL_POINTER       When pSSRC_Params is a NULL pointer                         */
+/*  SSRC_INVALID_MODE       When Mode is not a valid setting                            */
+/*                                                                                      */
+/*                                                                                      */
+/* NOTES:                                                                               */
+/*                                                                                      */
+/****************************************************************************************/
+
+SSRC_ReturnStatus_en SSRC_GetNrSamples( SSRC_NR_SAMPLES_MODE_en  Mode,
+                                        SSRC_Params_t*           pSSRC_Params );
+
+
+/****************************************************************************************/
+/*                                                                                      */
+/* FUNCTION:                SSRC_GetScratchSize                                         */
+/*                                                                                      */
+/* DESCRIPTION:                                                                         */
+/*  This function retrieves the scratch size for a given conversion ratio and           */
+/*  for given buffer sizes at the input and at the output                               */
+/*                                                                                      */
+/* PARAMETERS:                                                                          */
+/*  pSSRC_Params            pointer to the instance parameters                          */
+/*  pScratchSize            pointer to the scratch size.  The SSRC_GetScratchSize       */
+/*                          function will fill in the correct value (in bytes).         */
+/*                                                                                      */
+/* RETURNS:                                                                             */
+/*  SSRC_OK                 when the function call succeeds                             */
+/*  SSRC_INVALID_FS         When the requested input or output sampling rates           */
+/*                          are invalid.                                                */
+/*  SSRC_INVALID_NR_CHANNELS When the channel format is not equal to LVM_MONO           */
+/*                          or LVM_STEREO                                               */
+/*  SSRC_NULL_POINTER       When any of the input pointers is a NULL pointer            */
+/*  SSRC_WRONG_NR_SAMPLES   When the number of samples on the input or on the output    */
+/*                          are incorrect                                               */
+/*                                                                                      */
+/* NOTES:                                                                               */
+/*                                                                                      */
+/****************************************************************************************/
+
+SSRC_ReturnStatus_en SSRC_GetScratchSize(   SSRC_Params_t*    pSSRC_Params,
+                                            LVM_INT32*        pScratchSize );
+
+
+/****************************************************************************************/
+/*                                                                                      */
+/* FUNCTION:                SSRC_Init                                                   */
+/*                                                                                      */
+/* DESCRIPTION:                                                                         */
+/*  This function is used to initialize the SSRC module instance.                       */
+/*                                                                                      */
+/* PARAMETERS:                                                                          */
+/*  pSSRC_Instance          Instance pointer                                            */
+/*                                                                                      */
+/*  pSSRC_Scratch           pointer to the scratch memory                               */
+/*  pSSRC_Params            pointer to the instance parameters                          */
+/*  pInputInScratch,        pointer to a location in the scratch memory that can be     */
+/*                          used to store the input samples (e.g. to save memory)       */
+/*  pOutputInScratch        pointer to a location in the scratch memory that can be     */
+/*                          used to store the output samples (e.g. to save memory)      */
+/*                                                                                      */
+/* RETURNS:                                                                             */
+/*  SSRC_OK                 Succeeded                                                   */
+/*  SSRC_INVALID_FS         When the requested input or output sampling rates           */
+/*                          are invalid.                                                */
+/*  SSRC_INVALID_NR_CHANNELS When the channel format is not equal to LVM_MONO           */
+/*                          or LVM_STEREO                                               */
+/*  SSRC_WRONG_NR_SAMPLES   When the number of samples on the input or the output       */
+/*                          are incorrect                                               */
+/*  SSRC_NULL_POINTER       When any of the input pointers is a NULL pointer            */
+/*  SSRC_ALLINGMENT_ERROR   When the instance memory or the scratch memory is not       */
+/*                          4 bytes aligned                                             */
+/*                                                                                      */
+/* NOTES:                                                                               */
+/*  1. The init function will clear the internal state                                  */
+/*                                                                                      */
+/****************************************************************************************/
+
+SSRC_ReturnStatus_en SSRC_Init( SSRC_Instance_t* pSSRC_Instance,
+                                SSRC_Scratch_t*  pSSRC_Scratch,
+                                SSRC_Params_t*   pSSRC_Params,
+                                LVM_INT16**      ppInputInScratch,
+                                LVM_INT16**      ppOutputInScratch);
+
+
+/****************************************************************************************/
+/*                                                                                      */
+/* FUNCTION:                SSRC_SetGains                                               */
+/*                                                                                      */
+/* DESCRIPTION:                                                                         */
+/*  This function sets headroom gain and the post gain of the SSRC                      */
+/*                                                                                      */
+/* PARAMETERS:                                                                          */
+/*  bHeadroomGainEnabled    parameter to enable or disable the headroom gain of the     */
+/*                          SSRC.  The default value is LVM_MODE_ON.  LVM_MODE_OFF      */
+/*                          can be used in case it can be guaranteed that the input     */
+/*                          level is below -6dB in all cases (the default headroom      */
+/*                          is -6 dB)                                                   */
+/*                                                                                      */
+/*  bOutputGainEnabled      parameter to enable or disable the output gain.  The        */
+/*                          default value is LVM_MODE_ON                                */
+/*                                                                                      */
+/*  OutputGain              the value of the output gain.  The output gain is a linear  */
+/*                          gain value. 0x7FFF is equal to +6 dB and 0x0000 corresponds */
+/*                          to -inf dB.  By default, a 3dB gain is applied, resulting   */
+/*                          in an overall gain of -3dB (-6dB headroom + 3dB output gain)*/
+/*                                                                                      */
+/* RETURNS:                                                                             */
+/*  SSRC_OK                 Succeeded                                                   */
+/*  SSRC_NULL_POINTER       When pSSRC_Instance is a NULL pointer                       */
+/*  SSRC_INVALID_MODE       Wrong value used for the bHeadroomGainEnabled or the        */
+/*                          bOutputGainEnabled parameters.                              */
+/*  SSRC_INVALID_VALUE      When OutputGain is out of the range [0;32767]               */
+/*                                                                                      */
+/* NOTES:                                                                               */
+/*  1. The SSRC_SetGains function is an optional function that should only be used      */
+/*     in rare cases.  Preferably, use the default settings.                            */
+/*                                                                                      */
+/****************************************************************************************/
+
+SSRC_ReturnStatus_en SSRC_SetGains( SSRC_Instance_t* pSSRC_Instance,
+                                    LVM_Mode_en      bHeadroomGainEnabled,
+                                    LVM_Mode_en      bOutputGainEnabled,
+                                    LVM_INT16        OutputGain );
+
+
+/****************************************************************************************/
+/*                                                                                      */
+/* FUNCTION:                SSRC_Process                                                */
+/*                                                                                      */
+/* DESCRIPTION:                                                                         */
+/*  Process function for the SSRC module.                                               */
+/*                                                                                      */
+/* PARAMETERS:                                                                          */
+/*  pSSRC_Instance          Instance pointer                                            */
+/*  pSSRC_AudioIn           Pointer to the input data                                   */
+/*  pSSRC_AudioOut          Pointer to the output data                                  */
+/*                                                                                      */
+/* RETURNS:                                                                             */
+/* SSRC_OK                  Succeeded                                                   */
+/* SSRC_NULL_POINTER        When one of pSSRC_Instance, pSSRC_AudioIn or pSSRC_AudioOut */
+/*                          is NULL                                                     */
+/*                                                                                      */
+/* NOTES:                                                                               */
+/*                                                                                      */
+/****************************************************************************************/
+
+SSRC_ReturnStatus_en SSRC_Process(  SSRC_Instance_t* pSSRC_Instance,
+                                    LVM_INT16*       pSSRC_AudioIn,
+                                    LVM_INT16*       pSSRC_AudioOut);
+
+/****************************************************************************************/
+
+#endif /* __SSRC_H__ */
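
A plausible call sequence for the SSRC, pieced together from the prototypes above; the LVM_FS_* and LVM_STEREO constants are assumed to come from LVM_Types.h:

    #include <stdlib.h>
    #include "SSRC.h"

    static SSRC_ReturnStatus_en resample_44k_to_16k_block(LVM_INT16 *pAudioIn,
                                                          LVM_INT16 *pAudioOut)
    {
        SSRC_Instance_t      instance;
        SSRC_Params_t        params;
        SSRC_Scratch_t      *pScratch;
        LVM_INT32            scratchSize;
        LVM_INT16           *pInputInScratch, *pOutputInScratch;
        SSRC_ReturnStatus_en status;

        params.SSRC_Fs_In        = LVM_FS_44100;   /* assumed LVM_Types.h constant */
        params.SSRC_Fs_Out       = LVM_FS_16000;   /* assumed LVM_Types.h constant */
        params.SSRC_NrOfChannels = LVM_STEREO;     /* assumed LVM_Types.h constant */

        /* Fill in NrSamplesIn/NrSamplesOut for the default 40 ms block size */
        status = SSRC_GetNrSamples(SSRC_NR_SAMPLES_DEFAULT, &params);
        if (status != SSRC_OK) return status;

        status = SSRC_GetScratchSize(&params, &scratchSize);
        if (status != SSRC_OK) return status;

        /* malloc returns memory at least SSRC_SCRATCH_ALIGNMENT (4 bytes) aligned */
        pScratch = (SSRC_Scratch_t *)malloc((size_t)scratchSize);
        if (pScratch == NULL) return SSRC_NULL_POINTER;

        status = SSRC_Init(&instance, pScratch, &params,
                           &pInputInScratch, &pOutputInScratch);
        if (status == SSRC_OK)
        {
            /* pAudioIn holds params.NrSamplesIn sample pairs, pAudioOut has room
               for params.NrSamplesOut sample pairs */
            status = SSRC_Process(&instance, pAudioIn, pAudioOut);
        }

        free(pScratch);
        return status;
    }
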
diff --git a/libvideoeditor/vss/common/inc/gLVAudioResampler.h b/libvideoeditor/vss/common/inc/gLVAudioResampler.h
new file mode 100755
index 0000000..9d3d63f
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/gLVAudioResampler.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+#ifndef GLVAUDIORESAMPLER_H
+#define GLVAUDIORESAMPLER_H
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#ifndef int8_t
+#define  int8_t signed char
+#endif
+
+#ifndef int32_t
+#define int32_t long int
+#endif
+
+#ifndef uint32_t
+#define uint32_t unsigned long int
+#endif
+
+#ifndef int16_t
+#define int16_t signed short
+#endif
+
+#ifndef uint16_t
+#define uint16_t unsigned short
+#endif
+
+#ifndef status_t
+#define status_t long int
+#endif
+
+    static const int kNumPhaseBits = 30;
+    // phase mask for fraction
+    static const uint32_t kPhaseMask = (1<<30)-1;
+    // multiplier to calculate fixed point phase increment
+    static const uint32_t kPhaseMultiplier = (1 << 30);
+
+    static const int kNumInterpBits = 15;
+
+    // bits to shift the phase fraction down to avoid overflow
+    static const int kPreInterpShift = 15; //=kNumPhaseBits - kNumInterpBits;
+
+typedef struct Buffer {
+            void*       raw;
+            short*      i16;
+            int8_t*     i8;
+            long frameCount;
+        }Buffer;
+
+typedef enum src_quality {
+            DEFAULT=0,
+            LOW_QUALITY=1,
+            MED_QUALITY=2,
+            HIGH_QUALITY=3
+        }src_quality;
+
+typedef struct LVAudioResampler
+{
+
+    int32_t mBitDepth;
+    int32_t mChannelCount;
+    int32_t mSampleRate;
+    int32_t mInSampleRate;
+    Buffer mBuffer;
+    int16_t mVolume[2];
+    int16_t mTargetVolume[2];
+    int mFormat;
+    long mInputIndex;
+    int32_t mPhaseIncrement;
+    uint32_t mPhaseFraction;
+    int mX0L;
+    int mX0R;
+    int32_t kPreInterpShift;
+    int32_t kNumInterpBits;
+    src_quality mQuality;
+}LVAudioResampler;
+
+
+int32_t LVAudioResamplerCreate(int bitDepth, int inChannelCount,
+        int32_t sampleRate, int quality);
+void LVAudiosetSampleRate(int32_t context,int32_t inSampleRate);
+void LVAudiosetVolume(int32_t context, int16_t left, int16_t right) ;
+
+void LVAudioresample_LowQuality(int16_t* out, int16_t* input, long outFrameCount, int32_t context);
+void LVResampler_LowQualityInit(int bitDepth, int inChannelCount,
+        int32_t sampleRate, int32_t context);
+
+
+void MonoTo2I_16( const short *src,
+                        short *dst,
+                        short n);
+
+void From2iToMono_16( const short *src,
+                            short *dst,
+                            short n);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* GLVAUDIORESAMPLER_H */
+
+
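
A sketch of how the low-quality resampler might be driven, based only on the prototypes above; the create/init/set-rate ordering and the 0x1000 unity-volume value are assumptions:

    #include "gLVAudioResampler.h"

    static void resample_sketch(int16_t *pIn, int16_t *pOut, long outFrameCount,
                                int32_t inSampleRate, int32_t outSampleRate)
    {
        /* The returned int32_t is treated as an opaque resampler context handle */
        int32_t context = LVAudioResamplerCreate(16 /*bitDepth*/, 2 /*channels*/,
                                                 outSampleRate, LOW_QUALITY);

        LVResampler_LowQualityInit(16, 2, outSampleRate, context);
        LVAudiosetSampleRate(context, inSampleRate);
        LVAudiosetVolume(context, 0x1000, 0x1000);   /* assumed unity gain */

        /* Produces outFrameCount stereo frames in pOut from pIn */
        LVAudioresample_LowQuality(pOut, pIn, outFrameCount, context);
    }
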
diff --git a/libvideoeditor/vss/common/inc/marker.h b/libvideoeditor/vss/common/inc/marker.h
new file mode 100755
index 0000000..d63ba8c
--- /dev/null
+++ b/libvideoeditor/vss/common/inc/marker.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARKER_H
+#define MARKER_H
+
+#define ADD_CODE_MARKER_FUN(m_condition)                    \
+    if ( !(m_condition) )                                   \
+    {                                                       \
+        __asm__ volatile (                                  \
+            ".word     0x21614062\n\t"      /* '!a@b' */    \
+            ".word     0x47712543\n\t"      /* 'Gq%C' */    \
+            ".word     0x5F5F5F43\n\t"      /* '___C' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x245F5F5F"          /* '$___' */    \
+        );                                                  \
+    }
+
+#define ADD_TEXT_MARKER_FUN(m_condition)                    \
+    if ( !(m_condition) )                                   \
+    {                                                       \
+        __asm__ volatile (                                  \
+            ".word     0x21614062\n\t"      /* '!a@b' */    \
+            ".word     0x47712543\n\t"      /* 'Gq%C' */    \
+            ".word     0x5F5F5F54\n\t"      /* '___T' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x5F5F5F5F\n\t"      /* '____' */    \
+            ".word     0x245F5F5F"          /* '$___' */    \
+        );                                                  \
+    }
+
+#endif
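
Usage note: the condition passed to these macros is expected to always hold at runtime, so the embedded .word pattern is never executed; it only serves as a searchable signature in the object code of ARM/GCC builds. A minimal sketch:

    #include "marker.h"

    void tag_this_build(void)
    {
        /* Always-true condition: the marker words are emitted into the code
           section but execution never reaches them. */
        ADD_CODE_MARKER_FUN(1)
    }
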
diff --git a/libvideoeditor/vss/inc/M4EXIFC_CommonAPI.h b/libvideoeditor/vss/inc/M4EXIFC_CommonAPI.h
new file mode 100755
index 0000000..ae21d95
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4EXIFC_CommonAPI.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4EXIFC_CommonAPI.h
+ * @brief    EXIF common data header
+ * @note    The types, structures and macros defined in this file allow reading
+ *            and writing EXIF JPEG images compliant with the EXIF 2.2 specification
+ ******************************************************************************
+*/
+
+
+#ifndef __M4_EXIF_COMMON_API_H__
+#define __M4_EXIF_COMMON_API_H__
+
+#include "M4TOOL_VersionInfo.h"
+#include "M4Common_types.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Types.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_CoreID.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ ************************************************************************
+ * type M4EXIFC_Context
+ ************************************************************************
+*/
+typedef M4OSA_Void*    M4EXIFC_Context;
+
+/**
+ ******************************************************************************
+ * Errors & Warnings
+ ******************************************************************************
+*/
+
+#define M4EXIFC_NO_ERR              0x00000000    /**< no error */
+#define M4EXIFC_ERR_PARAMETER       0x00000001    /**< invalid parameter */
+#define M4EXIFC_ERR_ALLOC           0x00000002    /**< allocation error */
+#define M4EXIFC_ERR_BAD_CONTEXT     0x00000003    /**< invalid context */
+#define M4EXIFC_ERR_NOT_COMPLIANT   0x00000004    /**< the image in the buffer is not
+                                                       JPEG compliant */
+#define M4EXIFC_ERR_NO_APP_FOUND    0x00000005    /**< the JPEG image does not contain any
+                                                        EXIF 2.2 compliant APP1 section */
+#define M4EXIFC_WAR_NO_THUMBNAIL    0x00000006    /**< the Exif part does not contain any
+                                                        thumbnail */
+#define M4EXIFC_ERR_APP_TRUNCATED   0x00000007    /**< The APP1 section in input buffer is
+                                                        not complete */
+
+
+/**
+ ******************************************************************************
+ * structure    M4EXIFC_BasicTags
+ * @brief        This structure stores the basic tags values.
+ * @note        This Exif reader focuses on a set of "Entry Tags".
+ *                This structure contains the corresponding "Entry Values" of these tags.
+ *                M4OSA_Char* fields of the structure are null-terminated strings.
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_Int32        width;                /**< image width in pixels */
+    M4OSA_Int32        height;               /**< image height in pixels */
+    M4OSA_Char        *creationDateTime;     /**< date and time original image was generated */
+    M4OSA_Char        *lastChangeDateTime;   /**< file change date and time */
+    M4OSA_Char        *description;          /**< image title */
+    M4OSA_Char        *make;                 /**< manufacturer of image input equipment */
+    M4OSA_Char        *model;                /**< model of image input equipment */
+    M4OSA_Char        *software;             /**< software used */
+    M4OSA_Char        *artist;               /**< person who created the image */
+    M4OSA_Char        *copyright;            /**< copyright holder */
+    M4COMMON_Orientation orientation;        /**< orientation of image */
+    M4OSA_Int32        thumbnailSize;        /**< size of the thumbnail */
+    M4OSA_UInt8        *thumbnailImg;        /**< pointer to the thumbnail in main image buffer*/
+    M4OSA_Char        *latitudeRef;          /**< latitude reference */
+    M4COMMON_Location latitude;              /**< latitude */
+    M4OSA_Char        *longitudeRef;         /**< longitude reference */
+    M4COMMON_Location longitude;             /**< longitude */
+
+} M4EXIFC_BasicTags;
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4EXIFC_getVersion    (M4_VersionInfo *pVersion)
+ * @brief    get the version numbers of the exif library.
+ * @note    This function retrieves the version numbers in a structure.
+ * @param    pVersion:    (OUT)        the structure containing version numbers
+ * @return    M4NO_ERROR:                there is no error
+ * @return    M4EXIFC_ERR_PARAMETER:        (Debug only) the parameter is M4OSA_NULL.
+ ******************************************************************************
+*/
+M4OSA_ERR M4EXIFC_getVersion (M4_VersionInfo *pVersion);
+
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus*/
+#endif /* __M4_EXIF_COMMON_API_H__ */
+
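
A minimal sketch of querying the library version; the m_major/m_minor/m_revision field names of M4_VersionInfo are assumed from M4TOOL_VersionInfo.h:

    #include <stdio.h>
    #include "M4EXIFC_CommonAPI.h"

    static void print_exif_lib_version(void)
    {
        M4_VersionInfo version;
        M4OSA_ERR      err = M4EXIFC_getVersion(&version);

        if (M4NO_ERROR == err)
        {
            printf("EXIF lib %lu.%lu.%lu\n",
                   (unsigned long)version.m_major,
                   (unsigned long)version.m_minor,
                   (unsigned long)version.m_revision);
        }
    }
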
diff --git a/libvideoeditor/vss/inc/M4PTO3GPP_API.h b/libvideoeditor/vss/inc/M4PTO3GPP_API.h
new file mode 100755
index 0000000..86c6b93
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4PTO3GPP_API.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4PTO3GPP_API.h
+ * @brief    The Pictures to 3GPP Converter.
+ * @note    M4PTO3GPP produces 3GPP compliant audio/video files
+ *            by combining an AMR NB audio file and raw pictures into an MPEG-4/H263 3GPP file.
+ ******************************************************************************
+ */
+
+#ifndef __M4PTO3GPP_API_H__
+#define __M4PTO3GPP_API_H__
+
+/**
+ *    OSAL basic types and errors */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+/**
+ *    OSAL types for file access */
+#include "M4OSA_FileReader.h"
+#include "M4OSA_FileWriter.h"
+
+/**
+ *    Definition of M4_VersionInfo */
+#include "M4TOOL_VersionInfo.h"
+
+/**
+ * Definitions of M4VIFI_ImagePlane */
+#include "M4VIFI_FiltersAPI.h"
+
+#include "M4VE_API.h"
+
+/**
+ * Common definitions of video editing components */
+#include "M4_VideoEditingCommon.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ *    Public type of the M4PTO3GPP context */
+typedef M4OSA_Void* M4PTO3GPP_Context;
+
+
+/**
+ ******************************************************************************
+ * enum        M4PTO3GPP_ReplaceAudioMode
+ * @brief    This enumeration defines the way the audio is managed if it is shorter than the video
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4PTO3GPP_kAudioPaddingMode_None = 0,  /**< Audio track is kept shorter than the video track*/
+    M4PTO3GPP_kAudioPaddingMode_Silence,   /**< If audio is shorter, silence is added at the end*/
+    M4PTO3GPP_kAudioPaddingMode_Loop       /**< If audio is shorter, loop back to the beginning
+                                                when the whole track has been processed */
+} M4PTO3GPP_AudioPaddingMode;
+
+
+/**
+ ******************************************************************************
+ * struct    M4PTO3GPP_OutputFileMaxSize
+ * @brief    Defines the maximum size of the 3GPP file produced by the PTO3GPP
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4PTO3GPP_k50_KB,            /**< Output 3GPP file size is limited to 50 Kbytes  */
+    M4PTO3GPP_k75_KB,            /**< Output 3GPP file size is limited to 75 Kbytes  */
+    M4PTO3GPP_k100_KB,           /**< Output 3GPP file size is limited to 100 Kbytes */
+    M4PTO3GPP_k150_KB,           /**< Output 3GPP file size is limited to 150 Kbytes */
+    M4PTO3GPP_k200_KB,           /**< Output 3GPP file size is limited to 200 Kbytes */
+    M4PTO3GPP_k300_KB,           /**< Output 3GPP file size is limited to 300 Kbytes */
+    M4PTO3GPP_k400_KB,           /**< Output 3GPP file size is limited to 400 Kbytes */
+    M4PTO3GPP_k500_KB,           /**< Output 3GPP file size is limited to 500 Kbytes */
+    M4PTO3GPP_kUNLIMITED=-1      /**< Output 3GPP file size is not limited           */
+} M4PTO3GPP_OutputFileMaxSize;
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR (M4PTO3GPP_PictureCallbackFct) (M4OSA_Void* pPictureCtxt,
+ * M4VIFI_ImagePlane* pImagePlanes, M4OSA_Double* pPictureDuration);
+ * @brief    The integrator must implement a function following this prototype.
+ *            Its goal is to feed the PTO3GPP with YUV420 pictures.
+ *
+ * @note    This function is given to the PTO3GPP in the M4PTO3GPP_Params structure
+ * @param    pPictureCtxt (IN) The integrator's own context
+ * @param    pImagePlanes(IN/OUT) Pointer to an array of three valid image planes
+ * @param    pPictureDuration(OUT) Duration of the returned picture
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4PTO3GPP_WAR_LAST_PICTURE: The returned image is the last one
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null (debug only)
+ ******************************************************************************
+ */
+typedef M4OSA_ERR (M4PTO3GPP_PictureCallbackFct) (M4OSA_Void* pPictureCtxt,
+                                                  M4VIFI_ImagePlane* pImagePlanes,
+                                                  M4OSA_Double* pPictureDuration);
+
+
+/**
+ ******************************************************************************
+ * struct    M4PTO3GPP_Params
+ * @brief    M4PTO3GPP parameters definition
+ ******************************************************************************
+ */
+typedef struct
+{
+    /**< Output video compression format, H263 or MPEG4 */
+    M4VIDEOEDITING_VideoFormat      OutputVideoFormat;
+    /**< Output frame size : SQCIF to VGA*/
+    M4VIDEOEDITING_VideoFrameSize   OutputVideoFrameSize;
+    /**< Targeted Output bit-rate, see enum*/
+    M4VIDEOEDITING_Bitrate          OutputVideoBitrate;
+    /**< Maximum size of the output 3GPP file, see enum */
+    M4PTO3GPP_OutputFileMaxSize     OutputFileMaxSize;
+    /**< Callback function to be called by the PTO3GPP to get the input pictures*/
+    M4PTO3GPP_PictureCallbackFct*   pPictureCallbackFct;
+    /**< Context to be given as third argument of the picture callback function call*/
+    M4OSA_Void*                     pPictureCallbackCtxt;
+    /**< File descriptor of the input audio track file */
+    M4OSA_Void*                     pInputAudioTrackFile;
+    /**< Format of the audio file */
+    M4VIDEOEDITING_FileType         AudioFileFormat;
+    /**< Type of processing to apply when audio is shorter than video*/
+    M4PTO3GPP_AudioPaddingMode      AudioPaddingMode;
+    /**< File descriptor of the output 3GPP file */
+    M4OSA_Void*                     pOutput3gppFile;
+     /**< File descriptor of the temporary file to store metadata ("moov.bin") */
+    M4OSA_Void*                     pTemporaryFile;
+    /**< Number of input YUV frames to encode */
+    M4OSA_UInt32                    NbVideoFrames;
+} M4PTO3GPP_Params;
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo);
+ * @brief    Get the M4PTO3GPP version.
+ * @note    Can be called anytime. Do not need any context.
+ * @param    pVersionInfo        (OUT) Pointer to a version info structure
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Init(M4PTO3GPP_Context* pContext);
+ * @brief    Initializes the M4PTO3GPP (allocates an execution context).
+ * @note
+ * @param    pContext            (OUT) Pointer on the M4PTO3GPP context to allocate
+ * @param   pFileReadPtrFct     (IN) Pointer to OSAL file reader functions
+ * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        The context structure could not be allocated
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_Init(M4PTO3GPP_Context* pContext, M4OSA_FileReadPointer* pFileReadPtrFct,
+                         M4OSA_FileWriterPointer* pFileWritePtrFct);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams);
+ * @brief    Set the M4PTO3GPP input and output files.
+ * @note    It opens the input file, but the output file may not be created yet.
+ * @param    pContext            (IN) M4PTO3GPP context
+ * @param    pParams                (IN) Pointer to the parameters for the PTO3GPP.
+ * @note    The pointed structure can be de-allocated after this function returns because
+ *            it is internally copied by the PTO3GPP
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        M4PTO3GPP is not in an appropriate state
+ *                                for this function to be called
+ * @return    M4ERR_ALLOC:        There is no more available memory
+ * @return    ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263 The output video frame
+ *                                size parameter is incompatible with H263 encoding
+ * @return    ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT
+ *                          The output video format parameter is undefined
+ * @return    ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE
+ *                        The output video bit-rate parameter is undefined
+ * @return    ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE
+ *                        The output video frame size parameter is undefined
+ * @return    ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE
+ *                          The output file size parameter is undefined
+ * @return    ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING
+ *                        The output audio padding parameter is undefined
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext);
+ * @brief    Perform one step of transcoding.
+ * @note
+ * @param    pContext            (IN) M4PTO3GPP context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL
+ * @return    M4ERR_STATE:        M4PTO3GPP is not in an appropriate state
+ *                                for this function to be called
+ * @return    M4PTO3GPP_WAR_END_OF_PROCESSING:    Encoding completed
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext);
+ * @brief    Finish the M4PTO3GPP transcoding.
+ * @note    The output 3GPP file is ready to be played after this call
+ * @param    pContext            (IN) M4PTO3GPP context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL
+ * @return    M4ERR_STATE:        M4PTO3GPP is not in an appropriate state
+ *                                for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext);
+ * @brief    Free all resources used by the M4PTO3GPP.
+ * @note    The context is no more valid after this call
+ * @param    pContext            (IN) M4PTO3GPP context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext);
+
+
+M4OSA_ERR M4PTO3GPP_RegisterExternalVideoEncoder(M4PTO3GPP_Context pContext,
+                                                 M4VE_EncoderType encoderType,
+                                                 M4VE_Interface*    pEncoderInterface,
+                                                 M4OSA_Void* pUserData);
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4PTO3GPP_API_H__ */
+
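
An end-to-end sketch of the documented Init/Open/Step/Close/CleanUp sequence; passing file paths as the M4OSA_Void* descriptors and the specific M4VIDEOEDITING_k* enum values used below are assumptions of this sketch:

    #include "M4PTO3GPP_API.h"
    #include "M4PTO3GPP_ErrorCodes.h"

    /* Hypothetical picture source: fills the three YUV420 planes and the duration */
    static M4OSA_ERR examplePictureCallback(M4OSA_Void* pPictureCtxt,
                                            M4VIFI_ImagePlane* pImagePlanes,
                                            M4OSA_Double* pPictureDuration)
    {
        M4OSA_UInt32* pRemaining = (M4OSA_UInt32*)pPictureCtxt;

        /* ... fill pImagePlanes[0..2] with the next YUV420 picture here ... */
        *pPictureDuration = 100.0;                 /* e.g. 100 ms per picture */

        (*pRemaining)--;
        return (*pRemaining == 0) ? M4PTO3GPP_WAR_LAST_PICTURE : M4NO_ERROR;
    }

    static M4OSA_ERR convertPicturesTo3gpp(M4OSA_FileReadPointer*   pFileReadPtr,
                                           M4OSA_FileWriterPointer* pFileWritePtr)
    {
        M4PTO3GPP_Context context = M4OSA_NULL;
        M4PTO3GPP_Params  params;
        M4OSA_UInt32      remainingPictures = 50;
        M4OSA_ERR         err;

        err = M4PTO3GPP_Init(&context, pFileReadPtr, pFileWritePtr);
        if (M4NO_ERROR != err) return err;

        params.OutputVideoFormat    = M4VIDEOEDITING_kMPEG4;        /* assumed enum value */
        params.OutputVideoFrameSize = M4VIDEOEDITING_kQCIF;         /* assumed enum value */
        params.OutputVideoBitrate   = M4VIDEOEDITING_k128_KBPS;     /* assumed enum value */
        params.OutputFileMaxSize    = M4PTO3GPP_kUNLIMITED;
        params.pPictureCallbackFct  = examplePictureCallback;
        params.pPictureCallbackCtxt = &remainingPictures;
        params.pInputAudioTrackFile = (M4OSA_Void*)"/sdcard/in.amr";   /* assumed path form */
        params.AudioFileFormat      = M4VIDEOEDITING_kFileType_AMR;    /* assumed enum value */
        params.AudioPaddingMode     = M4PTO3GPP_kAudioPaddingMode_Silence;
        params.pOutput3gppFile      = (M4OSA_Void*)"/sdcard/out.3gp";  /* assumed path form */
        params.pTemporaryFile       = (M4OSA_Void*)"/sdcard/moov.bin"; /* assumed path form */
        params.NbVideoFrames        = 50;

        err = M4PTO3GPP_Open(context, &params);

        while (M4NO_ERROR == err)
        {
            err = M4PTO3GPP_Step(context);  /* M4PTO3GPP_WAR_END_OF_PROCESSING when done */
        }
        if (M4PTO3GPP_WAR_END_OF_PROCESSING == err)
        {
            err = M4PTO3GPP_Close(context); /* finalizes the 3GPP file */
        }

        M4PTO3GPP_CleanUp(context);
        return err;
    }
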
diff --git a/libvideoeditor/vss/inc/M4PTO3GPP_ErrorCodes.h b/libvideoeditor/vss/inc/M4PTO3GPP_ErrorCodes.h
new file mode 100755
index 0000000..555a7d3
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4PTO3GPP_ErrorCodes.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4PTO3GPP_ErrorCodes.h
+ * @brief    Picture to 3gpp Service error definitions.
+ * @note
+ ******************************************************************************
+ */
+
+#ifndef __M4PTO3GPP_ErrorCodes_H__
+#define __M4PTO3GPP_ErrorCodes_H__
+
+/**
+ *    OSAL basic types and errors */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+/**
+ *    OSAL core ID definitions */
+#include "M4OSA_CoreID.h"
+
+
+/**
+ *    The output video format parameter is undefined */
+#define ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT    M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0001 )
+/**
+ *    The output video frame size parameter is undefined */
+#define ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE        \
+    M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0002 )
+/**
+ *    The output video bit-rate parameter is undefined */
+#define ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE           \
+    M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0003 )
+/**
+ *    The output video frame size parameter is incompatible with H263 encoding */
+#define ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263        \
+    M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0004 )
+/**
+ *    The file size is undefined */
+#define ERR_PTO3GPP_INVALID_FILE_SIZE                M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0005 )
+/**
+ * The input audio file contains a track format not handled by PTO3GPP */
+#define ERR_PTO3GPP_UNHANDLED_AUDIO_TRACK_INPUT_FILE         \
+    M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0006 )
+/**
+ *    The output audio format parameter is undefined */
+#define ERR_PTO3GPP_UNDEFINED_OUTPUT_AUDIO_FORMAT    M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0007 )
+
+/**
+ *    The AMR decoder initialization failed */
+#define ERR_PTO3GPP_AMR_DECODER_INIT_ERROR           M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0020 )
+/**
+ *    The AMR decoder failed */
+#define ERR_PTO3GPP_AMR_DECODE_ERROR                 M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0021 )
+/**
+ *    The AMR decoder cleanup failed */
+#define ERR_PTO3GPP_AMR_DECODER_DESTROY_ERROR        M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0022 )
+
+/**
+ *    The video encoder initialization failed */
+#define ERR_PTO3GPP_VIDEO_ENCODER_INIT_ERROR         M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0023 )
+/**
+ *    The video encoder decoding failed */
+#define ERR_PTO3GPP_VIDEO_ENCODE_ERROR               M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0024 )
+/**
+ *    The video encoder cleanup failed */
+#define ERR_PTO3GPP_VIDEO_ENCODER_DESTROY_ERROR      M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0025 )
+
+/**
+ *    The output file size parameter is undefined */
+#define ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE       M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0026 )
+
+/**
+ *    The Encoding is completed */
+#define M4PTO3GPP_WAR_END_OF_PROCESSING              M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0027 )
+
+/**
+ *    The returned picture is the last one */
+#define M4PTO3GPP_WAR_LAST_PICTURE                   M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0028 )
+
+/**
+ *    The output audio padding parameter is undefined */
+#define ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING          M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x0029 )
+
+/**
+ * The video encoder encountered an Access Unit error: very probably a file write error */
+#define ERR_PTO3GPP_ENCODER_ACCES_UNIT_ERROR         M4OSA_ERR_CREATE( M4_ERR, M4PTO3GPP, 0x002A )
+
+#endif /* __M4PTO3GPP_ErrorCodes_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4PTO3GPP_InternalTypes.h b/libvideoeditor/vss/inc/M4PTO3GPP_InternalTypes.h
new file mode 100755
index 0000000..a858cb2
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4PTO3GPP_InternalTypes.h
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4PTO3GPP_InternalTypes.h
+ * @brief    Picture to 3gpp Service internal definitions
+ * @note    This file contains all enum and types not visible to the external world.
+ ******************************************************************************
+ */
+
+
+#ifndef __M4PTO3GPP_INTERNALTYPES_H__
+#define __M4PTO3GPP_INTERNALTYPES_H__
+
+#define M4PTO3GPP_VERSION_MAJOR        3
+#define M4PTO3GPP_VERSION_MINOR        0
+#define M4PTO3GPP_VERSION_REVISION    6
+
+/**
+ *    M4PTO3GPP public API and types */
+#include "M4PTO3GPP_API.h"
+#include "M4_Utils.h"
+
+/**
+ *    Internally used modules */
+
+#include "M4WRITER_common.h"    /* Write 3GPP file    */
+#include "M4READER_Common.h"    /* Read AMR file    */
+#include "M4ENCODER_common.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ ******************************************************************************
+ * enum            M4PTO3GPP_States
+ * @brief        Main state machine of the M4PTO3GPP.
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4PTO3GPP_kState_CREATED         = 0,    /**< M4PTO3GPP_Init has been called */
+    M4PTO3GPP_kState_OPENED          = 1,    /**< M4PTO3GPP_Open has been called */
+    M4PTO3GPP_kState_READY           = 2,    /**< Step can be called */
+    M4PTO3GPP_kState_FINISHED        = 3,    /**< Transcoding is finished */
+    M4PTO3GPP_kState_CLOSED          = 4     /**< Output file has been created */
+}
+M4PTO3GPP_States;
+
+/**
+ ******************************************************************************
+ * enum            M4PTO3GPP_StreamState
+ * @brief        State of a media stream encoding (audio or video).
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4PTO3GPP_kStreamState_NOSTREAM  = 0,    /**< No stream present */
+    M4PTO3GPP_kStreamState_STARTED   = 1,    /**< The stream encoding is in progress */
+    M4PTO3GPP_kStreamState_FINISHED  = 2    /**< The stream has finished encoding */
+}
+M4PTO3GPP_StreamState;
+
+/*
+ * Definition of max AU size */
+#define M4PTO3GPP_VIDEO_MIN_COMPRESSION_RATIO     0.8F    /**< Max AU size will be 0.8 times the
+                                                               YUV4:2:0 frame size */
+#define M4PTO3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO    1.2F /**< Max chunk size will be 1.2 times
+                                                                  the max AU size */
+#define M4PTO3GPP_AUDIO_MAX_AU_SIZE              1000    /**< AAC max AU size seems to be
+                                                              about 850 bytes */
+#define M4PTO3GPP_AUDIO_MAX_CHUNK_SIZE           5000
+
+/**
+ ******************************************************************************
+ * enum            anonymous enum
+ * @brief        enum to keep track of the encoder state
+ ******************************************************************************
+ */
+enum
+{
+    M4PTO3GPP_kNoEncoder,
+    M4PTO3GPP_kEncoderClosed,
+    M4PTO3GPP_kEncoderStopped,
+    M4PTO3GPP_kEncoderRunning
+};
+
+/**
+ ******************************************************************************
+ * structure    M4PTO3GPP_InternalContext
+ * @brief        This structure defines the M4PTO3GPP context (private)
+ * @note        This structure is used for all M4PTO3GPP calls to store the context
+ ******************************************************************************
+ */
+typedef struct
+{
+    /**
+     *    M4PTO3GPP main variables */
+    M4PTO3GPP_States             m_State;            /**< M4PTO3GPP internal state */
+    M4PTO3GPP_Params             m_Params;           /**< M4PTO3GPP parameters, set by the user */
+    M4PTO3GPP_StreamState        m_VideoState;       /**< State of the video encoding */
+    M4PTO3GPP_StreamState        m_AudioState;       /**< State of the audio encoding */
+
+    /**
+     *    OSAL file read/write functions */
+    M4OSA_FileReadPointer*        pOsalFileRead;     /**< OSAL file read functions,
+                                                           to be provided by user */
+    M4OSA_FileWriterPointer*      pOsalFileWrite;    /**< OSAL file write functions,
+                                                          to be provided by user */
+
+    /**
+     *    Reader stuff */
+    M4_AccessUnit*                m_pReaderAudioAU;    /**< Read audio access unit */
+    M4_AudioStreamHandler*        m_pReaderAudioStream;/**< Description of the read audio stream */
+
+    /**
+     *    Writer stuff */
+    M4SYS_AccessUnit            m_WriterVideoAU;       /**< Written video access unit */
+    M4SYS_AccessUnit            m_WriterAudioAU;       /**< Written audio access unit */
+    M4ENCODER_Header*           m_pEncoderHeader;      /**< Sequence header returned by the
+                                                            encoder at encoder create (if any) */
+    M4SYS_StreamDescription*    m_pWriterVideoStream;  /**< Description of the written
+                                                             video stream */
+    M4SYS_StreamDescription*    m_pWriterAudioStream;  /**< Description of the written
+                                                             audio stream */
+    M4WRITER_StreamVideoInfos*  m_pWriterVideoStreamInfo;    /**< Video properties of the written
+                                                               video stream */
+    M4WRITER_StreamAudioInfos*    m_pWriterAudioStreamInfo;   /**< Audio properties of the written
+                                                               audio stream */
+
+    /**
+     *    Contexts of the used modules  */
+    M4OSA_Void*                    m_pAudioReaderContext; /**< Context of the audio reader module*/
+    M4OSA_Void*                    m_p3gpWriterContext;   /**< Context of the 3GP writer module */
+    M4OSA_Void*                    m_pMp4EncoderContext;  /**< Mp4 encoder context */
+    M4OSA_UInt32                   m_eEncoderState;
+
+    /**
+     * Reader Interfaces */
+    M4READER_GlobalInterface*    m_pReaderGlobInt;    /**< Reader common interface, global part */
+    M4READER_DataInterface*      m_pReaderDataInt;     /**< Reader common interface, data part */
+
+    /**
+     * Writer Interfaces */
+    M4WRITER_GlobalInterface*   m_pWriterGlobInt;     /**< Writer common interface, global part */
+    M4WRITER_DataInterface*     m_pWriterDataInt;     /**< Writer common interface, data part */
+
+    /**
+     * Encoder Interfaces */
+    M4ENCODER_GlobalInterface*  m_pEncoderInt;                /**< Encoder common interface */
+    M4OSA_Void*                 m_pEncoderExternalAPI;
+    M4OSA_Void*                 m_pEncoderUserData;
+
+    /**
+     * */
+    M4VIFI_ImagePlane*            pSavedPlane;
+    M4OSA_UInt32                  uiSavedDuration;
+
+    /**
+     *    Video rate control stuff */
+    M4_MediaTime                m_dLastVideoRegulCts; /**< Last time (CTS) the video bitrate
+                                                           regulation has been called */
+    M4_MediaTime                m_mtCts;         /**< Current video cts */
+    M4_MediaTime                m_mtNextCts;     /**< Next video CTS to transcode */
+    M4_MediaTime                m_mtAudioCts;    /**< Current audio cts */
+    M4_MediaTime                m_AudioOffSet;   /**< Audio Offset to add to the cts in loop mode*/
+    M4_MediaTime                m_PrevAudioCts;  /**< Previous audio cts for AAC looping */
+    M4_MediaTime                m_DeltaAudioCts; /**< Delta audio cts for AAC looping */
+    M4OSA_UInt32                m_CurrentFileSize; /**< Current Output file size  */
+    M4OSA_UInt32                m_MaxFileSize;     /**< Max Output file size  */
+    M4OSA_Bool                  m_IsLastPicture;   /**< A boolean that signals to the encoder that
+                                                       this is the last frame to be encoded*/
+    M4OSA_Bool                  m_bLastInternalCallBack;
+    M4OSA_UInt32                m_NbCurrentFrame;  /**< Index of the current YUV frame encoded */
+
+    /**
+     *    Audio padding mode */
+    M4OSA_Bool                    m_bAudioPaddingSilence;  /**< A boolean that signals that audio
+                                                                AU will be padded by silence */
+
+    struct
+    {
+        M4VE_Interface*    pEncoderInterface;
+        M4OSA_Void*        pUserData;
+        M4OSA_Bool        registered;
+    } registeredExternalEncs[M4VE_kEncoderType_NB];
+} M4PTO3GPP_InternalContext;
+
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ *                                  M4VIFI_ImagePlane* pPlaneOut)
+ * @brief    Call an external callback to get the picture to encode
+ * @note    It is called by the video encoder
+ * @param    pContext    (IN) VPP context, which actually is the M4PTO3GPP
+ *                            internal context in our case
+ * @param    pPlaneIn    (IN) Contains the image
+ * @param    pPlaneOut    (IN/OUT) Pointer to an array of 3 planes that will contain the
+ *                        output YUV420 image read with the m_pPictureCallbackFct
+ * @return    M4NO_ERROR:    No error
+ * @return    Any error returned by an underlying module
+ ******************************************************************************
+ */
+M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ M4VIFI_ImagePlane* pPlaneOut);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __M4PTO3GPP_INTERNALTYPES_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_API.h b/libvideoeditor/vss/inc/M4VSS3GPP_API.h
new file mode 100755
index 0000000..f6f8daa
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_API.h
@@ -0,0 +1,961 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4VSS3GPP_API_H__
+#define __M4VSS3GPP_API_H__
+
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_API.h
+ * @brief   Video Studio Service 3GPP public API.
+ * @note    VSS allows editing 3GPP files.
+ *          It is a straightforward and fully synchronous API.
+ ******************************************************************************
+ */
+
+/**
+ *  OSAL basic types and errors */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+/**
+ *  OSAL types for file access */
+#include "M4OSA_FileReader.h"
+#include "M4OSA_FileWriter.h"
+
+/**
+ *  Definition of M4_VersionInfo */
+#include "M4TOOL_VersionInfo.h"
+
+/**
+ * Image planes definition */
+#include "M4VIFI_FiltersAPI.h"
+
+/**
+ * Common definitions of video editing components */
+#include "M4_VideoEditingCommon.h"
+
+
+#include "M4VD_HW_API.h"
+#include "M4VE_API.h"
+
+#include "M4ENCODER_AudioCommon.h"
+#include "M4AD_Common.h"
+#include "M4DA_Types.h"
+
+/**
+ * Extended API (xVSS) */
+#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
+#include "M4VSS3GPP_Extended_API.h"
+#endif
+
+//#include "M4VD_HW_API.h"
+//#include "M4VE_API.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+/**
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ *
+ *      Edition Feature
+ *
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ */
+
+/**
+ *  Public type of the VSS edit context */
+typedef M4OSA_Void* M4VSS3GPP_EditContext;
+
+
+/**
+ ******************************************************************************
+ * enum     M4VSS3GPP_VideoEffectType
+ * @brief   This enumeration defines the video effect types of the VSS3GPP
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4VSS3GPP_kVideoEffectType_None           = 0,  /**< No video effect */
+    M4VSS3GPP_kVideoEffectType_FadeFromBlack  = 8,  /**< Intended for begin effect */
+    M4VSS3GPP_kVideoEffectType_CurtainOpening = 9,  /**< Intended for begin effect */
+    M4VSS3GPP_kVideoEffectType_FadeToBlack    = 16, /**< Intended for end effect */
+    M4VSS3GPP_kVideoEffectType_CurtainClosing = 17, /**< Intended for end effect */
+    M4VSS3GPP_kVideoEffectType_External       = 256 /**< External effect function is used */
+    /* reserved 256 + n */                          /**< External effect number n */
+
+} M4VSS3GPP_VideoEffectType;
+
+
+/**
+ ******************************************************************************
+ * enum     M4VSS3GPP_AudioEffectType
+ * @brief   This enumeration defines the audio effect types of the VSS3GPP
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4VSS3GPP_kAudioEffectType_None    = 0,
+    M4VSS3GPP_kAudioEffectType_FadeIn  = 8, /**< Intended for begin effect */
+    M4VSS3GPP_kAudioEffectType_FadeOut = 16 /**< Intended for end effect */
+
+} M4VSS3GPP_AudioEffectType;
+
+
+/**
+ ******************************************************************************
+ * enum     M4VSS3GPP_VideoTransitionType
+ * @brief   This enumeration defines the video effect that can be applied during a transition.
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4VSS3GPP_kVideoTransitionType_None      = 0,
+    M4VSS3GPP_kVideoTransitionType_CrossFade = 1,
+    M4VSS3GPP_kVideoTransitionType_External  = 256
+    /* reserved 256 + n */                          /**< External transition number n */
+
+} M4VSS3GPP_VideoTransitionType;
+
+
+/**
+ ******************************************************************************
+ * enum     M4VSS3GPP_AudioTransitionType
+ * @brief   This enumeration defines the audio effect that can be applied during a transition.
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4VSS3GPP_kAudioTransitionType_None = 0,
+    M4VSS3GPP_kAudioTransitionType_CrossFade
+
+} M4VSS3GPP_AudioTransitionType;
+
+
+/**
+ ******************************************************************************
+ * struct   M4VSS3GPP_ExternalProgress
+ * @brief   This structure contains information provided to the external Effect
+ *          and Transition functions
+ * @note    The uiProgress value should be enough for most cases
+ ******************************************************************************
+ */
+typedef struct
+{
+    /**< Progress of the Effect or the Transition, from 0 to 1000 (one thousand) */
+    M4OSA_UInt32    uiProgress;
+    /**< Index of the current clip (first clip in case of a Transition), from 0 to N */
+    //M4OSA_UInt8     uiCurrentClip;
+    /**< Current time, in milliseconds, in the current clip time-line */
+    M4OSA_UInt32    uiClipTime;
+    /**< Current time, in milliseconds, in the output clip time-line */
+    M4OSA_UInt32    uiOutputTime;
+    M4OSA_Bool        bIsLast;
+
+} M4VSS3GPP_ExternalProgress;
+
+
+/**
+ ************************************************************************
+ * enum     M4VSS3GPP_codecType
+ * @brief    This enum defines the codec types used to create interfaces
+ * @note    This enum is used internally by the VSS3GPP services to identify
+ *            a currently supported codec interface. Each codec is
+ *            registered with one of these types associated to it.
+ *            When a codec instance is needed, this type is used to
+ *            identify and retrieve its interface.
+ *            This list can be extended to support other codecs.
+ ************************************************************************
+ */
+typedef enum
+{
+    /* Video Decoder Types */
+    M4VSS3GPP_kVideoDecMPEG4 = 0,
+    M4VSS3GPP_kVideoDecH264,
+
+    /* Video Encoder Types */
+    M4VSS3GPP_kVideoEncMPEG4,
+    M4VSS3GPP_kVideoEncH263,
+    M4VSS3GPP_kVideoEncH264,
+
+    /* Audio Decoder Types */
+    M4VSS3GPP_kAudioDecAMRNB,
+    M4VSS3GPP_kAudioDecAAC,
+    M4VSS3GPP_kAudioDecMP3,
+
+    /* Audio Encoder Types */
+    M4VSS3GPP_kAudioEncAMRNB,
+    M4VSS3GPP_kAudioEncAAC,
+
+    /* Number of codecs; keep it as the last enum entry, before the invalid type */
+    M4VSS3GPP_kCodecType_NB,
+    /* invalid codec type */
+    M4VSS3GPP_kCodecTypeInvalid = 255
+
+} M4VSS3GPP_codecType;
+
+
+/**
+ ******************************************************************************
+ * prototype    M4VSS3GPP_editVideoEffectFct
+ * @brief       Begin and End video effect functions implemented by the integrator
+ *              must match this prototype.
+ * @note        The function is provided with the original image of the clip.
+ *              It must apply the video effect to build the output image.
+ *              The progress of the effect is given, on a scale from 0 to 1000.
+ *              When the effect function is called, all the image plane structures
+ *              and buffers are valid and owned by the VSS 3GPP.
+ *
+ * @param   pFunctionContext    (IN) The function context, previously set by the integrator
+ * @param   pInputPlanes        (IN) Input YUV420 image: pointer to an array of three valid
+                                     image planes (Y, U and V)
+ * @param   pOutputPlanes       (IN/OUT) Output (filtered) YUV420 image: pointer to an array
+                                         of three valid image planes (Y, U and V)
+ * @param   pProgress           (IN) Set of information about the video transition progress.
+ * @param   uiExternalEffectId  (IN) Which effect function should be used (for external effects)
+ *
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+typedef M4OSA_ERR (*M4VSS3GPP_editVideoEffectFct)
+(
+    M4OSA_Void *pFunctionContext,
+    M4VIFI_ImagePlane *pInputPlanes,
+    M4VIFI_ImagePlane *pOutputPlanes,
+    M4VSS3GPP_ExternalProgress *pProgress,
+    M4OSA_UInt32 uiExternalEffectId
+);
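To make the callback contract concrete, here is a minimal sketch of an external video effect matching M4VSS3GPP_editVideoEffectFct. It darkens the luma plane in proportion to pProgress->uiProgress and copies chroma unchanged; the function name and the choice of effect are illustrative only, not part of the VSS 3GPP API.

static M4OSA_ERR ExampleDimToBlackEffect(M4OSA_Void *pFunctionContext,
                                         M4VIFI_ImagePlane *pInputPlanes,
                                         M4VIFI_ImagePlane *pOutputPlanes,
                                         M4VSS3GPP_ExternalProgress *pProgress,
                                         M4OSA_UInt32 uiExternalEffectId)
{
    M4OSA_UInt32 plane, x, y;

    (void)pFunctionContext;     /* Unused in this sketch */
    (void)uiExternalEffectId;

    for (plane = 0; plane < 3; plane++) {       /* Y, U, V */
        M4VIFI_UInt8 *pSrc = pInputPlanes[plane].pac_data  + pInputPlanes[plane].u_topleft;
        M4VIFI_UInt8 *pDst = pOutputPlanes[plane].pac_data + pOutputPlanes[plane].u_topleft;

        for (y = 0; y < pOutputPlanes[plane].u_height; y++) {
            for (x = 0; x < pOutputPlanes[plane].u_width; x++) {
                if (0 == plane) {
                    /* Scale luma by the remaining progress (uiProgress is 0..1000). */
                    pDst[x] = (M4VIFI_UInt8)
                        (((M4OSA_UInt32)pSrc[x] * (1000 - pProgress->uiProgress)) / 1000);
                } else {
                    pDst[x] = pSrc[x];          /* Chroma copied unchanged */
                }
            }
            pSrc += pInputPlanes[plane].u_stride;
            pDst += pOutputPlanes[plane].u_stride;
        }
    }
    return M4NO_ERROR;
}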
+
+
+/**
+ ******************************************************************************
+ * prototype    M4VSS3GPP_editVideoTransitionFct
+ * @brief       External transition functions implemented by the integrator
+ *              must match this prototype.
+ * @note        The function is provided with the image of the first clip and
+ *              the image of the second clip. It must build the output image
+ *              from the two input images.
+ *              The progress of the transition is given, on a scale from 0 to 1000.
+ *              When the external function is called, all the image plane
+ *              structures and buffers are valid and owned by the VSS 3GPP.
+ *
+ * @param   pFunctionContext    (IN) The function context, previously set by the integrator
+ * @param   pClip1InputPlanes   (IN) First input YUV420 image: pointer to an array of three
+                                     valid image planes (Y, U and V)
+ * @param   pClip2InputPlanes   (IN) Second input YUV420 image: pointer to an array of three
+                                     valid image planes (Y, U and V)
+ * @param   pOutputPlanes       (IN/OUT) Output (filtered) YUV420 image: pointer to an array
+                                         of three valid image planes (Y, U and V)
+ * @param   pProgress           (IN) Set of information about the video effect progress.
+ * @param   uiExternalTransitionId    (IN) Which transition function should be used
+                                            (for external transitions)
+ *
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+typedef M4OSA_ERR (*M4VSS3GPP_editVideoTransitionFct)
+(
+    M4OSA_Void *pFunctionContext,
+    M4VIFI_ImagePlane *pClip1InputPlanes,
+    M4VIFI_ImagePlane *pClip2InputPlanes,
+    M4VIFI_ImagePlane *pOutputPlanes,
+    M4VSS3GPP_ExternalProgress *pProgress,
+    M4OSA_UInt32 uiExternalTransitionId
+);
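Similarly, a hypothetical external transition matching M4VSS3GPP_editVideoTransitionFct could blend the two input images plane by plane according to the transition progress. Again, the function name is illustrative only and the two input clips are assumed to have identical frame sizes.

static M4OSA_ERR ExampleCrossBlendTransition(M4OSA_Void *pFunctionContext,
                                             M4VIFI_ImagePlane *pClip1InputPlanes,
                                             M4VIFI_ImagePlane *pClip2InputPlanes,
                                             M4VIFI_ImagePlane *pOutputPlanes,
                                             M4VSS3GPP_ExternalProgress *pProgress,
                                             M4OSA_UInt32 uiExternalTransitionId)
{
    M4OSA_UInt32 plane, x, y;
    M4OSA_UInt32 w2 = pProgress->uiProgress;   /* Weight of clip 2: 0..1000 */
    M4OSA_UInt32 w1 = 1000 - w2;               /* Weight of clip 1 */

    (void)pFunctionContext;
    (void)uiExternalTransitionId;

    for (plane = 0; plane < 3; plane++) {      /* Y, U, V */
        M4VIFI_UInt8 *p1 = pClip1InputPlanes[plane].pac_data + pClip1InputPlanes[plane].u_topleft;
        M4VIFI_UInt8 *p2 = pClip2InputPlanes[plane].pac_data + pClip2InputPlanes[plane].u_topleft;
        M4VIFI_UInt8 *pO = pOutputPlanes[plane].pac_data     + pOutputPlanes[plane].u_topleft;

        for (y = 0; y < pOutputPlanes[plane].u_height; y++) {
            for (x = 0; x < pOutputPlanes[plane].u_width; x++) {
                pO[x] = (M4VIFI_UInt8)(((M4OSA_UInt32)p1[x] * w1 +
                                        (M4OSA_UInt32)p2[x] * w2) / 1000);
            }
            p1 += pClip1InputPlanes[plane].u_stride;
            p2 += pClip2InputPlanes[plane].u_stride;
            pO += pOutputPlanes[plane].u_stride;
        }
    }
    return M4NO_ERROR;
}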
+
+
+/**
+ ******************************************************************************
+ * struct   M4VSS3GPP_EffectSettings
+ * @brief   This structure defines an audio/video effect for the edition.
+ * @note    Effect start time is relative to output clip.
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4OSA_UInt32                 uiStartTime;           /**< In ms */
+    M4OSA_UInt32                 uiDuration;            /**< In ms */
+    M4VSS3GPP_VideoEffectType    VideoEffectType;       /**< None, FadeFromBlack, FadeToBlack, etc. */
+    M4VSS3GPP_editVideoEffectFct ExtVideoEffectFct;     /**< External effect function */
+    M4OSA_Void                  *pExtVideoEffectFctCtxt;/**< Context given to the external
+                                                             effect function */
+    M4VSS3GPP_AudioEffectType    AudioEffectType;       /**< None, FadeIn, FadeOut */
+
+#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
+    M4xVSS_EffectSettings         xVSS;
+#endif
+
+} M4VSS3GPP_EffectSettings;
+
+
+/**
+ ******************************************************************************
+ * enum        M4VSS3GPP_TransitionBehaviour
+ * @brief    Transition behavior
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4VSS3GPP_TransitionBehaviour_SpeedUp = 0,
+    M4VSS3GPP_TransitionBehaviour_Linear,
+    M4VSS3GPP_TransitionBehaviour_SpeedDown,
+    M4VSS3GPP_TransitionBehaviour_SlowMiddle,
+    M4VSS3GPP_TransitionBehaviour_FastMiddle
+} M4VSS3GPP_TransitionBehaviour;
+
+
+/**
+ ******************************************************************************
+ * struct   M4VSS3GPP_TransitionSettings
+ * @brief   This structure defines the transition to be applied when assembling two clips.
+ ******************************************************************************
+ */
+typedef struct
+{
+    /**< Duration of the transition, in milliseconds (set to 0 to get no transition) */
+    M4OSA_UInt32                     uiTransitionDuration;
+
+    /**< Type of the video transition */
+    M4VSS3GPP_VideoTransitionType    VideoTransitionType;
+
+    /**< External transition video effect function */
+    M4VSS3GPP_editVideoTransitionFct ExtVideoTransitionFct;
+
+    /**< Context of the external transition video effect function */
+    M4OSA_Void                      *pExtVideoTransitionFctCtxt;
+    M4VSS3GPP_AudioTransitionType    AudioTransitionType;   /**< Type of the audio transition */
+    M4VSS3GPP_TransitionBehaviour     TransitionBehaviour;    /**<Transition behaviour*/
+
+#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
+    M4xVSS_TransitionSettings        xVSS;
+#endif
+
+} M4VSS3GPP_TransitionSettings;
+
+
+/**
+ ******************************************************************************
+ * struct   M4VSS3GPP_ClipSettings
+ * @brief   This structure defines an input clip for the edition.
+ * @note    It also contains the settings for the cut and begin/end effects applied to the clip.
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4OSA_Void                     *pFile;            /**< Clip file descriptor */
+    M4VIDEOEDITING_FileType         FileType;         /**< .3gp, .amr, .mp3     */
+    M4OSA_UInt32                    filePathSize;      /**< Clip path size
+                                                           (added because of UTF16 conversion) */
+    M4VIDEOEDITING_ClipProperties   ClipProperties;   /**< Clip analysis previously computed
+                                                       with M4VSS3GPP_editAnalyseClip */
+    M4OSA_UInt32                    uiBeginCutTime;   /**< Begin cut time, in milliseconds */
+    M4OSA_UInt32                    uiEndCutTime;     /**< End cut time, in milliseconds */
+
+#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
+    M4xVSS_ClipSettings             xVSS;
+#endif
+
+} M4VSS3GPP_ClipSettings;
+
+
+/**
+ ******************************************************************************
+ * struct   M4VSS3GPP_EditSettings
+ * @brief   This structure gathers all the information needed to define a complete
+ *          edition operation
+ ******************************************************************************
+ */
+typedef struct
+{
+    /**< Number of elements in the clip list pClipList */
+    M4OSA_UInt8                      uiClipNumber;
+    /**< The properties of this clip will be used as a reference for compatibility checking */
+    M4OSA_UInt8                      uiMasterClip;
+    /**< List of the input clips settings. Pointer to an array of uiClipNumber
+     clip settings pointers */
+    M4VSS3GPP_ClipSettings           **pClipList;
+    /**< List of the transition settings. Pointer to an array of uiClipNumber-1
+     transition settings pointers */
+    M4VSS3GPP_TransitionSettings     **pTransitionList;
+    M4VSS3GPP_EffectSettings         *Effects;         /**< List of effects */
+    M4OSA_UInt8                         nbEffects;     /**< Number of effects in the above list */
+    /**< Frame rate at which the modified video sections will be encoded */
+    M4VIDEOEDITING_VideoFramerate    videoFrameRate;
+    M4OSA_Void                       *pOutputFile;      /**< Output 3GPP clip file descriptor */
+    M4OSA_UInt32                     uiOutputPathSize;    /**< Output file path size*/
+    /**< Temporary file to store metadata ("moov.bin") */
+    M4OSA_Void                       *pTemporaryFile;
+
+#ifdef M4VSS_SUPPORT_EXTENDED_FEATURES
+    M4xVSS_EditSettings              xVSS;
+#endif
+    M4OSA_Float                    PTVolLevel;
+} M4VSS3GPP_EditSettings;
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editAnalyseClip()
+ * @brief   This function allows checking if a clip is compatible with VSS 3GPP editing
+ * @note    It also fills a ClipAnalysis structure, which can be used to check if two
+ *          clips are compatible
+ * @param   pClip               (IN) File descriptor of the input 3GPP/MP3 clip file.
+ * @param   FileType            (IN) Type of the input file (.3gp, .amr, .mp3)
+ * @param   pClipProperties     (IN) Pointer to a valid ClipProperties structure,
+ *                                   filled by the analysis.
+ * @param   pFileReadPtrFct     (IN) Pointer to OSAL file reader functions
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return   M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED
+ * @return   M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return   M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED
+ * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE
+ * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE
+ * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC
+ * @return   M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
+ * @return   M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE
+ * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT
+ * @return   M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editAnalyseClip(M4OSA_Void *pClip, M4VIDEOEDITING_FileType FileType,
+                                    M4VIDEOEDITING_ClipProperties  *pClipProperties,
+                                    M4OSA_FileReadPointer *pFileReadPtrFct);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility()
+ * @brief   This function allows checking if two clips are compatible with each other
+ *          for VSS 3GPP editing assembly feature.
+ * @note
+ * @param   pClip1Properties        (IN) Clip analysis of the first clip
+ * @param   pClip2Properties        (IN) Clip analysis of the second clip
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return  M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_PLATFORM
+ * @return  M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT
+ * @return  M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE
+ * @return  M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE
+ * @return  M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING
+ * @return  M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY
+ * @return  M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility(M4VIDEOEDITING_ClipProperties  *pClip1Properties,
+                                               M4VIDEOEDITING_ClipProperties  *pClip2Properties);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editInit()
+ * @brief    Initializes the VSS 3GPP edit operation (allocates an execution context).
+ * @note
+ * @param    pContext            (OUT) Pointer on the VSS 3GPP edit context to allocate
+ * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
+ * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editInit(
+    M4VSS3GPP_EditContext* pContext,
+    M4OSA_FileReadPointer* pFileReadPtrFct,
+    M4OSA_FileWriterPointer* pFileWritePtrFct );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCreateClipSettings()
+ * @brief    Allows filling a clip settings structure with default values
+ *
+ * @note    WARNING: pClipSettings->pFile      will be allocated in this function.
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param   pFile               (IN) Clip file name
+ * @param   filePathSize        (IN) Size of the clip path (needed for UTF16 conversion)
+ * @param    nbEffects           (IN) Nb of effect settings to allocate
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editCreateClipSettings(M4VSS3GPP_ClipSettings *pClipSettings,
+                                           M4OSA_Void* pFile, M4OSA_UInt32 filePathSize,
+                                           M4OSA_UInt8 nbEffects);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editDuplicateClipSettings()
+ * @brief    Duplicates a clip settings structure, performing allocations if required
+ *
+ * @param    pClipSettingsDest    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param    pClipSettingsOrig    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param   bCopyEffects        (IN) Flag to know if we have to duplicate effects (deprecated)
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editDuplicateClipSettings(M4VSS3GPP_ClipSettings *pClipSettingsDest,
+                                              M4VSS3GPP_ClipSettings *pClipSettingsOrig,
+                                              M4OSA_Bool bCopyEffects);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editFreeClipSettings()
+ * @brief    Free the pointers allocated in the ClipSetting structure (pFile, Effects).
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editFreeClipSettings(M4VSS3GPP_ClipSettings *pClipSettings);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editOpen()
+ * @brief   Set the VSS 3GPP input and output files, and set the settings.
+ * @note
+ * @param   pContext            (IN) VSS 3GPP edit context
+ * @param   pSettings           (IN) Edit settings
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        VSS is not in an appropriate state for this function to be called
+ * @return  M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editOpen(M4VSS3GPP_EditContext pContext, M4VSS3GPP_EditSettings *pSettings);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editStep()
+ * @brief   Perform one step of editing.
+ * @note
+ * @param   pContext                (IN) VSS 3GPP edit context
+ * @param   pProgress               (OUT) Progress percentage (0 to 100) of the editing operation
+ * @return  M4NO_ERROR:             No error
+ * @return  M4ERR_PARAMETER:        pContext is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:            VSS 3GPP is not in an appropriate state for this function to
+ *                                  be called
+ * @return  M4VSS3GPP_WAR_EDITING_DONE: Editing is done, user should now call M4VSS3GPP_editClose()
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editStep(M4VSS3GPP_EditContext pContext, M4OSA_UInt8 *pProgress);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editClose()
+ * @brief   Finish the VSS 3GPP edit operation.
+ * @note    The output 3GPP file is ready to be played after this call
+ * @param   pContext            (IN) VSS 3GPP edit context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        VSS 3GPP is not in an appropriate state for this function
+ *                              to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editClose(M4VSS3GPP_EditContext pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCleanUp()
+ * @brief   Free all resources used by the VSS 3GPP edit operation.
+ * @note    The context is no more valid after this call
+ * @param   pContext            (IN) VSS 3GPP edit context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editCleanUp(M4VSS3GPP_EditContext pContext);
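Taken together, the edit functions above are intended to be called in sequence: init, open, step until the "editing done" warning, close, then clean up. The following is a minimal, hypothetical driver; the function name runEditSession and the reduced error handling are illustrative, and the M4VSS3GPP_EditSettings structure is assumed to have been filled in by the caller beforehand.

/* Minimal sketch of the edit lifecycle, assuming pSettings is already populated. */
static M4OSA_ERR runEditSession(M4VSS3GPP_EditSettings *pSettings,
                                M4OSA_FileReadPointer *pReadFcts,
                                M4OSA_FileWriterPointer *pWriteFcts)
{
    M4VSS3GPP_EditContext ctx = M4OSA_NULL;
    M4OSA_UInt8 progress = 0;
    M4OSA_ERR err;

    err = M4VSS3GPP_editInit(&ctx, pReadFcts, pWriteFcts);
    if (M4NO_ERROR != err) return err;

    err = M4VSS3GPP_editOpen(ctx, pSettings);
    if (M4NO_ERROR != err) {
        M4VSS3GPP_editCleanUp(ctx);
        return err;
    }

    /* Step until the service reports that editing is done (or fails). */
    do {
        err = M4VSS3GPP_editStep(ctx, &progress);
    } while (M4NO_ERROR == err);

    if (M4VSS3GPP_WAR_EDITING_DONE == err) {
        err = M4VSS3GPP_editClose(ctx);   /* Finalize the output 3GPP file. */
    }

    M4VSS3GPP_editCleanUp(ctx);           /* Context is no longer valid after this call. */
    return err;
}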
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editRegisterExternalVideoDecoder(M4VSS3GPP_EditContext pContext,
+ *                                     M4VD_VideoType decoderType,
+ *                                     M4VD_Interface*    pDecoderInterface,
+ *                                     M4OSA_Void* pUserData)
+ * @brief    Registers an external Video decoder
+ * @note
+ * @param   pContext           (IN) VSS3GPP context
+ * @param   decoderType        (IN) Type of decoder (MPEG4 ...)
+ * @param   pDecoderInterface  (IN) Decoder interface
+ * @param   pUserData          (IN) Pointer on a user data to give to external decoder
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        VSS3GPP is not in an appropriate state for this function
+ *                              to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editRegisterExternalVideoDecoder(M4VSS3GPP_EditContext pContext,
+                                     M4VD_VideoType decoderType,
+                                     M4VD_Interface*    pDecoderInterface,
+                                     M4OSA_Void* pUserData);
+
+/**
+ ******************************************************************************
+ *M4OSA_ERR M4VSS3GPP_editRegisterExternalVideoEncoder(M4VSS3GPP_EditContext pContext,
+ *                                     M4VE_EncoderType encoderType,
+ *                                     M4VE_Interface*    pEncoderInterface,
+ *                                     M4OSA_Void* pUserData)
+ * @brief    Registers an external Video encoder
+ * @note
+ * @param   pContext           (IN) VSS3GPP context
+ * @param   encoderType        (IN) Type of encoder (MPEG4 ...)
+ * @param   pEncoderInterface  (IN) Encoder interface
+ * @param   pUserData          (IN) Pointer on a user data to give to external encoder
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        VSS3GPP is not in an appropriate state for this function
+ *                              to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editRegisterExternalVideoEncoder(M4VSS3GPP_EditContext pContext,
+                                     M4VE_EncoderType encoderType,
+                                     M4VE_Interface*    pEncoderInterface,
+                                     M4OSA_Void* pUserData);
+
+
+
+/**
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ *
+ *      Audio Mixing Feature
+ *
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ */
+/**
+ *  Public type of the VSS audio mixing context */
+typedef M4OSA_Void* M4VSS3GPP_AudioMixingContext;
+
+
+/**
+ ******************************************************************************
+ * struct   M4VSS3GPP_AudioMixingSettings
+ * @brief   This structure defines the settings of the audio mixing operation.
+ ******************************************************************************
+ */
+typedef struct {
+    M4OSA_Void*                             pOriginalClipFile;      /**< Input 3GPP clip file */
+    M4OSA_Void*                             pAddedAudioTrackFile;   /**< New audio track */
+    M4VIDEOEDITING_FileType                 AddedAudioFileType;     /**< File Format of the new audio file */
+    M4OSA_UInt32                            uiAddCts;               /**< Time, in milliseconds,
+                                                                    at which the added audio track is inserted */
+    M4OSA_UInt32                            uiAddVolume;            /**< Volume, in percentage,
+                                                                        of the added audio track */
+    M4OSA_UInt32                            uiBeginLoop;            /**< Start time of the loop,
+                                                                        in milliseconds */
+    M4OSA_UInt32                            uiEndLoop;              /**< End time of the loop, in
+                                                                        milliseconds (0 means no loop) */
+    M4OSA_Bool                              bRemoveOriginal;      /**< If true, the original audio track
+                                                                     is not taken into account */
+    M4OSA_Void*                             pOutputClipFile;      /**< Output 3GPP clip file */
+    M4OSA_Void*                             pTemporaryFile;       /**< Temporary file to store metadata
+                                                     ("moov.bin") */
+    /**< The following parameters are optional. They are only used in case of MP3 replacement. */
+    M4VIDEOEDITING_AudioSamplingFrequency   outputASF;         /**< Output sampling frequency */
+    M4VIDEOEDITING_AudioFormat              outputAudioFormat; /**< Output audio codec(AAC/AMR)*/
+    M4VIDEOEDITING_Bitrate                  outputAudioBitrate; /**< Output audio bitrate */
+    M4OSA_UInt8                             outputNBChannels; /**< Output audio nb of channels */
+    M4OSA_Bool                              b_DuckingNeedeed;
+    M4OSA_Int32                             InDucking_threshold;
+    M4OSA_Float                             fBTVolLevel;
+    M4OSA_Float                             fPTVolLevel;
+    M4OSA_Float                             InDucking_lowVolume;
+    M4OSA_Bool                              bLoop;
+    M4OSA_UInt32                            uiSamplingFrequency;
+    M4OSA_UInt32                            uiNumChannels;
+} M4VSS3GPP_AudioMixingSettings;
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingInit(M4VSS3GPP_AudioMixingContext* pContext,
+ *                                     M4VSS3GPP_AudioMixingSettings* pSettings)
+ * @brief    Initializes the VSS audio mixing operation (allocates an execution context).
+ * @note
+ * @param    pContext        (OUT) Pointer on the VSS audio mixing context to allocate
+ * @param    pSettings        (IN) Pointer to valid audio mixing settings
+ * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
+ * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return    M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_audioMixingInit(
+    M4VSS3GPP_AudioMixingContext* pContext,
+    M4VSS3GPP_AudioMixingSettings* pSettings,
+    M4OSA_FileReadPointer* pFileReadPtrFct,
+    M4OSA_FileWriterPointer* pFileWritePtrFct );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingStep()
+ * @brief   Perform one step of audio mixing.
+ * @note
+ * @param   pContext                        (IN) VSS 3GPP audio mixing context
+ * @param   pProgress                       (OUT) Progress percentage (0 to 100)
+                                                  of the audio mixing operation
+ * @return  M4NO_ERROR:                     No error
+ * @return  M4ERR_PARAMETER:                pContext is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:                    VSS is not in an appropriate state for
+                                            this function to be called
+ * @return  M4VSS3GPP_WAR_END_OF_AUDIO_MIXING: Audio mixing is over, user should
+                                               now call M4VSS3GPP_audioMixingCleanUp()
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_audioMixingStep(M4VSS3GPP_AudioMixingContext pContext,
+                                     M4OSA_UInt8 *pProgress);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingCleanUp()
+ * @brief   Free all resources used by the VSS audio mixing operation.
+ * @note    The context is no more valid after this call
+ * @param   pContext            (IN) VSS 3GPP audio mixing context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_audioMixingCleanUp(M4VSS3GPP_AudioMixingContext pContext);
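The audio mixing feature follows the same init / step / clean-up pattern as editing. A minimal sketch (hypothetical function name, settings assumed to be already filled in) could look like this:

/* Minimal sketch of the audio mixing lifecycle, assuming pSettings is populated. */
static M4OSA_ERR runAudioMixing(M4VSS3GPP_AudioMixingSettings *pSettings,
                                M4OSA_FileReadPointer *pReadFcts,
                                M4OSA_FileWriterPointer *pWriteFcts)
{
    M4VSS3GPP_AudioMixingContext ctx = M4OSA_NULL;
    M4OSA_UInt8 progress = 0;
    M4OSA_ERR err;

    err = M4VSS3GPP_audioMixingInit(&ctx, pSettings, pReadFcts, pWriteFcts);
    if (M4NO_ERROR != err) return err;

    /* Step until the "end of audio mixing" warning (or an error) is returned. */
    do {
        err = M4VSS3GPP_audioMixingStep(ctx, &progress);
    } while (M4NO_ERROR == err);

    if (M4VSS3GPP_WAR_END_OF_AUDIO_MIXING == err) {
        err = M4NO_ERROR;                  /* The mixed output clip has been written. */
    }

    M4VSS3GPP_audioMixingCleanUp(ctx);     /* Context is no longer valid after this call. */
    return err;
}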
+
+
+/**
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ *
+ *      Extract Picture Feature
+ *
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ */
+/**
+ *  Public type of the VSS extract picture context */
+typedef M4OSA_Void* M4VSS3GPP_ExtractPictureContext;
+
+/**
+ ******************************************************************************
+ * struct   M4VSS3GPP_ExtractPictureSettings
+ * @brief   This structure defines the settings of the extract picture audio operation.
+ ******************************************************************************
+ */
+typedef struct {
+    M4OSA_Void*                         pInputClipFile;  /**< Input 3GPP clip file */
+    M4OSA_Int32                         iExtractionTime; /**< frame time (in ms) to be extracted */
+    M4OSA_Void*                         pOutputYuvPic;   /**< Output YUV picture name */
+} M4VSS3GPP_ExtractPictureSettings;
+
+
+/******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_extractPictureInit()
+ * @brief    Initializes the VSS extract picture operation (allocates an execution context).
+ * @note
+ * @param    pContext            (OUT) Pointer on the VSS extract picture context to allocate
+ * @param    pSettings            (IN) Pointer to valid extract picture settings
+ * @param    pWidth                (OUT) video stream width
+ * @param    pHeight                (OUT) video stream height
+ * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
+ * @return    M4NO_ERROR:                        No error
+ * @return    M4ERR_PARAMETER:                At least one parameter is M4OSA_NULL (debug only)
+ * @return    M4ERR_ALLOC:                    There is no more available memory
+ * @return    M4VSS3GPP_ERR_INVALID_CLIP1:    The input clip is empty
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_extractPictureInit(
+        M4VSS3GPP_ExtractPictureContext* pContext,
+        M4VSS3GPP_ExtractPictureSettings* pSettings,
+        M4OSA_UInt32 *pWidth,
+        M4OSA_UInt32 *pHeight,
+        M4OSA_FileReadPointer* pFileReadPtrFct );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_extractPictureStep()
+ * @brief   Perform one step of picture extraction.
+ * @note
+ * @param   pContext                        (IN) VSS extract picture context
+ * @param   pDecPlanes                      (OUT) Planes in which the extracted picture is copied
+ * @param   pProgress                       (OUT) Progress percentage (0 to 100)
+                                                 of the picture extraction
+ * @return  M4NO_ERROR:                     No error
+ * @return  M4ERR_PARAMETER:                pContext is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:                    VSS is not in an appropriate state for this
+                                            function to be called
+ * @return  M4VSS3GPP_WAR_END_OF_EXTRACT_PICTURE: Picture extraction is over, user should now
+                                            call M4VSS3GPP_extractPictureCleanUp()
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_extractPictureStep(M4VSS3GPP_ExtractPictureContext pContext,
+                                       M4VIFI_ImagePlane *pDecPlanes, M4OSA_UInt8 *pProgress);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_extractPictureCleanUp()
+ * @brief   Free all resources used by the VSS picture extraction.
+ * @note    The context is no more valid after this call
+ * @param   pContext            (IN) VSS extract picture context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_extractPictureCleanUp(M4VSS3GPP_ExtractPictureContext pContext);
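Picture extraction uses the same stepping pattern. In the sketch below (hypothetical function name), the caller is assumed to supply three M4VIFI_ImagePlane structures already allocated for the clip's YUV420 resolution; the width and height reported by the init call are only used as a sanity check.

/* Minimal sketch of the extract picture lifecycle; planes[0..2] are assumed
 * to be pre-allocated by the caller for the clip's YUV420 resolution. */
static M4OSA_ERR extractOneFrame(M4VSS3GPP_ExtractPictureSettings *pSettings,
                                 M4OSA_FileReadPointer *pReadFcts,
                                 M4VIFI_ImagePlane planes[3])
{
    M4VSS3GPP_ExtractPictureContext ctx = M4OSA_NULL;
    M4OSA_UInt32 width = 0, height = 0;
    M4OSA_UInt8 progress = 0;
    M4OSA_ERR err;

    err = M4VSS3GPP_extractPictureInit(&ctx, pSettings, &width, &height, pReadFcts);
    if (M4NO_ERROR != err) return err;

    /* Sanity check: the supplied planes must match the reported video resolution. */
    if ((planes[0].u_width != width) || (planes[0].u_height != height)) {
        M4VSS3GPP_extractPictureCleanUp(ctx);
        return M4ERR_PARAMETER;
    }

    /* Step until the "end of extract picture" warning (or an error) is returned. */
    do {
        err = M4VSS3GPP_extractPictureStep(ctx, planes, &progress);
    } while (M4NO_ERROR == err);

    if (M4VSS3GPP_WAR_END_OF_EXTRACT_PICTURE == err) {
        err = M4NO_ERROR;                       /* planes now hold the decoded picture. */
    }

    M4VSS3GPP_extractPictureCleanUp(ctx);       /* Context is no longer valid after this call. */
    return err;
}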
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_extractPictureRegisterExternalVideoDecoder(
+ *                   M4VSS3GPP_ExtractPictureContext pContext,
+ *                                     M4VD_VideoType decoderType,
+ *                                     M4VD_Interface*    pDecoderInterface,
+ *                                     M4OSA_Void* pUserData)
+ * @brief    Registers an external Video decoder
+ * @note
+ * @param   pContext           (IN) Extract picture context
+ * @param   decoderType        (IN) Type of decoder (MPEG4 ...)
+ * @param   pDecoderInterface  (IN) Decoder interface
+ * @param   pUserData          (IN) Pointer on a user data to give to external decoder
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        Extract picture is not in an appropriate state for this
+ *                              function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_extractPictureRegisterExternalVideoDecoder(
+                                     M4VSS3GPP_ExtractPictureContext pContext,
+                                     M4VD_VideoType decoderType,
+                                     M4VD_Interface*    pDecoderInterface,
+                                     M4OSA_Void* pUserData);
+
+
+
+/**
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ *
+ *      Common features
+ *
+ ******************************************************************************
+ ******************************************************************************
+ ******************************************************************************
+ */
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_GetVersion()
+ * @brief   Get the VSS version.
+ * @note    Can be called anytime. Do not need any context.
+ * @param   pVersionInfo        (OUT) Pointer to a version info structure
+ * @return  M4NO_ERROR:         No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_GetVersion(M4_VersionInfo* pVersionInfo);
+
+
+#ifdef WIN32
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_GetErrorMessage()
+ * @brief   Return a string describing the given error code
+ * @note    The input string must be already allocated (and long enough!)
+ * @param   err             (IN) Error code to get the description from
+ * @param   sMessage        (IN/OUT) Allocated string in which the description will be copied
+ * @return  M4NO_ERROR:     Input error is from the VSS3GPP module
+ * @return  M4ERR_PARAMETER:Input error is not from the VSS3GPP module
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_GetErrorMessage(M4OSA_ERR err, M4OSA_Char* sMessage);
+#endif /**< WIN32 */
+
+
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editRegisterExternalCodec(
+ *                                                             M4VSS3GPP_EditContext    pContext,
+ *                                     M4VSS3GPP_codecType        codecType,
+ *                                     M4OSA_Context    pCodecInterface,
+ *                                     M4OSA_Void* pUserData)
+ * @brief    Registers an external Video/Audio codec with VSS3GPP
+ * @note This differs from the other external codec registration APIs in order to
+ *       accommodate the specific requirements of the OMX codec implementation.
+ *
+ * @param  pContext           (IN) VSS3GPP context
+ * @param  codecType        (IN) Type of codec (MPEG4 ...)
+ * @param  pCodecInterface  (IN) Codec interface
+ * @param  pUserData          (IN) Pointer on a user data to give to external codec
+ * @return  M4NO_ERROR:       No error
+ * @return  M4ERR_PARAMETER:  At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:     VSS3GPP is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+ M4OSA_ERR M4VSS3GPP_editRegisterExternalCodec(M4VSS3GPP_EditContext pContext,
+                                     M4VSS3GPP_codecType codecType,
+                                     M4OSA_Context    pCodecInterface,
+                                     M4OSA_Void* pUserData);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editSubscribeExternalCodecs(M4VSS3GPP_EditContext    pContext)
+ * @brief    Subscribes to previously registered external Video/Audio codec
+ * @note This differs from the other external codec registration APIs in order to
+ *       accommodate the specific requirements of the OMX codec implementation.
+ *
+ * @param  pContext           (IN) VSS3GPP context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:     VSS3GPP is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+ M4OSA_ERR M4VSS3GPP_editSubscribeExternalCodecs(M4VSS3GPP_EditContext pContext);
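The comments above suggest a two-stage sequence: register each external codec, then subscribe so the registered codecs become active. The sketch below follows that order; the function name, the particular codec types chosen, and the assumption that a single codec-shell interface pointer can back several codec types are all illustrative.

/* Hypothetical registration sequence for an external (e.g. OMX-based) codec shell. */
static M4OSA_ERR registerExternalCodecs(M4VSS3GPP_EditContext ctx,
                                        M4OSA_Context pCodecShellInterface,
                                        M4OSA_Void *pUserData)
{
    M4OSA_ERR err;

    /* Register each codec type the shell supports... */
    err = M4VSS3GPP_editRegisterExternalCodec(ctx, M4VSS3GPP_kVideoDecMPEG4,
                                              pCodecShellInterface, pUserData);
    if (M4NO_ERROR != err) return err;

    err = M4VSS3GPP_editRegisterExternalCodec(ctx, M4VSS3GPP_kVideoEncH264,
                                              pCodecShellInterface, pUserData);
    if (M4NO_ERROR != err) return err;

    /* ...then subscribe so that the registered codecs are taken into account. */
    return M4VSS3GPP_editSubscribeExternalCodecs(ctx);
}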
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intSubscribeExternalCodecs(M4VSS3GPP_EditContext    pContext,
+ *                                                M4OSA_Context pShellCtxt)
+ * @brief    Subscribes to previously registered external Video/Audio codec
+ * @note This differs from the other external codec registration APIs in order to
+ *       accommodate the specific requirements of the OMX codec implementation.
+ *
+ * @param  pContext           (IN) VSS3GPP context
+ * @param pShellCtxt       (IN) Media codec shell context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:     VSS3GPP is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+ M4OSA_ERR M4VSS3GPP_intSubscribeExternalCodecs(M4VSS3GPP_EditContext pContext,
+                                                M4OSA_Context pShellCtxt);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4VSS3GPP_API_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h b/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h
new file mode 100755
index 0000000..3ad97a7
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_ErrorCodes.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_ErrorCodes.h
+ * @brief    Video Studio Service 3GPP error definitions.
+ * @note
+ ******************************************************************************
+ */
+
+#ifndef __M4VSS3GPP_ErrorCodes_H__
+#define __M4VSS3GPP_ErrorCodes_H__
+
+/**
+ *    OSAL basic types and errors */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+/**
+ *    OSAL core ID definitions */
+#include "M4OSA_CoreID.h"
+
+
+/************************************************************************/
+/* Warning codes                                                        */
+/************************************************************************/
+
+/**
+ *    End of editing, the user should now call M4VSS3GPP_editClose() */
+#define M4VSS3GPP_WAR_EDITING_DONE             M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0001)
+
+/**
+ *    End of audio mixing, user should now call M4VSS3GPP_audioMixingCleanUp() */
+#define M4VSS3GPP_WAR_END_OF_AUDIO_MIXING      M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0010)
+
+/**
+ *    End of extract picture, user should now call M4VSS3GPP_extractPictureCleanUp() */
+#define M4VSS3GPP_WAR_END_OF_EXTRACT_PICTURE   M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0020)
+/* RC: to know when a file has been processed */
+#define M4VSS3GPP_WAR_SWITCH_CLIP              M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0030)
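+
+/**
+ * Illustrative sketch (not part of the original header) of how a caller reacts to
+ * the warning codes above. The stepping call used here (M4VSS3GPP_editStep) is
+ * assumed from the rest of the VSS3GPP edit API and is not declared in this file;
+ * only the reaction to M4VSS3GPP_WAR_EDITING_DONE is taken from the comment above.
+ *
+ *   M4OSA_ERR err;
+ *   do {
+ *       err = M4VSS3GPP_editStep(pEditCtxt, &progress);   // hypothetical step call
+ *   } while (M4NO_ERROR == err);
+ *
+ *   if (M4VSS3GPP_WAR_EDITING_DONE == err)
+ *       err = M4VSS3GPP_editClose(pEditCtxt);   // normal end of editing
+ *   else
+ *       reportError(err);                       // a real error code
+ */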
+
+/************************************************************************/
+/* Error codes                                                          */
+/************************************************************************/
+
+/**
+ * Invalid file type */
+#define M4VSS3GPP_ERR_INVALID_FILE_TYPE               M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0001)
+/**
+ * Invalid effect kind */
+#define M4VSS3GPP_ERR_INVALID_EFFECT_KIND             M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0002)
+/**
+ * Invalid effect type for video */
+#define M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE       M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0003)
+/**
+ * Invalid effect type for audio */
+#define M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE       M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0004)
+/**
+ * Invalid transition type for video */
+#define M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0005)
+/**
+ * Invalid transition type for audio */
+#define M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0006)
+/**
+ * Invalid video encoding frame rate */
+#define M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE        \
+                                      M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0007)
+ /**
+ * External effect function is used without being set */
+#define M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL            M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0008)
+/**
+ * External transition function is used without being set */
+#define M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL        M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0009)
+
+/**
+ * Begin cut time is larger than the clip duration */
+#define M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION  M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0010)
+/**
+ * Begin cut time is larger than or equal to the end cut time */
+#define M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0011)
+/**
+ * Two consecutive transitions are overlapping on one clip */
+#define M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS         M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0012)
+
+/**
+ * An input 3GPP file is invalid/corrupted */
+#define M4VSS3GPP_ERR_INVALID_3GPP_FILE               M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0016)
+/**
+ * A file contains an unsupported video format */
+#define M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT  M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0017)
+/**
+ * A file contains an unsupported audio format */
+#define M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT  M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0018)
+
+/**
+ * A file format is not supported by the VSS */
+#define M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED         M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0019)
+ /**
+ *    An input clip has an unexpectedly large Video AU */
+#define M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE        M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x001A)
+/**
+ *    An input clip has an unexpectedly large Audio AU */
+#define M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE        M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x001B)
+/**
+ *    An input clip has a corrupted Audio AMR AU */
+#define M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU       M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x001C)
+/**
+ * The video encoder encountered an Access Unit error: very probably a file write error */
+#define M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR       M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x001D)
+
+
+/************************************************************************/
+/* Errors returned by M4VSS3GPP_editAnalyseClip()                       */
+/************************************************************************/
+
+/**
+ * Unsupported video format for Video Editing */
+#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0020)
+/**
+ * Unsupported H263 profile for Video Editing */
+#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0021)
+/**
+ * Unsupported MPEG-4 profile for Video Editing */
+#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE    \
+                                             M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0022)
+/**
+ * Unsupported MPEG-4 RVLC tool for Video Editing */
+#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0023)
+/**
+ * Unsupported audio format for Video Editing */
+#define M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0024)
+ /**
+ * File contains no supported stream */
+#define M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE    M4OSA_ERR_CREATE( M4_ERR,\
+                                                                            M4VSS3GPP, 0x0025)
+/**
+ * File contains no video stream or an unsupported video stream */
+#define M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE    M4OSA_ERR_CREATE( M4_ERR,\
+                                                                                M4VSS3GPP, 0x0026)
+
+
+/************************************************************************/
+/* Errors returned by M4VSS3GPP_editCheckClipCompatibility()            */
+/************************************************************************/
+
+/**
+ * At least one of the clip analyses was generated by another version of the VSS 3GPP */
+#define M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0030)
+/**
+ * Clips don't have the same video format (H263 or MPEG4) */
+#define M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT       M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0031)
+/**
+ *    Clips don't have the same frame size */
+#define M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0032)
+/**
+ *    Clips don't have the same MPEG-4 time scale */
+#define M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0033)
+/**
+ *    Clips don't have the same use of MPEG-4 data partitioning */
+#define M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING    M4OSA_ERR_CREATE( M4_ERR,\
+                                                                              M4VSS3GPP, 0x0034)
+/**
+ *    MP3 clips can't be assembled */
+#define M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY        M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0035)
+/**
+ *  Clips don't have the same audio stream type (ex: AMR != AAC) */
+#define M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE  M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0036)
+/**
+ *  Clips don't have the same audio number of channels (ex: stereo != mono) */
+#define M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS        M4OSA_ERR_CREATE( M4_WAR,\
+                                                                            M4VSS3GPP, 0x0037)
+/**
+ *  Clips don't have the same sampling frequency (ex: 44100Hz != 16000Hz) */
+#define M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY    M4OSA_ERR_CREATE( M4_WAR,\
+                                                                              M4VSS3GPP, 0x0038)
+
+/************************************************************************/
+/* Audio mixing error codes                                            */
+/************************************************************************/
+
+/**
+ * The input 3GPP file does not contain any supported audio or video track */
+#define M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE     M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0050)
+/**
+ * The volume of the added audio track (AddVolume) must be strictly greater than zero */
+#define M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO           M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0051)
+/**
+ * The time at which the audio track is added (AddCts) can't be greater than the
+   input video track duration */
+#define M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION        M4OSA_ERR_CREATE( M4_ERR,\
+                                                                            M4VSS3GPP, 0x0052)
+/**
+ * The audio track file format setting is undefined */
+#define M4VSS3GPP_ERR_UNDEFINED_AUDIO_TRACK_FILE_FORMAT        M4OSA_ERR_CREATE( M4_ERR,\
+                                                                            M4VSS3GPP, 0x0053)
+/**
+ * The added audio track stream has an unsupported format */
+#define M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0054)
+/**
+ * The audio mixing feature doesn't support EVRC or MP3 audio tracks */
+#define M4VSS3GPP_ERR_AUDIO_MIXING_UNSUPPORTED         M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0055)
+/**
+ * An added audio track limits the available features: uiAddCts must be 0
+   and bRemoveOriginal must be M4OSA_TRUE */
+#define M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK  M4OSA_ERR_CREATE( M4_ERR,\
+                                                                              M4VSS3GPP, 0x0056)
+/**
+ * Input audio track is neither AMR-NB nor AAC, so it can't be mixed with the output */
+#define M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED                    M4OSA_ERR_CREATE( M4_ERR,\
+                                                                              M4VSS3GPP, 0x0057)
+/**
+ * Input clip must be a 3gpp file */
+#define M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP              M4OSA_ERR_CREATE( M4_ERR,\
+                                                                              M4VSS3GPP, 0x0058)
+/**
+ * Begin loop time is higher than end loop time or higher than added clip duration */
+#define M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP              M4OSA_ERR_CREATE( M4_ERR,\
+                                                                              M4VSS3GPP, 0x0059)
+
+
+/************************************************************************/
+/* Audio mixing and extract picture error code                          */
+/************************************************************************/
+
+/**
+ * H263 Profile 3 level 10 is not supported */
+#define M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED            M4OSA_ERR_CREATE( M4_ERR,\
+                                                                            M4VSS3GPP, 0x0060)
+/**
+ * File contains no video stream or an unsupported video stream */
+#define M4VSS3GPP_ERR_NO_SUPPORTED_VIDEO_STREAM_IN_FILE        M4OSA_ERR_CREATE( M4_ERR,\
+                                                                            M4VSS3GPP, 0x0061)
+
+
+/************************************************************************/
+/* Internal error and warning codes                                     */
+/************************************************************************/
+
+/**
+ * Internal state error */
+#define M4VSS3GPP_ERR_INTERNAL_STATE                 M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0100)
+/**
+ * Luminance filter effect error */
+#define M4VSS3GPP_ERR_LUMA_FILTER_ERROR              M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0104)
+/**
+ * Curtain filter effect error */
+#define M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR           M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0105)
+/**
+ * Transition filter effect error */
+#define M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR        M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0106)
+/**
+ * The audio decoder initialization failed */
+#define M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED      M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0110)
+/**
+ * The decoder produced an unexpected amount of PCM */
+#define M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE   M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0115)
+/**
+ * Output file must be 3GPP or MP3 */
+#define M4VSS3GPP_ERR_OUTPUT_FILE_TYPE_ERROR         M4OSA_ERR_CREATE( M4_ERR, M4VSS3GPP, 0x0117)
+
+#endif /* __M4VSS3GPP_ErrorCodes_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_Extended_API.h b/libvideoeditor/vss/inc/M4VSS3GPP_Extended_API.h
new file mode 100755
index 0000000..faecb91
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_Extended_API.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4VSS3GPP_EXTENDED_API_H__
+#define __M4VSS3GPP_EXTENDED_API_H__
+
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_Extended_API.h
+ * @brief    API of xVSS
+ * @note
+ ******************************************************************************
+*/
+
+#ifndef M4VSS_SUPPORT_EXTENDED_FEATURES
+#error "*** the flag M4VSS_SUPPORT_EXTENDED_FEATURES should be activated in CompilerSwitches\
+             for VideoStudio ***"
+#endif
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_getTextRgbBufferFct
+ * @brief        External text to RGB buffer functions implemented by the integrator
+ *                must match this prototype.
+ * @note        The function is provided with the renderingData, the text buffer and
+ *                its size. It must build the output RGB image plane containing the text.
+ *
+ * @param   pRenderingData    (IN) The data given by the user in M4xVSS_EffectSettings
+ * @param    pTextBuffer        (IN) Text buffer given by the user in M4xVSS_EffectSettings
+ * @param    textBufferSize    (IN) Text buffer size given by the user in M4xVSS_EffectSettings
+ * @param    pOutputPlane    (IN/OUT) Output RGB565 image
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ ******************************************************************************
+*/
+typedef M4OSA_ERR (*M4xVSS_getTextRgbBufferFct)
+(
+    M4OSA_Void *pRenderingData,
+    M4OSA_Void *pTextBuffer,
+    M4OSA_UInt32 textBufferSize,
+    M4VIFI_ImagePlane **pOutputPlane
+);
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_BGMSettings
+ * @brief    This structure gathers all the information needed to add background music to a 3gp file
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_Void                  *pFile;         /**< Input file path */
+    M4VIDEOEDITING_FileType     FileType;       /**< .3gp, .amr, .mp3     */
+    M4OSA_UInt32                uiAddCts;       /**< Time, in milliseconds, at which the added
+                                                      audio track is inserted */
+    M4OSA_UInt32                uiAddVolume;     /**< Volume, in percentage, of the added audio track */
+    M4OSA_UInt32                uiBeginLoop;    /**< Start time of the loop,
+                                                     in milliseconds */
+    M4OSA_UInt32                uiEndLoop;      /**< End time of the loop, in milliseconds
+                                                     (0 means no loop) */
+    M4OSA_Bool                  b_DuckingNeedeed;
+    M4OSA_Int32                 InDucking_threshold;  /**< Threshold value at which background
+                                                            music shall duck */
+    M4OSA_Float                 lowVolume;       /**< Lower the background track by this factor
+                                                 and increase the primary track by the inverse of this factor */
+    M4OSA_Bool                  bLoop;
+    M4OSA_UInt32                uiSamplingFrequency;
+    M4OSA_UInt32                uiNumChannels;
+} M4xVSS_BGMSettings;
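+
+/**
+ * Illustrative sketch (not part of the original header): filling in the background
+ * music settings. The file type constant below is assumed from the
+ * M4VIDEOEDITING_FileType enum defined outside this header; the numeric values are
+ * arbitrary example choices.
+ *
+ *   M4xVSS_BGMSettings bgm;
+ *   bgm.pFile               = (M4OSA_Void *)"/sdcard/music.mp3";
+ *   bgm.FileType            = M4VIDEOEDITING_kFileType_MP3;  // assumed enum value
+ *   bgm.uiAddCts            = 0;            // insert at the start of the video
+ *   bgm.uiAddVolume         = 50;           // 50 percent
+ *   bgm.uiBeginLoop         = 0;
+ *   bgm.uiEndLoop           = 0;            // 0 means no loop
+ *   bgm.b_DuckingNeedeed    = M4OSA_TRUE;
+ *   bgm.InDucking_threshold = 45;
+ *   bgm.lowVolume           = 0.30f;        // duck the background track to this factor
+ *   bgm.bLoop               = M4OSA_FALSE;
+ *   bgm.uiSamplingFrequency = 44100;
+ *   bgm.uiNumChannels       = 2;
+ */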
+
+
+/**
+ ******************************************************************************
+ * enum     M4VSS3GPP_RGBType
+ * @brief   This enumeration defines the RGB buffer types used by the framing effect
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4VSS3GPP_kRGB888           = 0,  /**< RGB888 data type */
+    M4VSS3GPP_kRGB565           = 1  /**< RGB565 data type */
+
+} M4VSS3GPP_RGBType;
+
+/**
+ ******************************************************************************
+ * struct   M4xVSS_EffectSettings
+ * @brief   This structure defines an audio/video effect for the edition.
+ ******************************************************************************
+*/
+typedef struct
+{
+    /**< In percent of the cut clip duration */
+    M4OSA_UInt32               uiStartPercent;
+    /**< In percent of the ((clip duration) - (effect starttime)) */
+    M4OSA_UInt32               uiDurationPercent;
+    /**< Framing file path (GIF/PNG file), used only if VideoEffectType == framing */
+    M4OSA_Void                 *pFramingFilePath;
+    /**< Framing RGB565 buffer,  used only if VideoEffectType == framing */
+    M4VIFI_ImagePlane          *pFramingBuffer;
+    /**< RGB buffer type, used only if VideoEffectType == framing */
+    M4VSS3GPP_RGBType          rgbType;
+    /**< The top-left X coordinate in the output picture where the added frame will be displayed.
+     Used only if VideoEffectType == framing || VideoEffectType == text */
+    M4OSA_UInt32               topleft_x;
+    /**< The top-left Y coordinate in the output picture where the added frame will be displayed.
+     Used only if VideoEffectType == framing || VideoEffectType == text */
+    M4OSA_UInt32               topleft_y;
+    /**< Whether the framing image is resized to the output video size.
+     Used only if VideoEffectType == framing */
+    M4OSA_Bool                 bResize;
+    /**< Size to which the framing file needs to be resized */
+    M4VIDEOEDITING_VideoFrameSize framingScaledSize;
+    /**< Text buffer. Used only if VideoEffectType == text */
+    M4OSA_Void*                pTextBuffer;
+    /**< Text buffer size. Used only if VideoEffectType == text */
+    M4OSA_UInt32               textBufferSize;
+    /**< Pointer containing specific data used by the font engine (size, color...) */
+    M4OSA_Void*                pRenderingData;
+    /**< Text plane width. Used only if VideoEffectType == text */
+    M4OSA_UInt32               uiTextBufferWidth;
+    /**< Text plane height. Used only if VideoEffectType == text */
+    M4OSA_UInt32               uiTextBufferHeight;
+    /**< Processing rate of the effect added when using the Fifties effect */
+    M4OSA_UInt32               uiFiftiesOutFrameRate;
+    /**< RGB16 input color of the effect added when using the rgb16 color effect */
+    M4OSA_UInt16               uiRgb16InputColor;
+
+    M4OSA_UInt8                uialphaBlendingStart;       /*Start percentage of Alpha blending*/
+    M4OSA_UInt8                uialphaBlendingMiddle;      /*Middle percentage of Alpha blending*/
+    M4OSA_UInt8                uialphaBlendingEnd;         /*End percentage of Alpha blending*/
+    M4OSA_UInt8                uialphaBlendingFadeInTime;  /*Duration, in percentage of
+                                                            effect duration, of the FadeIn phase*/
+    M4OSA_UInt8                uialphaBlendingFadeOutTime;   /*Duration, in percentage of effect
+                                                                duration, of the FadeOut phase*/
+    M4OSA_UInt32                width;   /*width of the ARGB8888 clip.
+                                            Used only if the video effect is framing */
+    M4OSA_UInt32                height; /*height of the ARGB8888 clip.
+                                            Used only if the video effect is framing */
+} M4xVSS_EffectSettings;
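+
+/**
+ * Illustrative sketch (not part of the original header): the alpha-blending fields
+ * of a framing effect, all expressed as percentages as documented above. The other
+ * fields (framing buffer or file path, start/duration, etc.) must be filled in as
+ * well; the values below are arbitrary example choices.
+ *
+ *   M4xVSS_EffectSettings fx;
+ *   memset(&fx, 0, sizeof(fx));
+ *   fx.uiStartPercent             = 0;     // start at the beginning of the clip
+ *   fx.uiDurationPercent          = 100;   // last until the end of the clip
+ *   fx.topleft_x                  = 16;    // overlay position in the output picture
+ *   fx.topleft_y                  = 16;
+ *   fx.uialphaBlendingStart       = 0;     // blending percentage at the start
+ *   fx.uialphaBlendingMiddle      = 100;   // blending percentage in the middle
+ *   fx.uialphaBlendingEnd         = 0;     // blending percentage at the end
+ *   fx.uialphaBlendingFadeInTime  = 20;    // fade-in: 20% of the effect duration
+ *   fx.uialphaBlendingFadeOutTime = 20;    // fade-out: 20% of the effect duration
+ */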
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_AlphaMagicSettings
+ * @brief    This structure defines the alpha magic transition settings
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_Void*            pAlphaFilePath;        /**< Alpha file path (JPG file)  */
+    M4OSA_Int32            blendingPercent;    /**< Blending Percentage between 0 and 100 */
+    M4OSA_Bool             isreverse;            /**< direct effect or reverse */
+    /*To support ARGB8888 : get the width and height */
+    M4OSA_UInt32            width;
+    M4OSA_UInt32            height;
+} M4xVSS_AlphaMagicSettings;
+
+/**
+ ******************************************************************************
+ * enum        M4xVSS_SlideTransition_Direction
+ * @brief    Defines directions for the slide transition
+ ******************************************************************************
+*/
+
+typedef enum {
+    M4xVSS_SlideTransition_RightOutLeftIn,
+    M4xVSS_SlideTransition_LeftOutRightIn,
+    M4xVSS_SlideTransition_TopOutBottomIn,
+    M4xVSS_SlideTransition_BottomOutTopIn
+} M4xVSS_SlideTransition_Direction;
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_SlideTransitionSettings
+ * @brief    This structure defines the slide transition settings
+ ******************************************************************************
+*/
+
+typedef struct
+{
+    M4xVSS_SlideTransition_Direction direction; /* direction of the slide */
+} M4xVSS_SlideTransitionSettings;
+
+/**
+ ******************************************************************************
+ * struct   M4xVSS_TransitionSettings
+ * @brief   This structure defines additional transition settings specific to
+ *            xVSS, which are appended to the VSS3GPP transition settings
+ *            structure.
+ ******************************************************************************
+*/
+typedef struct
+{
+    /* Anything xVSS-specific, but common to all transitions, would go here,
+    before the union. */
+    union {
+        /**< AlphaMagic settings, used only if VideoTransitionType ==
+            M4xVSS_kVideoTransitionType_AlphaMagic */
+        M4xVSS_AlphaMagicSettings        *pAlphaMagicSettings;
+        /* only in case of slide transition. */
+        M4xVSS_SlideTransitionSettings    *pSlideTransitionSettings;
+    } transitionSpecific;
+} M4xVSS_TransitionSettings;
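+
+/**
+ * Illustrative sketch (not part of the original header): configuring the
+ * xVSS-specific part of a slide transition. The VSS3GPP transition settings that
+ * this structure extends are defined elsewhere in the API.
+ *
+ *   M4xVSS_SlideTransitionSettings slide;
+ *   slide.direction = M4xVSS_SlideTransition_RightOutLeftIn;
+ *
+ *   M4xVSS_TransitionSettings xvssTransition;
+ *   xvssTransition.transitionSpecific.pSlideTransitionSettings = &slide;
+ */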
+
+
+/**
+ ******************************************************************************
+ * enum        M4xVSS_MediaRendering
+ * @brief    This enum defines the different media rendering modes (using the EXIF orientation)
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4xVSS_kResizing = 0,        /*The picture is resized, the aspect ratio can be different
+                                    from the original one. All of the picture is rendered*/
+    M4xVSS_kCropping,            /*The picture is cropped, the aspect ratio is the same as
+                                    the original one. The picture is not rendered entirely*/
+    M4xVSS_kBlackBorders        /*Black borders are rendered in order to keep the original
+                                    aspect ratio. All the picture is rendered*/
+
+} M4xVSS_MediaRendering;
+
+
+/**
+ ******************************************************************************
+ * struct   M4xVSS_ClipSettings
+ * @brief   This structure defines an input clip for the edition.
+ * @note    It also contains the settings for the cut and begin/end effects applied to the clip.
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_UInt32                    uiBeginCutPercent;    /**< Begin cut time, in percent of clip
+                                                                duration (only for 3GPP clip !) */
+    M4OSA_UInt32                    uiEndCutPercent;    /**< End cut time, in percent of clip
+                                                             duration (only for 3GPP clip !) */
+    M4OSA_UInt32                    uiDuration;            /**< Duration of the clip; if different
+                                                                from 0, it takes priority over
+                                                                uiEndCutTime or uiEndCutPercent */
+    M4OSA_Bool                        isPanZoom;            /**< RC: Boolean used to know if the
+                                                                 pan and zoom mode is enabled */
+    M4OSA_UInt16                    PanZoomXa;            /**< RC */
+    M4OSA_UInt16                    PanZoomTopleftXa;    /**< RC */
+    M4OSA_UInt16                    PanZoomTopleftYa;    /**< RC */
+    M4OSA_UInt16                    PanZoomXb;            /**< RC */
+    M4OSA_UInt16                    PanZoomTopleftXb;    /**< RC */
+    M4OSA_UInt16                    PanZoomTopleftYb;    /**< RC */
+    M4xVSS_MediaRendering            MediaRendering;        /**< FB only used with JPEG: to crop,
+                                                                 resize, or render black borders*/
+
+} M4xVSS_ClipSettings;
+
+/**
+ ******************************************************************************
+ * struct   M4xVSS_EditSettings
+ * @brief   This structure gathers all the information needed to define a complete
+ *          edition operation
+ ******************************************************************************
+*/
+typedef struct
+{
+    /**< Output video size */
+    M4VIDEOEDITING_VideoFrameSize             outputVideoSize;
+    /**< Output video format (MPEG4 / H263) */
+    M4VIDEOEDITING_VideoFormat                outputVideoFormat;
+    /**< Output audio format (AAC, AMRNB ...) */
+    M4VIDEOEDITING_AudioFormat                outputAudioFormat;
+    /**< Output audio sampling freq (8000Hz,...) */
+    M4VIDEOEDITING_AudioSamplingFrequency     outputAudioSamplFreq;
+    /**< Maximum output file size in BYTES (if set to 0, no limit) */
+    M4OSA_UInt32                              outputFileSize;
+    /**< Must the output audio be mono? Valid only for AAC */
+    M4OSA_Bool                                bAudioMono;
+    /**< Output video bitrate*/
+    M4OSA_UInt32                              outputVideoBitrate;
+    /**< Output audio bitrate*/
+    M4OSA_UInt32                              outputAudioBitrate;
+    /**< Background music track settings */
+    M4xVSS_BGMSettings                        *pBGMtrack;
+    /**< Function pointer to the text rendering engine; if not used, must be set to NULL */
+    M4xVSS_getTextRgbBufferFct                pTextRenderingFct;
+
+} M4xVSS_EditSettings;
+
+#endif /* __M4VSS3GPP_EXTENDED_API_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_InternalConfig.h b/libvideoeditor/vss/inc/M4VSS3GPP_InternalConfig.h
new file mode 100755
index 0000000..3926427
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_InternalConfig.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4VSS3GPP_INTERNALCONFIG_H__
+#define __M4VSS3GPP_INTERNALCONFIG_H__
+
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_InternalConfig.h
+ * @brief    This file contains magic numbers and configuration parameters.
+ ******************************************************************************
+*/
+
+/***********************/
+/* VideoEdition config */
+/***********************/
+
+#define M4VSS3GPP_MINIMAL_TRANSITION_DURATION            100    /**< 100 milliseconds */
+#define M4VSS3GPP_NB_AU_PREFETCH                        4        /**< prefetch 4 AUs */
+#define M4VSS3GPP_NO_STSS_JUMP_POINT                    40000 /**< If the 3gp file does not contain
+                                                                   an STSS table (no RAP frames),
+                                                                   jump backward 40 s maximum */
+
+/*****************/
+/* Writer config */
+/*****************/
+
+#define M4VSS3GPP_WRITER_AUDIO_STREAM_ID                1
+#define M4VSS3GPP_WRITER_VIDEO_STREAM_ID                2
+
+/**< Max AU size will be 0.9 times the YUV4:2:0 frame size */
+#define M4VSS3GPP_VIDEO_MIN_COMPRESSION_RATIO            0.9F
+/**< Max chunk size will be 1.2 times the max AU size */
+#define M4VSS3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO    1.2F
+
+/** READ CAREFULLY IN CASE OF REPORTED RUNTIME TROUBLES
+The max AU size is used to pre-set the maximum size of an AU that can be written by the 3GP writer.
+For audio standards with a variable AU size, some encoding settings could lead to AU sizes
+exceeding this limit.
+For AAC streams, for instance, the average AU size is given by:
+av AU size = (av bitrate * 1024)/(sampling freq)
+If the VSS returns the message:
+>> ERROR: audio AU size (XXXX) to copy larger than allocated one (YYYY) => abort
+>> PLEASE CONTACT SUPPORT TO EXTEND MAX AU SIZE IN THE PRODUCT LIBRARY
+the error is most likely to happen when mixing with full audio replacement.
+ */
+/**< AAC max AU size - READ EXPLANATION ABOVE */
+#define M4VSS3GPP_AUDIO_MAX_AU_SIZE                        2048
+/**< set to 4x the max AU size per chunk */
+#define M4VSS3GPP_AUDIO_MAX_CHUNCK_SIZE                    8192
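+
+/** Illustrative worked example (not part of the original header), taking the bitrate
+    in bits per second so that the formula above yields bits per AU (one AAC AU covers
+    1024 PCM samples):
+      128 kbps at 44100 Hz: (128000 * 1024) / 44100 ~= 2972 bits ~= 372 bytes per AU,
+    well below the 2048-byte M4VSS3GPP_AUDIO_MAX_AU_SIZE limit. Individual AUs of a
+    variable-bitrate stream can still exceed this average, which is the situation the
+    warning above refers to.
+ */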
+
+
+/***********************/
+/* H263 / MPEG4 config */
+/***********************/
+
+#define    M4VSS3GPP_EDIT_H263_MODULO_TIME            255
+
+#ifdef BIG_ENDIAN
+/**< GOV start code bytes 00 00 01 B3: 0xb3010000 on little-endian hosts, 0x000001b3 on big-endian hosts */
+#define    M4VSS3GPP_EDIT_GOV_HEADER            0x000001b3
+#else
+/**< GOV start code bytes 00 00 01 B3: 0xb3010000 on little-endian hosts, 0x000001b3 on big-endian hosts */
+#define    M4VSS3GPP_EDIT_GOV_HEADER            0xb3010000
+#endif
+
+/**************/
+/* AMR config */
+/**************/
+
+#define M4VSS3GPP_WRITTEN_AMR_TRACK_TIME_SCALE            8000
+#define M4VSS3GPP_AMR_DECODED_PCM_SAMPLE_NUMBER            160        /**< 20ms at 8000hz -->
+                                                                     20x8=160 samples */
+#define M4VSS3GPP_AMR_DEFAULT_BITRATE                   12200   /**< 12.2 kbps */
+
+/**************/
+/* EVRC config */
+/**************/
+
+#define M4VSS3GPP_EVRC_DEFAULT_BITRATE                  9200   /**< 9.2 kbps */
+
+/**************/
+/* MP3 config */
+/**************/
+
+/** Macro used to perform a jump on the MP3 track in several steps.
+    To avoid blocking the system with a long MP3 jump, the process
+    is divided into several steps.
+ */
+#define M4VSS3GPP_MP3_JUMPED_AU_NUMBER_MAX 100
+
+/** Macro defining the number of AUs read to analyse the bitrate:
+    the process reads the first n AUs of the MP3 stream to compute
+    the average bitrate, where n is given by this define.
+ */
+#define M4VSS3GPP_MP3_AU_NUMBER_MAX 500
+
+/*****************************/
+/* define AMR silence frames */
+/*****************************/
+
+#define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE     13
+#define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION 160
+
+#ifdef M4VSS3GPP_SILENCE_FRAMES
+const M4OSA_UInt8 M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE] =
+{
+    0x04, 0xFF, 0x18, 0xC7, 0xF0, 0x0D, 0x04, 0x33,
+    0xFF, 0xE0, 0x00, 0x00, 0x00
+};
+#else
+extern const M4OSA_UInt8 \
+              M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE];
+#endif
+
+/*****************************/
+/* define AAC silence frames */
+/*****************************/
+
+#define M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE      4
+
+#ifdef M4VSS3GPP_SILENCE_FRAMES
+const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_MONO[M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE] =
+{
+    0x00, 0xC8, 0x20, 0x07
+};
+#else
+extern const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_MONO[M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE];
+#endif
+
+#define M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE        6
+
+#ifdef M4VSS3GPP_SILENCE_FRAMES
+const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_STEREO[M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE] =
+{
+    0x21, 0x10, 0x03, 0x20, 0x54, 0x1C
+};
+#else
+extern const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_STEREO[M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE];
+#endif
+
+#endif /* __M4VSS3GPP_INTERNALCONFIG_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_InternalFunctions.h b/libvideoeditor/vss/inc/M4VSS3GPP_InternalFunctions.h
new file mode 100755
index 0000000..aeddd97
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_InternalFunctions.h
@@ -0,0 +1,663 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_InternalFunctions.h
+ * @brief    This file contains all function prototypes not visible to the external world.
+ * @note
+ ******************************************************************************
+*/
+
+
+#ifndef __M4VSS3GPP_INTERNALFUNCTIONS_H__
+#define __M4VSS3GPP_INTERNALFUNCTIONS_H__
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ *    VSS public API and types */
+#include "M4VSS3GPP_API.h"
+
+/**
+ *    VSS private types */
+#include "M4VSS3GPP_InternalTypes.h"
+
+
+#include "M4READER_Common.h" /**< for M4_AccessUnit definition */
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#include "M4VD_HW_API.h"
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* All errors are fatal in the VSS */
+#define M4ERR_CHECK_RETURN(err) if(M4NO_ERROR!=err) return err;
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepVideo()
+ * @brief    One step of video processing
+ * @param   pC    (IN/OUT) Internal edit context
+  ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intEditStepVideo(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepAudio()
+ * @brief    One step of audio processing
+ * @param   pC    (IN/OUT) Internal edit context
+  ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intEditStepAudio(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepMP3()
+ * @brief    One step of audio processing for the MP3 clip
+ * @param   pC    (IN/OUT) Internal edit context
+  ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intEditStepMP3(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intOpenClip()
+ * @brief    Open next clip
+ * @param   pC            (IN/OUT) Internal edit context
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intOpenClip(M4VSS3GPP_InternalEditContext *pC, M4VSS3GPP_ClipContext **hClip,
+                                 M4VSS3GPP_ClipSettings *pClipSettings);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder()
+ * @brief    Destroy the video encoder
+ * @note
+  ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder()
+ * @brief    Creates the video encoder
+ * @note
+  ******************************************************************************
+*/
+M4OSA_ERR  M4VSS3GPP_intCreateVideoEncoder(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo()
+ * @brief    Handle reaching the end of a clip's video track
+ * @note    If there is audio on the current clip, process it, else switch to the next clip
+ * @param   pC            (IN/OUT) Internal edit context
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio()
+ * @brief    Handle reaching the end of a clip's audio track
+ * @param   pC            (IN/OUT) Internal edit context
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing()
+ * @brief    Check if the clip is compatible with VSS editing
+ * @note
+ * @param    pClipProperties     (IN) Pointer to a valid ClipProperties structure to check.
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing(M4VIDEOEDITING_ClipProperties \
+                                                            *pClipProperties);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipOpen()
+ * @brief    Open a clip. Creates a clip context.
+ * @note
+ * @param   hClipCtxt            (OUT) Return the internal clip context
+ * @param   pClipSettings        (IN) Edit settings of this clip. The module will keep a
+ *                                        reference to this pointer
+ * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
+ * @param    bSkipAudioTrack        (IN) If true, do not open the audio
+ * @param    bFastOpenMode        (IN) If true, use the fast mode of the 3gpp reader
+ *                                            (only the first AU is read)
+ * @return    M4NO_ERROR:                No error
+ * @return    M4ERR_ALLOC:            There is no more available memory
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intClipInit (
+    M4VSS3GPP_ClipContext **hClipCtxt,
+    M4OSA_FileReadPointer *pFileReadPtrFct
+);
+
+M4OSA_ERR M4VSS3GPP_intClipOpen (
+    M4VSS3GPP_ClipContext *pClipCtxt,
+    M4VSS3GPP_ClipSettings *pClipSettings,
+    M4OSA_Bool bSkipAudioTrack,
+    M4OSA_Bool bFastOpenMode,
+    M4OSA_Bool bAvoidOpeningVideoDec
+);
+
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack()
+ * @brief    Delete the audio track. The clip will behave as if it had no audio track
+ * @note
+ * @param   pClipCtxt            (IN) Internal clip context
+ ******************************************************************************
+*/
+M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack(M4VSS3GPP_ClipContext *pClipCtxt);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCts()
+ * @brief    Jump to the previous RAP and decode up to the current video time
+ * @param   pClipCtxt    (IN) Internal clip context
+ * @param   iCts        (IN) Target CTS
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCts(M4VSS3GPP_ClipContext* pClipCtxt, M4OSA_Int32 iCts);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame()
+ * @brief    Read one AU frame in the clip
+ * @note
+ * @param   pClipCtxt            (IN) Internal clip context
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame(M4VSS3GPP_ClipContext *pClipCtxt);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame()
+ * @brief    Decode the current AUDIO frame.
+ * @note
+ * @param   pClipCtxt        (IN) internal clip context
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame(M4VSS3GPP_ClipContext *pClipCtxt);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt()
+ * @brief    Jump in the audio track of the clip.
+ * @note
+ * @param   pClipCtxt            (IN) internal clip context
+ * @param   pJumpCts            (IN/OUT) in:target CTS, out: reached CTS
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt(M4VSS3GPP_ClipContext *pClipCtxt, M4OSA_Int32 *pJumpCts);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipClose()
+ * @brief    Close a clip. Destroy the context.
+ * @note
+ * @param   pClipCtxt            (IN) Internal clip context
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intClipClose(M4VSS3GPP_ClipContext *pClipCtxt);
+
+M4OSA_ERR M4VSS3GPP_intClipCleanUp(M4VSS3GPP_ClipContext *pClipCtxt);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditJumpMP3()
+ * @brief    One step of jumping processing for the MP3 clip.
+ * @note    In one step, several AUs are jumped over
+ * @param   pC    (IN/OUT) Internal edit context
+  ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intEditJumpMP3(M4VSS3GPP_InternalEditContext *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerWriter()
+ * @brief    This function will register a specific file format writer.
+ * @note    According to the media type, this function will store the writer interfaces
+ *             in the internal context.
+ * @param    pContext:    (IN) Execution context.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER    pContext,pWtrGlobalInterface or pWtrDataInterface is
+ *                                 M4OSA_NULL (debug only), or invalid MediaType
+ ******************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_registerWriter(M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                     M4WRITER_OutputFileType MediaType,
+                                     M4WRITER_GlobalInterface* pWtrGlobalInterface,
+                                     M4WRITER_DataInterface* pWtrDataInterface);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerVideoEncoder()
+ * @brief    This function will register a specific video encoder.
+ * @note    According to the media type, this function will store the encoder interface
+ *            in the internal context.
+ * @param    pContext:    (IN) Execution context.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER    pContext or pEncGlobalInterface is M4OSA_NULL (debug only),
+ *                                 or invalid MediaType
+ ******************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_registerVideoEncoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                           M4ENCODER_Format MediaType,
+                                           M4ENCODER_GlobalInterface *pEncGlobalInterface);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerAudioEncoder()
+ * @brief    This function will register a specific audio encoder.
+ * @note    According to the media type, this function will store the encoder interface
+ *             in the internal context.
+ * @param    pContext:                (IN) Execution context.
+ * @param    mediaType:                (IN) The media type.
+ * @param    pEncGlobalInterface:    (OUT) the encoder interface functions.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext or pEncGlobalInterface is M4OSA_NULL (debug only)
+ ******************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_registerAudioEncoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                             M4ENCODER_AudioFormat MediaType,
+                                             M4ENCODER_AudioGlobalInterface *pEncGlobalInterface);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerReader()
+ * @brief    Register reader.
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_registerReader(M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                     M4READER_MediaType mediaType,
+                                     M4READER_GlobalInterface *pRdrGlobalInterface,
+                                     M4READER_DataInterface *pRdrDataInterface);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerVideoDecoder()
+ * @brief    Register video decoder
+ * @param    pContext                (IN/OUT) VSS context.
+ * @param    decoderType            (IN) Decoder type
+ * @param    pDecoderInterface    (IN) Decoder interface.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only), or the decoder type
+ *                                    is invalid
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_registerVideoDecoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                            M4DECODER_VideoType decoderType,
+                                            M4DECODER_VideoInterface *pDecoderInterface);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerAudioDecoder()
+ * @brief    Register audio decoder
+ * @note    This function is used internally by the VSS to register audio decoders.
+ * @param    context                (IN/OUT) VSS context.
+ * @param    decoderType            (IN) Audio decoder type
+ * @param    pDecoderInterface    (IN) Audio decoder interface.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null, or the decoder type is invalid
+ *                                 (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_registerAudioDecoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                           M4AD_Type decoderType,
+                                           M4AD_Interface *pDecoderInterface);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_unRegisterAllWriters()
+ * @brief    Unregister all writers
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_unRegisterAllWriters(M4VSS3GPP_MediaAndCodecCtxt *pC);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_unRegisterAllEncoders()
+ * @brief    Unregister the encoders
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_unRegisterAllEncoders(M4VSS3GPP_MediaAndCodecCtxt *pC);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_unRegisterAllReaders()
+ * @brief    Unregister all readers
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_unRegisterAllReaders(M4VSS3GPP_MediaAndCodecCtxt *pC);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_unRegisterAllDecoders()
+ * @brief    Unregister the decoders
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_unRegisterAllDecoders(M4VSS3GPP_MediaAndCodecCtxt *pC);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentWriter()
+ * @brief    Set current writer
+ * @param    pContext            (IN/OUT) VSS context.
+ * @param    mediaType            (IN) Media type.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_setCurrentWriter(M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                        M4VIDEOEDITING_FileType mediaType);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentVideoEncoder()
+ * @brief    Set a video encoder
+ * @param    pContext            (IN/OUT) VSS context.
+ * @param    MediaType           (IN) Encoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_setCurrentVideoEncoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                                M4SYS_StreamType mediaType);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentAudioEncoder()
+ * @brief    Set an audio encoder
+ * @param    context            (IN/OUT) VSS context.
+ * @param    MediaType        (IN) Encoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_setCurrentAudioEncoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                             M4SYS_StreamType mediaType);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentReader()
+ * @brief    Set current reader
+ * @param    pContext            (IN/OUT) VSS context.
+ * @param    mediaType            (IN) Media type.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_setCurrentReader(M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                         M4VIDEOEDITING_FileType mediaType);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentVideoDecoder()
+ * @brief    Set a video decoder
+ * @param    pContext            (IN/OUT) VSS context.
+ * @param    decoderType        (IN) Decoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4WAR_VSS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_setCurrentVideoDecoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                             M4_StreamType mediaType);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentAudioDecoder()
+ * @brief    Set an audio decoder
+ * @param    context            (IN/OUT) VSS context.
+ * @param    decoderType        (IN) Decoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_setCurrentAudioDecoder(M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                             M4_StreamType mediaType);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_clearInterfaceTables()
+ * @brief    Clear encoders, decoders, reader and writers interfaces tables
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    The context is null
+ ************************************************************************
+*/
+M4OSA_ERR   M4VSS3GPP_clearInterfaceTables(M4VSS3GPP_MediaAndCodecCtxt *pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_subscribeMediaAndCodec()
+ * @brief    This function registers the reader, decoders, writers and encoders
+ *          in the VSS.
+ * @note
+ * @param    pContext:    (IN) Execution context.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER    pContext is NULL
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_subscribeMediaAndCodec(M4VSS3GPP_MediaAndCodecCtxt *pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB()
+ * @brief   Return the length, in bytes, of the AMR Narrow-Band frame contained in the given buffer
+ * @note
+ * @param   pAudioFrame   (IN) AMRNB frame
+ * @return  The AMR-NB frame size in bytes
+ ******************************************************************************
+*/
+M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB(M4OSA_MemAddr8 pAudioFrame);
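
The AMR-NB frame length is conventionally derived from the frame-type (FT) field of the first (TOC) byte, as in 3GPP TS 26.101 / RFC 4867 storage format. The sketch below only illustrates that lookup, assuming the returned size includes the TOC byte; the shipped implementation may differ in detail, and the helper name is hypothetical.

```c
/* Illustrative sketch only, not the shipped M4VSS3GPP code. */
#include <stdint.h>

static uint32_t example_amrnb_frame_size(const uint8_t *pAudioFrame)
{
    /* Total frame sizes in bytes (TOC byte included), indexed by FT 0..15 */
    static const uint32_t kFrameSize[16] = {
        13, 14, 16, 18, 20, 21, 27, 32, /* AMR 4.75 ... 12.2 kbps        */
         6,  1,  1,  1,  1,  1,  1,  1  /* SID, reserved and NO_DATA     */
    };
    uint8_t frameType = (pAudioFrame[0] >> 3) & 0x0F;  /* FT field of the TOC byte */
    return kFrameSize[frameType];
}
```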
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC()
+ * @brief   Return the length, in bytes, of the EVRC frame contained in the given buffer
+ * @note
+ *     0 1 2 3
+ *    +-+-+-+-+
+ *    |fr type|              RFC 3558
+ *    +-+-+-+-+
+ *
+ * Frame Type: 4 bits
+ *    The frame type indicates the type of the corresponding codec data
+ *    frame in the RTP packet.
+ *
+ * For EVRC and SMV codecs, the frame type values and size of the
+ * associated codec data frame are described in the table below:
+ *
+ * Value   Rate      Total codec data frame size (in octets)
+ * ---------------------------------------------------------
+ *   0     Blank      0    (0 bit)
+ *   1     1/8        2    (16 bits)
+ *   2     1/4        5    (40 bits; not valid for EVRC)
+ *   3     1/2       10    (80 bits)
+ *   4     1         22    (171 bits; 5 padded at end with zeros)
+ *   5     Erasure    0    (SHOULD NOT be transmitted by sender)
+ *
+ * @param   pCpAudioFrame   (IN) EVRC frame
+ * @return  The EVRC frame size in bytes
+ ******************************************************************************
+*/
+M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC(M4OSA_MemAddr8 pAudioFrame);
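
A minimal sketch of the RFC 3558 table quoted above, assuming the frame type occupies the low nibble of the first byte and that the returned value is the codec data frame size listed in the table (the real function may also account for the frame-type byte itself). The helper name is hypothetical.

```c
/* Illustrative sketch only, not the shipped M4VSS3GPP code. */
#include <stdint.h>

static uint32_t example_evrc_frame_size(const uint8_t *pAudioFrame)
{
    switch (pAudioFrame[0] & 0x0F) {   /* frame type assumed in the low nibble */
        case 0:  return 0;             /* blank                                */
        case 1:  return 2;             /* 1/8 rate                             */
        case 2:  return 5;             /* 1/4 rate (not valid for EVRC)        */
        case 3:  return 10;            /* 1/2 rate                             */
        case 4:  return 22;            /* full rate                            */
        case 5:  return 0;             /* erasure                              */
        default: return 0;             /* unknown type                         */
    }
}
```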
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intBuildAnalysis()
+ * @brief    Get video and audio properties from the clip streams
+ * @note    This function must return fatal errors only (errors that should not happen in the
+ *             final integrated product).
+ * @param   pClipCtxt            (IN) internal clip context
+ * @param    pClipProperties        (OUT) Pointer to a valid ClipProperties structure.
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intBuildAnalysis(M4VSS3GPP_ClipContext *pClipCtxt,
+                                     M4VIDEOEDITING_ClipProperties *pClipProperties);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreateAudioEncoder()
+ * @brief    Reset the audio encoder (Create it if needed)
+ * @note
+  ******************************************************************************
+*/
+M4OSA_ERR  M4VSS3GPP_intCreateAudioEncoder(M4VSS3GPP_EncodeWriteContext *pC_ewc,
+                                             M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
+                                             M4OSA_UInt32 uiAudioBitrate);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreate3GPPOutputFile()
+ * @brief    Creates and prepares the output 3GPP file
+ * @note    Creates the writer, creates the output file, adds the streams and readies the
+ *            writing process
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intCreate3GPPOutputFile(M4VSS3GPP_EncodeWriteContext *pC_ewc,
+                                            M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
+                                            M4OSA_FileWriterPointer *pOsaFileWritPtr,
+                                            M4OSA_Void* pOutputFile,
+                                            M4OSA_FileReadPointer *pOsaFileReadPtr,
+                                            M4OSA_Void* pTempFile,
+                                            M4OSA_UInt32 maxOutputFileSize);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCompatibility()
+ * @brief    This function allows checking if two clips are compatible with each other for
+ *             VSS 3GPP audio mixing feature.
+ * @note
+ * @param    pC                            (IN) Context of the audio mixer
+ * @param    pInputClipProperties        (IN) Clip analysis of the first clip
+ * @param    pAddedClipProperties        (IN) Clip analysis of the second clip
+ * @return    M4NO_ERROR:            No error
+ * @return    M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return  M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_intAudioMixingCompatibility(M4VSS3GPP_InternalAudioMixingContext *pC,
+                                                 M4VIDEOEDITING_ClipProperties \
+                                                 *pInputClipProperties,
+                                                 M4VIDEOEDITING_ClipProperties  \
+                                                 *pAddedClipProperties);
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack()
+ * @brief    Delete the audio track. The clip will behave as if it had no audio track
+ * @note
+ * @param   pClipCtxt            (IN) Internal clip context
+ ******************************************************************************
+*/
+M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack(M4VSS3GPP_ClipContext *pClipCtxt);
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+M4OSA_ERR M4VSS3GPP_intClipRegisterExternalVideoDecoder(M4VSS3GPP_ClipContext *pClipCtxt,
+                                     M4VD_VideoType decoderType,
+                                     M4VD_Interface*    pDecoderInterface,
+                                     M4OSA_Void* pUserData);
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+/******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intStartAU()
+ * @brief    StartAU writer-like interface used for the VSS 3GPP only
+ * @note
+ * @param    pContext: (IN) It is the VSS 3GPP context in our case
+ * @param    streamID: (IN) Id of the stream to which the Access Unit is related.
+ * @param    pAU:      (IN/OUT) Access Unit to be prepared.
+ * @return    M4NO_ERROR: there is no error
+ ******************************************************************************
+*/
+M4OSA_ERR  M4VSS3GPP_intStartAU(M4WRITER_Context pContext, M4SYS_StreamID streamID,
+                                 M4SYS_AccessUnit* pAU);
+
+/******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intProcessAU()
+ * @brief    ProcessAU writer-like interface used for the VSS 3GPP only
+ * @note
+ * @param    pContext: (IN) It is the VSS 3GPP context in our case
+ * @param    streamID: (IN) Id of the stream to which the Access Unit is related.
+ * @param    pAU:      (IN/OUT) Access Unit to be written
+ * @return    M4NO_ERROR: there is no error
+ ******************************************************************************
+*/
+M4OSA_ERR  M4VSS3GPP_intProcessAU(M4WRITER_Context pContext, M4SYS_StreamID streamID,
+                                     M4SYS_AccessUnit* pAU);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intVPP()
+ * @brief    We implement our own VideoPreProcessing function
+ * @note    It is called by the video encoder
+ * @param    pContext    (IN) VPP context, which actually is the VSS 3GPP context in our case
+ * @param    pPlaneIn    (IN) Pointer to the input YUV420 image planes
+ * @param    pPlaneOut    (IN/OUT) Pointer to an array of 3 planes that will contain the
+ *                             output YUV420 image
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+*/
+M4OSA_ERR  M4VSS3GPP_intVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+                             M4VIFI_ImagePlane* pPlaneOut);
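
For illustration, a pass-through callback matching this VPP signature could simply copy the three YUV420 planes from pPlaneIn to pPlaneOut; the actual M4VSS3GPP_intVPP presumably does much more (reading, decoding, effects and transitions). The sketch assumes the usual M4VIFI_ImagePlane fields (u_width, u_height, u_stride, u_topleft, pac_data), and the helper name is hypothetical.

```c
/* Illustrative sketch only, not the shipped M4VSS3GPP_intVPP. */
#include <string.h>   /* memcpy */

static M4OSA_ERR example_VPP_passthrough(M4VPP_Context pContext,
                                         M4VIFI_ImagePlane *pPlaneIn,
                                         M4VIFI_ImagePlane *pPlaneOut)
{
    M4OSA_UInt32 plane, line;

    for (plane = 0; plane < 3; plane++) {
        M4VIFI_UInt8 *pSrc = pPlaneIn[plane].pac_data  + pPlaneIn[plane].u_topleft;
        M4VIFI_UInt8 *pDst = pPlaneOut[plane].pac_data + pPlaneOut[plane].u_topleft;

        /* Copy line by line to honour the stride of each plane */
        for (line = 0; line < pPlaneOut[plane].u_height; line++) {
            memcpy(pDst, pSrc, pPlaneOut[plane].u_width);
            pSrc += pPlaneIn[plane].u_stride;
            pDst += pPlaneOut[plane].u_stride;
        }
    }
    return M4NO_ERROR;
}
```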
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __M4VSS3GPP_INTERNALFUNCTIONS_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4VSS3GPP_InternalTypes.h b/libvideoeditor/vss/inc/M4VSS3GPP_InternalTypes.h
new file mode 100755
index 0000000..55513bc
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4VSS3GPP_InternalTypes.h
@@ -0,0 +1,779 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_InternalTypes.h
+ * @brief    This file contains all enum and types not visible to the external world.
+ * @note
+ ******************************************************************************
+*/
+
+
+#ifndef __M4VSS3GPP_INTERNALTYPES_H__
+#define __M4VSS3GPP_INTERNALTYPES_H__
+
+#define M4VSS_VERSION_MAJOR        3
+#define M4VSS_VERSION_MINOR        2
+#define M4VSS_VERSION_REVISION    5
+
+#include "NXPSW_CompilerSwitches.h"
+
+/**
+ *    VSS public API and types */
+#include "M4VSS3GPP_API.h"
+
+/**
+ *    Internally used modules */
+#include "M4READER_Common.h"        /**< Reader common interface */
+#include "M4WRITER_common.h"        /**< Writer common interface */
+#include "M4DECODER_Common.h"        /**< Decoder common interface */
+#include "M4ENCODER_common.h"        /**< Encoder common interface */
+#include "M4VIFI_FiltersAPI.h"        /**< Image planes definition */
+#include "M4READER_3gpCom.h"        /**< Read 3GPP file     */
+#include "M4AD_Common.h"            /**< Decoder audio   */
+#include "M4ENCODER_AudioCommon.h"  /**< Encode audio    */
+
+
+#include "SSRC.h"                    /**< SSRC             */
+#include "From2iToMono_16.h"        /**< Stereo to Mono     */
+#include "MonoTo2I_16.h"            /**< Mono to Stereo     */
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#include "M4VD_HW_API.h"
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define WINDOW_SIZE 10
+/**
+ ******************************************************************************
+ * enum            M4VSS3GPP_EditState
+ * @brief        Main state machine of the VSS 3GPP edit operation.
+ ******************************************************************************
+*/
+
+typedef enum
+{
+    M4VSS3GPP_kEditState_CREATED    = 0,    /**< M4VSS3GPP_editInit has been called */
+    M4VSS3GPP_kEditState_VIDEO        = 1,    /**< Processing video track */
+    M4VSS3GPP_kEditState_AUDIO        = 2,    /**< Processing audio track */
+    M4VSS3GPP_kEditState_MP3        = 3,    /**< Processing MP3 audio track */
+    M4VSS3GPP_kEditState_MP3_JUMP   = 4,        /**< Processing a jump in a MP3 audio track */
+    M4VSS3GPP_kEditState_FINISHED    = 5,    /**< Processing done, VSS 3GPP can be closed */
+    M4VSS3GPP_kEditState_CLOSED        = 6        /**< Output file has been closed,
+                                                     VSS 3GPP can be destroyed */
+}
+M4VSS3GPP_EditState;
+
+typedef enum
+{
+    M4VSS3GPP_kEditVideoState_READ_WRITE    = 10,    /**< Doing Read/Write operation
+                                                        (no decoding/encoding) */
+    M4VSS3GPP_kEditVideoState_BEGIN_CUT     = 11,    /**< Decode encode to create an I frame */
+    M4VSS3GPP_kEditVideoState_DECODE_ENCODE = 12,    /**< Doing Read-Decode/Filter/
+                                                        Encode-Write operation */
+    M4VSS3GPP_kEditVideoState_TRANSITION    = 13,    /**< Transition; blending of two videos */
+    M4VSS3GPP_kEditVideoState_AFTER_CUT     = 14    /**< Special Read/Write mode after a
+                                                            begin cut (time frozen) */
+}
+M4VSS3GPP_EditVideoState;
+
+typedef enum
+{
+    M4VSS3GPP_kEditAudioState_READ_WRITE    = 20,    /**< Doing Read/Write operation
+                                                        (no decoding/encoding) */
+    M4VSS3GPP_kEditAudioState_DECODE_ENCODE = 21,    /**< Doing Read-Decode/Filter/
+                                                            Encode-Write operation */
+    M4VSS3GPP_kEditAudioState_TRANSITION    = 22    /**< Transition; blending of two audio */
+}
+M4VSS3GPP_EditAudioState;
+
+
+/**
+ ******************************************************************************
+ * enum            M4VSS3GPP_ClipStatus
+ * @brief        Status of the clip.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4VSS3GPP_kClipStatus_READ            = 0,    /**< The clip is currently ready for reading */
+    M4VSS3GPP_kClipStatus_DECODE        = 1,    /**< The clip is currently ready for decoding */
+    M4VSS3GPP_kClipStatus_DECODE_UP_TO    = 2        /**< The clip is currently in split
+                                                         decodeUpTo() processing */
+}
+M4VSS3GPP_ClipStatus;
+
+
+/**
+ ******************************************************************************
+ * enum            M4VSS3GPP_ClipCurrentEffect
+ * @brief        Current effect applied to the clip.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4VSS3GPP_kClipCurrentEffect_NONE    = 0,    /**< None */
+    M4VSS3GPP_kClipCurrentEffect_BEGIN    = 1,    /**< Begin effect currently applied */
+    M4VSS3GPP_kClipCurrentEffect_END    = 2        /**< End effect currently applied */
+}
+M4VSS3GPP_ClipCurrentEffect;
+
+
+/**
+ ******************************************************************************
+ * enum            M4VSS3GPP_AudioMixingState
+ * @brief        Main state machine of the VSS audio mixing operation.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4VSS3GPP_kAudioMixingState_VIDEO = 0,            /**< Video is being processed */
+    M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT,  /**< Audio is being processed */
+    M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT, /**< Audio is being processed */
+    M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT,  /**< Audio is being processed */
+    M4VSS3GPP_kAudioMixingState_FINISHED              /**< Processing finished, user must now
+                                                            call M4VSS3GPP_audioMixingCleanUp*/
+}
+M4VSS3GPP_AudioMixingState;
+
+
+/**
+ ******************************************************************************
+ * enum            M4VSS3GPP_ExtractPictureState
+ * @brief        Main state machine of the VSS picture extraction.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4VSS3GPP_kExtractPictureState_OPENED   = 0,  /**< Video clip is opened and ready to be read
+                                                     until the RAP before the picture to extract */
+    M4VSS3GPP_kExtractPictureState_PROCESS    = 1,  /**< Video is decoded from the previous RAP
+                                                        to the picture to extract */
+    M4VSS3GPP_kExtractPictureState_EXTRACTED= 2   /**< Video AU has been  decoded, user must now
+                                                        call M4VSS3GPP_extractPictureCleanUp */
+}
+M4VSS3GPP_ExtractPictureState;
+
+
+/**
+ ******************************************************************************
+ * @brief        Codec registration is the same as in VPS and VES, so less mapping
+ *              is required toward the VSS API types
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4WRITER_GlobalInterface*    pGlobalFcts;    /**< open, close, setoption,etc... functions */
+    M4WRITER_DataInterface*        pDataFcts;        /**< data manipulation functions */
+} M4VSS3GPP_WriterInterface;
+/**
+ ******************************************************************************
+ * struct AAC_DEC_STREAM_PROPS
+ * @brief AAC Stream properties
+ * @note aNumChan and aSampFreq are used for parsing even if the user parameters
+ *        differ.  User parameters drive the output behaviour of the decoder,
+ *        whereas the bitstream properties are used for parsing.
+ ******************************************************************************
+ */
+typedef struct {
+  M4OSA_Int32 aAudioObjectType;     /**< Audio object type of the stream - in fact
+                                         the type found in the Access Unit parsed */
+  M4OSA_Int32 aNumChan;             /**< number of channels (=1(mono) or =2(stereo))
+                                         as indicated by input bitstream*/
+  M4OSA_Int32 aSampFreq;            /**< sampling frequency in Hz */
+  M4OSA_Int32 aExtensionSampFreq;   /**< extended sampling frequency in Hz, = 0 is
+                                         no extended frequency */
+  M4OSA_Int32 aSBRPresent;          /**< presence=1/absence=0 of SBR */
+  M4OSA_Int32 aPSPresent;           /**< presence=1/absence=0 of PS */
+  M4OSA_Int32 aMaxPCMSamplesPerCh;  /**< max number of PCM samples per channel */
+} AAC_DEC_STREAM_PROPS;
+
+
+/**
+ ******************************************************************************
+ * struct          M4VSS3GPP_MediaAndCodecCtxt
+ * @brief        Filesystem and codec registration function pointers
+ ******************************************************************************
+*/
+typedef struct {
+    /**
+      * Media and Codec registration */
+    /**< Table of M4VSS3GPP_WriterInterface structures for the available writers list */
+    M4VSS3GPP_WriterInterface    WriterInterface[M4WRITER_kType_NB];
+    /**< open, close, setoption,etc... functions of the used writer*/
+    M4WRITER_GlobalInterface*    pWriterGlobalFcts;
+    /**< data manipulation functions of the used writer */
+    M4WRITER_DataInterface*        pWriterDataFcts;
+
+    /**< Table of M4ENCODER_GlobalInterface structures for the available encoders list */
+    M4ENCODER_GlobalInterface*    pVideoEncoderInterface[M4ENCODER_kVideo_NB];
+    /**< Functions of the used encoder */
+    M4ENCODER_GlobalInterface*    pVideoEncoderGlobalFcts;
+
+    M4OSA_Void*                    pVideoEncoderExternalAPITable[M4ENCODER_kVideo_NB];
+    M4OSA_Void*                    pCurrentVideoEncoderExternalAPI;
+    M4OSA_Void*                    pVideoEncoderUserDataTable[M4ENCODER_kVideo_NB];
+    M4OSA_Void*                    pCurrentVideoEncoderUserData;
+
+    /**< Table of M4ENCODER_AudioGlobalInterface structures for the available encoders list */
+    M4ENCODER_AudioGlobalInterface*    pAudioEncoderInterface[M4ENCODER_kAudio_NB];
+    /**< Table of internal/external flags for the available encoders list */
+    M4OSA_Bool                      pAudioEncoderFlag[M4ENCODER_kAudio_NB];
+    /**< Functions of the used encoder */
+    M4ENCODER_AudioGlobalInterface*    pAudioEncoderGlobalFcts;
+
+    M4READER_GlobalInterface*   m_pReaderGlobalItTable[M4READER_kMediaType_NB];
+    M4READER_DataInterface*     m_pReaderDataItTable[M4READER_kMediaType_NB];
+    M4READER_GlobalInterface*   m_pReader;
+    M4READER_DataInterface*     m_pReaderDataIt;
+    M4OSA_UInt8                 m_uiNbRegisteredReaders;
+
+    M4DECODER_VideoInterface*   m_pVideoDecoder;
+    M4DECODER_VideoInterface*   m_pVideoDecoderItTable[M4DECODER_kVideoType_NB];
+    M4OSA_UInt8                 m_uiNbRegisteredVideoDec;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+    M4OSA_Void*                    m_pCurrentVideoDecoderUserData;
+    M4OSA_Void*                    m_pVideoDecoderUserDataTable[M4DECODER_kVideoType_NB];
+#endif
+
+    M4AD_Interface*             m_pAudioDecoder;
+    M4AD_Interface*                m_pAudioDecoderItTable[M4AD_kType_NB];
+    /**< store indices of external decoders */
+    M4OSA_Bool                    m_pAudioDecoderFlagTable[M4AD_kType_NB];
+
+    M4OSA_Void*                pAudioEncoderUserDataTable[M4ENCODER_kAudio_NB];
+    M4OSA_Void*                pCurrentAudioEncoderUserData;
+
+    M4OSA_Void*                pAudioDecoderUserDataTable[M4AD_kType_NB];
+    M4OSA_Void*                pCurrentAudioDecoderUserData;
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+    /* boolean to tell whether registered external OMX codecs should be freed during cleanup
+     or new codec registration*/
+    M4OSA_Bool    bAllowFreeingOMXCodecInterface;
+#endif
+
+
+} M4VSS3GPP_MediaAndCodecCtxt;
+
+
+/**
+ ******************************************************************************
+ * structure    M4VSS3GPP_ClipContext
+ * @brief        This structure contains information related to one 3GPP clip (private)
+ * @note        This structure is used to store the context related to one clip
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4VSS3GPP_ClipSettings*        pSettings;            /**< Pointer to the clip settings
+                                                            (not possessed) */
+
+    M4VSS3GPP_ClipStatus        Vstatus;            /**< Video status of the clip reading */
+    M4VSS3GPP_ClipStatus        Astatus;            /**< Audio status of the clip reading */
+
+    M4OSA_Int32                    iVoffset;            /**< [Milliseconds] Offset between the
+                                                            clip and the output video stream
+                                                            (begin cut taken into account) */
+    M4OSA_Int32                    iAoffset;           /**< [Timescale] Offset between the clip
+                                                            and the output audio stream (begin
+                                                            cut taken into account) */
+
+    /**
+     * 3GPP reader Stuff */
+    M4OSA_FileReadPointer*        pFileReadPtrFct;
+    M4OSA_Context                pReaderContext;         /**< Context of the 3GPP reader module */
+    M4_VideoStreamHandler*        pVideoStream;        /**< Description of the read video stream */
+    M4_AudioStreamHandler*        pAudioStream;        /**< Description of the read audio stream */
+    M4_AccessUnit                VideoAU;            /**< Read video access unit (we do not use a
+                                                            pointer to allocate later, because
+                                                            most of the time we will need it) */
+    M4_AccessUnit                AudioAU;            /**< Read audio access unit (we do not use a
+                                                         pointer to allocate later, because most
+                                                         of the time we will need it) */
+    M4OSA_Bool                    bVideoAuAvailable;    /**< Tell if a video AU is available
+                                                            (previously read) */
+    /**< Boolean only used to fix the BZZ bug... */
+    M4OSA_Bool                    bFirstAuWritten;
+
+    /**
+     * Video decoder stuff */
+    M4OSA_Context                pViDecCtxt;            /**< Video decoder context */
+    M4OSA_Int32                 iVideoDecCts;       /**< [Milliseconds] For video decodeUpTo(),
+                                                             the actual reached cts */
+    M4OSA_Int32                    iVideoRenderCts;    /**< [Milliseconds] For video render(),
+                                                             the actual reached cts */
+    M4OSA_Bool                    isRenderDup;        /**< To handle duplicate frame rendering in
+                                                             case of external decoding */
+    M4VIFI_ImagePlane*            lastDecodedPlane;    /**< Last decoded plane */
+
+    /**
+     * MPEG4 time info stuff at clip level */
+    M4OSA_Bool             bMpeg4GovState;            /**< Namely, update or initialization */
+    M4OSA_UInt32           uiMpeg4PrevGovValueGet;    /**< Previous GOV value read (in seconds) */
+    M4OSA_UInt32           uiMpeg4PrevGovValueSet;    /**< Previous GOV value written (in seconds) */
+
+    /**
+     * Time-line stuff */
+     /**< [Milliseconds] CTS at which the video clip actually starts */
+    M4OSA_Int32                    iActualVideoBeginCut;
+    /**< [Milliseconds] CTS at which the audio clip actually starts */
+    M4OSA_Int32                    iActualAudioBeginCut;
+    /**< [Milliseconds] Time at which the clip must end */
+    M4OSA_Int32                    iEndTime;
+
+    /**
+     * Audio decoder stuff */
+    M4OSA_Context                pAudioDecCtxt;        /**< Context of the AMR decoder */
+    M4AD_Buffer                 AudioDecBufferIn;    /**< Input structure for the audio decoder */
+    M4AD_Buffer                    AudioDecBufferOut;    /**< Buffer for the decoded PCM data */
+    AAC_DEC_STREAM_PROPS        AacProperties;      /**< Structure for new api to get AAC
+                                                            properties */
+
+    /**
+     * Audio AU to Frame split stuff */
+    M4OSA_Bool                bAudioFrameAvailable;  /**< True if an audio frame is available */
+    M4OSA_MemAddr8            pAudioFramePtr;        /**< Pointer to the Audio frame */
+    M4OSA_UInt32              uiAudioFrameSize;        /**< Size of the audio frame available */
+    M4OSA_Int32               iAudioFrameCts;       /**< [Timescale] CTS of the audio frame
+                                                            available */
+
+    /**
+     * Silence frame stuff */
+     /**< Size to reserve to store a pcm full of zeros compatible with master clip stream type */
+    M4OSA_UInt32                uiSilencePcmSize;
+    /**< Pointer to silence frame data compatible with master clip stream type */
+    M4OSA_UInt8*                pSilenceFrameData;
+    /**< Size of silence frame data compatible with master clip stream type */
+    M4OSA_UInt32                uiSilenceFrameSize;
+    /**< [Timescale] Duration of silence frame data compatible with master clip stream type */
+    M4OSA_Int32                 iSilenceFrameDuration;
+    M4OSA_Double                scale_audio;            /**< frequency / 1000.0 */
+
+    /**
+     * Interfaces of the used modules */
+     /**< Filesystem and shell reader, decoder functions */
+    M4VSS3GPP_MediaAndCodecCtxt ShellAPI;
+} M4VSS3GPP_ClipContext;
+
+
+/**
+ ******************************************************************************
+ * enum            anonymous enum
+ * @brief        enum to keep track of the encoder state
+ ******************************************************************************
+*/
+enum
+{
+    M4VSS3GPP_kNoEncoder,
+    M4VSS3GPP_kEncoderClosed,
+    M4VSS3GPP_kEncoderStopped,
+    M4VSS3GPP_kEncoderRunning
+};
+
+/**
+ ******************************************************************************
+ * structure    M4VSS3GPP_AudioVideoContext
+ * @brief        This structure defines the audio video context (private)
+ * @note        This structure is used for all audio/video, encoding/writing operations.
+ ******************************************************************************
+*/
+typedef struct
+{
+    /**
+     * Timing Stuff */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    /**< [Milliseconds] Current CTS of the video input stream */
+    M4OSA_Double                dInputVidCts;
+    /**< [Milliseconds] Current CTS of the video output stream */
+    M4OSA_Double                dOutputVidCts;
+    /**< [Milliseconds] Current CTS of the audio output stream */
+    M4OSA_Double                dATo;
+     /**< [Milliseconds] Duration of the output file, used for progress computation */
+    M4OSA_Int32                    iOutputDuration;
+
+    /**
+     * Output Video Stream Stuff */
+    M4SYS_StreamType            VideoStreamType;        /**< Output video codec */
+    M4OSA_UInt32                uiVideoBitrate;     /**< Average video bitrate of the output file,
+                                                         computed from input bitrates, durations,
+                                                          transitions and cuts */
+    M4OSA_UInt32                uiVideoWidth;            /**< Output image width */
+    M4OSA_UInt32                uiVideoHeight;            /**< Output image height */
+    M4OSA_UInt32                uiVideoTimeScale;        /**< Time scale to use for the encoding
+                                                            of the transition (if MPEG-4) */
+    M4OSA_Bool                    bVideoDataPartitioning;    /**< Data partitioning to use for the
+                                                                 encoding of the transition
+                                                                 (if MPEG-4) */
+    M4OSA_MemAddr8                pVideoOutputDsi;        /**< Decoder Specific Info of the output
+                                                                 MPEG-4 track */
+    M4OSA_UInt16                uiVideoOutputDsiSize;    /**< Size of the Decoder Specific Info
+                                                                of the output MPEG-4 track */
+    M4OSA_Bool                  bActivateEmp;           /**< Encode in Mpeg4 format with
+                                                            limitations for EMP */
+
+    /**
+     * Output Audio Stream Stuff */
+    M4SYS_StreamType            AudioStreamType;        /**< Type of the output audio stream */
+    M4OSA_UInt32                uiNbChannels;           /**< Number of channels in the output
+                                                            stream (1=mono, 2=stereo) */
+    M4OSA_UInt32                uiAudioBitrate;         /**< Audio average bitrate (in bps) */
+    M4OSA_UInt32                uiSamplingFrequency;    /**< Sampling audio frequency (8000 for
+                                                                amr, 16000 or more for aac) */
+    M4OSA_MemAddr8                pAudioOutputDsi;        /**< Decoder Specific Info of the
+                                                                output audio track */
+    M4OSA_UInt16                uiAudioOutputDsiSize;    /**< Size of the Decoder Specific Info
+                                                                of the output audio track */
+
+    /**
+     * Audio Encoder stuff */
+    M4OSA_Context                   pAudioEncCtxt;        /**< Context of the audio encoder */
+    M4ENCODER_AudioDecSpecificInfo  pAudioEncDSI;       /**< Decoder specific info built by the
+                                                                encoder */
+    M4ENCODER_AudioParams           AudioEncParams;     /**< Config of the audio encoder */
+
+    /**
+     * Silence frame stuff */
+    M4OSA_UInt32                uiSilencePcmSize;       /**< Size to reserve to store a pcm full
+                                                             of zeros compatible with master clip
+                                                             stream type */
+    M4OSA_UInt8*                pSilenceFrameData;      /**< Pointer to silence frame data
+                                                                compatible with master clip
+                                                                stream type */
+    M4OSA_UInt32                uiSilenceFrameSize;     /**< Size of silence frame data compatible
+                                                             with master clip stream type */
+    M4OSA_Int32                 iSilenceFrameDuration;  /**< [Timescale] Duration of silence frame
+                                                                 data compatible with master clip
+                                                                 stream type */
+    M4OSA_Double                scale_audio;            /**< frequency / 1000.0 */
+
+    /**
+     * Video Encoder stuff */
+    M4ENCODER_Context            pEncContext;            /**< Context of the encoder */
+    M4WRITER_DataInterface        OurWriterDataInterface;    /**< Our own implementation of the
+                                                                    writer interface, to give to
+                                                                    the encoder shell */
+    M4OSA_MemAddr32                pDummyAuBuffer;            /**< Buffer given to the encoder for
+                                                                   it to write AUs we don't want
+                                                                    in the output */
+    M4OSA_Int32                    iMpeg4GovOffset;        /**< Clip GOV offset in ms between
+                                                                 video and system time */
+    M4OSA_ERR                    VppError;                /**< Error for VPP are masked by Video
+                                                               Encoder, so we must remember it */
+    M4OSA_UInt32                encoderState;
+
+    /**
+     * Writer stuff */
+    M4WRITER_Context            p3gpWriterContext;        /**< Context of the 3GPP writer module */
+    M4SYS_StreamDescription        WriterVideoStream;        /**< Description of the written
+                                                                    video stream */
+    M4SYS_StreamDescription        WriterAudioStream;        /**< Description of the written
+                                                                    audio stream */
+    M4WRITER_StreamVideoInfos    WriterVideoStreamInfo;    /**< Video properties of the written
+                                                                     video stream */
+    M4WRITER_StreamAudioInfos    WriterAudioStreamInfo;    /**< Audio properties of the written
+                                                                    audio stream */
+    M4SYS_AccessUnit            WriterVideoAU;            /**< Written video access unit */
+    M4SYS_AccessUnit            WriterAudioAU;            /**< Written audio access unit */
+    M4OSA_UInt32                uiVideoMaxAuSize;        /**< Max AU size set to the writer
+                                                                for the video */
+    M4OSA_UInt32                uiAudioMaxAuSize;        /**< Max AU size set to the writer
+                                                                for the audio */
+    M4OSA_UInt32                uiOutputAverageVideoBitrate; /**< Average video bitrate of the
+                                                                    output file, computed from
+                                                                    input bitrates, durations,
+                                                                    transitions and cuts */
+
+} M4VSS3GPP_EncodeWriteContext;
+
+
+/**
+ ******************************************************************************
+ * structure    M4VSS3GPP_InternalEditContext
+ * @brief        This structure defines the edit VSS context (private)
+ * @note        This structure is used for all VSS edit operations to store the context
+ ******************************************************************************
+*/
+typedef struct
+{
+    /**
+     * VSS 3GPP main variables */
+    M4VSS3GPP_EditState         State;                    /**< VSS internal state */
+    M4VSS3GPP_EditVideoState    Vstate;
+    M4VSS3GPP_EditAudioState    Astate;
+
+    /**
+     * User Settings (copied, thus owned by VSS3GPP) */
+    M4OSA_UInt8                        uiClipNumber;        /**< Number of elements in the clip
+                                                                 list pClipList. */
+    M4VSS3GPP_ClipSettings           *pClipList;            /**< List of the input clips settings
+                                                            Array of uiClipNumber clip settings */
+    M4VSS3GPP_TransitionSettings   *pTransitionList;    /**< List of the transition settings.
+                                                    Array of uiClipNumber-1 transition settings */
+    M4VSS3GPP_EffectSettings       *pEffectsList;        /**< List of the effects settings.
+                                                             Array of nbEffects RC */
+    M4OSA_UInt8                       *pActiveEffectsList;    /**< List of the active effects
+                                                                settings. Array of nbEffects RC */
+    M4OSA_UInt8                        nbEffects;            /**< Number of effects RC */
+    M4OSA_UInt8                        nbActiveEffects;    /**< Number of active effects RC */
+
+    /**
+     * Input Stuff */
+    M4OSA_UInt8                        uiCurrentClip;        /**< Index of the current clip 1 in
+                                                                    the input clip list */
+    M4VSS3GPP_ClipContext*            pC1;                /**< Context of the current clip 1 */
+    M4VSS3GPP_ClipContext*            pC2;                /**< Context of the current clip 2 */
+
+    /**
+     * Decoder stuff */
+    M4OSA_Double                dOutputFrameDuration;    /**< [Milliseconds] directly related to
+                                                                 output frame rate */
+    M4VIFI_ImagePlane            yuv1[3];            /**< First temporary YUV420 image plane */
+    M4VIFI_ImagePlane            yuv2[3];            /**< Second temporary YUV420 image plane */
+    M4VIFI_ImagePlane            yuv3[3];            /**< Third temporary YUV420 image plane RC */
+    M4VIFI_ImagePlane            yuv4[3];            /**< Fourth temporary YUV420 image plane RC */
+
+    /**
+     * Effect stuff */
+    M4OSA_Bool                    bClip1AtBeginCut;        /**< True if clip1 is at
+                                                                its begin cut */
+    M4OSA_Int8                    iClip1ActiveEffect;        /**< The index of the active effect
+                                                                    on Clip1 (<0 means none)
+                                                                    (used for video and audio but
+                                                                     not simultaneously) */
+    M4OSA_Int8                    iClip2ActiveEffect;        /**< The index of the active effect
+                                                                 on Clip2 (<0 means none)
+                                                                 (used for video and audio but
+                                                                 not simultaneously) */
+    M4OSA_Bool                    bTransitionEffect;        /**< True if the transition effect
+                                                                 must be applied at the current
+                                                                 time */
+
+    /**
+     * Encoding and Writing operations */
+    M4OSA_Bool                      bSupportSilence;    /**< Flag to know if the output stream can
+                                                             support silence (even if not editable,
+                                                              for example AAC+, but not EVRC) */
+    M4VSS3GPP_EncodeWriteContext    ewc;                /**< Audio and video encode/write stuff */
+    M4OSA_Bool                        bIsMMS;                /**< Boolean used to know if we are
+                                                                processing a file with an output
+                                                                size constraint */
+    M4OSA_UInt32                    uiMMSVideoBitrate;    /**< If in MMS mode,
+                                                                 targeted video bitrate */
+    M4VIDEOEDITING_VideoFramerate    MMSvideoFramerate;    /**< If in MMS mode,
+                                                                 targeted video framerate */
+
+    /**
+     * Filesystem functions */
+    M4OSA_FileReadPointer*        pOsaFileReadPtr;     /**< OSAL file read functions,
+                                                             to be provided by user */
+    M4OSA_FileWriterPointer*    pOsaFileWritPtr;     /**< OSAL file write functions,
+                                                             to be provided by user */
+
+    /**
+     * Interfaces of the used modules */
+    M4VSS3GPP_MediaAndCodecCtxt         ShellAPI;           /**< Filesystem and shell reader,
+                                                                 decoder functions */
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+    struct
+    {
+        M4VD_Interface*    pDecoderInterface;
+        M4OSA_Void*        pUserData;
+        M4OSA_Bool        registered;
+    } registeredExternalDecs[M4VD_kVideoType_NB];
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+    M4OSA_Context        m_codecInterface[M4VSS3GPP_kCodecType_NB];
+    M4OSA_Context        pOMXUserData;
+#endif
+    M4OSA_Bool               bIssecondClip;
+    M4OSA_UInt8              *pActiveEffectsList1;  /**< List of the active effects settings. Array of nbEffects RC */
+    M4OSA_UInt8              nbActiveEffects1;  /**< Number of active effects RC */
+} M4VSS3GPP_InternalEditContext;
+
+
+/**
+ ******************************************************************************
+ * structure    M4VSS3GPP_InternalAudioMixingContext
+ * @brief        This structure defines the audio mixing VSS 3GPP context (private)
+ * @note        This structure is used for all VSS 3GPP audio mixing operations to store
+ *                the context
+ ******************************************************************************
+*/
+typedef struct
+{
+    /**
+     *    VSS main variables */
+    M4VSS3GPP_AudioMixingState State;                    /**< VSS audio mixing internal state */
+
+    /**
+     * Internal copy of the input settings */
+    M4OSA_Int32                iAddCts;                 /**< [Milliseconds] Time, in milliseconds,
+                                                             at which the added audio track is
+                                                              inserted */
+    M4OSA_UInt32               uiBeginLoop;                /**< Start time of the loop,
+                                                                in milliseconds */
+    M4OSA_UInt32               uiEndLoop;                /**< End time of the loop, in
+                                                            milliseconds (0 means no loop) */
+    M4OSA_Bool                 bRemoveOriginal;            /**< If true, the original audio track
+                                                                is not taken into account */
+
+    /**
+     * Input audio/video file */
+    M4VSS3GPP_ClipSettings        InputClipSettings;        /**< Structure internally used to
+                                                                 manage the input 3GPP settings */
+    M4VSS3GPP_ClipContext*        pInputClipCtxt;           /**< Context of the input 3GPP clip */
+
+    /**
+     * Added audio file stuff */
+    M4VSS3GPP_ClipSettings        AddedClipSettings;        /**< Structure internally used to
+                                                                    manage the added settings */
+    M4VSS3GPP_ClipContext*        pAddedClipCtxt;           /**< Context of the added 3GPP clip */
+
+    /**
+     * Audio stuff */
+    M4OSA_Float                    fOrigFactor;            /**< Factor to apply to the original
+                                                                audio track for the mixing */
+    M4OSA_Float                    fAddedFactor;            /**< Factor to apply to the added
+                                                                    audio track for the mixing */
+    M4OSA_Bool                  bSupportSilence;        /**< Flag to know if the output stream can
+                                                             support silence (even if not editable,
+                                                              for example AAC+, but not EVRC) */
+    M4OSA_Bool                  bHasAudio;              /**< Flag to know if we have to delete
+                                                            audio track */
+    M4OSA_Bool                  bAudioMixingIsNeeded;  /**< Flag to know if we have to do mixing */
+
+    /**
+     * Encoding and Writing operations */
+    M4VSS3GPP_EncodeWriteContext    ewc;                /**< Audio and video encode/write stuff */
+
+    /**
+     * Filesystem functions */
+    M4OSA_FileReadPointer*        pOsaFileReadPtr;     /**< OSAL file read functions,
+                                                             to be provided by user */
+    M4OSA_FileWriterPointer*    pOsaFileWritPtr;     /**< OSAL file write functions,
+                                                            to be provided by user */
+
+    /**
+     * Interfaces of the used modules */
+    M4VSS3GPP_MediaAndCodecCtxt ShellAPI;               /**< Filesystem and shell reader,
+                                                                 decoder functions */
+
+    /**
+     * Sample Rate Converter (SSRC) stuff (needed when sampling frequencies or channel counts differ) */
+    M4OSA_Bool                  b_SSRCneeded;        /**< If true, SSRC is needed (different
+                                                            sampling frequency or channel count) */
+    M4OSA_UInt8                 ChannelConversion;    /**< 1=Conversion from Mono to Stereo
+                                                             2=Stereo to Mono, 0=no conversion */
+    SSRC_Instance_t             SsrcInstance;        /**< Context of the Ssrc */
+    SSRC_Scratch_t*             SsrcScratch;        /**< Working memory of the Ssrc */
+    short                       iSsrcNbSamplIn;    /**< Number of samples the SSRC needs as input */
+    short                       iSsrcNbSamplOut;    /**< Number of samples the SSRC outputs */
+    M4OSA_MemAddr8              pSsrcBufferIn;        /**< Input of the SSRC */
+    M4OSA_MemAddr8              pSsrcBufferOut;        /**< Output of the SSRC */
+    M4OSA_MemAddr8              pPosInSsrcBufferIn;    /**< Position into the SSRC in buffer */
+    M4OSA_MemAddr8              pPosInSsrcBufferOut;/**< Position into the SSRC out buffer */
+    M4OSA_MemAddr8              pTempBuffer;        /**< Temporary buffer */
+    M4OSA_MemAddr8              pPosInTempBuffer;    /**< Position in temporary buffer */
+    M4OSA_UInt32                minimumBufferIn;    /**< Minimum amount of decoded data to be
+                                                            processed by SSRC and channel
+                                                             convertor */
+    M4OSA_Bool                  b_DuckingNeedeed;
+    M4OSA_Int32                 InDucking_threshold;  /**< Threshold value at which background
+                                                                 music shall duck */
+    M4OSA_Float                 InDucking_lowVolume;  /**< Lower the background track to this
+                                                                factor and increase the primary
+                                                                track to the inverse of this factor */
+    M4OSA_Float                 lowVolume;
+    M4OSA_Int32                 audioVolumeArray[WINDOW_SIZE]; // store peak audio vol. level
+                                                                  // for duration for WINDOW_SIZE
+    M4OSA_Int32                 audVolArrIndex;
+    M4OSA_Float                 duckingFactor ;     /**< multiply by this factor to bring
+                                                             FADE IN/FADE OUT effect */
+    M4OSA_Float                 fBTVolLevel;
+    M4OSA_Float                 fPTVolLevel;
+    M4OSA_Bool                  bDoDucking;
+    M4OSA_Bool                  bLoop;
+    M4OSA_Bool                  bNoLooping;
+    M4OSA_Int32                 pLVAudioResampler;
+    M4OSA_Bool                  bjumpflag;
+
+} M4VSS3GPP_InternalAudioMixingContext;
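
The ducking-related fields above (audioVolumeArray, audVolArrIndex, InDucking_threshold, InDucking_lowVolume, duckingFactor, bDoDucking) suggest a windowed-peak ducking scheme. The sketch below only illustrates that idea with an assumed ramp step; it is not the shipped mixing algorithm, and the helper name is hypothetical.

```c
/* Illustrative sketch only: windowed-peak ducking decision and factor ramp. */
static void example_update_ducking(M4VSS3GPP_InternalAudioMixingContext *pC,
                                   M4OSA_Int32 primaryPeakLevel)
{
    M4OSA_Int32 i, windowPeak = 0;

    /* Store the new peak level in the circular window of WINDOW_SIZE entries */
    pC->audioVolumeArray[pC->audVolArrIndex] = primaryPeakLevel;
    pC->audVolArrIndex = (pC->audVolArrIndex + 1) % WINDOW_SIZE;

    for (i = 0; i < WINDOW_SIZE; i++) {
        if (pC->audioVolumeArray[i] > windowPeak) {
            windowPeak = pC->audioVolumeArray[i];
        }
    }

    /* Duck the background track while the primary track is above the threshold */
    pC->bDoDucking = (windowPeak > pC->InDucking_threshold);

    /* Ramp the factor smoothly towards its target (0.01f step is an assumption) */
    if (pC->bDoDucking && pC->duckingFactor > pC->InDucking_lowVolume) {
        pC->duckingFactor -= 0.01f;
    } else if (!pC->bDoDucking && pC->duckingFactor < 1.0f) {
        pC->duckingFactor += 0.01f;
    }
}
```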
+
+
+/**
+ ******************************************************************************
+ * structure    M4VSS3GPP_InternalExtractPictureContext
+ * @brief        This structure defines the extract picture VSS context (private)
+ * @note        This structure is used for all VSS picture extractions to store the context
+ ******************************************************************************
+*/
+typedef struct
+{
+    /**
+     *    VSS main variables */
+    M4VSS3GPP_ExtractPictureState State;                /**< VSS extract picture internal state */
+
+    /**
+     * Input files */
+    M4VSS3GPP_ClipSettings        ClipSettings;            /**< Structure internally used to
+                                                                manage the input 3GPP settings */
+    M4VSS3GPP_ClipContext*        pInputClipCtxt;           /**< Context of the input 3GPP clip */
+
+    /**
+     * Settings */
+    M4OSA_Int32                    iExtractCts;            /**< [Milliseconds] Cts of the AU
+                                                                to be extracted */
+
+    /**
+     * Video stuff */
+    M4VIFI_ImagePlane            decPlanes[3];            /**< Decoded YUV420 picture plane */
+    M4OSA_UInt32                uiVideoWidth;            /**< Decoded image width */
+    M4OSA_UInt32                uiVideoHeight;            /**< Decoded image height */
+
+    /*
+     * Decoder info */
+    M4OSA_Int32                iDecCts;      /**< [Milliseconds] Decoded AU Cts */
+    M4OSA_Bool                 bJumpFlag;     /**< 1 if a jump has been made */
+    M4OSA_Int32                iDeltaTime;   /**< [Milliseconds] Time between previous RAP and
+                                                     picture to extract */
+    M4OSA_Int32                iGap;         /**< [Milliseconds] Time between jump AU and
+                                                    extraction time */
+    M4OSA_UInt32               uiStep;          /**< [Milliseconds] Progress bar time increment */
+
+    /**
+     * Filesystem functions */
+     /**< OSAL file read functions, to be provided by user */
+    M4OSA_FileReadPointer*        pOsaFileReadPtr;
+    /**< OSAL file write functions, to be provided by user */
+    M4OSA_FileWriterPointer*    pOsaFileWritPtr;
+
+    M4OSA_Bool                    bClipOpened;
+} M4VSS3GPP_InternalExtractPictureContext;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __M4VSS3GPP_INTERNALTYPES_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4xVSS_API.h b/libvideoeditor/vss/inc/M4xVSS_API.h
new file mode 100755
index 0000000..d69a17c
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4xVSS_API.h
@@ -0,0 +1,633 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4XVSS_API_H__
+#define __M4XVSS_API_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/**
+ ******************************************************************************
+ * @file    M4xVSS_API.h
+ * @brief    API of Video Studio 2.1
+ * @note
+ ******************************************************************************
+*/
+
+#define M4VSS_SUPPORT_EXTENDED_FEATURES
+
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_Extended_API.h"
+
+/* Errors codes */
+
+/**
+ * End of analysis => the user can call M4xVSS_PreviewStart or M4xVSS_SaveStart */
+#define M4VSS3GPP_WAR_ANALYZING_DONE                  M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0001)
+
+/**
+ * End of preview generation => the user can launch the VPS to view the preview. Once the preview
+   is over, the user must call M4xVSS_PreviewStop() to be able to save the edited file, or to call
+   another M4xVSS_SendCommand() */
+#define M4VSS3GPP_WAR_PREVIEW_READY                   M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0002)
+
+/**
+ * End of saved file generation => the user must call M4xVSS_SaveStop() */
+#define M4VSS3GPP_WAR_SAVING_DONE                     M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0003)
+
+/**
+ * Transcoding is necessary to go further -> if the user does not want to continue,
+  he must call M4xVSS_sendCommand() */
+#define M4VSS3GPP_WAR_TRANSCODING_NECESSARY           M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0004)
+
+/**
+ * In case of MMS, the output file size won't be reached */
+#define M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED           M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0005)
+
+/**
+ * JPG input file dimensions are too high */
+#define M4VSS3GPP_ERR_JPG_TOO_BIG                     M4OSA_ERR_CREATE( M4_ERR, M4VS, 0x0001)
+
+/**
+ * UTF Conversion, warning on the size of the temporary converted buffer*/
+#define M4xVSSWAR_BUFFER_OUT_TOO_SMALL                M4OSA_ERR_CREATE( M4_WAR, M4VS, 0x0006)
+
+/**
+ * Error when there is no more space */
+#define M4xVSSERR_NO_MORE_SPACE                       M4OSA_ERR_CREATE( M4_ERR, M4VS, 0x0007)
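
These warning codes drive the xVSS state machine rather than signalling failures. As a hedged illustration, a caller might loop on a step function and treat the end-of-stage warnings as normal termination; M4xVSS_Step() and its (context, progress) signature are assumed here and are not shown in this excerpt, and the helper name is hypothetical.

```c
/* Illustrative sketch only: polling the xVSS state machine. */
static M4OSA_ERR example_run_until_done(M4OSA_Context pXvssCtxt)
{
    M4OSA_ERR   err;
    M4OSA_UInt8 progress = 0;

    do {
        err = M4xVSS_Step(pXvssCtxt, &progress);   /* assumed step function */
    } while (M4NO_ERROR == err);

    if (M4VSS3GPP_WAR_ANALYZING_DONE == err ||
        M4VSS3GPP_WAR_PREVIEW_READY  == err ||
        M4VSS3GPP_WAR_SAVING_DONE    == err) {
        return M4NO_ERROR;   /* expected end-of-stage warnings */
    }
    return err;              /* genuine error */
}
```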
+
+/**
+ ******************************************************************************
+ * enum     M4xVSS_VideoEffectType
+ * @brief   This enumeration defines the video effect types of the xVSS
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4xVSS_kVideoEffectType_BlackAndWhite = M4VSS3GPP_kVideoEffectType_External+1, /* 257 */
+    M4xVSS_kVideoEffectType_Pink,                                                  /* 258 */
+    M4xVSS_kVideoEffectType_Green,                                                 /* 259 */
+    M4xVSS_kVideoEffectType_Sepia,                                                 /* 260 */
+    M4xVSS_kVideoEffectType_Negative,                                              /* 261 */
+    M4xVSS_kVideoEffectType_Framing,                                               /* 262 */
+    M4xVSS_kVideoEffectType_Text, /* Text overlay */                               /* 263 */
+    M4xVSS_kVideoEffectType_ZoomIn,                                                /* 264 */
+    M4xVSS_kVideoEffectType_ZoomOut,                                               /* 265 */
+    M4xVSS_kVideoEffectType_Fifties,                                               /* 266 */
+    M4xVSS_kVideoEffectType_ColorRGB16,                                            /* 267 */
+    M4xVSS_kVideoEffectType_Gradient                                               /* 268 */
+} M4xVSS_VideoEffectType;
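+
+/**
+ * Illustrative sketch (not part of the original API): selecting one of these xVSS
+ * effect types for a clip. The M4VSS3GPP_EffectSettings field names used below
+ * (uiStartTime, uiDuration, VideoEffectType) are assumed from M4VSS3GPP_API.h and
+ * may differ; the xVSS value is simply cast to the generic VSS3GPP effect type.
+ *
+ *  M4VSS3GPP_EffectSettings effect;
+ *  memset(&effect, 0, sizeof(effect));                 // requires <string.h>
+ *  effect.uiStartTime     = 0;                         // effect start, in ms (assumed field)
+ *  effect.uiDuration      = 3000;                      // effect duration, in ms (assumed field)
+ *  effect.VideoEffectType = (M4VSS3GPP_VideoEffectType)M4xVSS_kVideoEffectType_Sepia;
+ */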
+
+/**
+ ******************************************************************************
+ * enum     M4xVSS_VideoTransitionType
+ * @brief   This enumeration defines the video effect that can be applied during a transition.
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4xVSS_kVideoTransitionType_External = M4VSS3GPP_kVideoTransitionType_External, /*256*/
+    M4xVSS_kVideoTransitionType_AlphaMagic,
+    M4xVSS_kVideoTransitionType_SlideTransition,
+    M4xVSS_kVideoTransitionType_FadeBlack
+
+} M4xVSS_VideoTransitionType;
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_PreviewSettings
+ * @brief    This structure gathers all the information needed by the VPS for preview
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_Void                                *p3gpPreviewFile;
+    M4OSA_Void                                *pPCMFile;
+    M4VIDEOEDITING_AudioSamplingFrequency    outPCM_ASF;
+    M4OSA_Bool                                bAudioMono;
+    M4VSS3GPP_EffectSettings                   *Effects;
+    M4OSA_UInt8                                nbEffects;
+
+} M4xVSS_PreviewSettings;
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_toUTF8Fct
+ * @brief        This prototype defines the function implemented by the integrator
+ *                to convert a string encoded in any format to a UTF-8 string.
+ * @note
+ *
+ * @param    pBufferIn        IN        Buffer containing the string to convert to UTF-8
+ * @param    pBufferOut        OUT        Buffer that receives the converted UTF-8 string
+ * @param    bufferOutSize    IN/OUT    IN:  Size of the given output buffer
+ *                                      OUT: Size of the converted buffer
+ *
+ ******************************************************************************
+*/
+typedef M4OSA_ERR (*M4xVSS_toUTF8Fct)
+(
+    M4OSA_Void            *pBufferIn,
+    M4OSA_UInt8            *pBufferOut,
+    M4OSA_UInt32        *bufferOutSize
+);
+
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_fromUTF8Fct
+ * @brief        This prototype defines the function implemented by the integrator
+ *                to convert a UTF-8 string to a string encoded in any format.
+ * @note
+ *
+ * @param    pBufferIn        IN        Buffer containing the UTF-8 string to convert
+ *                                        to the desired format.
+ * @param    pBufferOut        OUT        Buffer that receives the converted string
+ * @param    bufferOutSize    IN/OUT    IN:  Size of the given output buffer
+ *                                      OUT: Size of the converted buffer
+ *
+ ******************************************************************************
+*/
+typedef M4OSA_ERR (*M4xVSS_fromUTF8Fct)
+(
+    M4OSA_UInt8            *pBufferIn,
+    M4OSA_Void            *pBufferOut,
+    M4OSA_UInt32        *bufferOutSize
+);
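+
+/**
+ * Illustrative sketch (not mandated by the API): a minimal integrator implementation
+ * of the toUTF8 direction, assuming the application's native string format is already
+ * NUL-terminated ASCII/UTF-8 so the "conversion" is a bounded copy. The fromUTF8
+ * direction is symmetric. The chosen return values are assumptions.
+ *
+ *  #include <string.h>
+ *
+ *  M4OSA_ERR myAppToUTF8(M4OSA_Void *pBufferIn, M4OSA_UInt8 *pBufferOut,
+ *                        M4OSA_UInt32 *bufferOutSize)
+ *  {
+ *      M4OSA_UInt32 len;
+ *
+ *      if ((M4OSA_NULL == pBufferIn) || (M4OSA_NULL == pBufferOut)
+ *          || (M4OSA_NULL == bufferOutSize))
+ *      {
+ *          return M4ERR_PARAMETER;
+ *      }
+ *      len = (M4OSA_UInt32)strlen((const char *)pBufferIn);
+ *      if ((len + 1) > *bufferOutSize)
+ *      {
+ *          return M4xVSSWAR_BUFFER_OUT_TOO_SMALL;   // assumed return value
+ *      }
+ *      memcpy(pBufferOut, pBufferIn, len + 1);      // copy including the final NUL
+ *      *bufferOutSize = len + 1;                    // report the converted size
+ *      return M4NO_ERROR;
+ *  }
+ */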
+
+
+
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_InitParams
+ * @brief    This structure defines parameters for xVSS.
+ * @note
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_FileReadPointer*            pFileReadPtr;
+    M4OSA_FileWriterPointer*        pFileWritePtr;
+    M4OSA_Void*                        pTempPath;
+    /*Function pointer on an external text conversion function */
+    M4xVSS_toUTF8Fct                pConvToUTF8Fct;
+    /*Function pointer on an external text conversion function */
+    M4xVSS_fromUTF8Fct                pConvFromUTF8Fct;
+
+
+
+} M4xVSS_InitParams;
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_Init
+ * @brief        This function initializes the xVSS
+ * @note        Initializes the xVSS edit operation (allocates an execution context).
+ *
+ * @param    pContext            (OUT) Pointer to the xVSS edit context to allocate
+ * @param    params                (IN) Mandatory initialization parameters for the xVSS
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_Init(M4OSA_Context* pContext, M4xVSS_InitParams* params);
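+
+/**
+ * Illustrative sketch (assumptions: fileReadPtr/fileWritePtr are OSAL function-pointer
+ * tables already initialized by the application, myAppToUTF8/myAppFromUTF8 are the
+ * integrator callbacks sketched above, and the temporary path is only an example):
+ *
+ *  M4OSA_Context     xvssCtx = M4OSA_NULL;
+ *  M4xVSS_InitParams initParams;
+ *  M4OSA_ERR         err;
+ *
+ *  initParams.pFileReadPtr     = &fileReadPtr;
+ *  initParams.pFileWritePtr    = &fileWritePtr;
+ *  initParams.pTempPath        = (M4OSA_Void *)"/sdcard/tmp/";   // assumed writable path
+ *  initParams.pConvToUTF8Fct   = myAppToUTF8;
+ *  initParams.pConvFromUTF8Fct = myAppFromUTF8;
+ *
+ *  err = M4xVSS_Init(&xvssCtx, &initParams);
+ *  if (M4NO_ERROR != err)
+ *  {
+ *      // handle M4ERR_PARAMETER or M4ERR_ALLOC
+ *  }
+ */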
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_ReduceTranscode
+ * @brief        This function changes the given editing structure in order to
+ *                minimize the transcoding time.
+ * @note        The xVSS analyses this structure, and if needed, changes the
+ *                output parameters (video codec, video size, audio codec,
+ *                number of audio channels) to minimize the transcoding time.
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @param    pSettings            (IN) Edition settings (allocated by the user)
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_ReduceTranscode(M4OSA_Context pContext, M4VSS3GPP_EditSettings* pSettings);
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_SendCommand
+ * @brief        This function gives an editing structure to the xVSS
+ * @note        The xVSS analyses this structure and prepares the edit operation.
+ *                This function must be called after M4xVSS_Init, after
+ *                M4xVSS_CloseCommand, or after M4xVSS_PreviewStop.
+ *                After this function, the user must call M4xVSS_Step until
+ *                it returns an error other than M4NO_ERROR.
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @param    pSettings            (IN) Edition settings (allocated by the user)
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_SendCommand(M4OSA_Context pContext, M4VSS3GPP_EditSettings* pSettings);
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_PreviewStart
+ * @brief        This function prepares the preview
+ * @note        The xVSS creates the 3GP preview file and fills pPreviewSettings
+ *                with the preview parameters.
+ *                This function must be called once M4xVSS_Step has returned
+ *                M4VSS3GPP_WAR_ANALYZING_DONE
+ *                After this function, the user must call M4xVSS_Step until
+ *                it returns an error other than M4NO_ERROR.
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @param    pPreviewSettings    (IN) Preview settings (allocated by the user)
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_PreviewStart(M4OSA_Context pContext, M4xVSS_PreviewSettings* pPreviewSettings);
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_PreviewStop
+ * @brief        This function deallocates preview resources and changes the xVSS
+ *                internal state to allow saving or resending an editing command
+ * @note        This function must be called once M4xVSS_Step has returned
+ *                M4VSS3GPP_WAR_PREVIEW_READY
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_PreviewStop(M4OSA_Context pContext);
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_SaveStart
+ * @brief        This function prepares the save operation
+ * @note        The xVSS creates the final edited 3GP file.
+ *                This function must be called once M4xVSS_Step has returned
+ *                M4VSS3GPP_WAR_ANALYZING_DONE
+ *                After this function, the user must call M4xVSS_Step until
+ *                it returns an error other than M4NO_ERROR.
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @param    pFilePath            (IN) Output file path, if the user wants to provide a
+ *                                different output filename; otherwise NULL (allocated by the user)
+ * @param    filePathSize        (IN) Size of the output file path
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_SaveStart(M4OSA_Context pContext, M4OSA_Void* pFilePath,
+                            M4OSA_UInt32 filePathSize);
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_SaveStop
+ * @brief        This function deallocates save resources and changes the xVSS
+ *                internal state.
+ * @note        This function must be called once M4xVSS_Step has returned
+ *                M4VSS3GPP_WAR_SAVING_DONE
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_SaveStop(M4OSA_Context pContext);
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_Step
+ * @brief        This function executes different tasks, depending on the xVSS
+ *                internal state.
+ * @note        This function:
+ *                    - analyses editing structure if called after M4xVSS_SendCommand
+ *                    - generates preview file if called after M4xVSS_PreviewStart
+ *                    - generates final edited file if called after M4xVSS_SaveStart
+ *
+ * @param    pContext                        (IN) Pointer to the xVSS edit context
+ * @param    pProgress                        (OUT) Progress indication from 0 to 100
+ * @return    M4NO_ERROR:                        No error, the user must call M4xVSS_Step again
+ * @return    M4ERR_PARAMETER:                At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:                    This function cannot be called at this time
+ * @return    M4VSS3GPP_WAR_PREVIEW_READY:    Preview file is generated
+ * @return    M4VSS3GPP_WAR_SAVING_DONE:        Final edited file is generated
+ * @return    M4VSS3GPP_WAR_ANALYZING_DONE:    Analysis is done
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_Step(M4OSA_Context pContext, M4OSA_UInt8 *pProgress);
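+
+/**
+ * Illustrative sketch of the save workflow driven by M4xVSS_Step (assumptions:
+ * xvssCtx was obtained from M4xVSS_Init as sketched above, editSettings is a fully
+ * filled M4VSS3GPP_EditSettings, and outPath/outPathSize describe the output file;
+ * most error handling is omitted for brevity):
+ *
+ *  M4OSA_ERR   err;
+ *  M4OSA_UInt8 progress = 0;
+ *
+ *  err = M4xVSS_SendCommand(xvssCtx, &editSettings);
+ *  while (M4NO_ERROR == (err = M4xVSS_Step(xvssCtx, &progress)))
+ *  {
+ *      // analysis in progress, 'progress' goes from 0 to 100
+ *  }
+ *  if (M4VSS3GPP_WAR_ANALYZING_DONE == err)
+ *  {
+ *      err = M4xVSS_SaveStart(xvssCtx, outPath, outPathSize);
+ *      while (M4NO_ERROR == (err = M4xVSS_Step(xvssCtx, &progress)))
+ *      {
+ *          // saving in progress
+ *      }
+ *      if (M4VSS3GPP_WAR_SAVING_DONE == err)
+ *      {
+ *          err = M4xVSS_SaveStop(xvssCtx);
+ *      }
+ *  }
+ *  err = M4xVSS_CloseCommand(xvssCtx);
+ *  err = M4xVSS_CleanUp(xvssCtx);
+ */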
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_CloseCommand
+ * @brief        This function deletes the current editing profile, deallocates
+ *                resources and changes the xVSS internal state.
+ * @note        After this function, the user can call a new M4xVSS_SendCommand
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_CloseCommand(M4OSA_Context pContext);
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_CleanUp
+ * @brief        This function frees all xVSS resources
+ * @note        This function must be called after M4xVSS_CloseCommand.
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_CleanUp(M4OSA_Context pContext);
+
+/**
+******************************************************************************
+ * M4OSA_ERR M4xVSS_RegisterExternalVideoDecoder(M4OSA_Context pContext,
+ *                                     M4VD_VideoType decoderType,
+ *                                     M4VD_Interface*    pDecoderInterface,
+ *                                     M4OSA_Void* pUserData)
+ * @brief    Registers an external Video decoder
+ * @note
+ * @param   pContext           (IN) xVSS context
+ * @param   decoderType        (IN) Type of decoder (MPEG4 ...)
+ * @param   pDecoderInterface  (IN) Decoder interface
+ * @param   pUserData          (IN) Pointer to user data passed to the external decoder
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        xVSS is not in an appropriate state for this function to be called
+******************************************************************************
+*/
+M4OSA_ERR M4xVSS_RegisterExternalVideoDecoder(M4OSA_Context pContext,
+                                     M4VD_VideoType decoderType,
+                                     M4VD_Interface*    pDecoderInterface,
+                                     M4OSA_Void* pUserData);
+
+/**
+******************************************************************************
+ * M4OSA_ERR M4xVSS_RegisterExternalVideoEncoder(M4OSA_Context pContext,
+ *                                     M4VE_EncoderType encoderType,
+ *                                     M4VE_Interface*    pEncoderInterface,
+ *                                     M4OSA_Void* pUserData)
+ * @brief    Registers an external Video encoder
+ * @note
+ * @param   pContext           (IN) xVSS context
+ * @param   encoderType        (IN) Type of encoder (MPEG4 ...)
+ * @param   pEncoderInterface  (IN) Encoder interface
+ * @param   pUserData          (IN) Pointer to user data passed to the external encoder
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        xVSS is not in an appropriate state for this function to be called
+******************************************************************************
+*/
+M4OSA_ERR M4xVSS_RegisterExternalVideoEncoder(M4OSA_Context pContext,
+                                     M4VE_EncoderType encoderType,
+                                     M4VE_Interface*    pEncoderInterface,
+                                     M4OSA_Void* pUserData);
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_GetVersion(M4_VersionInfo *pVersion)
+ * @brief        This function gets the version of Video Studio 2.1
+ *
+ * @param    pVersion            (IN) Pointer to the version info structure to fill
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_GetVersion(M4_VersionInfo *pVersion);
+
+/**
+ ******************************************************************************
+ * prototype    M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
+ *                                                    M4VIFI_ImagePlane *PlaneIn,
+ *                                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                                    M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief    This function applies a color effect on an input YUV420 planar frame
+ * @note    The prototype of this effect function is exposed because it needs to
+ *            be called by the VPS during the preview
+ * @param    pFunctionContext(IN) Contains which color to apply (not very clean ...)
+ * @param    PlaneIn            (IN) Input YUV420 planar
+ * @param    PlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-100)
+ * @param    uiEffectKind    (IN) Unused
+ *
+ * @return    M4VIFI_OK:    No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_externalVideoEffectColor
+(
+    M4OSA_Void *pFunctionContext,
+    M4VIFI_ImagePlane *pInputPlanes,
+    M4VIFI_ImagePlane *pOutputPlanes,
+    M4VSS3GPP_ExternalProgress *pProgress,
+    M4OSA_UInt32 uiEffectKind
+);
+
+/**
+ ******************************************************************************
+ * prototype    M4VSS3GPP_externalVideoEffectFraming(M4OSA_Void *pFunctionContext,
+ *                                                    M4VIFI_ImagePlane *PlaneIn,
+ *                                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                                    M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief    This function adds a fixed or animated image on an input YUV420 planar frame
+ * @note    The prototype of this effect function is exposed because it needs to
+ *            be called by the VPS during the preview
+ * @param    pFunctionContext(IN) Contains the framing settings to apply
+ * @param    PlaneIn            (IN) Input YUV420 planar
+ * @param    PlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-100)
+ * @param    uiEffectKind    (IN) Unused
+ *
+ * @return    M4VIFI_OK:    No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_externalVideoEffectFraming
+(
+    M4OSA_Void *pFunctionContext,
+    M4VIFI_ImagePlane *pInputPlanes,
+    M4VIFI_ImagePlane *pOutputPlanes,
+    M4VSS3GPP_ExternalProgress *pProgress,
+    M4OSA_UInt32 uiEffectKind
+);
+
+/**
+ ******************************************************************************
+ * prototype    M4VSS3GPP_externalVideoEffectFifties(M4OSA_Void *pFunctionContext,
+ *                                                    M4VIFI_ImagePlane *PlaneIn,
+ *                                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                                    M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief    This function makes a video look as if it was shot in the fifties
+ * @note
+ * @param    pUserData       (IN) Context
+ * @param    pPlaneIn        (IN) Input YUV420 planar
+ * @param    pPlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-100)
+ * @param    uiEffectKind    (IN) Unused
+ *
+ * @return    M4VIFI_OK:            No error
+ * @return  M4ERR_PARAMETER:    pFiftiesData, pPlaneOut or pProgress are NULL (DEBUG only)
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_externalVideoEffectFifties
+(
+    M4OSA_Void *pUserData,
+    M4VIFI_ImagePlane *pInputPlanes,
+    M4VIFI_ImagePlane *pPlaneOut,
+    M4VSS3GPP_ExternalProgress *pProgress,
+    M4OSA_UInt32 uiEffectKind
+);
+
+
+/**
+ ******************************************************************************
+ * prototype    M4VSS3GPP_externalVideoEffectZoom(M4OSA_Void *pFunctionContext,
+ *                                                    M4VIFI_ImagePlane *PlaneIn,
+ *                                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                                    M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief    This function applies a zoom in or zoom out effect on an input YUV420 planar frame
+ * @note    The prototype of this effect function is exposed because it needs to
+ *            be called by the VPS during the preview
+ * @param    pFunctionContext(IN) Contains which zoom to apply (In/Out)
+ * @param    PlaneIn            (IN) Input YUV420 planar
+ * @param    PlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-100)
+ * @param    uiEffectKind    (IN) Unused
+ *
+ * @return    M4VIFI_OK:    No error
+ ******************************************************************************
+*/
+M4OSA_ERR M4VSS3GPP_externalVideoEffectZoom
+(
+    M4OSA_Void *pFunctionContext,
+    M4VIFI_ImagePlane *pInputPlanes,
+    M4VIFI_ImagePlane *pOutputPlanes,
+    M4VSS3GPP_ExternalProgress *pProgress,
+    M4OSA_UInt32 uiEffectKind
+);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_CreateClipSettings()
+ * @brief    Allows filling a clip settings structure with default values
+ *
+ * @note    WARNING: pClipSettings->Effects[ ] will be allocated in this function.
+ *                   pClipSettings->pFile      will be allocated in this function.
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param   pFile               (IN) Clip file name
+ * @param   filePathSize        (IN) Size of the clip path (needed for the UTF16 conversion)
+ * @param    nbEffects           (IN) Nb of effect settings to allocate
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_CreateClipSettings(M4VSS3GPP_ClipSettings *pClipSettings, M4OSA_Void* pFile,
+                                    M4OSA_UInt32 filePathSize, M4OSA_UInt8 nbEffects);
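+
+/**
+ * Illustrative sketch (the clip path is only an example; err is an M4OSA_ERR
+ * declared by the caller):
+ *
+ *  M4VSS3GPP_ClipSettings clipSettings;
+ *  M4OSA_Char             clipPath[] = "/sdcard/input_clip.3gp";
+ *
+ *  err = M4xVSS_CreateClipSettings(&clipSettings, (M4OSA_Void *)clipPath,
+ *                                  (M4OSA_UInt32)sizeof(clipPath), 0);
+ *  // ... fill the remaining clip fields and use the clip in the edit settings ...
+ *  err = M4xVSS_FreeClipSettings(&clipSettings);    // frees pFile and Effects
+ */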
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_DuplicateClipSettings()
+ * @brief    Duplicates a clip settings structure, performing allocations if required
+ *
+ * @param    pClipSettingsDest    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param    pClipSettingsOrig    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param   bCopyEffects        (IN) Flag to know if we have to duplicate effects
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_DuplicateClipSettings(M4VSS3GPP_ClipSettings *pClipSettingsDest,
+                                         M4VSS3GPP_ClipSettings *pClipSettingsOrig,
+                                         M4OSA_Bool bCopyEffects);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_FreeClipSettings()
+ * @brief    Free the pointers allocated in the ClipSetting structure (pFile, Effects).
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_FreeClipSettings(M4VSS3GPP_ClipSettings *pClipSettings);
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_getMCSContext(M4OSA_Context pContext, M4OSA_Context* mcsContext)
+ * @brief        This function returns the MCS context within the xVSS internal context
+ * @note        This function must be called only after VSS state has moved to analyzing state
+ *                or beyond
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @param    mcsContext        (OUT) Pointer to the variable that receives the MCS context
+ * @return    M4NO_ERROR:        No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_getMCSContext(M4OSA_Context pContext, M4OSA_Context* mcsContext);
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_getVSS3GPPContext(M4OSA_Context pContext,
+ *                                                     M4OSA_Context* vss3gppContext)
+ * @brief        This function returns the VSS3GPP context within the xVSS internal context
+ * @note        This function must be called only after VSS state has moved to Generating
+ *                preview or beyond
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @param    vss3gppContext        (OUT) Pointer to the variable that receives the VSS3GPP context
+ * @return    M4NO_ERROR:        No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+*/
+M4OSA_ERR M4xVSS_getVSS3GPPContext(M4OSA_Context pContext, M4OSA_Context* vss3gppContext);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+#endif /* __M4XVSS_API_H__ */
+
diff --git a/libvideoeditor/vss/inc/M4xVSS_Internal.h b/libvideoeditor/vss/inc/M4xVSS_Internal.h
new file mode 100755
index 0000000..c0490e6
--- /dev/null
+++ b/libvideoeditor/vss/inc/M4xVSS_Internal.h
@@ -0,0 +1,610 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __M4XVSS_INTERNAL_H__
+#define __M4XVSS_INTERNAL_H__
+
+/**
+ ******************************************************************************
+ * @file    M4xVSS_Internal.h
+ * @brief    Internal definitions of the xVSS (Video Authoring).
+ * @note
+ ******************************************************************************
+*/
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4MCS_API.h"
+#include "M4MCS_ErrorCodes.h"
+
+#include "M4PTO3GPP_API.h"
+#include "M4PTO3GPP_ErrorCodes.h"
+
+#include "M4AIR_API.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define M4_xVSS_MAJOR        1
+#define M4_xVSS_MINOR        5
+#define M4_xVSS_REVISION    5
+
+/* The following defines describe the max dimensions of an input JPG */
+#define M4XVSS_MX_JPG_NB_OF_PIXELS    3926016
+
+/*Size of the UTF temporary conversion buffer kept in the VA internal context and
+allocated at initialization*/
+#define UTF_CONVERSION_BUFFER_SIZE            2048
+
+/** Determine absolute value of a. */
+#define M4xVSS_ABS(a)               ( ( (a) < (0) ) ? (-(a)) : (a) )
+
+/** Y,U,V values in case of black borders rendering */
+#define Y_PLANE_BORDER_VALUE    0x00
+#define U_PLANE_BORDER_VALUE    0x80
+#define V_PLANE_BORDER_VALUE    0x80
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_EffectsAlphaBlending
+ * @brief    Internal effects alpha blending parameters
+ * @note    This structure contains all the internal information needed to create an alpha
+ *            blending for the text and framing effects
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_UInt8                    m_fadeInTime;        /*Start percentage of Alpha blending*/
+    M4OSA_UInt8                    m_fadeOutTime;        /*Middle percentage of Alpha blending*/
+    M4OSA_UInt8                    m_end;            /*End percentage of Alpha blending*/
+    M4OSA_UInt8                    m_middle;    /*Duration, in percentage of effect duration,
+                                                 of the FadeIn phase*/
+    M4OSA_UInt8                    m_start;    /*Duration, in percentage of effect duration,
+                                                of the FadeOut phase*/
+
+} M4xVSS_internalEffectsAlphaBlending;
+
+/**
+ ******************************************************************************
+ * THIS STRUCTURE MUST NOT BE MODIFIED
+ * struct    M4xVSS_FramingStruct
+ * @brief    It is used internally by xVSS for framing effect, and by VPS for previewing
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4VIFI_ImagePlane *FramingRgb;                /**< decoded BGR565 plane */
+    M4VIFI_ImagePlane *FramingYuv;                /**< converted YUV420 planar plane */
+    M4OSA_Int32 duration;                        /**< Duration of the frame */
+    M4OSA_Int32 previousClipTime;                /**< Previous clip time, used by framing
+                                                     filter for SAVING */
+    M4OSA_Int32 previewOffsetClipTime;            /**< Previous clip time, used by framing
+                                                     filter for PREVIEW */
+    M4OSA_Int32 previewClipTime;                /**< Current clip time, used by framing
+                                                     filter for PREVIEW */
+    M4OSA_Void* pCurrent;                        /**< Current M4xVSS_FramingStruct used by
+                                                         framing filter */
+    M4OSA_Void* pNext;                            /**< Next M4xVSS_FramingStruct, if no more,
+                                                         point on current M4xVSS_FramingStruct */
+    M4OSA_UInt32 topleft_x;                        /**< The top-left X coordinate in the output
+                                                         picture of the first decoded pixel */
+    M4OSA_UInt32 topleft_y;                        /**< The top-left Y coordinate in the output
+                                                         picture of the first decoded pixel */
+    M4xVSS_internalEffectsAlphaBlending* alphaBlendingStruct; /* Alpha blending Struct */
+/*To support ARGB8888: width and height of the ARGB8888 file used as framing
+ video effect */
+    M4OSA_UInt32                width;   /*Width of the ARGB8888 clip.
+                                           Used only if the video effect is framing */
+    M4OSA_UInt32                height;  /*Height of the ARGB8888 clip.
+                                           Used only if the video effect is framing */
+
+} M4xVSS_FramingStruct;
+
+#ifdef DECODE_GIF_ON_SAVING
+/**
+ ******************************************************************************
+ * THIS STRUCTURE MUST NOT BE MODIFIED
+ * struct    M4xVSS_FramingContext
+ * @brief    It is used internally by xVSS for framing effect, when the flag
+                DECODE_GIF_ON_SAVING is activated
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4xVSS_FramingStruct*            aFramingCtx;        /**<Framing struct for the decoding
+                                                            of the current frame of the gif*/
+    M4xVSS_FramingStruct*            aFramingCtx_last;    /**<Framing struct for the decoding of
+                                                             the previous frame of the gif*/
+    M4OSA_FileReadPointer*            pFileReadPtr;    /**< Pointer on OSAL file read functions */
+    M4OSA_FileWriterPointer*        pFileWritePtr;     /**< Pointer on OSAL file write functions */
+    M4OSA_Void*                        pSPSContext;        /**<SPS context for the GIF decoding*/
+    //M4SPS_Stream                    inputStream;        /**<GIF input stream buffer pointer*/
+    M4OSA_Void*                        pEffectFilePath;    /**<file path of the gif*/
+    M4VIDEOEDITING_VideoFrameSize    outputVideoSize;    /**< Output video size RC */
+    //M4SPS_DisposalMode                disposal;            /**<previous frame GIF disposal*/
+    M4OSA_UInt16                    b_animated;            /**<Is the GIF animated?*/
+    M4OSA_Bool                        bEffectResize;        /**<Is the GIF resized?*/
+    M4OSA_UInt32                    topleft_x;            /**< The top-left X coordinate in the
+                                                                 output picture of the first
+                                                                 decoded pixel */
+    M4OSA_UInt32                    topleft_y;            /**< The top-left Y coordinate in the
+                                                                 output picture of the first
+                                                                 decoded pixel */
+    M4OSA_UInt32                    width;                /**<GIF width, fill during the
+                                                                initialization with the SPS*/
+    M4OSA_UInt32                    height;                /**<GIF height, fill during the
+                                                                 initialization with the SPS*/
+    M4OSA_UInt32                    effectDuration;        /**<Effect duration*/
+    M4OSA_Int32                        effectStartTime;    /**<Effect start time*/
+    M4OSA_UInt32                    clipTime;            /**<current output clip time for the
+                                                                current frame*/
+    M4OSA_UInt32                    last_clipTime;        /**<previous output clip time for the
+                                                                previous frame*/
+    M4OSA_UInt32                    lastStepDuration;    /**<Time interval between the previous
+                                                             frame and the current frame*/
+    M4OSA_Bool                        b_IsFileGif;        /**<Is the framing using a gif file*/
+    M4OSA_UInt32                    last_width;            /**<Last frame width*/
+    M4OSA_UInt32                    last_height;        /**<Last frame height*/
+    M4OSA_UInt32                    last_topleft_x;        /**<Last frame x topleft*/
+    M4OSA_UInt32                    last_topleft_y;        /**<Last frame y topleft*/
+    M4OSA_UInt32                    current_gif_time;    /**< Current time of the GIF in output
+                                                              file time */
+    M4OSA_Float                        frameDurationRatio;    /**< Frame duration ratio */
+    M4xVSS_internalEffectsAlphaBlending*    alphaBlendingStruct;/*Alpha blending structure*/
+#ifdef DEBUG_GIF
+    M4OSA_UInt8                        uiDebug_fileCounter;/**<for debug purpose,
+                                                                 count the frame of the gif*/
+#endif /*DEBUG_GIF*/
+}M4xVSS_FramingContext;
+#endif /*DECODE_GIF_ON_SAVING*/
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_Pto3GPP_params
+ * @brief    Internal xVSS parameter for Pto3GPP module
+ * @note    This structure is filled by the M4xVSS_SendCommand function,
+ * @note    and is used during M4xVSS_Step function to initialize Pto3GPP module
+ * @note    All the JPG files to transform to 3GP are chained
+ ******************************************************************************
+*/
+typedef struct {
+    M4OSA_Char*                        pFileIn;
+    M4OSA_Char*                        pFileOut;
+    M4OSA_Char*                        pFileTemp;            /**< temporary file used for
+                                                                 metadata writing; NULL if the
+                                                                 cstmem writer is not used */
+    M4OSA_UInt32                    duration;
+    M4VIDEOEDITING_FileType            InputFileType;
+    M4OSA_Bool                        isCreated;            /**< This boolean is used to know if
+                                                                    the output file is already
+                                                                    created or not */
+    M4OSA_Bool                        isPanZoom;            /**< RC: Boolean used to know if the
+                                                                pan and zoom mode is enabled */
+    M4OSA_UInt16                    PanZoomXa;            /**< RC */
+    M4OSA_UInt16                    PanZoomTopleftXa;    /**< RC */
+    M4OSA_UInt16                    PanZoomTopleftYa;    /**< RC */
+    M4OSA_UInt16                    PanZoomXb;            /**< RC */
+    M4OSA_UInt16                    PanZoomTopleftXb;    /**< RC */
+    M4OSA_UInt16                    PanZoomTopleftYb;    /**< RC */
+    M4xVSS_MediaRendering            MediaRendering;        /**< FB: to render or not picture
+                                                                aspect ratio */
+    M4VIDEOEDITING_VideoFramerate    framerate;            /**< RC */
+    M4OSA_Void*                pNext;                /**< Address of next M4xVSS_Pto3GPP_params*
+                                                             element */
+    /*To support ARGB8888:width and height */
+    M4OSA_UInt32            width;
+    M4OSA_UInt32             height;
+
+} M4xVSS_Pto3GPP_params;
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_fiftiesStruct
+ * @brief    It is used internally by xVSS for fifties effect
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_UInt32 fiftiesEffectDuration;    /**< Duration of the same effect in a video */
+    M4OSA_Int32 previousClipTime;          /**< Previous clip time, used by framing filter
+                                                for SAVING */
+    M4OSA_UInt32 shiftRandomValue;                /**< Vertical shift of the image */
+    M4OSA_UInt32 stripeRandomValue;               /**< Horizontal position of the stripe */
+
+} M4xVSS_FiftiesStruct;
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_ColorRGB16
+ * @brief    It is used internally by xVSS for RGB16 color effect
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4xVSS_VideoEffectType colorEffectType;    /*Color type of effect*/
+    M4OSA_UInt16    rgb16ColorData;            /*RGB16 color only for the RGB16 color effect*/
+} M4xVSS_ColorStruct;
+
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_PictureCallbackCtxt
+ * @brief    The Callback Context parameters for Pto3GPP
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_Char*                m_FileIn;
+    M4OSA_UInt32            m_NbImage;
+    M4OSA_UInt32            m_ImageCounter;
+    M4OSA_Double            m_timeDuration;
+    M4OSA_FileReadPointer*  m_pFileReadPtr;
+    M4VIFI_ImagePlane*        m_pDecodedPlane; /* Used for Pan and Zoom only */
+    M4xVSS_Pto3GPP_params*    m_pPto3GPPparams;
+    M4OSA_Context            m_air_context;
+    M4xVSS_MediaRendering    m_mediaRendering;
+
+} M4xVSS_PictureCallbackCtxt;
+
+/**
+ ******************************************************************************
+ * enum        M4xVSS_State
+ * @brief    Internal State of the xVSS
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4xVSS_kStateInitialized = 0,
+    M4xVSS_kStateAnalyzing,
+    M4xVSS_kStateOpened,
+    //M4xVSS_kStateGeneratingPreview,
+    //M4xVSS_kStatePreview,
+    M4xVSS_kStateSaving,
+    M4xVSS_kStateSaved
+
+} M4xVSS_State;
+
+/**
+ ******************************************************************************
+ * enum        M4xVSS_editMicroState
+ * @brief    Internal Micro state of the xVSS for previewing/saving states
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4xVSS_kMicroStateEditing = 0,
+    M4xVSS_kMicroStateAudioMixing
+
+} M4xVSS_editMicroState;
+
+/**
+ ******************************************************************************
+ * enum        M4xVSS_editMicroState
+ * @brief    Internal Micro state of the xVSS for analyzing states
+ ******************************************************************************
+*/
+typedef enum
+{
+    M4xVSS_kMicroStateAnalysePto3GPP = 0,
+    M4xVSS_kMicroStateConvertPto3GPP,
+    M4xVSS_kMicroStateAnalyzeMCS,
+    M4xVSS_kMicroStateTranscodeMCS
+
+} M4xVSS_analyseMicroState;
+
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_MCS_params
+ * @brief    Internal xVSS parameter for MCS module
+ * @note    This structure is filled by the M4xVSS_SendCommand function,
+ * @note    and is used during M4xVSS_Step function to initialize MCS module
+ * @note    All the input files to transcode are chained
+ ******************************************************************************
+*/
+typedef struct {
+    M4OSA_Void*                                pFileIn;
+    M4OSA_Void*                                pFileOut;
+    /**< temporary file used for metadata writing; NULL if the cstmem writer is not used */
+    M4OSA_Void*                             pFileTemp;
+    M4VIDEOEDITING_FileType                    InputFileType;
+    M4VIDEOEDITING_FileType                    OutputFileType;
+    M4VIDEOEDITING_VideoFormat                OutputVideoFormat;
+    M4VIDEOEDITING_VideoFrameSize            OutputVideoFrameSize;
+    M4VIDEOEDITING_VideoFramerate            OutputVideoFrameRate;
+    M4VIDEOEDITING_AudioFormat                OutputAudioFormat;
+    M4VIDEOEDITING_AudioSamplingFrequency    OutputAudioSamplingFrequency;
+    M4OSA_Bool                                bAudioMono;
+    M4VIDEOEDITING_Bitrate                    OutputVideoBitrate;
+    M4VIDEOEDITING_Bitrate                    OutputAudioBitrate;
+#ifdef TIMESCALE_BUG
+    M4OSA_UInt32                            OutputVideoTimescale;
+#endif
+    M4OSA_Bool                                isBGM;
+    /**< This boolean is used to know if the output file is already created or not */
+    M4OSA_Bool                                isCreated;
+    /**< Address of next M4xVSS_MCS_params* element */
+    M4OSA_Void*                                pNext;
+
+    /*FB: transcoding per parts*/
+    M4OSA_UInt32                         BeginCutTime;    /**< Beginning cut time in input file */
+    M4OSA_UInt32                         EndCutTime;      /**< End cut time in input file */
+    M4OSA_UInt32                         OutputVideoTimescale;    /*Output timescale*/
+
+    M4MCS_MediaRendering                 MediaRendering;   /**< FB: to crop, resize, or render
+                                                                black borders*/
+
+} M4xVSS_MCS_params;
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_internal_AlphaMagicSettings
+ * @brief    This structure defines the alpha magic transition settings
+ ******************************************************************************
+*/
+typedef struct {
+    M4VIFI_ImagePlane    *pPlane;
+    M4OSA_Int32         blendingthreshold;    /**< Blending Range */
+    M4OSA_Bool            isreverse;            /**< direct effect or reverse */
+
+} M4xVSS_internal_AlphaMagicSettings;
+
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_internal_SlideTransitionSettings
+ * @brief    This structure defines the internal slide transition settings
+ * @note    This type happens to match the external transition settings
+ *            structure (i.e. the one which is given by the application), but are
+ *            conceptually different types, so that if (or rather when) some day
+ *            translation needs to occur when loading the settings from the app,
+ *            this separate type will already be ready.
+ ******************************************************************************
+*/
+
+typedef M4xVSS_SlideTransitionSettings    M4xVSS_internal_SlideTransitionSettings;
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_internalJpegChunkMode
+ * @brief    This structure defines the parameters of the chunk callback to decode
+ *            a JPEG by chunk mode.
+ ******************************************************************************
+*/
+#if 0
+typedef struct {
+    M4OSA_FileReadPointer*    m_pFileReadPtr;
+    M4OSA_Context            m_pJPEGFileIn;
+    M4OSA_Void*                m_pFileIn;
+    M4SPS_Stream            m_inputStream;
+    M4OSA_UInt32            m_total_read;
+    M4OSA_UInt32            m_fileSize;
+
+} M4xVSS_internalJpegChunkMode;
+#endif
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_UTFConversionContext
+ * @brief    Internal UTF conversion context
+ * @note    This structure contains the UTF conversion information
+ *            needed by the xVSS to manage the different formats (UTF8/16/ASCII)
+ ******************************************************************************
+*/
+typedef struct
+{
+    /*Function pointer on an external text conversion function */
+    M4xVSS_toUTF8Fct                pConvToUTF8Fct;
+    /*Function pointer on an external text conversion function */
+    M4xVSS_fromUTF8Fct                pConvFromUTF8Fct;
+    /*Temporary buffer that contains the result of each conversion*/
+    M4OSA_Void*                        pTempOutConversionBuffer;
+    /*Size of the previous buffer; the size is predetermined*/
+    M4OSA_UInt32                    m_TempOutConversionSize;
+} M4xVSS_UTFConversionContext;
+
+
+
+/**
+ ******************************************************************************
+ * struct    M4xVSS_Context
+ * @brief    Internal context of the xVSS
+ * @note    This structure contains all the internal information needed by the xVSS
+ ******************************************************************************
+*/
+typedef struct {
+    /**< Pointer on OSAL file read functions */
+    M4OSA_FileReadPointer*            pFileReadPtr;
+    /**< Pointer on OSAL file write functions */
+    M4OSA_FileWriterPointer*        pFileWritePtr;
+    /**< Local copy of video editor settings */
+    M4VSS3GPP_EditSettings*            pSettings;
+    /**< Current Settings of video editor to use in step functions for preview/save */
+    M4VSS3GPP_EditSettings*            pCurrentEditSettings;
+    /**< Current context of video editor to use in step functions for preview/save */
+    M4VSS3GPP_EditContext            pCurrentEditContext;
+    /**< This is to know if a previous M4xVSS_SendCommand has already been called */
+    M4OSA_UInt8                        previousClipNumber;
+    /**< Audio mixing settings, needed to free it in M4xVSS_internalCloseAudioMixedFile function*/
+    M4VSS3GPP_AudioMixingSettings*    pAudioMixSettings;
+    /**< Audio mixing context */
+    M4VSS3GPP_AudioMixingContext    pAudioMixContext;
+    /**< File path for PCM output file: used for preview, given to user */
+    M4OSA_Char*                        pcmPreviewFile;
+    /**< Duplication of output file pointer, to be able to use audio mixing */
+    M4OSA_Char*                        pOutputFile;
+    /**< Duplication of temporary file pointer, to be able to use audio mixing */
+    M4OSA_Char*                        pTemporaryFile;
+    /**< Micro state for Saving/Previewing state */
+    M4xVSS_editMicroState            editingStep;
+    /**< Micro state for Analyzing state */
+    M4xVSS_analyseMicroState        analyseStep;
+    /**< Nb of step for analysis or save/preview. Used to compute progression
+         of analysis or save/preview */
+    M4OSA_UInt8                        nbStepTotal;
+    /**< Current step number for analysis or save/preview */
+    M4OSA_UInt8                        currentStep;
+    /**< To be able to free pEffects during preview close */
+    M4xVSS_PreviewSettings*            pPreviewSettings;
+    /**< Temporary file path: all temporary files are created here */
+    M4OSA_Char*                        pTempPath;
+    /**< Current state of xVSS */
+    M4xVSS_State                    m_state;
+    /**< List of still pictures input to convert to 3GP with parameters */
+    M4xVSS_Pto3GPP_params*            pPTo3GPPparamsList;
+    /**< Current element of the above chained list being processed by the Pto3GPP */
+    M4xVSS_Pto3GPP_params*            pPTo3GPPcurrentParams;
+    /**< Current Pto3GPP context, needed to call Pto3GPP_step function in M4xVSS_step function */
+    M4PTO3GPP_Context                pM4PTO3GPP_Ctxt;
+    /**< Pointer on the callback function of the Pto3GPP module */
+    M4xVSS_PictureCallbackCtxt*        pCallBackCtxt;
+    /**< List of files to transcode with parameters */
+    M4xVSS_MCS_params*                pMCSparamsList;
+    /**< Current element of the above chained list being processed by the MCS */
+    M4xVSS_MCS_params*                pMCScurrentParams;
+    /**< Current MCS context, needed to call MCS_step function in M4xVSS_step function*/
+    M4MCS_Context                    pMCS_Ctxt;
+    /**< Index to have unique temporary filename */
+    M4OSA_UInt32                    tempFileIndex;
+    /**< In case of MMS use case, targeted bitrate to reach output file size */
+    M4OSA_UInt32                    targetedBitrate;
+    /**< If the sendCommand fct is called twice or more, the first computed timescale
+        recorded here must be reused */
+    M4OSA_UInt32                    targetedTimescale;
+
+    /*UTF Conversion support*/
+    M4xVSS_UTFConversionContext    UTFConversionContext;    /*UTF conversion context structure*/
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+    struct
+    {
+        M4VD_Interface*    pDecoderInterface;
+        M4OSA_Void*        pUserData;
+        M4OSA_Bool        registered;
+    } registeredExternalDecs[M4VD_kVideoType_NB];
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+    struct
+    {
+        M4VE_Interface*    pEncoderInterface;
+        M4OSA_Void*        pUserData;
+        M4OSA_Bool        registered;
+    } registeredExternalEncs[M4VE_kEncoderType_NB];
+} M4xVSS_Context;
+
+/**
+ * Internal function prototypes */
+
+M4OSA_ERR M4xVSS_internalStartTranscoding(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalStopTranscoding(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalDecodeJPG(M4OSA_Void* pFileIn, M4OSA_FileReadPointer* pFileReadPtr,
+                                   M4VIFI_ImagePlane** pImagePlanes);
+
+M4OSA_ERR M4xVSS_internalConvertARGB8888toYUV420(M4OSA_Void* pFileIn,
+                                                 M4OSA_FileReadPointer* pFileReadPtr,
+                                                 M4VIFI_ImagePlane** pImagePlanes,
+                                                 M4OSA_UInt32 width,M4OSA_UInt32 height);
+M4OSA_ERR M4xVSS_internalDecodeAndResizeJPG(M4OSA_Void* pFileIn,
+                                            M4OSA_FileReadPointer* pFileReadPtr,
+                                            M4VIFI_ImagePlane* pImagePlanes);
+M4OSA_ERR M4xVSS_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
+                                                          M4OSA_FileReadPointer* pFileReadPtr,
+                                                          M4VIFI_ImagePlane* pImagePlanes,
+                                                          M4OSA_UInt32 width,M4OSA_UInt32 height);
+
+M4OSA_ERR M4xVSS_internalStartConvertPictureTo3gp(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalStopConvertPictureTo3gp(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx);
+
+#ifdef DECODE_GIF_ON_SAVING
+M4OSA_ERR M4xVSS_internalDecodeGIF(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalDecodeGIF_Initialization(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalDecodeGIF_Cleaning(M4OSA_Context pContext);
+
+#else
+M4OSA_ERR M4xVSS_internalDecodeGIF(M4OSA_Context pContext, M4VSS3GPP_EffectSettings* pEffect,
+                                   M4xVSS_FramingStruct* framingCtx);
+#endif /*DECODE_GIF_ON_SAVING*/
+
+M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pContext,
+                                                               M4VSS3GPP_EffectSettings* pEffect,
+                                                               M4xVSS_FramingStruct* framingCtx,
+                                                               M4VIDEOEDITING_VideoFrameSize \
+                                                                    OutputVideoResolution);
+
+M4OSA_ERR M4xVSS_internalGenerateEditedFile(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalCloseEditedFile(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalGenerateAudioMixFile(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalCloseAudioMixedFile(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalFreePreview(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalFreeSaving(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_freeSettings(M4VSS3GPP_EditSettings* pSettings);
+
+M4OSA_ERR M4xVSS_freeCommand(M4OSA_Context pContext);
+
+M4OSA_ERR M4xVSS_internalGetProperties(M4OSA_Context pContext, M4OSA_Char* pFile,
+                                         M4VIDEOEDITING_ClipProperties *pFileProperties);
+
+M4OSA_ERR M4xVSS_AlphaMagic( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+                             M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+                             M4VSS3GPP_ExternalProgress *pProgress,
+                             M4OSA_UInt32 uiTransitionKind);
+
+M4OSA_ERR M4xVSS_AlphaMagicBlending( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+                                     M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+                                     M4VSS3GPP_ExternalProgress *pProgress,
+                                     M4OSA_UInt32 uiTransitionKind);
+
+M4OSA_ERR M4xVSS_SlideTransition( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+                                  M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+                                  M4VSS3GPP_ExternalProgress *pProgress,
+                                  M4OSA_UInt32 uiTransitionKind);
+
+M4OSA_ERR M4xVSS_FadeBlackTransition(M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+                                     M4VIFI_ImagePlane PlaneIn2[3],M4VIFI_ImagePlane *PlaneOut,
+                                     M4VSS3GPP_ExternalProgress *pProgress,
+                                     M4OSA_UInt32 uiTransitionKind);
+
+M4OSA_ERR M4xVSS_internalGetTargetedTimeScale(M4OSA_Context pContext,
+                                              M4VSS3GPP_EditSettings* pSettings,
+                                              M4OSA_UInt32* pTargetedTimeScale);
+
+M4OSA_ERR M4xVSS_internalConvertToUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
+                                       M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize);
+
+
+M4OSA_ERR M4xVSS_internalConvertFromUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
+                                         M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+
+#endif /* __M4XVSS_INTERNAL_H__ */
+
diff --git a/libvideoeditor/vss/mcs/Android.mk b/libvideoeditor/vss/mcs/Android.mk
new file mode 100755
index 0000000..5053e7d
--- /dev/null
+++ b/libvideoeditor/vss/mcs/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_API.h b/libvideoeditor/vss/mcs/inc/M4MCS_API.h
new file mode 100755
index 0000000..16c4fd9
--- /dev/null
+++ b/libvideoeditor/vss/mcs/inc/M4MCS_API.h
@@ -0,0 +1,773 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file    M4MCS_API.h
+ * @brief   Media Conversion Service public API.
+ * @note    MCS allows transcoding a 3gp/mp4 file into a new 3gp/mp4 file changing the
+ *          video and audio encoding settings.
+ *          It is a straightforward and fully synchronous API.
+ ******************************************************************************
+ */
+
+#ifndef __M4MCS_API_H__
+#define __M4MCS_API_H__
+
+/**
+ *    OSAL basic types and errors */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+/**
+ *    OSAL types for file access */
+#include "M4OSA_FileReader.h"
+#include "M4OSA_FileWriter.h"
+
+/**
+ *    Definition of M4_VersionInfo */
+#include "M4TOOL_VersionInfo.h"
+
+/**
+ * Common definitions of video editing components */
+#include "M4_VideoEditingCommon.h"
+
+#include "M4VD_HW_API.h"
+#include "M4VE_API.h"
+
+/**
+ * To enable external audio codecs registering*/
+#include "M4AD_Common.h"
+#include "M4ENCODER_AudioCommon.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ *    Public type of the MCS context */
+typedef M4OSA_Void* M4MCS_Context;
+
+
+/**
+ ******************************************************************************
+ * enum        M4MCS_MediaRendering
+ * @brief    This enum defines different media rendering
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4MCS_kResizing = 0,    /**< The media is resized, the aspect ratio can be
+                              different from the original one.
+                              All of the media is rendered */
+    M4MCS_kCropping,        /**< The media is cropped, the aspect ratio is the
+                              same as the original one.
+                              The media is not rendered entirely */
+    M4MCS_kBlackBorders     /**< Black borders are rendered in order to keep the
+                              original aspect ratio. All the media is rendered */
+} M4MCS_MediaRendering;
+
+
+/**
+ ******************************************************************************
+ * struct   M4MCS_ExternalProgress
+ * @brief   This structure contains information provided to the external Effect functions
+ * @note    The uiProgress value should be enough for most cases
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4OSA_UInt32    uiProgress;     /**< Progress of the Effect from 0 to 1000 (one thousand) */
+    M4OSA_UInt32    uiClipTime;     /**< Current time, in milliseconds,
+                                          in the current clip time-line */
+    M4OSA_UInt32    uiOutputTime;   /**< Current time, in milliseconds,
+                                          in the output clip time-line */
+
+} M4MCS_ExternalProgress;
+
+
+/**
+ ******************************************************************************
+ * enum     M4MCS_AudioEffectType
+ * @brief   This enumeration defines the audio effect types of the MCS
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4MCS_kAudioEffectType_None    = 0,
+    M4MCS_kAudioEffectType_FadeIn  = 8, /**< Intended for begin effect */
+    M4MCS_kAudioEffectType_FadeOut = 16, /**< Intended for end effect */
+    M4MCS_kAudioEffectType_External = 256
+
+} M4MCS_AudioEffectType;
+
+
+/**
+ ******************************************************************************
+ * prototype    M4MCS_editAudioEffectFct
+ * @brief       Audio effect functions implemented by the integrator
+ *              must match this prototype.
+ * @note        The function is provided with the original PCM data buffer and its size.
+ *              The audio effect has to be applied to it.
+ *              The progress of the effect is given on a scale from 0 to 1000.
+ *              When the effect function is called, all the buffers are valid and
+ *              owned by the MCS.
+ *
+ * @param   pFunctionContext    (IN) The function context, previously set by the integrator
+ * @param   pPCMdata            (IN/OUT) valid PCM data buffer
+ * @param   uiPCMsize           (IN/OUT) PCM data buffer corresponding size
+ * @param   pProgress           (IN) Set of information about the audio effect progress.
+ *
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+typedef M4OSA_ERR (*M4MCS_editAudioEffectFct)
+(
+    M4OSA_Void *pFunctionContext,
+    M4OSA_Int16 *pPCMdata,
+    M4OSA_UInt32 uiPCMsize,
+    M4MCS_ExternalProgress *pProgress
+);
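
As an illustration only, here is a minimal sketch of an integrator-side fade-in callback matching this prototype. The function name is hypothetical, and treating uiPCMsize as a byte count is an assumption not guaranteed by this header.

static M4OSA_ERR sampleFadeInAudioEffect(M4OSA_Void *pFunctionContext,
                                         M4OSA_Int16 *pPCMdata,
                                         M4OSA_UInt32 uiPCMsize,
                                         M4MCS_ExternalProgress *pProgress)
{
    M4OSA_UInt32 i, nbSamples;

    (void)pFunctionContext;                       /* unused in this sketch */

    if ((M4OSA_NULL == pPCMdata) || (M4OSA_NULL == pProgress))
        return M4ERR_PARAMETER;

    nbSamples = uiPCMsize / sizeof(M4OSA_Int16);  /* assumption: uiPCMsize is in bytes */

    /* uiProgress goes from 0 to 1000 over the effect duration */
    for (i = 0; i < nbSamples; i++) {
        pPCMdata[i] = (M4OSA_Int16)(((M4OSA_Int32)pPCMdata[i]
                                     * (M4OSA_Int32)pProgress->uiProgress) / 1000);
    }
    return M4NO_ERROR;
}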
+
+
+/**
+ ******************************************************************************
+ * struct   M4MCS_EffectSettings
+ * @brief   This structure defines an audio effect to be applied during the transcoding.
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4OSA_UInt32                 uiStartTime;              /**< In ms */
+    M4OSA_UInt32                 uiDuration;               /**< In ms */
+    M4MCS_editAudioEffectFct     ExtAudioEffectFct;        /**< External effect function */
+    M4OSA_Void                   *pExtAudioEffectFctCtxt;  /**< Context given to the external
+                                                                effect function */
+    M4MCS_AudioEffectType        AudioEffectType;         /**< None, FadeIn, FadeOut */
+
+} M4MCS_EffectSettings;
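
A hedged sketch of describing such an effect with this structure, reusing the hypothetical sampleFadeInAudioEffect callback above; wiring an integrator-supplied function through M4MCS_kAudioEffectType_External is an assumption based on the field comments.

static M4OSA_Void sampleFillFadeInEffect(M4MCS_EffectSettings *pEffect)
{
    pEffect->uiStartTime            = 0;        /* effect starts at 0 ms */
    pEffect->uiDuration             = 2000;     /* and lasts 2 s */
    pEffect->ExtAudioEffectFct      = sampleFadeInAudioEffect;  /* hypothetical callback */
    pEffect->pExtAudioEffectFctCtxt = M4OSA_NULL;
    pEffect->AudioEffectType        = M4MCS_kAudioEffectType_External;
}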
+
+
+/**
+ ******************************************************************************
+ * struct    M4MCS_OutputParams
+ * @brief    MCS Output parameters
+ * @note     The following parameters are used for still picture inputs :
+ *             - OutputFileType (must be set to M4VIDEOEDITING_kFileType_JPG)
+ *             - bDiscardExif must be set to M4OSA_TRUE or M4OSA_FALSE
+ *             - bAdjustOrientation must be set to M4OSA_TRUE or M4OSA_FALSE
+ *             - (MediaRendering is not handled : the output image resolution is always
+ *                set according to the BestFit criteria)
+ *            bDiscardExif and bAdjustOrientation are still-picture-only parameters
+ ******************************************************************************
+ */
+typedef struct
+{
+    /**< Format of the output file */
+    M4VIDEOEDITING_FileType                 OutputFileType;
+    /**< Output video compression format, see enum */
+    M4VIDEOEDITING_VideoFormat              OutputVideoFormat;
+    /**< Output frame size : QQVGA, QCIF or SQCIF */
+    M4VIDEOEDITING_VideoFrameSize           OutputVideoFrameSize;
+    /**< Targeted Output framerate, see enum */
+    M4VIDEOEDITING_VideoFramerate           OutputVideoFrameRate;
+    /**< Format of the audio in the stream */
+    M4VIDEOEDITING_AudioFormat              OutputAudioFormat;
+    /**< Sampling frequency of the audio in the stream */
+    M4VIDEOEDITING_AudioSamplingFrequency   OutputAudioSamplingFrequency;
+    /**< Set to M4OSA_TRUE if the output audio is mono */
+    M4OSA_Bool                              bAudioMono;
+    /**< Output PCM file if not NULL */
+    M4OSA_Char                              *pOutputPCMfile;
+    /**< To crop, resize, or render black borders*/
+    M4MCS_MediaRendering                    MediaRendering;
+    /**< List of effects */
+    M4MCS_EffectSettings                    *pEffects;
+    /**< Number of effects in the above list */
+    M4OSA_UInt8                             nbEffects;
+
+    /*--- STILL PICTURE ---*/
+    /**< TRUE: Even if the input file contains an EXIF section,
+    the output file won't contain any EXIF section.*/
+    M4OSA_Bool                              bDiscardExif ;
+
+    /**< =TRUE : picture must be rotated if Exif tags hold a rotation info
+    (and rotation info is set to 0)*/
+    M4OSA_Bool                              bAdjustOrientation ;
+    /*--- STILL PICTURE ---*/
+} M4MCS_OutputParams;
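
For illustration, a sketch of filling this structure for a 3gp/H263/AMR-NB output. The M4VIDEOEDITING_k* enumeration value names are assumed from M4_VideoEditingCommon.h and are not defined in this header.

#include <string.h>   /* memset, for this sketch only */

static M4OSA_Void sampleFillOutputParams(M4MCS_OutputParams *pParams,
                                         M4MCS_EffectSettings *pEffects,
                                         M4OSA_UInt8 nbEffects)
{
    memset(pParams, 0, sizeof(*pParams));        /* zero the optional/still-picture fields */

    pParams->OutputFileType               = M4VIDEOEDITING_kFileType_3GPP;  /* assumed name */
    pParams->OutputVideoFormat            = M4VIDEOEDITING_kH263;           /* assumed name */
    pParams->OutputVideoFrameSize         = M4VIDEOEDITING_kQCIF;           /* assumed name */
    pParams->OutputVideoFrameRate         = M4VIDEOEDITING_k15_FPS;         /* assumed name */
    pParams->OutputAudioFormat            = M4VIDEOEDITING_kAMR_NB;         /* assumed name */
    pParams->OutputAudioSamplingFrequency = M4VIDEOEDITING_k8000_ASF;       /* assumed name */
    pParams->bAudioMono                   = M4OSA_TRUE;
    pParams->pOutputPCMfile               = M4OSA_NULL;
    pParams->MediaRendering               = M4MCS_kBlackBorders;
    pParams->pEffects                     = pEffects;
    pParams->nbEffects                    = nbEffects;
    /* bDiscardExif / bAdjustOrientation are still-picture-only, left to M4OSA_FALSE here */
}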
+
+/*--- STILL PICTURE ---*/
+/**
+ ******************************************************************************
+ * enum      M4MCS_SPOutputResolution
+ * @brief    Still picture specific : MCS output targeted file resolution
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4MCS_kResSameAsInput       = 0x00, /*width x height*/
+    M4MCS_kResQVGA              = 0x01, /*320x240*/
+    M4MCS_kResVGA               = 0x02, /*640x480*/
+    M4MCS_kResWQVGA             = 0x03, /*400x240*/
+    M4MCS_kResWVGA              = 0x04, /*800x480*/
+    M4MCS_kResXGA               = 0x05, /*1024x768*/
+    M4MCS_kResCustom            = 0xFF  /*Size is set via StillPictureCustomWidth/Height*/
+} M4MCS_SPOutputResolution ;
+
+
+/**
+ ******************************************************************************
+ * enum      M4MCS_SPStrategy
+ * @brief    Still picture specific : MCS strategy to configure the encoding parameters
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4MCS_kFileSizeOnlyFixed            = 0x00, /*StillPictureResolution and
+                                                 QualityFactor are ignored*/
+    M4MCS_kFileSizeAndResFixed          = 0x01, /*QualityFactor is ignored*/
+    M4MCS_kQualityAndResFixed           = 0x02  /*OutputFileSize is ignored*/
+} M4MCS_SPStrategy ;
+
+
+/**
+ ******************************************************************************
+ * enum      M4MCS_SPCrop
+ * @brief    Still picture specific : indicate whether cropping should be done
+                                     before changing the resolution
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4MCS_kNoCrop                = 0x00, /*No Cropping is performed*/
+    M4MCS_kCropBeforeResize      = 0x01  /*Input image is cropped (before changing resolution)*/
+} M4MCS_SPCrop ;
+
+/**
+ ******************************************************************************
+ * struct    M4MCS_ExifInfos
+ * @brief    Still picture specific : this structure contains all available EXIF fields
+ ******************************************************************************
+ */
+typedef struct {
+    M4OSA_Char* ImageTitle;              /* Image title */
+    M4OSA_Char* EquipmentManufacturer;   /* Image input equipment manufacturer */
+    M4OSA_Char* EquipmentModel;          /* Image input equipment model */
+    M4OSA_Char* Software;                /* Software used */
+    M4OSA_Char* Artist;                  /* Artist */
+    M4OSA_Char* Copyright;               /* Copyright */
+    M4OSA_Char* CreationDateTime;        /* Creation date and time */
+    M4OSA_UInt32 Orientation;            /* Orientation of the picture */
+    M4OSA_Char* LastChangeDateTime;      /* Last Change date and time*/
+    M4OSA_UInt32 PixelXDimension;        /* Image width*/
+    M4OSA_UInt32 PixelYDimension;        /* Image Height*/
+} M4MCS_ExifInfos;
+
+/*--- STILL PICTURE ---*/
+
+/**
+ ******************************************************************************
+ * struct    M4MCS_EncodingParams
+ * @brief    MCS file size, bitrate and cut parameters
+ * @note     The following parameters are used for still picture inputs :
+ *             - OutputFileSize
+ *             - StillPictureResolution
+ *             - QualityFactor
+ *             - StillPictureStrategy
+ *             - StillPictureCustomWidth/Height (if StillPictureResolution==M4MCS_kResCustom)
+ *            Still picture only parameters : StillPictureResolution, QualityFactor,
+ *            StillPictureStrategy and StillPictureCustomWidth/Height
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4VIDEOEDITING_Bitrate    OutputVideoBitrate;     /**< Targeted video bitrate */
+    M4VIDEOEDITING_Bitrate    OutputAudioBitrate;     /**< Targeted audio bitrate */
+    M4OSA_UInt32              BeginCutTime;           /**< Beginning cut time in input file */
+    M4OSA_UInt32              EndCutTime;             /**< End cut time in input file */
+    M4OSA_UInt32              OutputFileSize;         /**< Expected resulting file size */
+    M4OSA_UInt32              OutputVideoTimescale;   /**< Optional parameter used to fix a
+                                                           timescale during transcoding */
+
+    /*--- STILL PICTURE ---*/
+    M4OSA_Int32               QualityFactor ;         /**< =-1 (undefined) or 0(lowest)..
+                                                            50(best) : This parameter is the
+                                                            quality indication for the JPEG output
+                                                            file (if =-1 the MCS will set quality
+                                                            automatically)*/
+    M4MCS_SPStrategy            StillPictureStrategy ; /**< Defines which input parameters
+                                                            will be taken into account by MCS*/
+    M4MCS_SPOutputResolution    StillPictureResolution;/**< Desired output resolution for
+                                                            a still picture file */
+    /**< (only if Resolution==M4MCS_kResCustom) : Custom output image width */
+    M4OSA_UInt32                StillPictureCustomWidth;
+    /**< (only if Resolution==M4MCS_kResCustom) : Custom output image height */
+    M4OSA_UInt32                StillPictureCustomHeight;
+    /**< Indicate whether Crop should be performed */
+    M4MCS_SPCrop                StillPictureCrop;
+    /**< (only if cropping) X coordinate of topleft corner of the crop window */
+    M4OSA_UInt32                StillPictureCrop_X;
+    /**< (only if cropping) Y coordinate of topleft corner of the crop window */
+    M4OSA_UInt32                StillPictureCrop_Y;
+    /**< (only if cropping) Width of the crop window (in pixels) */
+    M4OSA_UInt32                StillPictureCrop_W;
+    /**< (only if cropping) Height of the crop window (in pixels) */
+    M4OSA_UInt32                StillPictureCrop_H;
+    /*--- STILL PICTURE ---*/
+} M4MCS_EncodingParams;
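
A matching sketch for the encoding parameters of a plain video transcode. Writing bitrates as raw bit/s values, and treating 0 as "no constraint" for OutputFileSize and OutputVideoTimescale, are assumptions; the symbolic M4VIDEOEDITING_Bitrate constants from M4_VideoEditingCommon.h could be used instead.

static M4OSA_Void sampleFillEncodingParams(M4MCS_EncodingParams *pRates)
{
    pRates->OutputVideoBitrate   = 384000;   /* bit/s, below the documented 800 kbps limit */
    pRates->OutputAudioBitrate   = 12200;    /* bit/s, AMR-NB 12.2 kbps */
    pRates->BeginCutTime         = 0;        /* ms */
    pRates->EndCutTime           = 10000;    /* ms */
    pRates->OutputFileSize       = 0;        /* assumption: no file size constraint */
    pRates->OutputVideoTimescale = 0;        /* optional, left unset */

    /* Still picture only fields, irrelevant for a video transcode */
    pRates->QualityFactor            = -1;   /* undefined: MCS picks the quality */
    pRates->StillPictureStrategy     = M4MCS_kFileSizeOnlyFixed;
    pRates->StillPictureResolution   = M4MCS_kResSameAsInput;
    pRates->StillPictureCustomWidth  = 0;
    pRates->StillPictureCustomHeight = 0;
    pRates->StillPictureCrop         = M4MCS_kNoCrop;
    pRates->StillPictureCrop_X       = 0;
    pRates->StillPictureCrop_Y       = 0;
    pRates->StillPictureCrop_W       = 0;
    pRates->StillPictureCrop_H       = 0;
}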
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getVersion(M4_VersionInfo* pVersionInfo);
+ * @brief    Get the MCS version.
+ * @note    Can be called at any time. It does not need any context.
+ * @param    pVersionInfo        (OUT) Pointer to a version info structure
+ * @return   M4NO_ERROR:         No error
+ * @return   M4ERR_PARAMETER:    pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getVersion(M4_VersionInfo* pVersionInfo);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_init(M4MCS_Context* pContext, M4OSA_FileReadPointer* pFileReadPtrFct,
+                        M4OSA_FileWriterPointer* pFileWritePtrFct);
+ * @brief    Initializes the MCS (allocates an execution context).
+ * @note
+ * @param    pContext            (OUT) Pointer to the MCS context to allocate
+ * @param    pFileReadPtrFct     (IN) Pointer to OSAL file reader functions
+ * @param    pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
+ * @return   M4NO_ERROR:         No error
+ * @return   M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (If Debug Level >= 2)
+ * @return   M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_init(M4MCS_Context* pContext, M4OSA_FileReadPointer* pFileReadPtrFct,
+                      M4OSA_FileWriterPointer* pFileWritePtrFct);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_open(M4MCS_Context pContext, M4OSA_Void* pFileIn, M4OSA_Void* pFileOut,
+                          M4OSA_UInt32 uiMaxMetadataSize);
+ * @brief   Set the MCS input and output files.
+ * @note    It opens the input file, but the output file is not created yet.
+ *          In case of a still picture, four InputFileType values are possible
+ *          (M4VIDEOEDITING_kFileType_JPG/BMP/GIF/PNG).
+ *          If one of them is set, the OutputFileType SHALL be set to M4VIDEOEDITING_kFileType_JPG.
+ * @param   pContext            (IN) MCS context
+ * @param   pFileIn             (IN) Input file to transcode (The type of this parameter
+ *                                    (URL, pipe...) depends on the OSAL implementation).
+ * @param   InputFileType       (IN) Container type (.3gp, .amr, ...) of the input file.
+ * @param   pFileOut            (IN) Output file to create  (The type of this parameter
+ *                                    (URL, pipe...) depends on the OSAL implementation).
+ * @param   pTempFile           (IN) Temporary file for the constant memory writer to store
+ *                                    metadata ("moov.bin").
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ * @return  M4ERR_ALLOC:        There is no more available memory
+ * @return  M4ERR_FILE_NOT_FOUND:   The input file has not been found
+ * @return  M4MCS_ERR_INVALID_INPUT_FILE:   The input file is not a valid file, or is corrupted
+ * @return  M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM:  The input file contains no
+ *                                                               supported audio or video stream
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_open(M4MCS_Context pContext, M4OSA_Void* pFileIn,
+                     M4VIDEOEDITING_FileType InputFileType,
+                     M4OSA_Void* pFileOut, M4OSA_Void* pTempFile);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_step(M4MCS_Context pContext, M4OSA_UInt8 *pProgress);
+ * @brief   Perform one step of the transcoding.
+ * @note
+ * @param   pContext            (IN) MCS context
+ * @param   pProgress           (OUT) Progress percentage (0 to 100) of the transcoding
+ * @note    pProgress must be a valid address.
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    One of the parameters is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ * @return  M4MCS_WAR_TRANSCODING_DONE: Transcoding is over, user should now call M4MCS_close()
+ * @return  M4MCS_ERR_AUDIO_CONVERSION_FAILED: The audio conversion (AAC to AMR-NB, MP3) failed
+ * @return  M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY: The input file contains an AAC audio track
+ *                                                     with an invalid sampling frequency
+ *                                                     (should never happen)
+ * @return  M4MCS_WAR_PICTURE_AUTO_RESIZE: Picture will be automatically resized to fit
+ *                                          into requirements
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_step(M4MCS_Context pContext, M4OSA_UInt8 *pProgress);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_pause(M4MCS_Context pContext);
+ * @brief   Pause the transcoding, i.e. release the (external hardware) video decoder.
+ * @note    This function is not needed if no hardware accelerators are used.
+ *          In that case, pausing the MCS is simply achieved by temporarily suspending
+ *          the M4MCS_step function calls.
+ * @param   pContext            (IN) MCS context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_pause(M4MCS_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_resume(M4MCS_Context pContext);
+ * @brief   Resume the transcoding after a pause (see M4MCS_pause).
+ * @note    This function is not needed if no hardware accelerators are used.
+ *          In that case, resuming the MCS is simply achieved by calling
+ *          the M4MCS_step function.
+ * @param   pContext            (IN) MCS context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_resume(M4MCS_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_close(M4MCS_Context pContext);
+ * @brief    Finish the MCS transcoding.
+ * @note The output 3GPP file is ready to be played after this call
+ * @param    pContext            (IN) MCS context
+ * @return   M4NO_ERROR:         No error
+ * @return   M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
+ * @return   M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_close(M4MCS_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_cleanUp(M4MCS_Context pContext);
+ * @brief    Free all resources used by the MCS.
+ * @note The context is no longer valid after this call
+ * @param    pContext            (IN) MCS context
+ * @return   M4NO_ERROR:         No error
+ * @return   M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
+ * @return   M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_cleanUp(M4MCS_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_abort(M4MCS_Context pContext);
+ * @brief    Finish the MCS transcoding and free all resources used by the MCS
+ *          whatever the state is.
+ * @note    The context is no longer valid after this call
+ * @param    pContext            (IN) MCS context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_abort(M4MCS_Context pContext);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getInputFileProperties(M4MCS_Context pContext,
+ *                                          M4VIDEOEDITING_ClipProperties* pFileProperties);
+ * @brief   Retrieves the properties of the audio and video streams from the input file.
+ * @param   pContext            (IN) MCS context
+ * @param   pFileProperties     (OUT) Pointer to an allocated M4VIDEOEDITING_ClipProperties
+ *                              structure which is filled with the input stream properties.
+ * @note    The pFileProperties structure must be allocated and later de-allocated
+ *          by the application. The function must be called in the opened state.
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getInputFileProperties(M4MCS_Context pContext,
+                                        M4VIDEOEDITING_ClipProperties *pFileProperties);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_setOutputParams(M4MCS_Context pContext, M4MCS_OutputParams* pParams);
+ * @brief   Set the MCS video output parameters.
+ * @note    Must be called after M4MCS_open. Must be called before M4MCS_step.
+ * @param   pContext            (IN) MCS context
+ * @param   pParams             (IN/OUT) Transcoding parameters
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ * @return  M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263 : Output video frame size parameter is
+ *                                                          incompatible with H263 encoding
+ * @return  M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263 : Output video frame rate parameter is
+ *                                                          incompatible with H263 encoding
+ * @return  M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT     : Undefined output video format parameter
+ * @return  M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE : Undefined output video frame size
+ * @return  M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE : Undefined output video frame rate
+ * @return  M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT : Undefined output audio format parameter
+ * @return  M4MCS_ERR_DURATION_IS_NULL : Specified output parameters define a null duration stream
+ *                                        (no audio and video)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_setOutputParams(M4MCS_Context pContext, M4MCS_OutputParams* pParams);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_setEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates)
+ * @brief   Set the values of the encoding parameters
+ * @note    Must be called before M4MCS_checkParamsAndStart().
+ * @param   pContext           (IN) MCS context
+ * @param   pRates             (IN) Transcoding parameters
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ * @return  M4MCS_ERR_AUDIOBITRATE_TOO_HIGH: Audio bitrate too high (we limit to 96 kbps)
+ * @return  M4MCS_ERR_AUDIOBITRATE_TOO_LOW: Audio bitrate is too low (16 kbps min for aac,
+ *                                           12.2 for amr, 8 for mp3)
+ * @return  M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Begin cut and End cut are equal
+ * @return  M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION: Begin cut time is larger than
+ *                                                     the input clip duration
+ * @return  M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT: End cut time is smaller than begin cut time
+ * @return  M4MCS_ERR_MAXFILESIZE_TOO_SMALL: Not enough space to store whole output
+ *                                            file at given bitrates
+ * @return  M4MCS_ERR_VIDEOBITRATE_TOO_HIGH: Video bitrate too high (we limit to 800 kbps)
+ * @return  M4MCS_ERR_VIDEOBITRATE_TOO_LOW: Video bitrate too low
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_setEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getExtendedEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates)
+ * @brief   Get the extended values of the encoding parameters
+ * @note    Could be called after M4MCS_setEncodingParams.
+ * @param   pContext           (IN) MCS context
+ * @param   pRates             (OUT) Transcoding parameters
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ * @return  M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Encoding settings would produce a
+ *                                              null duration clip, so encoding is impossible
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getExtendedEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_checkParamsAndStart(M4MCS_Context pContext)
+ * @brief   Check the validity of the encoding parameters and start the transcoding.
+ * @note    Must be called after M4MCS_setEncodingParams() and before M4MCS_step().
+ * @param   pContext           (IN) MCS context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ * @return  M4MCS_ERR_AUDIOBITRATE_TOO_HIGH: Audio bitrate too high (we limit to 96 kbps)
+ * @return  M4MCS_ERR_AUDIOBITRATE_TOO_LOW: Audio bitrate is too low (16 kbps min for aac,
+ *                                           12.2 for amr, 8 for mp3)
+ * @return  M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Begin cut and End cut are equal
+ * @return  M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION: Begin cut time is larger than
+ *                                                    the input clip duration
+ * @return  M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT: End cut time is smaller than begin cut time
+ * @return  M4MCS_ERR_MAXFILESIZE_TOO_SMALL: Not enough space to store whole output
+ *                                            file at given bitrates
+ * @return  M4MCS_ERR_VIDEOBITRATE_TOO_HIGH: Video bitrate too high (we limit to 800 kbps)
+ * @return  M4MCS_ERR_VIDEOBITRATE_TOO_LOW:  Video bitrate too low
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_checkParamsAndStart(M4MCS_Context pContext);
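
Putting the pieces above together, a hedged sketch of the nominal synchronous call sequence: init, open, set the output and encoding parameters, check and start, step until the transcoding-done warning, then close and clean up. The helper name, the 3GPP file type constant and the reduced error handling are assumptions; the OSAL file pointer structures are expected to be filled by the caller.

static M4OSA_ERR sampleTranscode(M4OSA_Void *pFileIn, M4OSA_Void *pFileOut,
                                 M4OSA_Void *pTempFile,
                                 M4OSA_FileReadPointer *pFileReadPtr,
                                 M4OSA_FileWriterPointer *pFileWritePtr,
                                 M4MCS_OutputParams *pOutputParams,
                                 M4MCS_EncodingParams *pEncodingParams)
{
    M4MCS_Context ctx = M4OSA_NULL;
    M4OSA_UInt8   progress = 0;
    M4OSA_ERR     err;

    err = M4MCS_init(&ctx, pFileReadPtr, pFileWritePtr);
    if (M4NO_ERROR != err) return err;

    err = M4MCS_open(ctx, pFileIn, M4VIDEOEDITING_kFileType_3GPP /* assumed name */,
                     pFileOut, pTempFile);
    if (M4NO_ERROR != err) { M4MCS_abort(ctx); return err; }

    err = M4MCS_setOutputParams(ctx, pOutputParams);
    if (M4NO_ERROR != err) { M4MCS_abort(ctx); return err; }

    err = M4MCS_setEncodingParams(ctx, pEncodingParams);
    if (M4NO_ERROR != err) { M4MCS_abort(ctx); return err; }

    err = M4MCS_checkParamsAndStart(ctx);
    if (M4NO_ERROR != err) { M4MCS_abort(ctx); return err; }

    /* Step until M4MCS_step() stops returning M4NO_ERROR; other warnings
     * (e.g. M4MCS_WAR_PICTURE_AUTO_RESIZE for still pictures) are not handled here. */
    do {
        err = M4MCS_step(ctx, &progress);
    } while (M4NO_ERROR == err);

    if (M4MCS_WAR_TRANSCODING_DONE != err) { M4MCS_abort(ctx); return err; }

    err = M4MCS_close(ctx);
    if (M4NO_ERROR != err) { M4MCS_abort(ctx); return err; }

    return M4MCS_cleanUp(ctx);
}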
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerExternalVideoDecoder(M4MCS_Context pContext,
+ *                                     M4VD_VideoType decoderType,
+ *                                     M4VD_Interface*    pDecoderInterface,
+ *                                     M4OSA_Void* pUserData)
+ * @brief    Registers an external Video decoder
+ * @note
+ * @param   pContext           (IN) MCS context
+ * @param   decoderType        (IN) Type of decoder (MPEG4 ...)
+ * @param   pDecoderInterface  (IN) Decoder interface
+ * @param   pUserData          (IN) Pointer to user data passed to the external decoder
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerExternalVideoDecoder(M4MCS_Context pContext,
+                                     M4VD_VideoType decoderType,
+                                     M4VD_Interface*    pDecoderInterface,
+                                     M4OSA_Void* pUserData);
+
+M4OSA_ERR M4MCS_registerExternalVideoEncoder(M4MCS_Context pContext,
+                                     M4VE_EncoderType encoderType,
+                                     M4VE_Interface*    pEncoderInterface,
+                                     M4OSA_Void* pUserData);
+
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_registerExternalAudioDecoder(M4MCS_Context pContext,
+ *                                    M4AD_Type decoderType,
+ *                                    M4AD_Interface *pDecoderInterface);
+ * @brief    This function will register a specific external audio decoder.
+ * @note    According to the decoderType, this function stores the decoder interface
+ *          in the internal context.
+ * @param    pContext           (IN/OUT) MCS context.
+ * @param    decoderType        (IN) Audio decoder type
+ * @param    pDecoderInterface  (IN) Audio decoder interface.
+ * @return   M4NO_ERROR:        No error
+ * @return   M4ERR_PARAMETER:   A parameter is null, or the decoder type is invalid (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_registerExternalAudioDecoder(M4MCS_Context pContext,
+                                    M4AD_Type decoderType,
+                                    M4AD_Interface *pDecoderInterface);
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4MCS_registerExternalAudioEncoder(M4MCS_Context pContext,
+ *                                    M4ENCODER_AudioFormat mediaType,
+ *                                    M4ENCODER_AudioGlobalInterface *pEncGlobalInterface)
+ * @brief    This function will register a specific external audio encoder.
+ * @note    According to the Mediatype, this function stores the encoder interface
+ *          in the internal context.
+ * @param    pContext:                (IN) Execution context.
+ * @param    mediaType:                (IN) The media type.
+ * @param    pEncGlobalInterface:    (IN) The encoder interface functions.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext or pEncGlobalInterface is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerExternalAudioEncoder(M4MCS_Context pContext,
+                                    M4ENCODER_AudioFormat MediaType,
+                                    M4ENCODER_AudioGlobalInterface *pEncGlobalInterface);
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getExifInfo(M4MCS_Context pContext);
+ * @brief    Retrieve the EXIF tag information from a still picture
+ * @note    This function fills an EXIF tag structure.
+ *          The exifTags structure must be allocated/deallocated by the user;
+ *          its members will point to internal SPE information, so the user should not
+ *          try to modify or deallocate them.
+ * @param    pContext            (IN) MCS context
+ * @param    exifTags            (IN/OUT) Pointer to the EXIF tag structure to fill
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getExifInfo(M4MCS_Context pContext, M4MCS_ExifInfos* exifTags);
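
A small sketch of the calling pattern described above, with the structure allocated by the caller (here on the stack) and its members treated as read-only; the helper name is hypothetical.

static M4OSA_ERR sampleReadExif(M4MCS_Context pContext)
{
    M4MCS_ExifInfos exifTags;   /* allocated (and released) by the user, as required above */
    M4OSA_ERR err;

    err = M4MCS_getExifInfo(pContext, &exifTags);
    if (M4NO_ERROR == err) {
        /* Members point to internal SPE data: read them, never modify or free them,
         * e.g. exifTags.PixelXDimension, exifTags.PixelYDimension, exifTags.Orientation. */
    }
    return err;
}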
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerAudioEncoderExtended(M4MCS_Context pContext,
+ *                                     M4ENCODER_AudioFormat encoderType,
+ *                                     M4ENCODER_AudioGlobalInterface    *pEncoderInterface,
+ *                                     M4OSA_Void* pUserData);
+ * @brief    Registers an external Audio Encoder
+ * @note This differs from the external audio encoder registration in order to cope with
+ *       the specific requirements of the OMX codec implementation.
+ * @param  pContext           (IN) MCS context
+ * @param  encoderType        (IN) Type of encoder
+ * @param  pEncoderInterface  (IN) Encoder interface to OMX shell function
+ * @param  pUserData          (IN) Pointer to user data passed to the external encoder
+ *                                 (OMX Core Context)
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerAudioEncoderExtended(M4MCS_Context pContext,
+                                     M4ENCODER_AudioFormat encoderType,
+                                     M4ENCODER_AudioGlobalInterface    *pEncoderInterface,
+                                     M4OSA_Void* pUserData);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerVideoDecoderExtended(M4MCS_Context    context,
+                                     M4VD_VideoType        decoderType,
+                                     M4DECODER_VideoInterface    *pDecoderInterface,
+                                     M4OSA_Void* pUserData)
+ * @brief    Registers an external Video decoder
+ * @note This differs from the external video decoder registration in order to cope with
+ *       the specific requirements of the OMX codec implementation.
+ * @param  pContext           (IN) MCS context
+ * @param  decoderType        (IN) Type of decoder (MPEG4 ...)
+ * @param  pVidDecoderInterface  (IN) Decoder interface of type 'M4DECODER_VideoInterface'
+ * @param  pUserData          (IN) Pointer to user data passed to the external decoder
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerVideoDecoderExtended(M4MCS_Context    context,
+                                     M4VD_VideoType        decoderType,
+                                     M4OSA_Context    pVidDecoderInterface,
+                                     M4OSA_Void* pUserData);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerVideoEncoderExtended()
+ * @brief    Registers an external Video encoder
+ * @note This differs from the external video encoder registration in order to cope with
+ *       the specific requirements of the OMX codec implementation,
+ *       so M4ENCODER_GlobalInterface is used instead of M4VE_Interface.
+ * @param  pContext           (IN) MCS context
+ * @param  encoderType        (IN) Type of encoder (MPEG4 ...)
+ * @param  pEncoderInterface  (IN) Encoder interface of type 'M4ENCODER_VideoInterface'
+ * @param  pUserData          (IN) Pointer to user data passed to the external encoder
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerVideoEncoderExtended(M4MCS_Context pContext,
+                                     M4VE_EncoderType encoderType,
+                                     M4OSA_Context    pEncoderInterface,
+                                     M4OSA_Void* pUserData);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerAudioDecoderExtended(M4MCS_Context pContext,
+                                     M4AD_Type decoderType,
+                                     M4AD_Interface    *pDecoderInterface,
+                                     M4OSA_Void* pUserData);
+ * @brief    Registers an external Audio Decoder
+ * @note This differs from the external audio decoder registration in order to cope with
+ *       the specific requirements of the OMX codec implementation.
+ * @param  pContext           (IN) MCS context
+ * @param  decoderType        (IN) Type of decoder
+ * @param  pDecoderInterface  (IN) Decoder interface to OMX shell function
+ * @param  pUserData          (IN) Pointer to user data passed to the external decoder
+ *                                 (OMX Core Context)
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerAudioDecoderExtended(M4MCS_Context pContext,
+                                     M4AD_Type decoderType,
+                                     M4AD_Interface    *pDecoderInterface,
+                                     M4OSA_Void* pUserData);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __M4MCS_API_H__ */
+
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_ErrorCodes.h b/libvideoeditor/vss/mcs/inc/M4MCS_ErrorCodes.h
new file mode 100755
index 0000000..1c66a75
--- /dev/null
+++ b/libvideoeditor/vss/mcs/inc/M4MCS_ErrorCodes.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ *************************************************************************
+ * @file   M4MCS_ErrorCodes.h
+ * @brief  MCS error code definitions (Media Conversion Service)
+ * @note
+ *************************************************************************
+ **/
+
+#ifndef __M4MCS_ErrorCodes_H__
+#define __M4MCS_ErrorCodes_H__
+
+/**
+ *    OSAL basic types and errors */
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+
+/**
+ *    OSAL core ID definitions */
+#include "M4OSA_CoreID.h"
+
+
+/************************************************************************/
+/* Warning codes                                                        */
+/************************************************************************/
+
+/* End of processing, user should now call M4MCS_close() */
+#define M4MCS_WAR_TRANSCODING_DONE            M4OSA_ERR_CREATE( M4_WAR, M4MCS, 0x1)
+/* Mediatype is not supported by the MCS */
+#define M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED    M4OSA_ERR_CREATE( M4_WAR, M4MCS, 0x2)
+/* Indicates that the picture will be automatically resized to fit into the required
+   parameters (file size) */
+#define M4MCS_WAR_PICTURE_AUTO_RESIZE        M4OSA_ERR_CREATE( M4_WAR, M4MCS, 0x3)
+
+/************************************************************************/
+/* Error codes                                                          */
+/************************************************************************/
+
+
+/* ----- OPEN ERRORS ----- */
+
+/* The input file contains no supported stream (may be a corrupted file) */
+#define M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM   M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x01)
+/* The input file is invalid/corrupted */
+#define M4MCS_ERR_INVALID_INPUT_FILE                        M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x02)
+
+
+/* ----- SET OUTPUT PARAMS ERRORS ----- */
+
+/* The output video format parameter is undefined */
+#define M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT             M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x10)
+/* The output video frame size parameter is undefined */
+#define M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE         M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x11)
+/* The output video frame rate parameter is undefined */
+#define M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE         M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x12)
+/* The output audio format parameter is undefined */
+#define M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT             M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x13)
+/* The output video frame size parameter is incompatible with H263 encoding */
+#define M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263         M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x14)
+/* The output video frame rate parameter is incompatible with H263 encoding
+   (It can't happen in current version of MCS!) */
+#define M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263         M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x15)
+/* A null clip duration has been computed, which is invalid (should never happen!) */
+#define M4MCS_ERR_DURATION_IS_NULL                          M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x16)
+/* The .mp4 container cannot handle h263 codec */
+#define M4MCS_ERR_H263_FORBIDDEN_IN_MP4_FILE                M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x17)
+
+
+/* ----- PREPARE DECODERS ERRORS ----- */
+
+/* H263 profiles other than profile 0 are not supported */
+#define M4MCS_ERR_H263_PROFILE_NOT_SUPPORTED                M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x20)
+/* The input file contains an AAC audio track with an invalid sampling frequency
+   (should never happen) */
+#define M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY            M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x21)
+/* The audio conversion (AAC to AMR-NB, or MP3) failed */
+#define M4MCS_ERR_AUDIO_CONVERSION_FAILED                   M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x22)
+
+
+/* ----- SET ENCODING PARAMS ERRORS ----- */
+
+/* Begin cut time is larger than the input clip duration */
+#define M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION            M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x30)
+/* Begin cut and End cut are equal */
+#define M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT                  M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x31)
+/* End cut time is smaller than begin cut time */
+#define M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT            M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x32)
+/* Not enough space to store whole output file at given bitrates */
+#define M4MCS_ERR_MAXFILESIZE_TOO_SMALL                     M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x33)
+/* Video bitrate is too low (avoid ugly video) */
+#define M4MCS_ERR_VIDEOBITRATE_TOO_LOW                      M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x34)
+/* Audio bitrate is too low (16 kbps min for aac, 12.2 for amr, 8 for mp3) */
+#define M4MCS_ERR_AUDIOBITRATE_TOO_LOW                      M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x35)
+/* Video bitrate too high (we limit to 800 kbps) */
+#define M4MCS_ERR_VIDEOBITRATE_TOO_HIGH                     M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x36)
+/* Audio bitrate too high (we limit to 96 kbps) */
+#define M4MCS_ERR_AUDIOBITRATE_TOO_HIGH                     M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x37)
+
+/* ----- OTHERS ERRORS ----- */
+#define M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL                M4OSA_ERR_CREATE( M4_ERR, M4MCS, 0x50)
+#define M4MCS_ERR_NOMORE_SPACE                              M4OSA_ERR_CREATE(M4_ERR, M4MCS, 0x51)
+
+#endif /* __M4MCS_ErrorCodes_H__ */
+
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_InternalConfig.h b/libvideoeditor/vss/mcs/inc/M4MCS_InternalConfig.h
new file mode 100755
index 0000000..d3e75b6
--- /dev/null
+++ b/libvideoeditor/vss/mcs/inc/M4MCS_InternalConfig.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ *************************************************************************
+ * @file   M4MCS_InternalConfig.h
+ * @brief  MCS internal constant settings
+ * @note   This header file is not public
+ *************************************************************************
+ **/
+
+#ifndef __M4MCS_INTERNALCONFIG_H__
+#define __M4MCS_INTERNALCONFIG_H__
+
+
+/**
+ * Definition of max AU size */
+#define M4MCS_AUDIO_MAX_CHUNK_SIZE        7168 /**< With the mp3 encoder and writer the max
+                                                    bitrate is now 320 kbps instead of 128 kbps,
+                                                    so this value has been increased accordingly:
+                                                    ((sizeof(M4OSA_UInt8)*max_channel_number)+3
+                                                    to take a margin (after tests, 2 was not
+                                                    enough))*MAX_PCM_GRANULARITY_SAMPLES */
+                                                    /**< Before: 4000 *//**< Magical */
+
+/**
+ * Video max AU and fragment size */
+#define M4MCS_VIDEO_MIN_COMPRESSION_RATIO   0.8 /**< Magical. Used to define the max AU size */
+#define M4MCS_VIDEO_CHUNK_AU_SIZE_RATIO     1.2 /**< Magical. Used to define the max chunk size */
+
+/**
+ * Various Magicals */
+#define M4MCS_WRITER_AUDIO_STREAM_ID        1
+#define M4MCS_WRITER_VIDEO_STREAM_ID        2
+
+/**
+ * Granularity for audio encoder */
+ /**< minimum number of samples to pass in AMR encoding case */
+#define M4MCS_PCM_AMR_GRANULARITY_SAMPLES 160
+/**< minimum number of samples to pass in AAC encoding case */
+#define M4MCS_PCM_AAC_GRANULARITY_SAMPLES 1024
+/**< minimum number of samples to pass in MP3 encoding case */
+#define M4MCS_PCM_MP3_GRANULARITY_SAMPLES 576
+
+#define M4MCS_AUDIO_MAX_AU_SIZE           1024  /**< With the mp3 encoder and writer this value
+                                                is not used anymore: the max AU size is now
+                                                computed dynamically according to the number of
+                                                channels, the max PCM granularity samples
+                                                and a margin. */
+                                                /**< Before: 1024 *//**< Magical */
+/**
+ * Writer file and moov size estimation */
+#define M4MCS_MOOV_OVER_FILESIZE_RATIO    1.04  /**< Magical: the moov size is less than 4%
+                                                     of the file size on average */
+
+/**
+ * If the 3gp file does not contain an STSS table (no RAP frames),
+   jump backward by a specified limit */
+#define M4MCS_NO_STSS_JUMP_POINT          40000 /**< 40 s */
+
+#endif /* __M4MCS_INTERNALCONFIG_H__ */
+
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_InternalFunctions.h b/libvideoeditor/vss/mcs/inc/M4MCS_InternalFunctions.h
new file mode 100755
index 0000000..422e40d
--- /dev/null
+++ b/libvideoeditor/vss/mcs/inc/M4MCS_InternalFunctions.h
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file    M4MCS_InternalFunctions.h
+ * @brief   This file contains all functions declarations internal
+ *          to the MCS.
+ *************************************************************************
+ */
+
+#ifndef __M4MCS_INTERNALFUNCTIONS_H__
+#define __M4MCS_INTERNALFUNCTIONS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "M4VPP_API.h"
+#include "M4ENCODER_common.h"
+
+/**
+ **************************************************************************
+ * M4OSA_ERR M4MCS_intApplyVPP( M4VPP_Context pContext,
+ *                              M4VIFI_ImagePlane* pPlaneIn,
+ *                              M4VIFI_ImagePlane* pPlaneOut)
+ * @brief   Do the video rendering and the resize (if needed)
+ * @note    It is called by the video encoder
+ * @param   pContext    (IN)     VPP context, which actually is the MCS
+ *                               internal context in our case
+ * @param   pPlaneIn    (IN)     Contains the image
+ * @param   pPlaneOut   (IN/OUT) Pointer to an array of 3 planes that will
+ *                               contain the output YUV420 image
+ * @return  M4NO_ERROR:                 No error
+ * @return  ERR_MCS_VIDEO_DECODE_ERROR: the video decoding failed
+ * @return  ERR_MCS_RESIZE_ERROR:       the resizing failed
+ * @return  Any error returned by an underlying module
+ **************************************************************************
+ */
+M4OSA_ERR M4MCS_intApplyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+                            M4VIFI_ImagePlane* pPlaneOut);
+
+/**
+ **************************************************************************
+ * M4OSA_ERR M4MCS_subscribeMediaAndCodec(M4MCS_Context pContext);
+ * @brief    This function registers the reader, decoders, writers and encoders
+ *           in the MCS.
+ * @note
+ * @param    pContext:    (IN) Execution context.
+ * @return   M4NO_ERROR:        there is no error
+ * @return   M4ERR_PARAMETER    pContext is NULL
+ **************************************************************************
+ */
+M4OSA_ERR M4MCS_subscribeMediaAndCodec(M4MCS_Context pContext);
+
+/**
+ **************************************************************************
+ * @brief    Clear the encoder, decoder, reader and writer interface tables
+ * @param    pContext            (IN/OUT) MCS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    The context is null
+ **************************************************************************
+ */
+M4OSA_ERR   M4MCS_clearInterfaceTables(M4MCS_Context pContext);
+
+/**
+ **************************************************************************
+ * M4OSA_ERR   M4MCS_registerWriter(M4MCS_Context pContext,
+ *                                  M4WRITER_OutputFileType MediaType,
+ *                                  M4WRITER_GlobalInterface *pWtrGlobalInterface,
+ *                                  M4WRITER_DataInterface *pWtrDataInterface)
+ * @brief   This function will register a specific file format writer.
+ * @note    According to the Mediatype, this function stores the writer interfaces
+ *          in the internal context.
+ * @param   pContext:    (IN) Execution context.
+ * @return  M4NO_ERROR:         there is no error
+ * @return  M4ERR_PARAMETER     pContext,pWtrGlobalInterface or pWtrDataInterface
+ *                              is M4OSA_NULL (debug only), or invalid MediaType
+ **************************************************************************
+ */
+M4OSA_ERR   M4MCS_registerWriter(
+                        M4MCS_Context pContext,
+                        M4WRITER_OutputFileType MediaType,
+                        M4WRITER_GlobalInterface* pWtrGlobalInterface,
+                        M4WRITER_DataInterface* pWtrDataInterface);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4MCS_registerVideoEncoder( M4MCS_Context pContext,
+ *                                         M4ENCODER_Format MediaType,
+ *                                         M4ENCODER_GlobalInterface *pEncGlobalInterface)
+ * @brief   This function will register a specific video encoder.
+ * @note    According to the Mediatype, this function will store in the internal
+ *          context the encoder context.
+ * @param   pContext:    (IN) Execution context.
+ * @return  M4NO_ERROR:         there is no error
+ * @return  M4ERR_PARAMETER     pContext or pEncGlobalInterface is
+ *                              M4OSA_NULL (debug only), or invalid MediaType
+ ******************************************************************************
+ */
+M4OSA_ERR   M4MCS_registerVideoEncoder(
+                        M4MCS_Context pContext,
+                        M4ENCODER_Format MediaType,
+                        M4ENCODER_GlobalInterface *pEncGlobalInterface);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4MCS_registerAudioEncoder(  M4MCS_Context pContext,
+ *                                          M4ENCODER_AudioFormat mediaType,
+ *                                          M4ENCODER_AudioGlobalInterface *pEncGlobalInterface)
+ * @brief   This function will register a specific audio encoder.
+ * @note    According to the Mediatype, this function will store in the internal
+ *          context the encoder context.
+ * @param   pContext:               (IN)   Execution context.
+ * @param   mediaType:              (IN)   The media type.
+ * @param   pEncGlobalInterface:    (OUT)  The encoder interface functions.
+ * @return  M4NO_ERROR:       there is no error
+ * @return  M4ERR_PARAMETER:  pContext or pEncGlobalInterface is
+ *                              M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR   M4MCS_registerAudioEncoder(
+                        M4MCS_Context pContext,
+                        M4ENCODER_AudioFormat MediaType,
+                        M4ENCODER_AudioGlobalInterface *pEncGlobalInterface);
+
+/**
+ **************************************************************************
+ * @brief    Register reader.
+ * @param    pContext            (IN/OUT) MCS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ **************************************************************************
+ */
+M4OSA_ERR   M4MCS_registerReader(   M4MCS_Context pContext,
+                                    M4READER_MediaType mediaType,
+                                    M4READER_GlobalInterface *pRdrGlobalInterface,
+                                    M4READER_DataInterface *pRdrDataInterface);
+
+/**
+ **************************************************************************
+ * @brief   Register video decoder
+ * @param   pContext             (IN/OUT) MCS context.
+ * @param   decoderType          (IN) Decoder type
+ * @param   pDecoderInterface    (IN) Decoder interface.
+ * @return  M4NO_ERROR:            No error
+ * @return  M4ERR_PARAMETER:    A parameter is null (in DEBUG only), or the
+ *                              decoder type is invalid
+ **************************************************************************
+ */
+M4OSA_ERR   M4MCS_registerVideoDecoder( M4MCS_Context pContext,
+                                        M4DECODER_VideoType decoderType,
+                                        M4DECODER_VideoInterface *pDecoderInterface);
+
+/**
+ ************************************************************************
+ * @brief   Register audio decoder
+ * @note    This function is used internally by the MCS to register the core audio decoders.
+ * @param   pContext           (IN/OUT) MCS context.
+ * @param   decoderType        (IN)     Audio decoder type
+ * @param   pDecoderInterface  (IN)     Audio decoder interface.
+ * @return  M4NO_ERROR:        No error
+ * @return  M4ERR_PARAMETER:   A parameter is null, or the decoder type is invalid(in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_registerAudioDecoder(M4MCS_Context pContext, M4AD_Type decoderType,
+                                        M4AD_Interface *pDecoderInterface);
+
+/**
+ ************************************************************************
+ * @brief   Unregister writer
+ * @param   pContext            (IN/OUT) MCS context.
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_unRegisterAllWriters(M4MCS_Context pContext);
+
+/**
+ ************************************************************************
+ * @brief   Unregister the encoders
+ * @param   pContext            (IN/OUT) MCS context.
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_unRegisterAllEncoders(M4MCS_Context pContext);
+
+/**
+ ************************************************************************
+ * @brief   Unregister reader
+ * @param   pContext            (IN/OUT) MCS context.
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_unRegisterAllReaders(M4MCS_Context pContext);
+
+/**
+ ************************************************************************
+ * @brief   Unregister the decoders
+ * @param   pContext            (IN/OUT) MCS context.
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_unRegisterAllDecoders(M4MCS_Context pContext);
+
+/**
+ ************************************************************************
+ * @brief   Set current writer
+ * @param   pContext            (IN/OUT) MCS context.
+ * @param   mediaType           (IN) Media type.
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return  M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED:  Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_setCurrentWriter( M4MCS_Context pContext,
+                                    M4VIDEOEDITING_FileType mediaType);
+
+/**
+ ************************************************************************
+ * @brief    Set a video encoder
+ * @param    pContext            (IN/OUT) MCS context.
+ * @param    MediaType           (IN) Encoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_setCurrentVideoEncoder(   M4MCS_Context pContext,
+                                            M4VIDEOEDITING_VideoFormat mediaType);
+
+/**
+ ************************************************************************
+ * @brief    Set an audio encoder
+ * @param    context            (IN/OUT) MCS context.
+ * @param    MediaType        (IN) Encoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_setCurrentAudioEncoder(   M4MCS_Context pContext,
+                                            M4VIDEOEDITING_AudioFormat mediaType);
+
+/**
+ ************************************************************************
+ * @brief    Set current reader
+ * @param    pContext            (IN/OUT) MCS context.
+ * @param    mediaType           (IN) Media type.
+ * @return    M4NO_ERROR:        No error
+ * @return    M4ERR_PARAMETER:   A parameter is null (in DEBUG only)
+ * @return    M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_setCurrentReader( M4MCS_Context pContext,
+                                    M4VIDEOEDITING_FileType mediaType);
+
+/**
+ ************************************************************************
+ * @brief    Set a video decoder
+ * @param    pContext           (IN/OUT) MCS context.
+ * @param    decoderType        (IN) Decoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:       A parameter is null (in DEBUG only)
+ * @return    M4WAR_MCS_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_setCurrentVideoDecoder(   M4MCS_Context pContext,
+                                            M4_StreamType mediaType);
+
+/**
+ ************************************************************************
+ * @brief    Set an audio decoder
+ * @param    context            (IN/OUT) MCS context.
+ * @param    decoderType        (IN) Decoder type
+ * @return    M4NO_ERROR:         No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_setCurrentAudioDecoder(M4MCS_Context pContext, M4_StreamType mediaType);
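+
+/* Illustrative sketch (assumption, not in the original header): the setCurrent* functions
+ * are typically called once the input clip has been parsed and the output settings are
+ * known. The field names used below (InputFileType, m_basicProperties, OutputFileType)
+ * come from the internal context and settings structures and are shown for illustration
+ * only.
+ *
+ *   err = M4MCS_setCurrentReader(pContext, pC->InputFileType);
+ *   if (M4NO_ERROR == err)
+ *       err = M4MCS_setCurrentVideoDecoder(pContext,
+ *                 pC->pReaderVideoStream->m_basicProperties.m_streamType);
+ *   if (M4NO_ERROR == err)
+ *       err = M4MCS_setCurrentWriter(pContext, pSettings->OutputFileType);
+ */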
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intCheckAudioEffects(M4MCS_InternalContext* pContext)
+ * @brief    Check if an effect has to be applied currently
+ * @note     It is called by the stepEncoding function
+ * @param    pContext    (IN)   MCS internal context
+ * @return   M4NO_ERROR:        No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_intCheckAudioEffects(M4MCS_InternalContext* pC);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_editAudioEffectFct_FadeIn()
+ * @brief    Apply audio effect FadeIn to pPCMdata
+ * @param    pC           (IN/OUT) Internal edit context
+ * @param    pPCMdata     (IN/OUT) Input and Output PCM audio data
+ * @param    uiPCMsize    (IN)     Size of pPCMdata
+ * @param    pProgress    (IN)     Effect progress
+ * @return   M4NO_ERROR:           No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_editAudioEffectFct_FadeIn(  M4OSA_Void *pFunctionContext,
+                                            M4OSA_Int16 *pPCMdata,
+                                            M4OSA_UInt32 uiPCMsize,
+                                            M4MCS_ExternalProgress *pProgress);
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_editAudioEffectFct_FadeOut()
+ * @brief    Apply audio effect FadeOut to pPCMdata
+ * @param    pC           (IN/OUT) Internal edit context
+ * @param    pPCMdata     (IN/OUT) Input and Output PCM audio data
+ * @param    uiPCMsize    (IN)     Size of pPCMdata
+ * @param    pProgress    (IN)     Effect progress
+ * @return   M4NO_ERROR:           No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_editAudioEffectFct_FadeOut( M4OSA_Void *pFunctionContext,
+                                            M4OSA_Int16 *pPCMdata,
+                                            M4OSA_UInt32 uiPCMsize,
+                                            M4MCS_ExternalProgress *pProgress);
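+
+/* Minimal sketch (assumption, not part of the original header) of what a linear fade
+ * reduces to: each 16-bit PCM sample is scaled by a gain derived from the effect
+ * progress. The 0..1000 progress range and the helper name are illustrative only.
+ *
+ *   static void applyLinearFadeIn(M4OSA_Int16 *pPCMdata, M4OSA_UInt32 nbSamples,
+ *                                 M4OSA_UInt32 progressPerMille)
+ *   {
+ *       M4OSA_UInt32 i;
+ *       for (i = 0; i < nbSamples; i++)
+ *       {
+ *           // scale towards full volume as the progress goes from 0 to 1000
+ *           pPCMdata[i] = (M4OSA_Int16)(((M4OSA_Int32)pPCMdata[i]
+ *                             * (M4OSA_Int32)progressPerMille) / 1000);
+ *       }
+ *   }
+ */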
+
+#ifdef TIMESCALE_BUG
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_intParseVideoDSI( )
+ * @brief :  This function parses video DSI and changes writer vop time increment resolution
+ * @note  :  It also calculates the number of bits on which the vop_time_increment is coded
+ *           in the input stream
+ * @param
+ * @return
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_intParseVideoDSI(M4MCS_InternalContext* pC);
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_intChangeAUVideoTimescale( )
+ * @brief
+ * @note
+ * @param    pC           (IN/OUT) Internal edit context
+ * @return
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_intChangeAUVideoTimescale(M4MCS_InternalContext* pC);
+#endif /* TIMESCALE_BUG */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __M4MCS_INTERNALFUNCTIONS_H__ */
+
diff --git a/libvideoeditor/vss/mcs/inc/M4MCS_InternalTypes.h b/libvideoeditor/vss/mcs/inc/M4MCS_InternalTypes.h
new file mode 100755
index 0000000..9611fcf
--- /dev/null
+++ b/libvideoeditor/vss/mcs/inc/M4MCS_InternalTypes.h
@@ -0,0 +1,635 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file   M4MCS_InternalTypes.h
+ * @brief  MCS internal types and structures definitions
+ * @note   This header file is not public
+ *************************************************************************
+ **/
+
+#ifndef __M4MCS_INTERNALTYPES_H__
+#define __M4MCS_INTERNALTYPES_H__
+
+/**
+ *    MCS public API and types */
+#include "M4MCS_API.h"
+#include "M4MCS_ErrorCodes.h"
+
+#include "NXPSW_CompilerSwitches.h"
+
+/** Determine absolute value of a. */
+#define M4MCS_ABS(a)               ( ( (a) < (0) ) ? (-(a)) : (a) )
+
+
+#define Y_PLANE_BORDER_VALUE    0x00
+#define U_PLANE_BORDER_VALUE    0x80
+#define V_PLANE_BORDER_VALUE    0x80
+
+
+/**
+ *    Internally used modules */
+#include "M4READER_3gpCom.h"        /**< Read 3GPP file     */
+#include "M4DECODER_Common.h"       /**< Decode video       */
+#include "M4VIFI_FiltersAPI.h"      /**< Video resize       */
+#include "M4AD_Common.h"            /**< Decoder audio      */
+#include "SSRC.h"                   /**< SSRC               */
+#include "From2iToMono_16.h"        /**< Stereo to Mono     */
+#include "MonoTo2I_16.h"            /**< Mono to Stereo     */
+#include "M4ENCODER_AudioCommon.h"  /**< Encode audio       */
+#include "M4WRITER_common.h"        /**< Writer common interface */
+#include "M4ENCODER_common.h"
+
+/**
+ *  Instead of including the AAC core properties header, the needed type
+ *  AAC_DEC_STREAM_PROPS is redefined here.
+ *  If an external AAC decoder is used, this type will have to be made public.
+ */
+
+/**
+ ******************************************************************************
+ * struct AAC_DEC_STREAM_PROPS
+ * @brief AAC Stream properties
+ * @note  aNumChan and aSampFreq reflect the values found while parsing the bitstream,
+ *        even when the user parameters differ. The user parameters drive the output
+ *        behaviour of the decoder, whereas parsing relies on the bitstream properties.
+ ******************************************************************************
+ */
+typedef struct {
+  M4OSA_Int32 aAudioObjectType;     /**< Audio object type of the stream - in fact
+                                         the type found in the Access Unit parsed */
+  M4OSA_Int32 aNumChan;             /**< number of channels (=1(mono) or =2(stereo))
+                                         as indicated by input bitstream*/
+  M4OSA_Int32 aSampFreq;            /**< sampling frequency in Hz */
+  M4OSA_Int32 aExtensionSampFreq;   /**< extended sampling frequency in Hz, = 0 is
+                                         no extended frequency */
+  M4OSA_Int32 aSBRPresent;          /**< presence=1/absence=0 of SBR */
+  M4OSA_Int32 aPSPresent;           /**< presence=1/absence=0 of PS */
+  M4OSA_Int32 aMaxPCMSamplesPerCh;  /**< max number of PCM samples per channel */
+} AAC_DEC_STREAM_PROPS;
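+
+/* Illustrative note (assumption): aNumChan and aSampFreq, as parsed from the bitstream,
+ * are typically compared against the requested output channel count and sampling rate to
+ * decide whether the stereo/mono converters and the SSRC stage must be inserted in the
+ * audio transcoding chain. */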
+
+/**
+ ******************************************************************************
+ * @brief       Codec registration uses the same interfaces as in the VPS and VES,
+ *              so less mapping is required toward the MCS API types
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4WRITER_GlobalInterface*  pGlobalFcts;    /**< open, close, setoption, etc. functions */
+    M4WRITER_DataInterface*    pDataFcts;      /**< data manipulation functions */
+} M4MCS_WriterInterface;
+
+/**
+ ******************************************************************************
+ * enum            M4MCS_States
+ * @brief        Main state machine of the MCS.
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4MCS_kState_CREATED,           /**< M4MCS_init has been called                */
+    M4MCS_kState_OPENED,            /**< M4MCS_open has been called                */
+    M4MCS_kState_SET,               /**< All mandatory parameters have been set    */
+    M4MCS_kState_READY,             /**< All optional parameters have been set     */
+    M4MCS_kState_BEGINVIDEOJUMP,    /**< Must jump to the Iframe before the begin cut */
+    M4MCS_kState_BEGINVIDEODECODE,  /**< Must decode up to the begin cut        */
+    M4MCS_kState_PROCESSING,        /**< Step can be called                        */
+    M4MCS_kState_PAUSED,            /**< Paused, Resume can be called            */
+    M4MCS_kState_FINISHED,          /**< Transcoding is finished                */
+    M4MCS_kState_CLOSED             /**< Output file has been created            */
+} M4MCS_States;
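+
+/* Nominal progression, as suggested by the state comments above (illustrative summary):
+ * CREATED -> OPENED -> SET -> READY -> [BEGINVIDEOJUMP -> BEGINVIDEODECODE, when a begin
+ * cut is set] -> PROCESSING -> FINISHED -> CLOSED, with PAUSED entered from PROCESSING on
+ * pause and left on resume. */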
+
+/**
+ ******************************************************************************
+ * enum            M4MCS_StreamState
+ * @brief        State of a media stream encoding (audio or video).
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4MCS_kStreamState_NOSTREAM  = 0,    /**< No stream present                    */
+    M4MCS_kStreamState_STARTED   = 1,    /**< The stream encoding is in progress */
+    M4MCS_kStreamState_FINISHED  = 2    /**< The stream has finished encoding    */
+} M4MCS_StreamState;
+
+#ifdef TIMESCALE_BUG
+/**
+ ******************************************************************************
+ * enum            M4MCS_VolParse
+ * @brief        VOL parsing results needed for VOP parsing
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4OSA_UInt8 video_object_layer_shape;
+    M4OSA_UInt8 sprite_enable;
+    M4OSA_UInt8 reduced_resolution_vop_enable;
+    M4OSA_UInt8 scalability;
+    M4OSA_UInt8 enhancement_type;
+    M4OSA_UInt8 complexity_estimation_disable;
+    M4OSA_UInt8 interlaced;
+    M4OSA_UInt8 sprite_warping_points;
+    M4OSA_UInt8 sprite_brightness_change;
+    M4OSA_UInt8 quant_precision;
+
+} M4MCS_VolParse;
+#endif
+
+/**
+ ******************************************************************************
+ * enum            anonymous enum
+ * @brief        enum to keep track of the encoder state
+ ******************************************************************************
+ */
+enum
+{
+    M4MCS_kNoEncoder,
+    M4MCS_kEncoderClosed,
+    M4MCS_kEncoderStopped,
+    M4MCS_kEncoderRunning
+};
+
+/**
+ ******************************************************************************
+ * structure    NSWAVC_bitStream_t_MCS
+ * @brief       Bitstream writer context used by the MCS H.264 bitstream rewriting code
+ * @note        This structure stores the 32-bit write cache and the output buffer state
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4OSA_UInt32    bitPos;
+                 /* Number of bits used so far */
+
+    M4OSA_UInt8   *streamBuffer;
+                /* Bitstream Buffer */
+
+    M4OSA_UInt32    byteCnt;
+                /* Number of Bytes written in Bitstream buffer*/
+
+    M4OSA_UInt32    currBuff;
+                /* Current 32-bit write cache, holds up to 4 bytes of bitstream */
+
+    M4OSA_UInt8   prevByte;
+                /* Previous byte written in the buffer */
+
+    M4OSA_UInt8   prevPrevByte;
+                /* Previous to previous byte written in the buffer */
+
+}NSWAVC_bitStream_t_MCS;
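+
+/* Illustrative usage (assumption, not in the original file): this writer context is
+ * driven by NSWAVCMCS_initBitstream() and NSWAVCMCS_putBits(), defined in M4MCS_API.c.
+ * The buffer size and written values below are placeholders.
+ *
+ *   NSWAVC_bitStream_t_MCS bs;
+ *   M4OSA_UInt8 buffer[64];
+ *   NSWAVCMCS_initBitstream(&bs);
+ *   bs.streamBuffer = buffer;
+ *   NSWAVCMCS_putBits(&bs, 0x67, 8);   // e.g. an SPS NAL header byte
+ *   NSWAVCMCS_putBits(&bs, 0, 5);
+ *
+ * byteCnt tracks the bytes flushed to streamBuffer; bytes are emitted once 32 bits have
+ * accumulated in the currBuff cache. */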
+
+#define _MAXnum_slice_groups  8
+#define _MAXnum_ref_frames_in_pic_order_cnt_cycle  256
+
+typedef struct
+{
+  M4OSA_UInt32  level_idc_index;
+  M4OSA_UInt32  MaxFrameNum;
+  M4OSA_UInt32  expectedDeltaPerPicOrderCntCycle;
+  M4OSA_Int32   MaxPicOrderCntLsb;
+  M4OSA_Int32   max_dec_frame_buffering;
+
+  /* (pic_order_cnt_type == 1) */
+  M4OSA_Int32   offset_for_non_ref_pic;
+  M4OSA_Int32   offset_for_top_to_bottom_field;
+  M4OSA_Int32   frame_crop_left_offset;
+  M4OSA_Int32   frame_crop_right_offset;
+  M4OSA_Int32   frame_crop_top_offset;
+  M4OSA_Int32   frame_crop_bottom_offset;
+  M4OSA_Int32   offset_for_ref_frame[_MAXnum_ref_frames_in_pic_order_cnt_cycle];
+
+  M4OSA_UInt16 PicWidthInMbs;
+  M4OSA_UInt16 FrameHeightInMbs;
+  M4OSA_UInt16  pic_width_in_mbs_minus1;
+  M4OSA_UInt16  pic_height_in_map_units_minus1;
+
+#ifdef _CAP_FMO_
+  M4OSA_UInt16 NumSliceGroupMapUnits;
+  M4OSA_UInt16 MaxPicSizeInMbs;
+#endif /*_CAP_FMO_*/
+
+  M4OSA_UInt8   profile_idc;
+  M4OSA_UInt8   reserved_zero_4bits;
+  M4OSA_UInt8   level_idc;
+  M4OSA_UInt8   seq_parameter_set_id;
+  M4OSA_UInt8   log2_max_frame_num_minus4;
+  M4OSA_UInt8   pic_order_cnt_type;
+  /* if(pic_order_cnt_type == 0) */
+  M4OSA_UInt8   log2_max_pic_order_cnt_lsb_minus4;
+
+  M4OSA_UInt8   num_ref_frames_in_pic_order_cnt_cycle;
+  /* for( i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++ ) */
+  M4OSA_UInt8   num_ref_frames;
+
+  M4OSA_Bool    constraint_set0_flag;
+  M4OSA_Bool    constraint_set1_flag;
+  M4OSA_Bool    constraint_set2_flag;
+  M4OSA_Bool    constraint_set3_flag;
+  M4OSA_Bool    delta_pic_order_always_zero_flag;
+  M4OSA_Bool    gaps_in_frame_num_value_allowed_flag;
+  M4OSA_Bool    frame_mbs_only_flag;
+  M4OSA_Bool    mb_adaptive_frame_field_flag;
+  M4OSA_Bool    direct_8x8_inference_flag;
+  M4OSA_Bool    frame_cropping_flag;
+  M4OSA_Bool    vui_parameters_present_flag;
+  M4OSA_Bool    Active;
+
+  /* vui_seq_parameters_t vui_seq_parameters; */
+} ComSequenceParameterSet_t_MCS;
+
+typedef struct
+{
+  M4OSA_Int16       pic_init_qp_minus26;
+  M4OSA_Int16       pic_init_qs_minus26;
+  M4OSA_Int16       chroma_qp_index_offset;
+
+//#ifdef _CAP_FMO_
+  /* if( slice_group_map_type = = 0 ) */
+  M4OSA_UInt16      run_length_minus1[_MAXnum_slice_groups];
+  /* else if( slice_group_map_type = = 2 ) */
+  M4OSA_UInt16      top_left[_MAXnum_slice_groups];
+  M4OSA_UInt16      bottom_right[_MAXnum_slice_groups];
+  /* else if( slice_group_map_type = = 6 ) */
+  M4OSA_UInt16      pic_size_in_map_units_minus1;
+  M4OSA_UInt16      slice_group_change_rate_minus1;
+
+  M4OSA_UInt16 FirstMbInSliceGroup[_MAXnum_slice_groups];
+  M4OSA_UInt16 LastMbInSliceGroup[_MAXnum_slice_groups];
+
+
+  M4OSA_UInt8  *slice_group_id;
+  M4OSA_UInt8  *MapUnitToSliceGroupMap;
+  M4OSA_UInt8  *MbToSliceGroupMap;
+  M4OSA_UInt16  NumSliceGroupMapUnits;
+
+  M4OSA_UInt8       slice_group_map_type;
+  /* else if( slice_group_map_type = = 3 || 4 || 5 */
+  M4OSA_Bool        slice_group_change_direction_flag;
+  M4OSA_Bool   map_initialized;
+// #endif /*_CAP_FMO_*/
+
+  M4OSA_UInt8       pic_parameter_set_id;
+  M4OSA_UInt8       seq_parameter_set_id;
+  M4OSA_UInt8      num_ref_idx_l0_active_minus1;
+  M4OSA_UInt8      num_ref_idx_l1_active_minus1;
+  M4OSA_UInt8       weighted_bipred_idc;
+  M4OSA_UInt8       num_slice_groups_minus1;
+
+  M4OSA_Bool        entropy_coding_mode_flag;
+  /* if( pic_order_cnt_type < 2 )  in the sequence parameter set */
+  M4OSA_Bool        pic_order_present_flag;
+  M4OSA_Bool        weighted_pred_flag;
+  M4OSA_Bool        deblocking_filter_control_present_flag;
+  M4OSA_Bool        constrained_intra_pred_flag;
+  M4OSA_Bool        redundant_pic_cnt_present_flag;
+  M4OSA_Bool    Active;
+
+  ComSequenceParameterSet_t_MCS *p_active_sps;
+} ComPictureParameterSet_t_MCS;
+
+typedef struct
+{
+      M4OSA_UInt32 bitPos;                /*!< bit position in buffer */
+      M4OSA_UInt32 totalBits;             /*!< bit position in file (total bits read so far) */
+
+      M4OSA_UInt32 lastTotalBits;         /*!< bit position in file of the last VOP */
+      M4OSA_UInt32 numBitsInBuffer;       /*!< number of bits in buffer */
+      M4OSA_UInt32 readableBytesInBuffer; /*!< number of bytes that can be read in decoder buffer*/
+      M4OSA_UInt32 maxBufferSize;         /*!< max buffer size in bit units */
+      M4OSA_UInt8  *Buffer;               /*!< char buffer at reading from file */
+      M4OSA_Int32     i8BitCnt;
+      M4OSA_UInt32     ui32TempBuff;
+      M4OSA_Int8      *pui8BfrPtr;
+      M4OSA_UInt32    ui32LastTwoBytes;  /*!< stores the last read two bytes */
+} ComBitStreamMCS_t;
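+
+/* Illustrative usage (assumption, not in the original file): the reader-side bitstream is
+ * initialized from a buffer of SPS/PPS data and then consumed with the H264MCS_getBits()
+ * and H264MCS_DecVLCReadExpGolombCode() helpers defined in M4MCS_API.c. pNalData and
+ * nalSize are placeholders, and the parsing order is only indicative.
+ *
+ *   ComBitStreamMCS_t bs;
+ *   bs.Buffer = pNalData;
+ *   DecBitStreamReset_MCS(&bs, nalSize);
+ *   profile_idc = H264MCS_getBits(&bs, 8);
+ *   sps_id = H264MCS_DecVLCReadExpGolombCode(&bs);
+ */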
+
+
+typedef struct
+{
+
+    M4OSA_Int32 prev_frame_num;
+    M4OSA_Int32 cur_frame_num;
+    M4OSA_Int32 prev_new_frame_num;
+    M4OSA_Int32 log2_max_frame_num_minus4;
+    M4OSA_Int32 is_done;
+    M4OSA_Int32 is_first;
+    M4OSA_Int32 frame_count;
+    M4OSA_Int32 frame_mod_count;
+    M4OSA_Int32 POC_lsb;
+    M4OSA_Int32 POC_lsb_mod;
+
+
+    M4OSA_UInt32    m_Num_Bytes_NALUnitLength;
+
+    M4OSA_UInt8*    m_pDecoderSpecificInfo;   /**< Pointer on specific information required
+                                                   to create a decoder */
+    M4OSA_UInt32    m_decoderSpecificInfoSize;/**< Size of the specific information pointer above*/
+
+    M4OSA_UInt8*    m_pEncoderSPS;
+    M4OSA_UInt32    m_encoderSPSSize;
+
+    M4OSA_UInt8*    m_pEncoderPPS;
+    M4OSA_UInt32    m_encoderPPSSize;
+
+    M4OSA_UInt8*    m_pFinalDSI;
+    M4OSA_UInt32    m_pFinalDSISize;
+
+    M4OSA_UInt32    m_encoder_SPS_Cnt;
+    ComSequenceParameterSet_t_MCS *p_clip_sps;
+    M4OSA_UInt32    m_encoder_PPS_Cnt;
+    ComPictureParameterSet_t_MCS  *p_clip_pps;
+
+    ComSequenceParameterSet_t_MCS *p_encoder_sps;
+    ComPictureParameterSet_t_MCS  *p_encoder_pps;
+
+
+    ComSequenceParameterSet_t_MCS  encoder_sps;
+    ComPictureParameterSet_t_MCS   encoder_pps;
+    ComSequenceParameterSet_t_MCS  clip_sps;
+
+    /* Encoder SPS parameters */
+    M4OSA_UInt32 enc_seq_parameter_set_id;
+    M4OSA_UInt32 enc_log2_max_frame_num_minus4;
+    M4OSA_UInt32 enc_pic_order_cnt_type;
+    M4OSA_UInt32 enc_log2_max_pic_order_cnt_lsb_minus4; /* applicable when POC type = 0 */
+    M4OSA_UInt32 enc_delta_pic_order_always_zero_flag;
+    M4OSA_Int32 enc_offset_for_non_ref_pic;
+    M4OSA_Int32 enc_offset_for_top_to_bottom_field;
+    M4OSA_UInt32 enc_num_ref_frames_in_pic_order_cnt_cycle; /* range 0 to 255 */
+    /* array of size num_ref_frames_in_pic_order_cnt_cycle */
+    M4OSA_Int32   enc_offset_for_ref_frame[256];
+    M4OSA_UInt32 enc_num_ref_frames;
+    M4OSA_UInt32 enc_gaps_in_frame_num_value_allowed_flag;
+
+
+    /* Input clip SPS parameters */
+    M4OSA_UInt32 clip_seq_parameter_set_id;
+    M4OSA_UInt32 clip_log2_max_frame_num_minus4;
+    M4OSA_UInt32 clip_pic_order_cnt_type;
+    M4OSA_UInt32 clip_log2_max_pic_order_cnt_lsb_minus4; /* applicable when POC type = 0 */
+    M4OSA_UInt32 clip_delta_pic_order_always_zero_flag;
+    M4OSA_Int32  clip_offset_for_non_ref_pic;
+    M4OSA_Int32  clip_offset_for_top_to_bottom_field;
+    M4OSA_UInt32 clip_num_ref_frames_in_pic_order_cnt_cycle; /* range 0 to 255 */
+    /* array of size num_ref_frames_in_pic_order_cnt_cycle */
+    M4OSA_Int32  clip_offset_for_ref_frame[256];
+    M4OSA_UInt32 clip_num_ref_frames;
+    M4OSA_UInt32 clip_gaps_in_frame_num_value_allowed_flag;
+
+    M4OSA_UInt32 final_PPS_ID;
+    M4OSA_UInt32 final_SPS_ID;
+    NSWAVC_bitStream_t_MCS  encbs;
+
+} NSWAVC_MCS_t;
+
+
+
+/**
+ ******************************************************************************
+ * structure    M4MCS_InternalContext
+ * @brief       This structure defines the MCS context (private)
+ * @note        This structure is used for all MCS calls to store the context
+ ******************************************************************************
+ */
+typedef struct
+{
+    /**
+     * MCS State and settings stuff */
+    M4MCS_States            State;     /**< MCS internal state */
+    M4MCS_StreamState       VideoState;/**< State of the video encoding */
+    M4MCS_StreamState       AudioState;/**< State of the audio encoding */
+    M4OSA_Bool              noaudio;/**< Flag to know if we have to deal with audio transcoding */
+    M4OSA_Bool              novideo;/**< Flag to know if we have to deal with video transcoding */
+
+    M4VIDEOEDITING_ClipProperties  InputFileProperties;/**< Input audio/video stream properties */
+    M4OSA_Void*             pInputFile;             /**< Remember input file pointer between fast
+                                                         open and normal open */
+    M4VIDEOEDITING_FileType InputFileType;          /**< Remember input file type between fast
+                                                         open and normal open */
+    M4OSA_Bool              bFileOpenedInFastMode;  /**< Flag to know if a particular reader
+                                                         supports fast open */
+    M4OSA_UInt32            uiMaxMetadataSize;      /**< Limitation on the max acceptable moov
+                                                         size of a 3gpp file */
+
+    M4ENCODER_Format        EncodingVideoFormat;    /**< Output video format, set by the user */
+    M4ENCODER_FrameWidth    EncodingWidth;          /**< Output video width, set by the user */
+    M4ENCODER_FrameHeight   EncodingHeight;         /**< Output video height, set by the user */
+    M4ENCODER_FrameRate     EncodingVideoFramerate; /**< Output video framerate, set by the user*/
+
+    M4OSA_UInt32            uiBeginCutTime;     /**< Begin cut time, in milliseconds */
+    M4OSA_UInt32            uiEndCutTime;       /**< End cut time, in milliseconds */
+    M4OSA_UInt32            uiMaxFileSize;      /**< Maximum output file size, in bytes */
+    M4OSA_UInt32            uiAudioBitrate;     /**< Targeted audio bitrate in bps */
+    M4OSA_UInt32            uiVideoBitrate;     /**< Targeted video bitrate in bps */
+
+#ifdef TIMESCALE_BUG
+    M4OSA_UInt32    uiVideoTimescale;     /**< Targeted timescale without decode/encode process */
+    M4OSA_UInt32    uiTimescaleLength;    /**< Length of the VOP time increment in bits */
+    M4OSA_UInt32    uiOrigVideoTimescale; /**< Original timescale */
+    M4OSA_UInt32    uiOrigTimescaleLength;/**< Original length of the VOP time increment in bits*/
+    M4MCS_VolParse  volParsing;           /**< VOL parsing results needed for VOP parsing */
+#endif
+    M4OSA_UInt8     uiProgress;  /**< Progress information saved at each step to be able to
+                                      return it in case of pause */
+
+    /**
+     * Reader stuff */
+    M4OSA_Context           pReaderContext;           /**< Context of the reader module */
+    M4_VideoStreamHandler*  pReaderVideoStream;       /**< Description of the read video stream */
+    M4_AudioStreamHandler*  pReaderAudioStream;       /**< Description of the read audio stream */
+    M4OSA_Bool              bUnsupportedVideoFound;   /**< True if an unsupported video stream
+                                                            type has been found */
+    M4OSA_Bool              bUnsupportedAudioFound;   /**< True if an unsupported audio stream
+                                                            type has been found */
+    M4_AccessUnit           ReaderVideoAU;            /**< Read video access unit */
+    M4_AccessUnit           ReaderVideoAU1;           /**< Read video access unit */
+    M4_AccessUnit           ReaderVideoAU2;           /**< Read video access unit */
+    M4_AccessUnit           ReaderAudioAU;            /**< Read audio access unit */
+    M4_AccessUnit           ReaderAudioAU1;           /**< Read audio access unit */
+    M4_AccessUnit           ReaderAudioAU2;           /**< Read audio access unit */
+    M4OSA_MemAddr8          m_pDataAddress1;          /**< Temporary buffer for Access Unit */
+    M4OSA_MemAddr8          m_pDataAddress2;          /**< Temporary buffer for Access Unit */
+    M4OSA_MemAddr8          m_pDataVideoAddress1;     /**< Temporary buffer for Access Unit */
+    M4OSA_MemAddr8          m_pDataVideoAddress2;     /**< Temporary buffer for Access Unit */
+    M4OSA_UInt32            m_audioAUDuration;        /**< Audio AU duration */
+    M4OSA_Int32             iAudioCtsOffset;          /**< Audio AU CTS offset due to begin cut */
+
+    /**
+     * Video decoder stuff */
+    M4OSA_Context         pViDecCtxt;         /**< Video decoder context */
+    M4OSA_Double          dViDecStartingCts;  /**< Video CTS at which the decode/encode will start
+                                                   (used for begin cut and pause/resume) */
+    M4OSA_Double          dViDecCurrentCts;   /**< Video CTS to decode */
+    M4OSA_Int32           iVideoBeginDecIncr; /**< CTS step for the begin cut decode (doesn't
+                                                    need floating point precision) */
+    M4OSA_Double          dCtsIncrement;      /**< Cts increment from one video frame to another*/
+    M4OSA_Bool            isRenderDup;        /**< To handle duplicate frame rendering in case of
+                                                    external decoding */
+    M4VIFI_ImagePlane*    lastDecodedPlane;   /**< Last decoded plane */
+
+    /**
+     * Video encoder stuff */
+    M4OSA_Context         pViEncCtxt;         /**< Video encoder context */
+    M4VIFI_ImagePlane*    pPreResizeFrame;    /**< The decoded image before resize
+                                                  (allocated if resize needed only)*/
+    M4OSA_UInt32          uiEncVideoBitrate;  /**< Actual video bitrate for the video encoder */
+    M4OSA_Bool            bActivateEmp;    /**< Encode in Mpeg4 format with limitations for EMP */
+    M4OSA_UInt32          outputVideoTimescale;
+    M4OSA_UInt32          encoderState;
+
+    /**
+     * Audio decoder stuff */
+    M4OSA_Context         pAudioDecCtxt;        /**< Audio (AAC) decoder context */
+    M4AD_Buffer           AudioDecBufferIn;     /**< Input structure for the audio decoder */
+    M4AD_Buffer           AudioDecBufferOut;    /**< Output structure for the audio decoder */
+    M4OSA_MemAddr8        pPosInDecBufferOut;   /**< Position into the decoder buffer */
+    AAC_DEC_STREAM_PROPS  AacProperties;   /**< Structure for new api to get AAC properties */
+
+    /**
+     * Sample Rate Convertor (SSRC) stuff */
+    SSRC_Instance_t        SsrcInstance;       /**< Context of the Ssrc */
+    SSRC_Scratch_t*        SsrcScratch;        /**< Working memory of the Ssrc */
+    short                  iSsrcNbSamplIn;     /**< Number of samples the SSRC needs as input */
+    short                  iSsrcNbSamplOut;    /**< Number of samples the SSRC outputs */
+    M4OSA_MemAddr8         pSsrcBufferIn;      /**< Input of the SSRC */
+    M4OSA_MemAddr8         pSsrcBufferOut;     /**< Output of the SSRC */
+    M4OSA_MemAddr8         pPosInSsrcBufferIn; /**< Position into the SSRC in buffer */
+    M4OSA_MemAddr8         pPosInSsrcBufferOut;/**< Position into the SSRC out buffer */
+
+    M4OSA_Int32            *pLVAudioResampler;
+
+
+    /**
+     * audio encoder stuff */
+    M4OSA_Context                   pAudioEncCtxt; /**< Context of the audio encoder */
+    M4ENCODER_AudioDecSpecificInfo  pAudioEncDSI; /**< Decoder specific info built by the encoder*/
+    M4ENCODER_AudioParams           AudioEncParams;/**< Config of the audio encoder */
+    M4OSA_MemAddr8            pAudioEncoderBuffer;      /**< Input of the encoder */
+    M4OSA_MemAddr8            pPosInAudioEncoderBuffer; /**< Position into the encoder buffer */
+    M4OSA_UInt32              audioEncoderGranularity;  /**< Minimum number of pcm samples needed
+                                                             to feed audio encoder */
+
+    /**
+     * Writer stuff */
+    M4OSA_Context             pWriterContext;     /**< Context of the writer module */
+    M4OSA_Void*               pOutputFile;        /**< Output file to be created */
+    M4OSA_Void*               pTemporaryFile;     /**< Temporary file to be created to store
+                                                        metadata ("moov.bin") */
+    M4SYS_StreamDescription   WriterVideoStream;  /**< Description of the written video stream */
+    M4SYS_StreamDescription   WriterAudioStream;  /**< Description of the written audio stream */
+    M4WRITER_StreamVideoInfos WriterVideoStreamInfo;/**< Video properties of the written video
+                                                          stream */
+    M4SYS_AccessUnit          WriterVideoAU;        /**< Written video access unit */
+    M4SYS_AccessUnit          WriterAudioAU;        /**< Written audio access unit */
+    M4OSA_UInt32              uiVideoAUCount;       /**< Number of video AU written in output
+                                                          file */
+    M4OSA_UInt32              uiVideoMaxAuSize;     /**< Max access unit size for the output
+                                                          video stream */
+    M4OSA_UInt32              uiVideoMaxChunckSize; /**< Max chunk size for the output video
+                                                          stream */
+    M4OSA_UInt32              uiAudioAUCount;   /**< Number of audio AU written in output file */
+    M4OSA_UInt32              uiAudioMaxAuSize; /**< Max access unit size for the output
+                                                       audio stream */
+    M4OSA_UInt32              uiAudioCts;       /**< Audio AU cts (when audio is transcoded) */
+    M4OSA_Bool                b_isRawWriter;    /**< Boolean to know if the raw writer is
+                                                      registered or not */
+    M4OSA_Context             pOutputPCMfile;   /**< Output PCM file if not NULL */
+
+    /**
+     * Filesystem functions */
+    M4OSA_FileReadPointer*    pOsaFileReadPtr; /**< OSAL file read functions,
+                                                    to be provided by user */
+    M4OSA_FileWriterPointer*  pOsaFileWritPtr; /**< OSAL file write functions,
+                                                    to be provided by user */
+
+    /**
+      * Media and Codec registration */
+    /**< Table of M4MCS_WriterInterface structures for the available writers list */
+    M4MCS_WriterInterface               WriterInterface[M4WRITER_kType_NB];
+    /**< open, close, setoption, etc. functions of the used writer */
+    M4WRITER_GlobalInterface*           pWriterGlobalFcts;
+    /**< data manipulation functions of the used writer */
+    M4WRITER_DataInterface*             pWriterDataFcts;
+    /**< Table of M4ENCODER_GlobalInterface structures for the available encoders list */
+    M4ENCODER_GlobalInterface*          pVideoEncoderInterface[M4ENCODER_kVideo_NB];
+    /**< Functions of the used encoder */
+    M4ENCODER_GlobalInterface*          pVideoEncoderGlobalFcts;
+
+    M4OSA_Void*                         pVideoEncoderExternalAPITable[M4ENCODER_kVideo_NB];
+    M4OSA_Void*                         pCurrentVideoEncoderExternalAPI;
+    M4OSA_Void*                         pVideoEncoderUserDataTable[M4ENCODER_kVideo_NB];
+    M4OSA_Void*                         pCurrentVideoEncoderUserData;
+
+    /**< Table of M4ENCODER_AudioGlobalInterface structures for the available encoders list */
+    M4ENCODER_AudioGlobalInterface*     pAudioEncoderInterface[M4ENCODER_kAudio_NB];
+    /**< Table of internal/external flags for the available encoders list */
+    M4OSA_Bool                          pAudioEncoderFlag[M4ENCODER_kAudio_NB];
+    /**< Functions of the used encoder */
+    M4ENCODER_AudioGlobalInterface*     pAudioEncoderGlobalFcts;
+    M4OSA_Void*                         pAudioEncoderUserDataTable[M4ENCODER_kAudio_NB];
+    M4OSA_Void*                         pCurrentAudioEncoderUserData;
+
+    M4READER_GlobalInterface*           m_pReaderGlobalItTable[M4READER_kMediaType_NB];
+    M4READER_DataInterface*             m_pReaderDataItTable[M4READER_kMediaType_NB];
+    M4READER_GlobalInterface*           m_pReader;
+    M4READER_DataInterface*             m_pReaderDataIt;
+    M4OSA_UInt8                         m_uiNbRegisteredReaders;
+
+    M4DECODER_VideoInterface*           m_pVideoDecoder;
+    M4DECODER_VideoInterface*           m_pVideoDecoderItTable[M4DECODER_kVideoType_NB];
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+    M4OSA_Void*                         m_pCurrentVideoDecoderUserData;
+    M4OSA_Void*                         m_pVideoDecoderUserDataTable[M4DECODER_kVideoType_NB];
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+    M4OSA_UInt8                         m_uiNbRegisteredVideoDec;
+
+    M4AD_Interface*         m_pAudioDecoder;
+    M4AD_Interface*         m_pAudioDecoderItTable[M4AD_kType_NB];
+    M4OSA_Bool              m_pAudioDecoderFlagTable[M4AD_kType_NB]; /**< store indices of external
+                                                                      decoders */
+    M4OSA_Void*             m_pAudioDecoderUserDataTable[M4AD_kType_NB];
+    M4OSA_Void*             m_pCurrentAudioDecoderUserData;
+
+    M4MCS_MediaRendering    MediaRendering;     /**< FB: to crop, resize, or render black borders*/
+    M4OSA_Context           m_air_context;
+    M4OSA_Bool              bExtOMXAudDecoder;  /* External OMX Audio decoder */
+
+    /**< FlB 2009.03.04: Audio effects*/
+    M4MCS_EffectSettings    *pEffects;              /**< List of effects */
+    M4OSA_UInt8             nbEffects;              /**< Number of effects in the above list */
+    M4OSA_Int8              pActiveEffectNumber;    /**< Effect ID to be applied, if -1,
+                                                       no effect has to be applied currently*/
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+    M4OSA_Bool              m_bIsStillPicture;       /**< =TRUE if input file is a still picture
+                                                        (JPEG, PNG, BMP, GIF)*/
+    M4MCS_Context           m_pStillPictureContext; /**< Context of the still picture part of MCS*/
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+    NSWAVC_MCS_t            *m_pInstance;
+    M4OSA_UInt8             *H264MCSTempBuffer;
+    M4OSA_UInt32            H264MCSTempBufferSize;
+    M4OSA_UInt32            H264MCSTempBufferDataSize;
+    M4OSA_Bool              bH264Trim;
+    /* Flag to know when to get the last decoded frame CTS */
+    M4OSA_Bool              bLastDecodedFrameCTS;
+
+} M4MCS_InternalContext;
+
+
+#endif /* __M4MCS_INTERNALTYPES_H__ */
+
diff --git a/libvideoeditor/vss/mcs/src/Android.mk b/libvideoeditor/vss/mcs/src/Android.mk
new file mode 100755
index 0000000..4c1f948
--- /dev/null
+++ b/libvideoeditor/vss/mcs/src/Android.mk
@@ -0,0 +1,67 @@
+#
+# Copyright (C) 2011 NXP Software
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+#
+# libvideoeditor_mcs
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE:= libvideoeditor_mcs
+
+LOCAL_SRC_FILES:=          \
+      M4MCS_API.c \
+      M4MCS_AudioEffects.c \
+      M4MCS_BitstreamParser.c \
+      M4MCS_Codecs.c \
+      M4MCS_MediaAndCodecSubscription.c \
+      M4MCS_VideoPreProcessing.c
+
+LOCAL_MODULE_TAGS := development
+
+LOCAL_SHARED_LIBRARIES := libcutils libutils
+
+LOCAL_STATIC_LIBRARIES := \
+    libvideoeditor_osal
+
+LOCAL_C_INCLUDES += \
+    $(TOP)/frameworks/media/libvideoeditor/osal/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/mcs/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/common/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/stagefrightshells/inc
+
+ifeq ($(TARGET_SIMULATOR),true)
+else
+    LOCAL_SHARED_LIBRARIES += libdl
+endif
+
+# All of the shared libraries we link against.
+LOCAL_LDLIBS := \
+    -lpthread -ldl
+
+LOCAL_CFLAGS += -Wno-multichar \
+    -DM4MCS_WITH_FAST_OPEN
+
+
+# Don't prelink this library.  For more efficient code, you may want
+# to add this library to the prelink map and set this to true.
+LOCAL_PRELINK_MODULE := false
+
+include $(BUILD_STATIC_LIBRARY)
+
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_API.c b/libvideoeditor/vss/mcs/src/M4MCS_API.c
new file mode 100755
index 0000000..bc60688
--- /dev/null
+++ b/libvideoeditor/vss/mcs/src/M4MCS_API.c
@@ -0,0 +1,11604 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ *************************************************************************
+ * @file   M4MCS_API.c
+ * @brief  MCS implementation (Video Compressor Service)
+ * @note   This file implements the API and the processing of the MCS
+ *************************************************************************
+ **/
+
+/**
+ ********************************************************************
+ * Includes
+ ********************************************************************
+ */
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /**< OSAL memory management */
+#include "M4OSA_Debug.h"  /**< OSAL debug management */
+
+/* PCM samples */
+#include "gLVAudioResampler.h"
+/**
+ * Decoder interface */
+#include "M4DECODER_Common.h"
+
+/* Encoder interface*/
+#include "M4ENCODER_common.h"
+
+/* Enable for DEBUG logging */
+//#define MCS_DUMP_PCM_TO_FILE
+#ifdef MCS_DUMP_PCM_TO_FILE
+#include <stdio.h>
+FILE *file_au_reader = NULL;
+FILE *file_pcm_decoder = NULL;
+FILE *file_pcm_encoder = NULL;
+#endif
+
+/* Core headers */
+#include "M4MCS_API.h"
+#include "M4MCS_ErrorCodes.h"
+#include "M4MCS_InternalTypes.h"
+#include "M4MCS_InternalConfig.h"
+#include "M4MCS_InternalFunctions.h"
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+#include "M4MCS_StillPicture.h"
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+/* Common headers (for aac) */
+#include "M4_Common.h"
+
+#include "NXPSW_CompilerSwitches.h"
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#include "M4VD_EXTERNAL_Interface.h"
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+#include "M4AIR_API.h"
+
+/* Version */
+#define M4MCS_VERSION_MAJOR 3
+#define M4MCS_VERSION_MINOR 4
+#define M4MCS_VERSION_REVISION  3
+
+/**
+ ********************************************************************
+ * Static local functions
+ ********************************************************************
+ */
+
+static M4OSA_ERR M4MCS_intStepSet( M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intPrepareVideoDecoder(
+                                    M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intPrepareVideoEncoder(
+                                    M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intPrepareAudioProcessing(
+                                    M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intPrepareWriter( M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intPrepareAudioBeginCut(
+                                    M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intStepEncoding(
+                                    M4MCS_InternalContext *pC,
+                                    M4OSA_UInt8 *pTranscodedTime );
+static M4OSA_ERR M4MCS_intStepBeginVideoJump(
+                                    M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intStepBeginVideoDecode(
+                                    M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intAudioNullEncoding( M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intAudioTranscoding( M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intVideoNullEncoding( M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intVideoTranscoding( M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intGetInputClipProperties(
+                                    M4MCS_InternalContext   *pContext );
+static M4OSA_UInt32 M4MCS_intGetFrameSize_AMRNB(
+                                    M4OSA_MemAddr8 pAudioFrame );
+static M4OSA_UInt32 M4MCS_intGetFrameSize_EVRC(
+                                    M4OSA_MemAddr8 pAudioFrame );
+static M4OSA_ERR M4MCS_intCheckMaxFileSize( M4MCS_Context pContext );
+static M4VIDEOEDITING_Bitrate M4MCS_intGetNearestBitrate(
+                                    M4OSA_Int32 freebitrate,
+                                    M4OSA_Int8 mode );
+static M4OSA_ERR M4MCS_intCleanUp_ReadersDecoders(
+                                    M4MCS_InternalContext *pC );
+static M4OSA_ERR M4MCS_intReallocTemporaryAU(
+                                    M4OSA_MemAddr8 *addr,
+                                    M4OSA_UInt32 newSize );
+
+/**
+ **********************************************************************
+ * External function used only by the VideoEditor; it does not appear
+ * in the public API
+ **********************************************************************
+ */
+
+M4OSA_ERR M4MCS_open_normalMode( M4MCS_Context pContext,
+                                 M4OSA_Void *pFileIn,
+                                 M4VIDEOEDITING_FileType InputFileType,
+                                 M4OSA_Void *pFileOut,
+                                 M4OSA_Void *pTempFile );
+
+/* All errors are fatal in the MCS */
+#define M4ERR_CHECK_RETURN(err) if(M4NO_ERROR!=err) return err;
+
+/* A define used with SSRC 1.04 and above to avoid taking blocks smaller
+ * than the minimal block size
+ */
+#define M4MCS_SSRC_MINBLOCKSIZE        100
+
+static M4OSA_UChar Tab_MCS[8] =
+{
+    17, 5, 3, 3, 1, 1, 1, 1
+};
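+
+/* Tab_MCS appears to be a codeword-length lookup for the Exp-Golomb decoder below: indexed
+ * by the top 3 bits of the 32-bit read cache, it gives the total codeword length when at
+ * most two leading zero bits are present (1xx -> 1 bit, 01x -> 3 bits, 001 -> 5 bits);
+ * three leading zeros are handled by the long-codeword path of
+ * H264MCS_DecVLCReadExpGolombCode(). */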
+
+M4OSA_ERR H264MCS_Getinstance( NSWAVC_MCS_t ** instance )
+{
+    NSWAVC_MCS_t *p_bs = M4OSA_NULL;
+    M4OSA_ERR err = M4NO_ERROR;
+    p_bs = (NSWAVC_MCS_t *)M4OSA_malloc(sizeof(NSWAVC_MCS_t), M4MCS,
+        (M4OSA_Char *)"NSWAVC_MCS_t");
+
+    if( M4OSA_NULL == p_bs )
+    {
+        M4OSA_TRACE1_0("H264MCS_Getinstance: allocation error");
+        return M4ERR_ALLOC;
+    }
+
+    p_bs->prev_frame_num = 0;
+    p_bs->cur_frame_num = 0;
+    p_bs->log2_max_frame_num_minus4 = 0;
+    p_bs->prev_new_frame_num = 0;
+    p_bs->is_done = 0;
+    p_bs->is_first = 1;
+
+    p_bs->m_pDecoderSpecificInfo = M4OSA_NULL;
+    p_bs->m_decoderSpecificInfoSize = 0;
+
+    p_bs->m_pEncoderSPS = M4OSA_NULL;
+    p_bs->m_encoderSPSSize = 0;
+
+    p_bs->m_pEncoderPPS = M4OSA_NULL;
+    p_bs->m_encoderPPSSize = 0;
+
+    p_bs->m_pFinalDSI = M4OSA_NULL;
+    p_bs->m_pFinalDSISize = 0;
+
+    p_bs->p_clip_sps = M4OSA_NULL;
+    p_bs->m_encoder_SPS_Cnt = 0;
+
+    p_bs->p_clip_pps = M4OSA_NULL;
+    p_bs->m_encoder_PPS_Cnt = 0;
+
+    p_bs->p_encoder_sps = M4OSA_NULL;
+    p_bs->p_encoder_pps = M4OSA_NULL;
+
+    p_bs->encoder_pps.slice_group_id = M4OSA_NULL;
+
+    *instance = (NSWAVC_MCS_t *)p_bs;
+    return err;
+}
+
+M4OSA_UInt32 H264MCS_getBits( ComBitStreamMCS_t *p_bs, M4OSA_UInt32 numBits )
+{
+    M4OSA_UInt32 ui32RetBits;
+    M4OSA_UInt8 *pbs;
+    M4OSA_Int32 bcnt;
+    p_bs->i8BitCnt -= numBits;
+    bcnt = p_bs->i8BitCnt;
+
+    /* Extract the requested numBits from the top of ui32TempBuff */
+    ui32RetBits = p_bs->ui32TempBuff >> (32 - numBits);
+
+    /* Consume numBits from ui32TempBuff and advance the bit position */
+    p_bs->ui32TempBuff <<= numBits;
+    p_bs->bitPos += numBits;
+
+    if( bcnt > 24 )
+    {
+        return (ui32RetBits);
+    }
+    else
+    { /* at least one byte can be buffered in ui32TempBuff */
+        pbs = (M4OSA_UInt8 *)p_bs->pui8BfrPtr;
+
+        if( bcnt < (int)(p_bs->numBitsInBuffer - p_bs->bitPos) )
+        { /* not enough remaining bits in ui32TempBuff: need to be filled */
+            do
+            {
+                /* On the fly detection of EPB byte */
+                if( ( *(pbs) == 0x03)
+                    && (!(( pbs[-1])
+                    | (pbs[-2])))) //(p_bs->ui32LastTwoBytes & 0x0000FFFF) == 0)
+                {
+                    /* EPB byte found: skip it and update bitPos accordingly */
+                    (pbs)++;
+                    p_bs->bitPos += 8;
+                }
+
+                p_bs->ui32TempBuff |= *(pbs)++ << (24 - bcnt);
+                bcnt += 8;
+            } while ( bcnt <= 24 );
+
+            p_bs->pui8BfrPtr = (M4OSA_Int8 *)pbs;
+            p_bs->i8BitCnt = bcnt;
+            return (ui32RetBits);
+        }
+    }
+
+    if( p_bs->bitPos <= p_bs->numBitsInBuffer )
+    {
+        return (ui32RetBits);
+    }
+    else
+    {
+        return (0);
+    }
+}
+
+M4OSA_Void H264MCS_flushBits( ComBitStreamMCS_t *p_bs, M4OSA_UInt32 numBits )
+{
+    M4OSA_UInt8 *pbs;
+    M4OSA_UInt32 bcnt;
+    p_bs->i8BitCnt -= numBits;
+    bcnt = p_bs->i8BitCnt;
+
+    p_bs->ui32TempBuff <<= numBits;
+    p_bs->bitPos += numBits;
+
+    if( bcnt > 24 )
+    {
+        return;
+    }
+    else
+    { /* at least one byte can be buffered in ui32TempBuff */
+        pbs = (M4OSA_UInt8 *)p_bs->pui8BfrPtr;
+
+        if( bcnt < (p_bs->numBitsInBuffer - p_bs->bitPos) )
+        {   /* Not enough remaining bits in ui32TempBuff: need to be filled */
+            do
+            {
+                /*  On the fly detection of EPB byte */
+                if( ( *(pbs) == 0x03) && (!(( pbs[-1]) | (pbs[-2]))) )
+                { /* JC: EPB byte found: skip it and update bitPos accordingly */
+                    (pbs)++;
+                    p_bs->bitPos += 8;
+                }
+                p_bs->ui32TempBuff |= *(pbs)++ << (24 - bcnt);
+                bcnt += 8;
+            } while ( bcnt <= 24 );
+
+            p_bs->pui8BfrPtr = (M4OSA_Int8 *)pbs;
+            p_bs->i8BitCnt = bcnt;
+        }
+    }
+
+    return;
+}
+
+M4OSA_UInt32 H264MCS_DecVLCReadExpGolombCode( ComBitStreamMCS_t *p_bs )
+{
+    M4OSA_UInt32 code, l0 = 0, l1;
+    /* Reading 32 Bits from local cache buffer of Bitstream structure*/
+    code = p_bs->ui32TempBuff;
+
+    /* Checking in first 3 bits*/
+    if( code >> 29 )
+    {
+        l0 = Tab_MCS[(code >> 29)];
+        code = code >> (32 - l0);
+        H264MCS_flushBits(p_bs, l0);
+    }
+    else
+    {
+        if( code )
+        {
+            code <<= 3;
+
+            for ( l0 = 3; code < 0x80000000; code <<= 1, l0++ );
+
+            if( l0 < 16 ) /* all useful bits are inside the 32 bits read */
+            {
+                code = code >> (31 - l0);
+                H264MCS_flushBits(p_bs, 2 * l0 + 1);
+            }
+            else
+            { /* Read the useful bits in 2 parts */
+                l1 = ( l0 << 1) - 31;
+                code >>= l0;
+                H264MCS_flushBits(p_bs, 32);
+                code = ( code << l1) | H264MCS_getBits(p_bs, l1);
+            }
+        }
+        else
+        {
+            H264MCS_flushBits(p_bs, 32);
+
+            if( H264MCS_getBits(p_bs, 1) )
+            {
+                /* if the number of leading 0's is 32, the only code allowed is 1 followed
+                   by 32 0's */
+
+                /* reading 32 more bits from the bitstream buffer */
+                code = H264MCS_getBits(p_bs, 32);
+
+                if( code == 0 )
+                {
+                    return (code - 1);
+                }
+            }
+            /* if the number of leading 0's is >32, then the symbol is >32 bits,
+               which is an error */
+            //p_bs->state = _BS_ERR;
+            //p_bs->flags |= _BF_SYM_ERR;
+            return (0);
+        }
+    }
+
+    if( 1 ) //(p_bs->state == _BS_OK)
+    {
+        return (code - 1);
+    }
+    else
+    {
+        return (0);
+    }
+}
+
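+/* Decodes one signed Exp-Golomb (se(v)) codeword by first reading the ue(v)
+code number k and then mapping it to a signed value:
+k = 1 -> +1, 2 -> -1, 3 -> +2, 4 -> -2, ... */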
+M4OSA_Int32 H264MCS_DecVLCReadSignedExpGolombCode( ComBitStreamMCS_t *p_bs )
+{
+    M4OSA_Int32 codeNo, ret;
+
+    /* read the unsigned code number */
+    codeNo = H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+    /* Map to the signed value: if the code number is odd the value is positive,
+    if it is even the value is negative; formula is (-1)^(k+1)*CEIL(k/2) */
+
+    ret = (codeNo & 0x01) ? (( codeNo + 1) >> 1) : (( -codeNo) >> 1);
+
+    return ret;
+}
+
+M4OSA_Void DecBitStreamReset_MCS( ComBitStreamMCS_t *p_bs,
+                                 M4OSA_UInt32 bytes_read )
+{
+    p_bs->bitPos = 0;
+
+    p_bs->lastTotalBits = 0;
+    p_bs->numBitsInBuffer = bytes_read << 3;
+    p_bs->readableBytesInBuffer = bytes_read;
+    //p_bs->state = M4NO_ERROR;//_BS_OK;
+    //p_bs->flags = 0;
+
+    p_bs->ui32TempBuff = 0;
+    p_bs->i8BitCnt = 0;
+    p_bs->pui8BfrPtr = (M4OSA_Int8 *)p_bs->Buffer;
+    p_bs->ui32LastTwoBytes = 0xFFFFFFFF;
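+    /* A zero-length read pre-fills the 32-bit cache (ui32TempBuff) from the
+    buffer so that subsequent getBits()/flushBits() calls can serve bits
+    directly from the cache. */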
+    H264MCS_getBits(p_bs, 0);
+}
+
+M4OSA_ERR NSWAVCMCS_initBitstream( NSWAVC_bitStream_t_MCS *bS )
+{
+    bS->bitPos = 0;
+    bS->byteCnt = 0;
+    bS->currBuff = 0;
+    bS->prevByte = 0xff;
+    bS->prevPrevByte = 0xff;
+
+    return M4NO_ERROR;
+}
+
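+/* Writer side: bits are accumulated MSB-first in the 32-bit cache (currBuff)
+and flushed to streamBuffer four bytes at a time. While flushing, emulation
+prevention is applied: if the two previously written bytes are 0x00 and the
+next byte is in the range 0x00..0x03 (tested as !(byteOne & 0xFC)), a 0x03
+byte is inserted first, so emitting the bytes 00 00 01 produces 00 00 03 01
+in the stream. */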
+M4OSA_ERR NSWAVCMCS_putBits( NSWAVC_bitStream_t_MCS *bS, M4OSA_UInt32 value,
+                            M4OSA_UInt8 length )
+{
+    M4OSA_UInt32 maskedValue = 0, temp = 0;
+    M4OSA_UInt8 byteOne;
+
+    M4OSA_UInt32 len1 = (length == 32) ? 31 : length;
+
+    if( !(length) )
+    {
+        /* Length = 0, return OK*/
+        return M4NO_ERROR;
+    }
+
+    maskedValue = (M4OSA_UInt32)(value &(( 1 << len1) - 1));
+
+    if( 32 > (length + bS->bitPos) )
+    {
+        bS->bitPos += length;
+        bS->currBuff |= maskedValue << (32 - bS->bitPos);
+    }
+    else
+    {
+        temp = (( bS->bitPos + length) - 32);
+
+        bS->currBuff |= (maskedValue >> (temp));
+
+        byteOne =
+            bS->streamBuffer[bS->byteCnt++] = (M4OSA_UInt8)(bS->currBuff >> 24);
+
+        if( (( bS->prevPrevByte
+            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+        {
+            bS->byteCnt -= 1;
+            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+        }
+        else
+        {
+            bS->prevPrevByte = bS->prevByte;
+            bS->prevByte = byteOne;
+        }
+        byteOne = bS->streamBuffer[bS->byteCnt++] =
+            (M4OSA_UInt8)(( bS->currBuff >> 16) & 0xff);
+
+        if( (( bS->prevPrevByte
+            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+        {
+            bS->byteCnt -= 1;
+            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+        }
+        else
+        {
+            bS->prevPrevByte = bS->prevByte;
+            bS->prevByte = byteOne;
+        }
+        byteOne = bS->streamBuffer[bS->byteCnt++] =
+            (M4OSA_UInt8)(( bS->currBuff >> 8) & 0xff);
+
+        if( (( bS->prevPrevByte
+            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+        {
+            bS->byteCnt -= 1;
+            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+        }
+        else
+        {
+            bS->prevPrevByte = bS->prevByte;
+            bS->prevByte = byteOne;
+        }
+        byteOne = bS->streamBuffer[bS->byteCnt++] =
+            (M4OSA_UInt8)((bS->currBuff) &0xff);
+
+        if( (( bS->prevPrevByte
+            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+        {
+            bS->byteCnt -= 1;
+            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+        }
+        else
+        {
+            bS->prevPrevByte = bS->prevByte;
+            bS->prevByte = byteOne;
+        }
+
+        bS->currBuff = 0;
+
+        bS->currBuff |= ( maskedValue &(( 1 << temp) - 1)) << (32 - temp);
+
+        bS->bitPos = temp;
+    }
+
+    return M4NO_ERROR;
+}
+
+M4OSA_ERR NSWAVCMCS_putBit( NSWAVC_bitStream_t_MCS *bS, M4OSA_UInt32 value )
+{
+    M4OSA_UInt32 maskedValue = 0, temp = 0;
+    M4OSA_UInt8 byteOne;
+
+    maskedValue = (value ? 1 : 0);
+
+    if( 32 > (1 + bS->bitPos) )
+    {
+        bS->bitPos += 1;
+        bS->currBuff |= maskedValue << (32 - bS->bitPos);
+    }
+    else
+    {
+        temp = 0;
+
+        bS->currBuff |= (maskedValue);
+
+        /* writing it to memory*/
+        byteOne =
+            bS->streamBuffer[bS->byteCnt++] =
+            (M4OSA_UInt8)(bS->currBuff >> 24);
+
+        if( (( bS->prevPrevByte
+            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+        {
+            bS->byteCnt -= 1;
+            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+        }
+        else
+        {
+            bS->prevPrevByte = bS->prevByte;
+            bS->prevByte = byteOne;
+        }
+        byteOne = bS->streamBuffer[bS->byteCnt++] =
+            (M4OSA_UInt8)(( bS->currBuff >> 16) & 0xff);
+
+        if( (( bS->prevPrevByte
+            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+        {
+            bS->byteCnt -= 1;
+            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+        }
+        else
+        {
+            bS->prevPrevByte = bS->prevByte;
+            bS->prevByte = byteOne;
+        }
+        byteOne = bS->streamBuffer[bS->byteCnt++] =
+            (M4OSA_UInt8)(( bS->currBuff >> 8) & 0xff);
+
+        if( (( bS->prevPrevByte
+            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+        {
+            bS->byteCnt -= 1;
+            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+        }
+        else
+        {
+            bS->prevPrevByte = bS->prevByte;
+            bS->prevByte = byteOne;
+        }
+        byteOne = bS->streamBuffer[bS->byteCnt++] =
+            (M4OSA_UInt8)((bS->currBuff) &0xff);
+
+        if( (( bS->prevPrevByte
+            == 0) & (bS->prevByte == 0) & (!(byteOne & 0xFC))) )
+        {
+            bS->byteCnt -= 1;
+            bS->prevPrevByte = bS->streamBuffer[bS->byteCnt++] = 0x03;
+            bS->prevByte = bS->streamBuffer[bS->byteCnt++] = byteOne;
+        }
+        else
+        {
+            bS->prevPrevByte = bS->prevByte;
+            bS->prevByte = byteOne;
+        }
+        bS->currBuff = 0;
+        bS->bitPos = 0;
+    }
+
+    return M4NO_ERROR;
+}
+
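+/* Appends the RBSP trailing bits: a single '1' stop bit followed by '0' bits
+up to the next byte boundary (a full 0x80 byte when already aligned), then
+flushes the remaining complete bytes of currBuff into streamBuffer.
+Example: with bitPos % 8 == 3, the bits 1 0 0 0 0 are appended. */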
+M4OSA_Int32 NSWAVCMCS_putRbspTbits( NSWAVC_bitStream_t_MCS *bS )
+{
+    M4OSA_UInt8 trailBits = 0;
+    M4OSA_UInt8 byteCnt = 0;
+
+    trailBits = (M4OSA_UInt8)(bS->bitPos % 8);
+
+    /* Already in the byte aligned position,
+    RBSP trailing bits will be 1000 0000 */
+    if( 0 == trailBits )
+    {
+        trailBits = (1 << 7);
+        NSWAVCMCS_putBits(bS, trailBits, 8);
+    }
+    else
+    {
+        trailBits = (8 - trailBits);
+        NSWAVCMCS_putBit(bS, 1);
+        trailBits--;
+
+        if( trailBits )
+        { /* put trailBits times zeros */
+            NSWAVCMCS_putBits(bS, 0, trailBits);
+        }
+    }
+
+    /* For writing currBuff into streamBuffer, flush the remaining complete bytes
+    of the 32-bit cache */
+    byteCnt = (M4OSA_UInt8)(( bS->bitPos + 4) / 8);
+
+    switch( byteCnt )
+    {
+        case 1:
+            bS->streamBuffer[bS->byteCnt++] = (M4OSA_UInt8)(bS->currBuff >> 24);
+            break;
+
+        case 2:
+            bS->streamBuffer[bS->byteCnt++] = (M4OSA_UInt8)(bS->currBuff >> 24);
+            bS->streamBuffer[bS->byteCnt++] =
+                (M4OSA_UInt8)(( bS->currBuff >> 16) & 0xff);
+            break;
+
+        case 3:
+            bS->streamBuffer[bS->byteCnt++] = (M4OSA_UInt8)(bS->currBuff >> 24);
+            bS->streamBuffer[bS->byteCnt++] =
+                (M4OSA_UInt8)(( bS->currBuff >> 16) & 0xff);
+            bS->streamBuffer[bS->byteCnt++] =
+                (M4OSA_UInt8)(( bS->currBuff >> 8) & 0xff);
+
+            break;
+
+        default:
+            /* Nothing left to flush */
+            break;
+    }
+
+    //    bS->bitPos =0;
+    //    bS->currBuff = 0;
+
+    return M4NO_ERROR;
+}
+
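+/* Encodes codeNum as an unsigned Exp-Golomb (ue(v)) codeword: codeNum + 1 is
+written using 2 * floor(log2(codeNum + 1)) + 1 bits. Example: codeNum = 4 gives
+data = 5 and codeLen = 5, so the bits 0 0 1 0 1 are emitted. */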
+M4OSA_ERR NSWAVCMCS_uExpVLC( NSWAVC_bitStream_t_MCS *bS, M4OSA_Int32 codeNum )
+{
+
+    M4OSA_Int32 loop, temp;
+    M4OSA_Int32 data = 0;
+    M4OSA_UInt8 codeLen = 0;
+
+    /* The codeNum cannot be less than zero for this ue(v) */
+    if( codeNum < 0 )
+    {
+        return 0;
+    }
+
+    /* Implements the ue(v) Exp-Golomb encoding of Table 9-1 of the H.264 standard */
+    temp = codeNum + 1;
+
+    for ( loop = 0; temp != 0; loop++ )
+    {
+        temp /= 2;
+    }
+
+    codeLen = (( loop * 2) - 1);
+
+    data = codeNum + 1;
+
+    NSWAVCMCS_putBits(bS, data, codeLen);
+
+    return M4NO_ERROR;
+}
+
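+/* Encodes a signed Exp-Golomb (se(v)) codeword: the value is first mapped to a
+code number (positive v -> 2v - 1, negative or zero v -> -2v) and then written
+like ue(v). Examples: +2 -> 00100, -1 -> 011, 0 -> 1. */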
+M4OSA_ERR NSWAVCMCS_sExpVLC( NSWAVC_bitStream_t_MCS *bS, M4OSA_Int32 codeNum )
+{
+
+    M4OSA_Int32 loop, temp1, temp2;
+    M4OSA_Int32 data = 0;
+    M4OSA_UInt8 codeLen = 0, isPositive = 0;
+    M4OSA_UInt32 abscodeNum;
+
+    if( codeNum > 0 )
+    {
+        isPositive = 1;
+        abscodeNum = codeNum;
+    }
+    else
+    {
+        abscodeNum = -codeNum;
+    }
+
+    temp1 = ( ( ( abscodeNum) << 1) - isPositive) + 1;
+    temp2 = temp1;
+
+    for ( loop = 0; loop < 16 && temp2 != 0; loop++ )
+    {
+        temp2 /= 2;
+    }
+
+    codeLen = ( loop * 2) - 1;
+
+    data = temp1;
+
+    NSWAVCMCS_putBits(bS, data, codeLen);
+
+    return M4NO_ERROR;
+}
+
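+/* Rewrites the slice headers of an access unit coming from the video encoder so
+that they match the parameter sets kept in the instance: for each slice NALU the
+first_mb_in_slice / slice_type fields are copied, the PPS id, frame_num and (for
+pic_order_cnt_type 0) pic_order_cnt_lsb are re-emitted with the widths of the
+clip SPS, and the rest of the slice data is copied bit for bit. The AU is
+expected without start codes; a temporary 4-byte size prefix is added below. */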
+M4OSA_ERR H264MCS_ProcessEncodedNALU(   M4OSA_Void *ainstance,
+                                        M4OSA_UInt8 *inbuff,
+                                        M4OSA_Int32 inbuf_size,
+                                        M4OSA_UInt8 *outbuff,
+                                        M4OSA_Int32 *outbuf_size )
+{
+    ComBitStreamMCS_t *p_bs, bs;
+    NSWAVC_MCS_t *instance;
+    M4OSA_UInt8 nalu_info;
+    M4OSA_Int32 forbidden_bit, nal_ref_idc, nal_unit_type;
+    M4OSA_Int32 first_mb_in_slice, slice_type, pic_parameter_set_id, frame_num;
+    M4OSA_Int32 seq_parameter_set_id;
+    M4OSA_UInt8 temp1, temp2, temp3, temp4;
+    M4OSA_Int32 temp_frame_num;
+    M4OSA_Int32 bitstoDiacard, bytes;
+    M4OSA_UInt32 mask_bits = 0xFFFFFFFF;
+    M4OSA_Int32 new_bytes, init_bit_pos;
+    M4OSA_UInt32 nal_size;
+    M4OSA_UInt32 cnt;
+    M4OSA_UInt32 outbuffpos = 0;
+    M4OSA_UInt32 nal_size_low16, nal_size_high16;
+    M4OSA_UInt32 frame_size = 0;
+    M4OSA_UInt32 temp = 0;
+
+    // StageFright encoder does not provide the size in the first 4 bytes of the AU, add it
+    M4OSA_Int8 *pTmpBuff1 = M4OSA_NULL;
+    M4OSA_Int8 *pTmpBuff2 = M4OSA_NULL;
+
+    p_bs = &bs;
+    instance = (NSWAVC_MCS_t *)ainstance;
+
+    M4OSA_TRACE1_2(
+        "In  H264MCS_ProcessEncodedNALU with FrameSize = %d  inBuf_Size=%d",
+        frame_size, inbuf_size);
+
+    // StageFright codecs may add a start code, make sure it is not present
+
+    if( !M4OSA_memcmp((M4OSA_MemAddr8)inbuff,
+        (M4OSA_MemAddr8)"\x00\x00\x00\x01", 4) )
+    {
+        M4OSA_TRACE1_3(
+            "H264MCS_ProcessNALU ERROR : NALU start code has not been removed %d "
+            "0x%X 0x%X", inbuf_size, ((M4OSA_UInt32 *)inbuff)[0],
+            ((M4OSA_UInt32 *)inbuff)[1]);
+
+        return M4ERR_PARAMETER;
+    }
+
+    // StageFright encoder does not provide the size in the first 4 bytes of the AU, add it
+    pTmpBuff1 = (M4OSA_Int8 *)M4OSA_malloc(inbuf_size + 4, M4MCS,
+        (M4OSA_Char *)"tmpNALU");
+    M4OSA_memcpy((M4OSA_MemAddr8)(pTmpBuff1 + 4), (M4OSA_MemAddr8)inbuff,
+        inbuf_size);
+    pTmpBuff1[3] = ( (M4OSA_UInt32)inbuf_size) & 0x000000FF;
+    pTmpBuff1[2] = ( (M4OSA_UInt32)inbuf_size >> 8) & 0x000000FF;
+    pTmpBuff1[1] = ( (M4OSA_UInt32)inbuf_size >> 16) & 0x000000FF;
+    pTmpBuff1[0] = ( (M4OSA_UInt32)inbuf_size >> 24) & 0x000000FF;
+    pTmpBuff2 = (M4OSA_Int8 *)inbuff;
+    inbuff = (M4OSA_UInt8 *)pTmpBuff1;
+    inbuf_size += 4;
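+    /* The AU is now length-prefixed: a NALU of 0x2A bytes is stored as
+    00 00 00 2A followed by the NALU payload, which is the layout the
+    parsing loop below expects. */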
+
+    // Make sure the available size was set
+    if( inbuf_size >= *outbuf_size )
+    {
+        M4OSA_TRACE1_1(
+            "!!! H264MCS_ProcessNALU ERROR : specified available size is incorrect %d ",
+            *outbuf_size);
+        return M4ERR_PARAMETER;
+    }
+
+
+
+    while( (M4OSA_Int32)frame_size < inbuf_size )
+    {
+        mask_bits = 0xFFFFFFFF;
+        p_bs->Buffer = (M4OSA_UInt8 *)(inbuff + frame_size);
+
+        // Use unsigned values to avoid errors due to sign extension; this fix should be generic
+
+        nal_size_high16 = ( ( (M4OSA_UInt8 *)p_bs->Buffer)[0] << 8)
+            + ((M4OSA_UInt8 *)p_bs->Buffer)[1];
+        nal_size_low16 = ( ( (M4OSA_UInt8 *)p_bs->Buffer)[2] << 8)
+            + ((M4OSA_UInt8 *)p_bs->Buffer)[3];
+
+        nalu_info = (unsigned char)p_bs->Buffer[4];
+
+        outbuff[outbuffpos] = p_bs->Buffer[4];
+
+        p_bs->Buffer = p_bs->Buffer + 5;
+
+        p_bs->bitPos = 0;
+        p_bs->lastTotalBits = 0;
+        p_bs->numBitsInBuffer = ( inbuf_size - frame_size - 5) << 3;
+        p_bs->readableBytesInBuffer = inbuf_size - frame_size - 5;
+
+        p_bs->ui32TempBuff = 0;
+        p_bs->i8BitCnt = 0;
+        p_bs->pui8BfrPtr = (M4OSA_Int8 *)p_bs->Buffer;
+        p_bs->ui32LastTwoBytes = 0xFFFFFFFF;
+
+        H264MCS_getBits(p_bs, 0);
+
+        nal_size = ( nal_size_high16 << 16) + nal_size_low16;
+
+        frame_size += nal_size + 4;
+
+        forbidden_bit = ( nalu_info >> 7) & 1;
+        nal_ref_idc = ( nalu_info >> 5) & 3;
+        nal_unit_type = (nalu_info) &0x1f;
+
+        NSWAVCMCS_initBitstream(&instance->encbs);
+
+        instance->encbs.streamBuffer = outbuff + outbuffpos + 1;
+
+        if( nal_unit_type == 8 )
+        {
+            M4OSA_TRACE1_0("Error : PPS");
+            return 0;
+        }
+
+        if( nal_unit_type == 7 )
+        {
+            /*SPS Packet */
+            M4OSA_TRACE1_0("Error : SPS");
+            return 0;
+        }
+
+        if( (nal_unit_type == 5) )
+        {
+            instance->frame_count = 0;
+            instance->POC_lsb = 0;
+        }
+
+        if( ( nal_unit_type == 1) || (nal_unit_type == 5) )
+        {
+            first_mb_in_slice = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            slice_type = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            pic_parameter_set_id = H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+            /* First MB in slice */
+            NSWAVCMCS_uExpVLC(&instance->encbs, first_mb_in_slice);
+
+            /* Slice Type */
+            NSWAVCMCS_uExpVLC(&instance->encbs, slice_type);
+
+            /* Picture Parameter set Id */
+            pic_parameter_set_id = instance->encoder_pps.pic_parameter_set_id;
+            NSWAVCMCS_uExpVLC(&instance->encbs, pic_parameter_set_id);
+
+            temp = H264MCS_getBits(p_bs,
+                instance->encoder_sps.log2_max_frame_num_minus4 + 4);
+            NSWAVCMCS_putBits(&instance->encbs, instance->frame_count,
+                instance->clip_sps.log2_max_frame_num_minus4 + 4);
+
+            // In Baseline Profile: frame_mbs_only_flag should be ON
+            if( nal_unit_type == 5 )
+            {
+                temp = H264MCS_DecVLCReadExpGolombCode(p_bs);
+                NSWAVCMCS_uExpVLC(&instance->encbs, temp);
+            }
+
+            if( instance->encoder_sps.pic_order_cnt_type == 0 )
+            {
+                temp = H264MCS_getBits(p_bs,
+                    instance->encoder_sps.log2_max_pic_order_cnt_lsb_minus4
+                    + 4);
+
+                // in baseline profile field_pic_flag should be off.
+                if( instance->encoder_pps.pic_order_present_flag )
+                {
+                    temp = H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+                }
+            }
+
+            if( ( instance->encoder_sps.pic_order_cnt_type == 1)
+                && (instance->encoder_sps.delta_pic_order_always_zero_flag) )
+            {
+                temp = H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+
+                // in baseline profile field_pic_flag should be off.
+                if( instance->encoder_pps.pic_order_present_flag )
+                {
+                    temp = H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+                }
+            }
+
+            if( instance->clip_sps.pic_order_cnt_type == 0 )
+            {
+                NSWAVCMCS_putBits(&instance->encbs, instance->POC_lsb,
+                    instance->clip_sps.log2_max_pic_order_cnt_lsb_minus4 + 4);
+
+                // in baseline profile field_pic_flag should be off.
+                if( instance->encoder_pps.pic_order_present_flag )
+                {
+                    NSWAVCMCS_sExpVLC(&instance->encbs, 0);
+                }
+            }
+
+            if( ( instance->clip_sps.pic_order_cnt_type == 1)
+                && (instance->clip_sps.delta_pic_order_always_zero_flag) )
+            {
+                NSWAVCMCS_sExpVLC(&instance->encbs, 0);
+
+                // in baseline profile field_pic_flag should be off.
+                if( instance->encoder_pps.pic_order_present_flag )
+                {
+                    NSWAVCMCS_sExpVLC(&instance->encbs, 0);
+                }
+            }
+
+            cnt = p_bs->bitPos & 0x7;
+
+            if( cnt )
+            {
+                cnt = 8 - cnt;
+                temp = H264MCS_getBits(p_bs, cnt);
+                NSWAVCMCS_putBits(&instance->encbs, temp, cnt);
+            }
+
+            cnt = p_bs->bitPos >> 3;
+
+            while( cnt < (nal_size - 2) )
+            {
+                temp = H264MCS_getBits(p_bs, 8);
+                NSWAVCMCS_putBits(&instance->encbs, temp, 8);
+                cnt = p_bs->bitPos >> 3;
+            }
+
+            temp = H264MCS_getBits(p_bs, 8);
+
+            if( temp != 0 )
+            {
+                cnt = 0;
+
+                while( ( temp & 0x1) == 0 )
+                {
+                    cnt++;
+                    temp = temp >> 1;
+                }
+                cnt++;
+                temp = temp >> 1;
+
+                if( 8 - cnt )
+                {
+                    NSWAVCMCS_putBits(&instance->encbs, temp, (8 - cnt));
+                }
+
+                NSWAVCMCS_putRbspTbits(&instance->encbs);
+            }
+            else
+            {
+
+                M4OSA_TRACE1_1(
+                    "H264MCS_ProcessEncodedNALU : 13 temp = 0 trailing bits = %d",
+                    instance->encbs.bitPos % 8);
+
+                if( instance->encbs.bitPos % 8 )
+                {
+                    NSWAVCMCS_putBits(&instance->encbs, 0,
+                        (8 - instance->encbs.bitPos % 8));
+                }
+            }
+
+            temp = instance->encbs.byteCnt;
+            temp = temp + 1;
+
+            outbuffpos = outbuffpos + temp;
+        }
+    }
+
+    *outbuf_size = outbuffpos;
+
+    instance->POC_lsb = instance->POC_lsb + 1;
+
+    if( instance->POC_lsb == instance->POC_lsb_mod )
+    {
+        instance->POC_lsb = 0;
+    }
+    instance->frame_count = instance->frame_count + 1;
+
+    if( instance->frame_count == instance->frame_mod_count )
+    {
+        instance->frame_count = 0;
+    }
+
+    // Free the temporary buffer that was allocated to prepend the AU size
+
+    M4OSA_free((M4OSA_MemAddr32)pTmpBuff1);
+    pTmpBuff1 = M4OSA_NULL;
+    inbuff = (M4OSA_UInt8 *)pTmpBuff2;
+
+    return M4NO_ERROR;
+}
+
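+/* Minimal SPS parser: only the fields needed by the MCS are decoded (frame_num
+and POC field widths, number of reference frames, picture size in macroblocks,
+cropping, VUI presence flag). High-profile extensions such as scaling lists are
+not parsed here, consistent with the Baseline-profile assumptions made in the
+slice-header code above. */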
+M4OSA_Int32 DecSPSMCS( ComBitStreamMCS_t *p_bs,
+                      ComSequenceParameterSet_t_MCS *sps )
+{
+    M4OSA_UInt32 i;
+    M4OSA_Int32 temp_max_dpb_size;
+    M4OSA_Int32 nb_ignore_bits;
+    M4OSA_Int32 error;
+    M4OSA_UInt8 profile_idc, level_idc, reserved_zero_4bits,
+        seq_parameter_set_id;
+    M4OSA_UInt8 constraint_set0_flag, constraint_set1_flag,
+        constraint_set2_flag, constraint_set3_flag;
+
+    sps->profile_idc = (M4OSA_UInt8)H264MCS_getBits(p_bs, 8);
+    sps->constraint_set0_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    sps->constraint_set1_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    sps->constraint_set2_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    sps->constraint_set3_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    reserved_zero_4bits = (M4OSA_UInt8)H264MCS_getBits(p_bs, 4);
+    sps->level_idc = (M4OSA_UInt8)H264MCS_getBits(p_bs, 8);
+    sps->seq_parameter_set_id =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+    sps->log2_max_frame_num_minus4 =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+    sps->MaxFrameNum = 1 << (sps->log2_max_frame_num_minus4 + 4);
+    sps->pic_order_cnt_type =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+    if (sps->pic_order_cnt_type == 0)
+    {
+        sps->log2_max_pic_order_cnt_lsb_minus4 =
+            (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+        sps->MaxPicOrderCntLsb =
+            1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4);
+    }
+    else if( sps->pic_order_cnt_type == 1 )
+    {
+        sps->delta_pic_order_always_zero_flag =
+            (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+        // This fix should be generic to remove codec dependency
+
+        sps->offset_for_non_ref_pic =
+            H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+        sps->offset_for_top_to_bottom_field =
+            H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+
+
+        /*num_ref_frames_in_pic_order_cnt_cycle must be in the range 0, 255*/
+
+        sps->num_ref_frames_in_pic_order_cnt_cycle =
+            (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+        /* compute deltaPOC */
+        sps->expectedDeltaPerPicOrderCntCycle = 0;
+
+        for ( i = 0; i < sps->num_ref_frames_in_pic_order_cnt_cycle; i++ )
+        {
+            // This fix should be generic to remove codec dependency
+            sps->offset_for_ref_frame[i] =
+                H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+
+            sps->expectedDeltaPerPicOrderCntCycle +=
+                sps->offset_for_ref_frame[i];
+        }
+    }
+
+    /* num_ref_frames must be in the range 0,16 */
+    sps->num_ref_frames = (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+    sps->gaps_in_frame_num_value_allowed_flag =
+        (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    sps->pic_width_in_mbs_minus1 =
+        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+    sps->pic_height_in_map_units_minus1 =
+        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+    sps->frame_mbs_only_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    if (!sps->frame_mbs_only_flag)
+    {
+        sps->mb_adaptive_frame_field_flag =
+            (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    }
+    else
+    {
+        sps->mb_adaptive_frame_field_flag = 0;
+    }
+
+    sps->PicWidthInMbs = sps->pic_width_in_mbs_minus1 + 1;
+    sps->FrameHeightInMbs = ( 2 - sps->frame_mbs_only_flag) * \
+        (sps->pic_height_in_map_units_minus1 + 1);
+#ifdef _CAP_FMO_
+
+    sps->NumSliceGroupMapUnits =
+        sps->PicWidthInMbs * (sps->pic_height_in_map_units_minus1 + 1);
+    sps->MaxPicSizeInMbs = sps->PicWidthInMbs * sps->FrameHeightInMbs;
+
+#endif /*_CAP_FMO_*/
+
+    sps->direct_8x8_inference_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    if( sps->frame_mbs_only_flag == 0 )
+        sps->direct_8x8_inference_flag = 1;
+
+    sps->frame_cropping_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    if( sps->frame_cropping_flag )
+    {
+        sps->frame_crop_left_offset = H264MCS_DecVLCReadExpGolombCode(p_bs);
+        sps->frame_crop_right_offset = H264MCS_DecVLCReadExpGolombCode(p_bs);
+        sps->frame_crop_top_offset = H264MCS_DecVLCReadExpGolombCode(p_bs);
+        sps->frame_crop_bottom_offset = H264MCS_DecVLCReadExpGolombCode(p_bs);
+    }
+    else
+    {
+        sps->frame_crop_left_offset = 0;
+        sps->frame_crop_right_offset = 0;
+        sps->frame_crop_top_offset = 0;
+        sps->frame_crop_bottom_offset = 0;
+    }
+
+    sps->vui_parameters_present_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    if (sps->vui_parameters_present_flag) {
+        /* no error message as stream can be decoded without VUI messages */
+    }
+
+    return M4NO_ERROR;
+}
+
+M4OSA_Int32 DecPPSMCS( ComBitStreamMCS_t *p_bs,
+                      ComPictureParameterSet_t_MCS *pps )
+{
+    M4OSA_Int32 error;
+    M4OSA_UInt32 pic_parameter_set_id;
+
+#ifdef _CAP_FMO_
+    M4OSA_UInt32 i, length, v;
+#endif
+
+    M4OSA_Int32 nb_ignore_bits;
+
+    pic_parameter_set_id = H264MCS_DecVLCReadExpGolombCode(p_bs);
+    pps->pic_parameter_set_id = (M4OSA_UInt8)pic_parameter_set_id;
+
+    pps->seq_parameter_set_id =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+    /* entropy_coding_mode_flag must be 0 or 1 */
+    pps->entropy_coding_mode_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    pps->pic_order_present_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    pps->num_slice_groups_minus1 =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+#ifdef _CAP_FMO_
+    /* FMO stuff begins here */
+
+    pps->map_initialized = FALSE;
+
+    if( pps->num_slice_groups_minus1 > 0 )
+    {
+        pps->slice_group_map_type =
+            (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+        switch( pps->slice_group_map_type )
+        {
+            case 0:
+                for ( i = 0; i <= pps->num_slice_groups_minus1; i++ )
+                {
+                    pps->run_length_minus1[i] =
+                        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+                }
+                break;
+
+            case 2:
+                for ( i = 0; i < pps->num_slice_groups_minus1; i++ )
+                {
+                    pps->top_left[i] =
+                        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+                    pps->bottom_right[i] =
+                        (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+                }
+                break;
+
+            case 3:
+            case 4:
+            case 5:
+                pps->slice_group_change_direction_flag =
+                    (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+                pps->slice_group_change_rate_minus1 =
+                    (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+                break;
+
+            case 6:
+                pps->pic_size_in_map_units_minus1 =
+                    (M4OSA_UInt16)H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+                pps->slice_group_id = (H264UInt8
+                    *)M4H264Dec_malloc((pps->pic_size_in_map_units_minus1
+                    + 1), M4H264_COREID, (M4OSA_Char *)"PPS");
+
+                if (M4OSA_NULL == pps->slice_group_id)
+                {
+                    M4OSA_TRACE1_0("DecPPSMCS: allocation error");
+                    return M4ERR_ALLOC;
+                }
+
+                for ( length = 0, v = pps->num_slice_groups_minus1 + 1; v != 0;
+                    v >>= 1, length++ );
+
+                for ( i = 0; i <= pps->pic_size_in_map_units_minus1; i++ )
+                {
+                    pps->slice_group_id[i] =
+                        (M4OSA_UInt8)getBits(p_vlc_engine->p_bs, length);
+                }
+                break;
+        }
+    }
+    else
+    {
+        pps->slice_group_map_type = 0;
+    }
+    /* End of FMO stuff */
+
+#else
+
+#endif /* _CAP_FMO_ */
+
+    /* num_ref_idx_l0_active_minus1 must be in the range 0, 31 */
+
+    pps->num_ref_idx_l0_active_minus1 =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+    /* num_ref_idx_l1_active_minus1 must be in the range 0, 31 */
+    pps->num_ref_idx_l1_active_minus1 =
+        (M4OSA_UInt8)H264MCS_DecVLCReadExpGolombCode(p_bs);
+    pps->weighted_pred_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    /* weighted_bipred_idc must be in the range 0,2 */
+    pps->weighted_bipred_idc = (M4OSA_Bool)H264MCS_getBits(p_bs, 2);
+
+    /* pic_init_qp_minus26 must be in the range -26,25 */
+    pps->pic_init_qp_minus26 =
+        (M4OSA_Int16)H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+
+    /* pic_init_qs_minus26 must be in the range -26,25 */
+    pps->pic_init_qs_minus26 =
+        (M4OSA_Int16)H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+
+    /* chroma_qp_index_offset must be in the range -12,+12 */
+    pps->chroma_qp_index_offset =
+        (M4OSA_Int16)H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+    pps->deblocking_filter_control_present_flag =
+        (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    pps->constrained_intra_pred_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+    pps->redundant_pic_cnt_present_flag = (M4OSA_Bool)H264MCS_getBits(p_bs, 1);
+
+    return M4NO_ERROR;
+}
+
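+/* Merges the encoder SPS/PPS with the clip's decoder specific info. The DSI is
+walked assuming the avcC (AVCDecoderConfigurationRecord) layout: byte 4 carries
+lengthSizeMinusOne in its two LSBs, byte 5 the SPS count in its five LSBs, each
+SPS/PPS is preceded by a 2-byte big-endian size, and a one-byte PPS count sits
+between the SPS and PPS arrays. */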
+M4OSA_ERR H264MCS_ProcessSPS_PPS( NSWAVC_MCS_t *instance, M4OSA_UInt8 *inbuff,
+                                 M4OSA_Int32 inbuf_size )
+{
+    ComBitStreamMCS_t *p_bs, bs;
+    ComBitStreamMCS_t *p_bs1, bs1;
+
+    M4OSA_UInt8 nalu_info = 0;
+    M4OSA_Int32 forbidden_bit, nal_ref_idc, nal_unit_type;
+    M4OSA_Int32 first_mb_in_slice, slice_type, pic_parameter_set_id = 0,
+        frame_num;
+    M4OSA_Int32 seq_parameter_set_id;
+    M4OSA_UInt8 temp1, temp2, temp3, temp4;
+    M4OSA_Int32 temp_frame_num;
+    M4OSA_Int32 bitstoDiacard, bytes;
+    M4OSA_UInt32 mask_bits = 0xFFFFFFFF;
+    M4OSA_Int32 new_bytes, init_bit_pos;
+    M4OSA_UInt32 nal_size = 0;
+    M4OSA_UInt32 cnt, cnt1;
+    M4OSA_UInt32 outbuffpos = 0;
+    M4OSA_UInt32 nal_size_low16, nal_size_high16;
+    M4OSA_UInt32 frame_size = 0;
+    M4OSA_UInt32 temp = 0;
+    M4OSA_UInt8 *lClipDSI;
+    M4OSA_UInt8 *lClipDSI_PPS_start;
+    M4OSA_UInt32 lClipDSI_PPS_offset = 0;
+
+    M4OSA_UInt8 *lPPS_Buffer = M4OSA_NULL;
+    M4OSA_UInt32 lPPS_Buffer_Size = 0;
+
+    M4OSA_UInt32 lSize, lSize1;
+    M4OSA_UInt32 lActiveSPSID_Clip;
+    M4OSA_UInt32 lClipPPSRemBits = 0;
+
+    M4OSA_UInt32 lEncoder_SPSID = 0;
+    M4OSA_UInt32 lEncoder_PPSID = 0;
+    M4OSA_UInt32 lEncoderPPSRemBits = 0;
+    M4OSA_UInt32 lFound = 0;
+    M4OSA_UInt32 size;
+
+    M4OSA_UInt8 Clip_SPSID[32] = { 0 };
+    M4OSA_UInt8 Clip_UsedSPSID[32] = { 0 };
+    M4OSA_UInt8 Clip_PPSID[256] = { 0 };
+    M4OSA_UInt8 Clip_SPSID_in_PPS[256] = { 0 };
+    M4OSA_UInt8 Clip_UsedPPSID[256] = { 0 };
+    M4OSA_ERR err = M4NO_ERROR;
+
+    p_bs = &bs;
+    p_bs1 = &bs1;
+
+    /* Find the active SPS ID */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == instance), M4ERR_PARAMETER,
+        "H264MCS_ProcessSPS_PPS: instance is M4OSA_NULL");
+
+    switch( instance->m_pDecoderSpecificInfo[4] & 0x3 )
+    {
+        case 0:
+            instance->m_Num_Bytes_NALUnitLength = 1;
+            break;
+
+        case 1:
+            instance->m_Num_Bytes_NALUnitLength = 2;
+            break;
+
+        case 3:
+            //Note: Current code supports only this...
+            instance->m_Num_Bytes_NALUnitLength = 4;
+            break;
+    }
+
+    instance->m_encoder_SPS_Cnt = instance->m_pDecoderSpecificInfo[5] & 0x1F;
+
+    lClipDSI = instance->m_pDecoderSpecificInfo + 6;
+
+    lClipDSI_PPS_offset = 6;
+
+    for ( cnt = 0; cnt < instance->m_encoder_SPS_Cnt; cnt++ )
+    {
+        lSize = ( lClipDSI[0] << 8) + lClipDSI[1];
+        lClipDSI = lClipDSI + 2;
+
+        p_bs->Buffer = (M4OSA_UInt8 *)(lClipDSI + 4);
+        DecBitStreamReset_MCS(p_bs, lSize - 4);
+
+        Clip_SPSID[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
+        Clip_UsedSPSID[Clip_SPSID[cnt]] = 1;
+
+        lClipDSI = lClipDSI + lSize;
+        lClipDSI_PPS_offset = lClipDSI_PPS_offset + 2 + lSize;
+    }
+
+    instance->m_encoder_PPS_Cnt = lClipDSI[0];
+    lClipDSI = lClipDSI + 1;
+
+    lClipDSI_PPS_start = lClipDSI;
+
+    for ( cnt = 0; cnt < instance->m_encoder_PPS_Cnt; cnt++ )
+    {
+        lSize = ( lClipDSI[0] << 8) + lClipDSI[1];
+        lClipDSI = lClipDSI + 2;
+
+        p_bs->Buffer = (M4OSA_UInt8 *)(lClipDSI + 1);
+        DecBitStreamReset_MCS(p_bs, lSize - 1);
+
+        Clip_PPSID[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
+        Clip_UsedPPSID[Clip_PPSID[cnt]] = 1;
+        Clip_SPSID_in_PPS[Clip_PPSID[cnt]] =
+            H264MCS_DecVLCReadExpGolombCode(p_bs);
+
+        lClipDSI = lClipDSI + lSize;
+    }
+
+    /* Find the clip SPS ID used at the cut start frame */
+    while( ( (M4OSA_Int32)frame_size) < inbuf_size )
+    {
+        mask_bits = 0xFFFFFFFF;
+        p_bs->Buffer = (M4OSA_UInt8 *)(inbuff + frame_size);
+
+        switch( instance->m_Num_Bytes_NALUnitLength )
+        {
+            case 1:
+                nal_size = (unsigned char)p_bs->Buffer[0];
+                nalu_info = (unsigned char)p_bs->Buffer[1];
+                p_bs->Buffer = p_bs->Buffer + 2;
+
+                break;
+
+            case 2:
+                nal_size_high16 = ( p_bs->Buffer[0] << 8) + p_bs->Buffer[1];
+                nal_size = nal_size_high16;
+                nalu_info = (unsigned char)p_bs->Buffer[2];
+                p_bs->Buffer = p_bs->Buffer + 3;
+
+                break;
+
+            case 4:
+                nal_size_high16 = ( p_bs->Buffer[0] << 8) + p_bs->Buffer[1];
+                nal_size_low16 = ( p_bs->Buffer[2] << 8) + p_bs->Buffer[3];
+                nal_size = ( nal_size_high16 << 16) + nal_size_low16;
+                nalu_info = (unsigned char)p_bs->Buffer[4];
+                p_bs->Buffer = p_bs->Buffer + 5;
+
+                break;
+        }
+
+        p_bs->bitPos = 0;
+        p_bs->lastTotalBits = 0;
+        p_bs->numBitsInBuffer =
+            ( inbuf_size - frame_size - instance->m_Num_Bytes_NALUnitLength - 1)
+            << 3;
+        p_bs->readableBytesInBuffer =
+            inbuf_size - frame_size - instance->m_Num_Bytes_NALUnitLength - 1;
+
+        p_bs->ui32TempBuff = 0;
+        p_bs->i8BitCnt = 0;
+        p_bs->pui8BfrPtr = (M4OSA_Int8 *)p_bs->Buffer;
+        p_bs->ui32LastTwoBytes = 0xFFFFFFFF;
+
+        H264MCS_getBits(p_bs, 0);
+
+        frame_size += nal_size + instance->m_Num_Bytes_NALUnitLength;
+
+        forbidden_bit = ( nalu_info >> 7) & 1;
+        nal_ref_idc = ( nalu_info >> 5) & 3;
+        nal_unit_type = (nalu_info) &0x1f;
+
+        if( nal_unit_type == 8 )
+        {
+            M4OSA_TRACE1_0("H264MCS_ProcessSPS_PPS() Error: PPS");
+            return err;
+        }
+
+        if( nal_unit_type == 7 )
+        {
+            /*SPS Packet */
+            M4OSA_TRACE1_0("H264MCS_ProcessSPS_PPS() Error: SPS");
+            return err;
+        }
+
+        if( ( nal_unit_type == 1) || (nal_unit_type == 5) )
+        {
+            first_mb_in_slice = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            slice_type = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            pic_parameter_set_id = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            break;
+        }
+    }
+
+    lActiveSPSID_Clip = Clip_SPSID_in_PPS[pic_parameter_set_id];
+
+    instance->final_SPS_ID = lActiveSPSID_Clip;
+    /* Do we need to add encoder PPS to clip PPS */
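+    /* The loop below compares the encoder PPS with every clip PPS that refers
+    to the active SPS: the pic_parameter_set_id / seq_parameter_set_id fields
+    and the RBSP stop bit are skipped, and the remaining payloads are compared
+    bit for bit. If an identical PPS exists it is reused (lFound == 1);
+    otherwise a new PPS with the first unused id is appended to the DSI. */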
+
+    lClipDSI = lClipDSI_PPS_start;
+
+    for ( cnt = 0; cnt < instance->m_encoder_PPS_Cnt; cnt++ )
+    {
+        lSize = ( lClipDSI[0] << 8) + lClipDSI[1];
+        lClipDSI = lClipDSI + 2;
+
+        if( lActiveSPSID_Clip == Clip_SPSID_in_PPS[Clip_PPSID[cnt]] )
+        {
+            lPPS_Buffer = lClipDSI + 1;
+            lPPS_Buffer_Size = lSize - 1;
+
+            p_bs->Buffer = (M4OSA_UInt8 *)(lClipDSI + 1);
+            DecBitStreamReset_MCS(p_bs, lSize - 1);
+
+            Clip_PPSID[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            Clip_UsedPPSID[Clip_SPSID[cnt]] = 1;
+            Clip_SPSID_in_PPS[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            lClipPPSRemBits = ( lSize - 1) << 3;
+            lClipPPSRemBits -= p_bs->bitPos;
+
+            temp = lClipDSI[lSize - 1];
+
+            cnt1 = 0;
+
+            while( ( temp & 0x1) == 0 )
+            {
+                cnt1++;
+                temp = temp >> 1;
+            }
+            cnt1++;
+            lClipPPSRemBits -= cnt1;
+
+            lSize1 = instance->m_encoderPPSSize - 1;
+            p_bs1->Buffer = (M4OSA_UInt8 *)(instance->m_pEncoderPPS + 1);
+            DecBitStreamReset_MCS(p_bs1, lSize1);
+
+            lEncoder_PPSID = H264MCS_DecVLCReadExpGolombCode(p_bs1);
+            lEncoder_SPSID = H264MCS_DecVLCReadExpGolombCode(p_bs1);
+
+            lEncoderPPSRemBits = ( lSize1) << 3;
+            lEncoderPPSRemBits -= p_bs1->bitPos;
+
+            temp = instance->m_pEncoderPPS[lSize1];
+
+            cnt1 = 0;
+
+            while( ( temp & 0x1) == 0 )
+            {
+                cnt1++;
+                temp = temp >> 1;
+            }
+            cnt1++;
+            lEncoderPPSRemBits -= cnt1;
+
+            if( lEncoderPPSRemBits == lClipPPSRemBits )
+            {
+                while( lEncoderPPSRemBits > 8 )
+                {
+                    temp1 = H264MCS_getBits(p_bs, 8);
+                    temp2 = H264MCS_getBits(p_bs1, 8);
+                    lEncoderPPSRemBits = lEncoderPPSRemBits - 8;
+
+                    if( temp1 != temp2 )
+                    {
+                        break;
+                    }
+                }
+
+                if( lEncoderPPSRemBits < 8 )
+                {
+                    if( lEncoderPPSRemBits )
+                    {
+                        temp1 = H264MCS_getBits(p_bs, lEncoderPPSRemBits);
+                        temp2 = H264MCS_getBits(p_bs1, lEncoderPPSRemBits);
+
+                        if( temp1 == temp2 )
+                        {
+                            lFound = 1;
+                        }
+                    }
+                    else
+                    {
+                        lFound = 1;
+                    }
+                }
+                break;
+            }
+        }
+
+        lClipDSI = lClipDSI + lSize;
+    }
+
+    /* Form the final SPS and PPS data */
+
+    if( lFound == 1 )
+    {
+        /* No need to add PPS */
+        instance->final_PPS_ID = Clip_PPSID[cnt];
+
+        instance->m_pFinalDSI =
+            (M4OSA_UInt8 *)M4OSA_malloc(instance->m_decoderSpecificInfoSize,
+            M4MCS, (M4OSA_Char *)"instance->m_pFinalDSI");
+
+        if( instance->m_pFinalDSI == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("instance->m_pFinalDSI: allocation error");
+            return M4ERR_ALLOC;
+        }
+
+        instance->m_pFinalDSISize = instance->m_decoderSpecificInfoSize;
+        M4OSA_memcpy((M4OSA_MemAddr8)instance->m_pFinalDSI,
+            (M4OSA_MemAddr8)instance->m_pDecoderSpecificInfo,
+            instance->m_decoderSpecificInfoSize);
+    }
+    else
+    {
+        /* ADD PPS */
+        /* find the free PPS ID */
+
+        cnt = 0;
+
+        while( Clip_UsedPPSID[cnt] )
+        {
+            cnt++;
+        }
+        instance->final_PPS_ID = cnt;
+
+        size = instance->m_decoderSpecificInfoSize + instance->m_encoderPPSSize
+            + 10;
+
+        instance->m_pFinalDSI = (M4OSA_UInt8 *)M4OSA_malloc(size, M4MCS,
+            (M4OSA_Char *)"instance->m_pFinalDSI");
+
+        if( instance->m_pFinalDSI == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("instance->m_pFinalDSI: allocation error");
+            return M4ERR_ALLOC;
+        }
+
+        M4OSA_memcpy((M4OSA_MemAddr8)instance->m_pFinalDSI,
+            (M4OSA_MemAddr8)instance->m_pDecoderSpecificInfo,
+            instance->m_decoderSpecificInfoSize);
+
+        temp = instance->m_pFinalDSI[lClipDSI_PPS_offset];
+        temp = temp + 1;
+        instance->m_pFinalDSI[lClipDSI_PPS_offset] = temp;
+
+        //temp = instance->m_pEncoderPPS[0];
+        lSize1 = instance->m_encoderPPSSize - 1;
+        p_bs1->Buffer = (M4OSA_UInt8 *)(instance->m_pEncoderPPS + 1);
+        DecBitStreamReset_MCS(p_bs1, lSize1);
+
+        lEncoder_PPSID = H264MCS_DecVLCReadExpGolombCode(p_bs1);
+        lEncoder_SPSID = H264MCS_DecVLCReadExpGolombCode(p_bs1);
+
+        lEncoderPPSRemBits = ( lSize1) << 3;
+        lEncoderPPSRemBits -= p_bs1->bitPos;
+
+        temp = instance->m_pEncoderPPS[lSize1];
+
+        cnt1 = 0;
+
+        while( ( temp & 0x1) == 0 )
+        {
+            cnt1++;
+            temp = temp >> 1;
+        }
+        cnt1++;
+        lEncoderPPSRemBits -= cnt1;
+
+        instance->m_pFinalDSI[instance->m_decoderSpecificInfoSize + 2] =
+            instance->m_pEncoderPPS[0];
+
+        NSWAVCMCS_initBitstream(&instance->encbs);
+        instance->encbs.streamBuffer =
+            &(instance->m_pFinalDSI[instance->m_decoderSpecificInfoSize + 3]);
+        lPPS_Buffer = instance->encbs.streamBuffer;
+
+        NSWAVCMCS_uExpVLC(&instance->encbs, instance->final_PPS_ID);
+        NSWAVCMCS_uExpVLC(&instance->encbs, instance->final_SPS_ID);
+
+        while( lEncoderPPSRemBits > 8 )
+        {
+            temp = H264MCS_getBits(p_bs1, 8);
+            NSWAVCMCS_putBits(&instance->encbs, temp, 8);
+            lEncoderPPSRemBits = lEncoderPPSRemBits - 8;
+        }
+
+        if( lEncoderPPSRemBits )
+        {
+            temp = H264MCS_getBits(p_bs1, lEncoderPPSRemBits);
+            NSWAVCMCS_putBits(&instance->encbs, temp, lEncoderPPSRemBits);
+        }
+        NSWAVCMCS_putRbspTbits(&instance->encbs);
+
+        temp = instance->encbs.byteCnt;
+        lPPS_Buffer_Size = temp;
+        temp = temp + 1;
+
+        instance->m_pFinalDSI[instance->m_decoderSpecificInfoSize] =
+            ( temp >> 8) & 0xFF;
+        instance->m_pFinalDSI[instance->m_decoderSpecificInfoSize + 1] =
+            (temp) &0xFF;
+        instance->m_pFinalDSISize =
+            instance->m_decoderSpecificInfoSize + 2 + temp;
+    }
+
+    /* Decode the clip SPS */
+
+    lClipDSI = instance->m_pDecoderSpecificInfo + 6;
+
+    lClipDSI_PPS_offset = 6;
+
+    for ( cnt = 0; cnt < instance->m_encoder_SPS_Cnt; cnt++ )
+    {
+        lSize = ( lClipDSI[0] << 8) + lClipDSI[1];
+        lClipDSI = lClipDSI + 2;
+
+        if( Clip_SPSID[cnt] == instance->final_SPS_ID )
+        {
+            p_bs->Buffer = (M4OSA_UInt8 *)(lClipDSI + 1);
+            DecBitStreamReset_MCS(p_bs, lSize - 1);
+
+            DecSPSMCS(p_bs, &instance->clip_sps);
+
+            //Clip_SPSID[cnt] = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            //Clip_UsedSPSID[Clip_SPSID[cnt]] = 1;
+            break;
+        }
+
+        lClipDSI = lClipDSI + lSize;
+    }
+
+    /* Decode encoder SPS */
+    p_bs->Buffer = (M4OSA_UInt8 *)(instance->m_pEncoderSPS + 1);
+    DecBitStreamReset_MCS(p_bs, instance->m_encoderSPSSize - 1);
+    DecSPSMCS(p_bs, &instance->encoder_sps);
+
+    if( instance->encoder_sps.num_ref_frames
+    > instance->clip_sps.num_ref_frames )
+    {
+        return 100; //not supported
+    }
+
+    p_bs->Buffer = (M4OSA_UInt8 *)lPPS_Buffer;
+    DecBitStreamReset_MCS(p_bs, lPPS_Buffer_Size);
+    DecPPSMCS(p_bs, &instance->encoder_pps);
+
+    instance->frame_count = 0;
+    instance->frame_mod_count =
+        1 << (instance->clip_sps.log2_max_frame_num_minus4 + 4);
+
+    instance->POC_lsb = 0;
+    instance->POC_lsb_mod =
+        1 << (instance->clip_sps.log2_max_pic_order_cnt_lsb_minus4 + 4);
+
+    return M4NO_ERROR;
+}
+
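+/* Converts one access unit from Annex-B form (a 00 00 00 01 start code is
+forced into the first four bytes of inbuff) into length-prefixed NALUs in
+outbuff. Slice headers of non-IDR slices are rewritten with the clip SPS/PPS
+field widths, all other NALUs are copied unchanged, and processing stops once
+an IDR NALU is encountered (instance->is_done is set). */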
+M4OSA_ERR H264MCS_ProcessNALU( NSWAVC_MCS_t *ainstance, M4OSA_UInt8 *inbuff,
+                               M4OSA_Int32 inbuf_size, M4OSA_UInt8 *outbuff,
+                               M4OSA_Int32 *outbuf_size )
+{
+    ComBitStreamMCS_t *p_bs, bs;
+    NSWAVC_MCS_t *instance;
+    M4OSA_UInt8 nalu_info;
+    M4OSA_Int32 forbidden_bit, nal_ref_idc, nal_unit_type;
+    M4OSA_Int32 first_mb_in_slice, slice_type, pic_parameter_set_id, frame_num;
+    M4OSA_Int32 seq_parameter_set_id;
+    M4OSA_UInt8 temp1, temp2, temp3, temp4;
+    M4OSA_Int32 temp_frame_num;
+    M4OSA_Int32 bitstoDiacard, bytes;
+    M4OSA_UInt32 mask_bits = 0xFFFFFFFF;
+    M4OSA_Int32 new_bytes, init_bit_pos;
+    M4OSA_UInt32 nal_size;
+    M4OSA_UInt32 cnt;
+    M4OSA_UInt32 outbuffpos = 0;
+    //#ifndef DGR_FIX // + new
+    M4OSA_UInt32 nal_size_low16, nal_size_high16;
+    //#endif // + end new
+    M4OSA_UInt32 frame_size = 0;
+    M4OSA_UInt32 temp = 0;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt8 *buff;
+
+    p_bs = &bs;
+    instance = (NSWAVC_MCS_t *)ainstance;
+    M4OSA_DEBUG_IF2((M4OSA_NULL == instance), M4ERR_PARAMETER,
+        "H264MCS_ProcessNALU: instance is M4OSA_NULL");
+
+    if( instance->is_done )
+        return err;
+
+    inbuff[0] = 0x00;
+    inbuff[1] = 0x00;
+    inbuff[2] = 0x00;
+    inbuff[3] = 0x01;
+
+
+    while( (M4OSA_Int32)frame_size < inbuf_size )
+    {
+        mask_bits = 0xFFFFFFFF;
+        p_bs->Buffer = (M4OSA_UInt8 *)(inbuff + frame_size);
+
+
+        nalu_info = (unsigned char)p_bs->Buffer[4];
+
+        outbuff[outbuffpos] = p_bs->Buffer[0];
+        outbuff[outbuffpos + 1] = p_bs->Buffer[1];
+        outbuff[outbuffpos + 2] = p_bs->Buffer[2];
+        outbuff[outbuffpos + 3] = p_bs->Buffer[3];
+        outbuff[outbuffpos + 4] = p_bs->Buffer[4];
+
+        p_bs->Buffer = p_bs->Buffer + 5;
+
+        p_bs->bitPos = 0;
+        p_bs->lastTotalBits = 0;
+        p_bs->numBitsInBuffer = ( inbuf_size - frame_size - 5) << 3;
+        p_bs->readableBytesInBuffer = inbuf_size - frame_size - 5;
+
+        p_bs->ui32TempBuff = 0;
+        p_bs->i8BitCnt = 0;
+        p_bs->pui8BfrPtr = (M4OSA_Int8 *)p_bs->Buffer;
+        p_bs->ui32LastTwoBytes = 0xFFFFFFFF;
+
+        H264MCS_getBits(p_bs, 0);
+
+
+
+        nal_size = inbuf_size - frame_size - 4;
+        buff = inbuff + frame_size + 4;
+
+        while( nal_size > 4 )
+        {
+            if( ( buff[0] == 0x00) && (buff[1] == 0x00) && (buff[2] == 0x00)
+                && (buff[3] == 0x01) )
+            {
+                break;
+            }
+            buff = buff + 1;
+            nal_size = nal_size - 1;
+        }
+
+        if( nal_size <= 4 )
+        {
+            nal_size = 0;
+        }
+        nal_size = ( inbuf_size - frame_size - 4) - nal_size;
+
+        //      M4OSA_TRACE1_3("H264MCS_ProcessNALU frame  input buff size = %d  current position
+        //= %d   nal size = %d",
+        //  inbuf_size, frame_size,  nal_size + 4);
+        frame_size += nal_size + 4;
+
+
+
+        forbidden_bit = ( nalu_info >> 7) & 1;
+        nal_ref_idc = ( nalu_info >> 5) & 3;
+        nal_unit_type = (nalu_info) &0x1f;
+
+        if( nal_unit_type == 5 )
+        {
+            /* IDR NALU: mark processing as done and stop */
+            instance->is_done = 1;
+            return err;
+        }
+
+        NSWAVCMCS_initBitstream(&instance->encbs);
+        instance->encbs.streamBuffer = outbuff + outbuffpos + 5;
+
+        if( nal_unit_type == 8 )
+        {
+            M4OSA_TRACE1_0("H264MCS_ProcessNALU() Error: PPS");
+            return err;
+        }
+
+        if( nal_unit_type == 7 )
+        {
+            /*SPS Packet */
+            M4OSA_TRACE1_0("H264MCS_ProcessNALU() Error: SPS");
+            return 0;
+        }
+
+        if( (nal_unit_type == 5) )
+        {
+            instance->frame_count = 0;
+            instance->POC_lsb = 0;
+        }
+
+        if( (nal_unit_type == 1) )
+        {
+            first_mb_in_slice = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            NSWAVCMCS_uExpVLC(&instance->encbs, first_mb_in_slice);
+
+            slice_type = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            NSWAVCMCS_uExpVLC(&instance->encbs, slice_type);
+
+            pic_parameter_set_id = H264MCS_DecVLCReadExpGolombCode(p_bs);
+            NSWAVCMCS_uExpVLC(&instance->encbs, pic_parameter_set_id);
+
+            temp = H264MCS_getBits(p_bs,
+                instance->clip_sps.log2_max_frame_num_minus4 + 4);
+            NSWAVCMCS_putBits(&instance->encbs, instance->frame_count,
+                instance->clip_sps.log2_max_frame_num_minus4 + 4);
+
+            // In Baseline Profile: frame_mbs_only_flag should be ON
+
+            if( nal_unit_type == 5 )
+            {
+                temp = H264MCS_DecVLCReadExpGolombCode(p_bs);
+                NSWAVCMCS_uExpVLC(&instance->encbs, temp);
+            }
+
+            if( instance->clip_sps.pic_order_cnt_type == 0 )
+            {
+                temp = H264MCS_getBits(p_bs,
+                    instance->clip_sps.log2_max_pic_order_cnt_lsb_minus4
+                    + 4);
+                NSWAVCMCS_putBits(&instance->encbs, instance->POC_lsb,
+                    instance->clip_sps.log2_max_pic_order_cnt_lsb_minus4 + 4);
+            }
+
+            if( ( instance->clip_sps.pic_order_cnt_type == 1)
+                && (instance->clip_sps.delta_pic_order_always_zero_flag) )
+            {
+                temp = H264MCS_DecVLCReadSignedExpGolombCode(p_bs);
+                NSWAVCMCS_sExpVLC(&instance->encbs, temp);
+            }
+
+            cnt = p_bs->bitPos & 0x7;
+
+            if( cnt )
+            {
+                cnt = 8 - cnt;
+                temp = H264MCS_getBits(p_bs, cnt);
+                NSWAVCMCS_putBits(&instance->encbs, temp, cnt);
+            }
+
+            cnt = p_bs->bitPos >> 3;
+
+            while( cnt < (nal_size - 2) )
+            {
+                temp = H264MCS_getBits(p_bs, 8);
+                NSWAVCMCS_putBits(&instance->encbs, temp, 8);
+                cnt = p_bs->bitPos >> 3;
+            }
+
+            temp = H264MCS_getBits(p_bs, 8);
+
+            if( temp != 0 )
+            {
+                cnt = 0;
+
+                while( ( temp & 0x1) == 0 )
+                {
+                    cnt++;
+                    temp = temp >> 1;
+                }
+                cnt++;
+                temp = temp >> 1;
+
+                if( 8 - cnt )
+                {
+                    NSWAVCMCS_putBits(&instance->encbs, temp, (8 - cnt));
+                }
+
+                NSWAVCMCS_putRbspTbits(&instance->encbs);
+            }
+            else
+            {
+                if( instance->encbs.bitPos % 8 )
+                {
+                    NSWAVCMCS_putBits(&instance->encbs, 0,
+                        (8 - instance->encbs.bitPos % 8));
+                }
+            }
+
+            temp = instance->encbs.byteCnt;
+            temp = temp + 1;
+
+            outbuff[outbuffpos] = (M4OSA_UInt8)(( temp >> 24) & 0xFF);
+            outbuff[outbuffpos + 1] = (M4OSA_UInt8)(( temp >> 16) & 0xFF);
+            outbuff[outbuffpos + 2] = (M4OSA_UInt8)(( temp >> 8) & 0xFF);
+            outbuff[outbuffpos + 3] = (M4OSA_UInt8)((temp) &0xFF);
+            outbuffpos = outbuffpos + temp + 4;
+        }
+        else
+        {
+            p_bs->Buffer = p_bs->Buffer - 5;
+            M4OSA_memcpy((M4OSA_MemAddr8) &outbuff[outbuffpos],
+                (M4OSA_MemAddr8)p_bs->Buffer, nal_size + 4);
+
+            outbuff[outbuffpos] = (M4OSA_UInt8)(( nal_size >> 24) & 0xFF);
+            outbuff[outbuffpos + 1] = (M4OSA_UInt8)(( nal_size >> 16) & 0xFF);
+            outbuff[outbuffpos + 2] = (M4OSA_UInt8)(( nal_size >> 8) & 0xFF);
+            outbuff[outbuffpos + 3] = (M4OSA_UInt8)((nal_size) & 0xFF);
+
+            outbuffpos = outbuffpos + nal_size + 4;
+        }
+    }
+
+    *outbuf_size = outbuffpos;
+
+    instance->POC_lsb = instance->POC_lsb + 1;
+
+    if( instance->POC_lsb == instance->POC_lsb_mod )
+    {
+        instance->POC_lsb = 0;
+    }
+    instance->frame_count = instance->frame_count + 1;
+
+    if( instance->frame_count == instance->frame_mod_count )
+    {
+        instance->frame_count = 0;
+    }
+    return M4NO_ERROR;
+}
+
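+/* In-place conversion from an Annex-B byte stream to the length-prefixed NAL
+format: each 00 00 00 01 start code is overwritten with the 4-byte big-endian
+size of the NALU that follows it, e.g. 00 00 00 01 <0x19-byte NALU> becomes
+00 00 00 19 <0x19-byte NALU>. */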
+M4OSA_ERR   M4MCS_convetFromByteStreamtoNALStream(  M4OSA_UInt8 *inbuff,
+                                                    M4OSA_UInt32 inbuf_size )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 framesize = 0;
+    M4OSA_UInt32 nal_size =0;
+    M4OSA_UInt8 *buff;
+
+
+    while( framesize < inbuf_size )
+    {
+        nal_size = inbuf_size - framesize - 4;
+        buff = inbuff + framesize + 4;
+
+        while( nal_size > 4 )
+        {
+            if( ( buff[0] == 0x00) && (buff[1] == 0x00) && (buff[2] == 0x00)
+                && (buff[3] == 0x01) )
+            {
+                break;
+            }
+            buff = buff + 1;
+            nal_size = nal_size - 1;
+        }
+
+        if( nal_size <= 4 )
+        {
+            nal_size = 0;
+        }
+        nal_size = ( inbuf_size - framesize - 4) - nal_size;
+
+        inbuff[framesize + 0] = (M4OSA_UInt8)(( nal_size >> 24) & 0xFF);
+        inbuff[framesize + 1] = (M4OSA_UInt8)(( nal_size >> 16) & 0xFF);
+        inbuff[framesize + 2] = (M4OSA_UInt8)(( nal_size >> 8) & 0xFF);
+        inbuff[framesize + 3] = (M4OSA_UInt8)((nal_size) & 0xFF);
+        framesize += nal_size + 4;
+
+        M4OSA_TRACE1_2("M4MCS_convetFromByteStreamtoNALStream framesize = %x nalsize = %x",
+            framesize, nal_size);
+    }
+
+    return  err;
+}
+
+
+M4OSA_ERR H264MCS_Freeinstance( NSWAVC_MCS_t *instance )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_DEBUG_IF2((M4OSA_NULL == instance), M4ERR_PARAMETER,
+        "H264MCS_Freeinstance: instance is M4OSA_NULL");
+
+    if( M4OSA_NULL != instance->encoder_pps.slice_group_id )
+    {
+        M4OSA_free((M4OSA_MemAddr32)instance->encoder_pps.slice_group_id);
+    }
+
+    if( M4OSA_NULL != instance->p_encoder_sps )
+    {
+        M4OSA_free((M4OSA_MemAddr32)instance->p_encoder_sps);
+        instance->p_encoder_sps = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != instance->p_encoder_pps )
+    {
+        M4OSA_free((M4OSA_MemAddr32)instance->p_encoder_pps);
+        instance->p_encoder_pps = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != instance->m_pFinalDSI )
+    {
+        M4OSA_free((M4OSA_MemAddr32)instance->m_pFinalDSI);
+        instance->m_pFinalDSI = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != instance )
+    {
+        M4OSA_free((M4OSA_MemAddr32)instance);
+        instance = M4OSA_NULL;
+    }
+
+    return err;
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getVersion(M4_VersionInfo* pVersionInfo);
+ * @brief    Get the MCS version.
+ * @note Can be called anytime. Do not need any context.
+ * @param    pVersionInfo        (OUT) Pointer to a version info structure
+ * @return   M4NO_ERROR:         No error
+ * @return   M4ERR_PARAMETER:    pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getVersion( M4_VersionInfo *pVersionInfo )
+{
+    M4OSA_TRACE3_1("M4MCS_getVersion called with pVersionInfo=0x%x",
+        pVersionInfo);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pVersionInfo), M4ERR_PARAMETER,
+        "M4MCS_getVersion: pVersionInfo is M4OSA_NULL");
+
+    pVersionInfo->m_major = M4MCS_VERSION_MAJOR;
+    pVersionInfo->m_minor = M4MCS_VERSION_MINOR;
+    pVersionInfo->m_revision = M4MCS_VERSION_REVISION;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_getVersion(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_init(M4MCS_Context* pContext, M4OSA_FileReadPointer* pFileReadPtrFct,
+ *                      M4OSA_FileWriterPointer* pFileWritePtrFct);
+ * @brief    Initializes the MCS (allocates an execution context).
+ * @note
+ * @param    pContext            (OUT) Pointer to the MCS context to allocate
+ * @param    pFileReadPtrFct     (IN) Pointer to OSAL file reader functions
+ * @param    pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
+ * @return   M4NO_ERROR:         No error
+ * @return   M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (If Debug Level >= 2)
+ * @return   M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4MCS_init( M4MCS_Context *pContext,
+                     M4OSA_FileReadPointer *pFileReadPtrFct,
+                     M4OSA_FileWriterPointer *pFileWritePtrFct )
+{
+    M4MCS_InternalContext *pC = M4OSA_NULL;
+    M4OSA_ERR err;
+
+    M4OSA_TRACE3_3(
+        "M4MCS_init called with pContext=0x%x, pFileReadPtrFct=0x%x, pFileWritePtrFct=0x%x",
+        pContext, pFileReadPtrFct, pFileWritePtrFct);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_init: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
+        "M4MCS_init: pFileReadPtrFct is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWritePtrFct), M4ERR_PARAMETER,
+        "M4MCS_init: pFileWritePtrFct is M4OSA_NULL");
+
+    /**
+    * Allocate the MCS context and return it to the user */
+    pC = (M4MCS_InternalContext *)M4OSA_malloc(sizeof(M4MCS_InternalContext),
+        M4MCS, (M4OSA_Char *)"M4MCS_InternalContext");
+    *pContext = pC;
+
+    if( M4OSA_NULL == pC )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_init(): unable to allocate M4MCS_InternalContext, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /**
+    * Init the context. All pointers must be initialized to M4OSA_NULL
+    * because CleanUp() can be called just after Init(). */
+    pC->State = M4MCS_kState_CREATED;
+    pC->pOsaFileReadPtr = pFileReadPtrFct;
+    pC->pOsaFileWritPtr = pFileWritePtrFct;
+    pC->VideoState = M4MCS_kStreamState_NOSTREAM;
+    pC->AudioState = M4MCS_kStreamState_NOSTREAM;
+    pC->noaudio = M4OSA_FALSE;
+    pC->novideo = M4OSA_FALSE;
+    pC->uiProgress = 0;
+
+    /**
+    * Reader stuff */
+    pC->pInputFile = M4OSA_NULL;
+    pC->InputFileType = M4VIDEOEDITING_kFileType_Unsupported;
+    pC->bFileOpenedInFastMode = M4OSA_FALSE;
+    pC->pReaderContext = M4OSA_NULL;
+    pC->pReaderVideoStream = M4OSA_NULL;
+    pC->pReaderAudioStream = M4OSA_NULL;
+    pC->bUnsupportedVideoFound = M4OSA_FALSE;
+    pC->bUnsupportedAudioFound = M4OSA_FALSE;
+    pC->iAudioCtsOffset = 0;
+    /* First temporary video AU to have more precise end video cut*/
+    pC->ReaderVideoAU1.m_structSize = 0;
+    /* Second temporary video AU to have more precise end video cut*/
+    pC->ReaderVideoAU2.m_structSize = 0;
+    pC->ReaderAudioAU1.m_structSize = 0;
+    pC->ReaderAudioAU2.m_structSize = 0;
+    pC->m_audioAUDuration = 0;
+    pC->m_pDataAddress1 = M4OSA_NULL;
+    pC->m_pDataAddress2 = M4OSA_NULL;
+    /* First temporary video AU data to have more precise end video cut*/
+    pC->m_pDataVideoAddress1 = M4OSA_NULL;
+    /* Second temporary video AU data to have more precise end video cut*/
+    pC->m_pDataVideoAddress2 = M4OSA_NULL;
+
+    /**
+    * Video decoder stuff */
+    pC->pViDecCtxt = M4OSA_NULL;
+    pC->dViDecStartingCts = 0.0;
+    pC->iVideoBeginDecIncr = 0;
+    pC->dViDecCurrentCts = 0.0;
+    pC->dCtsIncrement = 0.0;
+    pC->isRenderDup = M4OSA_FALSE;
+
+    /**
+    * Video encoder stuff */
+    pC->pViEncCtxt = M4OSA_NULL;
+    pC->pPreResizeFrame = M4OSA_NULL;
+    pC->uiEncVideoBitrate = 0;
+    pC->bActivateEmp = M4OSA_FALSE;
+    pC->encoderState = M4MCS_kNoEncoder;
+
+    /**
+    * Audio decoder stuff */
+    pC->pAudioDecCtxt = M4OSA_NULL;
+    pC->AudioDecBufferIn.m_dataAddress = M4OSA_NULL;
+    pC->AudioDecBufferIn.m_bufferSize = 0;
+    pC->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+    pC->AudioDecBufferOut.m_bufferSize = 0;
+    pC->pPosInDecBufferOut = M4OSA_NULL;
+    /**
+    * Ssrc stuff */
+    pC->pSsrcBufferIn = M4OSA_NULL;
+    pC->pSsrcBufferOut = M4OSA_NULL;
+    pC->pPosInSsrcBufferIn = M4OSA_NULL;
+    pC->pPosInSsrcBufferOut = M4OSA_NULL;
+    pC->iSsrcNbSamplIn = 0;
+    pC->iSsrcNbSamplOut = 0;
+    pC->SsrcScratch = M4OSA_NULL;
+
+    /**
+    * Audio encoder */
+    pC->pAudioEncCtxt = M4OSA_NULL;
+    pC->pAudioEncDSI.infoSize = 0;
+    pC->pAudioEncDSI.pInfo = M4OSA_NULL;
+    pC->pAudioEncoderBuffer = M4OSA_NULL;
+    pC->pPosInAudioEncoderBuffer = M4OSA_NULL;
+    pC->audioEncoderGranularity = 0;
+
+    /**
+    * Writer stuff */
+    pC->pOutputFile = M4OSA_NULL;
+    pC->pTemporaryFile = M4OSA_NULL;
+    pC->pWriterContext = M4OSA_NULL;
+    pC->uiVideoAUCount = 0;
+    pC->uiVideoMaxAuSize = 0;
+    pC->uiVideoMaxChunckSize = 0;
+    pC->uiAudioAUCount = 0;
+    pC->uiAudioMaxAuSize = 0;
+
+    pC->uiAudioCts = 0;
+    pC->b_isRawWriter = M4OSA_FALSE;
+    pC->pOutputPCMfile = M4OSA_NULL;
+
+    /* Encoding config */
+    pC->EncodingVideoFormat = M4ENCODER_kNULL; /**< No format set yet */
+    pC->EncodingWidth = 0;                     /**< No size set yet */
+    pC->EncodingHeight = 0;                    /**< No size set yet */
+    pC->EncodingVideoFramerate = 0;            /**< No framerate set yet */
+
+    pC->uiBeginCutTime = 0;                    /**< No begin cut */
+    pC->uiEndCutTime = 0;                      /**< No end cut */
+    pC->uiMaxFileSize = 0;                     /**< No limit */
+    pC->uiAudioBitrate =
+        M4VIDEOEDITING_kUndefinedBitrate; /**< No bitrate set yet */
+    pC->uiVideoBitrate =
+        M4VIDEOEDITING_kUndefinedBitrate; /**< No bitrate set yet */
+
+#ifdef TIMESCALE_BUG
+
+    /* By default, timescale is not modified; if this value is not 0, timescale is
+     * modified without decode/encode process
+     */
+    pC->uiVideoTimescale = 0;
+    pC->uiTimescaleLength = 0;
+    pC->uiOrigVideoTimescale = 0;
+    pC->uiOrigTimescaleLength = 0;
+
+#endif
+
+    pC->WriterVideoStream.streamType = M4SYS_kVideoUnknown;
+    pC->WriterVideoStreamInfo.Header.pBuf = M4OSA_NULL;
+    pC->WriterAudioStream.streamType = M4SYS_kAudioUnknown;
+
+    pC->outputVideoTimescale = 0;
+
+    /*FB 2008/10/20: add media rendering parameter and AIR context to keep media aspect ratio*/
+    pC->MediaRendering = M4MCS_kResizing;
+    pC->m_air_context = M4OSA_NULL;
+    /**/
+
+    /**
+    * FlB 2009.03.04: add audio Effects*/
+    pC->pEffects = M4OSA_NULL;
+    pC->nbEffects = 0;
+    pC->pActiveEffectNumber = -1;
+    /**/
+
+    /*
+    * Reset pointers for media and codecs interfaces */
+    err = M4MCS_clearInterfaceTables(pC);
+    M4ERR_CHECK_RETURN(err);
+
+    /*
+    *  Call the media and codecs subscription module */
+    err = M4MCS_subscribeMediaAndCodec(pC);
+    M4ERR_CHECK_RETURN(err);
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+    /**
+    * Initialize the Still picture part of MCS*/
+
+    err = M4MCS_stillPicInit(pC, pFileReadPtrFct, pFileWritePtrFct);
+    M4ERR_CHECK_RETURN(err);
+
+    pC->m_bIsStillPicture = M4OSA_FALSE;
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    pC->m_pInstance = M4OSA_NULL;
+    pC->H264MCSTempBuffer = M4OSA_NULL;
+    pC->H264MCSTempBufferSize = 0;
+    pC->H264MCSTempBufferDataSize = 0;
+    pC->bH264Trim = M4OSA_FALSE;
+
+    /* Flag to get the last decoded frame cts */
+    pC->bLastDecodedFrameCTS = M4OSA_FALSE;
+
+    if( pC->m_pInstance == M4OSA_NULL )
+    {
+        err = H264MCS_Getinstance(&pC->m_pInstance);
+        M4ERR_CHECK_RETURN(err);
+    }
+    pC->bExtOMXAudDecoder = M4OSA_FALSE;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_init(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
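+
+/* Usage sketch (illustrative only, not part of the library build): creating an
+ * MCS context. How the OSAL file read/write function-pointer structures are
+ * filled is platform integration work and is assumed here, not an MCS API.
+ *
+ *     M4MCS_Context           mcsCtxt   = M4OSA_NULL;
+ *     M4OSA_FileReadPointer   fileRead;   // filled with the platform OSAL file functions
+ *     M4OSA_FileWriterPointer fileWrite;  // filled with the platform OSAL file functions
+ *     M4OSA_ERR               err;
+ *
+ *     err = M4MCS_init(&mcsCtxt, &fileRead, &fileWrite);
+ *     // on M4ERR_ALLOC (or any other error) mcsCtxt must not be used
+ */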
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_open(M4MCS_Context pContext, M4OSA_Void* pFileIn,
+ *                      M4VIDEOEDITING_FileType InputFileType,
+ *                      M4OSA_Void* pFileOut, M4OSA_Void* pTempFile);
+ * @brief   Set the MCS input and output files.
+ * @note    It opens the input file, but the output file is not created yet.
+ * @param   pContext            (IN) MCS context
+ * @param   pFileIn             (IN) Input file to transcode (The type of this parameter
+ *                                 (URL, pipe...) depends on the OSAL implementation).
+ * @param   InputFileType       (IN) Container type (.3gp, .amr, .mp3...) of the input file.
+ * @param   pFileOut            (IN) Output file to create  (The type of this parameter
+ *                                    (URL, pipe...) depends on the OSAL implementation).
+ * @param   pTempFile           (IN) Temporary file for the constant memory writer to
+ *                                     store metadata ("moov.bin").
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ * @return  M4ERR_ALLOC:        There is no more available memory
+ * @return  M4ERR_FILE_NOT_FOUND:   The input file has not been found
+ * @return  M4MCS_ERR_INVALID_INPUT_FILE:   The input file is not a valid file, or is corrupted
+ * @return  M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM:  The input file contains no
+ *                                supported audio or video stream
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_open( M4MCS_Context pContext, M4OSA_Void *pFileIn,
+                     M4VIDEOEDITING_FileType InputFileType, M4OSA_Void *pFileOut,
+                     M4OSA_Void *pTempFile )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+    M4OSA_ERR err;
+
+    M4READER_MediaFamily mediaFamily;
+    M4_StreamHandler *pStreamHandler;
+
+    M4OSA_TRACE2_3(
+        "M4MCS_open called with pContext=0x%x, pFileIn=0x%x, pFileOut=0x%x",
+        pContext, pFileIn, pFileOut);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_open: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileIn), M4ERR_PARAMETER,
+        "M4MCS_open: pFileIn is M4OSA_NULL");
+
+    if( ( InputFileType == M4VIDEOEDITING_kFileType_JPG)
+        || (InputFileType == M4VIDEOEDITING_kFileType_PNG)
+        || (InputFileType == M4VIDEOEDITING_kFileType_GIF)
+        || (InputFileType == M4VIDEOEDITING_kFileType_BMP) )
+    {
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+        /**
+        * Indicate that we must use the still picture functions*/
+
+        pC->m_bIsStillPicture = M4OSA_TRUE;
+
+        /**
+        * Call the still picture MCS functions*/
+        return M4MCS_stillPicOpen(pC, pFileIn, InputFileType, pFileOut);
+
+#else
+
+        M4OSA_TRACE1_0(
+            "M4MCS_open: Still picture is not supported with this version of MCS");
+        return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM;
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    }
+
+    /**
+    * Check state automaton */
+    if( M4MCS_kState_CREATED != pC->State )
+    {
+        M4OSA_TRACE1_1("M4MCS_open(): Wrong State (%d), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /* Copy function input parameters into our context */
+    pC->pInputFile = pFileIn;
+    pC->InputFileType = InputFileType;
+    pC->pOutputFile = pFileOut;
+    pC->pTemporaryFile = pTempFile;
+    pC->uiProgress = 0;
+
+    /***********************************/
+    /* Open input file with the reader */
+    /***********************************/
+
+    err = M4MCS_setCurrentReader(pContext, pC->InputFileType);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Reset reader related variables */
+    pC->VideoState = M4MCS_kStreamState_NOSTREAM;
+    pC->AudioState = M4MCS_kStreamState_NOSTREAM;
+    pC->pReaderVideoStream = M4OSA_NULL;
+    pC->pReaderAudioStream = M4OSA_NULL;
+
+    /*******************************************************/
+    /* Initializes the reader shell and open the data file */
+    /*******************************************************/
+    err = pC->m_pReader->m_pFctCreate(&pC->pReaderContext);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1("M4MCS_open(): m_pReader->m_pFctCreate returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Link the reader interface to the reader context */
+    pC->m_pReaderDataIt->m_readerContext = pC->pReaderContext;
+
+    /**
+    * Set the reader shell file access functions */
+    err = pC->m_pReader->m_pFctSetOption(pC->pReaderContext,
+        M4READER_kOptionID_SetOsaFileReaderFctsPtr,
+        (M4OSA_DataOption)pC->pOsaFileReadPtr);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1("M4MCS_open(): m_pReader->m_pFctSetOption returns 0x%x",
+            err);
+        return err;
+    }
+
+#ifdef M4MCS_WITH_FAST_OPEN
+
+    if( M4OSA_FALSE == pC->bFileOpenedInFastMode )
+    {
+        M4OSA_Bool trueValue = M4OSA_TRUE;
+
+        /* For first call use fast open mode */
+        err = pC->m_pReader->m_pFctSetOption(pC->pReaderContext,
+            M4READER_3GP_kOptionID_FastOpenMode, &trueValue);
+
+        if( M4NO_ERROR == err )
+        {
+            pC->bFileOpenedInFastMode = M4OSA_TRUE;
+        }
+        else
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_open(): M4READER_3GP_kOptionID_FastOpenMode returns 0x%x",
+                err);
+
+            if( ( ( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID) == err)
+                || (( (M4OSA_UInt32)M4ERR_PARAMETER) == err) )
+            {
+                /* Not fatal, some readers may not support fast open mode */
+                pC->bFileOpenedInFastMode = M4OSA_FALSE;
+            }
+            else
+                return err;
+        }
+    }
+    else
+    {
+        M4OSA_Bool falseValue = M4OSA_FALSE;
+
+        /* For second call use normal open mode */
+        err = pC->m_pReader->m_pFctSetOption(pC->pReaderContext,
+            M4READER_3GP_kOptionID_FastOpenMode, &falseValue);
+    }
+
+#endif /* M4MCS_WITH_FAST_OPEN */
+
+    /**
+    * Open the input file */
+
+    err = pC->m_pReader->m_pFctOpen(pC->pReaderContext, pC->pInputFile);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_UInt32 uiDummy, uiCoreId;
+        M4OSA_TRACE1_1("M4MCS_open(): m_pReader->m_pFctOpen returns 0x%x", err);
+
+        /**
+        * If the error comes from the core reader, we change it to a public MCS error */
+        M4OSA_ERR_SPLIT(err, uiDummy, uiCoreId, uiDummy);
+
+        if( M4MP4_READER == uiCoreId )
+        {
+            M4OSA_TRACE1_0(
+                "M4MCS_open(): returning M4MCS_ERR_INVALID_INPUT_FILE");
+            return M4MCS_ERR_INVALID_INPUT_FILE;
+        }
+        return err;
+    }
+
+    /**
+    * Get the streams from the input file */
+    while( M4NO_ERROR == err )
+    {
+        err =
+            pC->m_pReader->m_pFctGetNextStream( pC->pReaderContext,
+                                                &mediaFamily,
+                                                &pStreamHandler);
+
+        /**
+        * In case we found a BIFS stream or something else...*/
+        if( ( err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE))
+            || (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)) )
+        {
+            err = M4NO_ERROR;
+            continue;
+        }
+
+        if( M4NO_ERROR == err ) /**< One stream found */
+        {
+            /**
+            * Found the first video stream */
+            if( ( M4READER_kMediaFamilyVideo == mediaFamily)
+                && (M4OSA_NULL == pC->pReaderVideoStream) )
+            {
+                if( ( M4DA_StreamTypeVideoH263 == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeVideoMpeg4
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeVideoMpeg4Avc
+                    == pStreamHandler->m_streamType) )
+                {
+                    M4OSA_TRACE3_0(
+                        "M4MCS_open(): Found an H263, MPEG-4 or H264 video stream in input 3gpp clip");
+
+                    /**
+                    * Keep pointer to the video stream */
+                    pC->pReaderVideoStream =
+                        (M4_VideoStreamHandler *)pStreamHandler;
+                    pC->bUnsupportedVideoFound = M4OSA_FALSE;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+
+                    /**
+                    * Init our video stream state variable */
+                    pC->VideoState = M4MCS_kStreamState_STARTED;
+
+                    /**
+                    * Reset the stream reader */
+                    err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
+                        (M4_StreamHandler *)pC->pReaderVideoStream);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4MCS_open():\
+                            m_pReader->m_pFctReset(video) returns 0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Initializes an access Unit */
+                    err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+                        pStreamHandler, &pC->ReaderVideoAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4MCS_open():\
+                            m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+                else /**< Not H263, MPEG-4 nor H264: unsupported video format */
+                {
+                    M4OSA_TRACE1_1("M4MCS_open(): Found an unsupported video stream (0x%x) in\
+                                   input 3gpp clip",
+                                   pStreamHandler->m_streamType);
+
+                    pC->bUnsupportedVideoFound = M4OSA_TRUE;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+                }
+                /* +CRLV6775 -H.264 Trimming */
+                if( M4DA_StreamTypeVideoMpeg4Avc
+                    == pStreamHandler->m_streamType )
+                {
+
+                    // SPS and PPS are stored as per the 3gp file format
+                    pC->m_pInstance->m_pDecoderSpecificInfo =
+                        pStreamHandler->m_pH264DecoderSpecificInfo;
+                    pC->m_pInstance->m_decoderSpecificInfoSize =
+                        pStreamHandler->m_H264decoderSpecificInfoSize;
+                }
+                /* -CRLV6775 -H.264 Trimming */
+            }
+            /**
+            * Found the first audio stream */
+            else if( ( M4READER_kMediaFamilyAudio == mediaFamily)
+                && (M4OSA_NULL == pC->pReaderAudioStream) )
+            {
+                if( ( M4DA_StreamTypeAudioAmrNarrowBand
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioAac == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioMp3
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioEvrc
+                    == pStreamHandler->m_streamType) )
+                {
+                    M4OSA_TRACE3_0(
+                        "M4MCS_open(): Found an AMR-NB, AAC, MP3 or EVRC audio stream in input clip");
+
+                    /**
+                    * Keep pointer to the audio stream */
+                    pC->pReaderAudioStream =
+                        (M4_AudioStreamHandler *)pStreamHandler;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+                    pC->bUnsupportedAudioFound = M4OSA_FALSE;
+
+                    /**
+                    * Init our audio stream state variable */
+                    pC->AudioState = M4MCS_kStreamState_STARTED;
+
+                    /**
+                    * Reset the stream reader */
+                    err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
+                        (M4_StreamHandler *)pC->pReaderAudioStream);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4MCS_open():\
+                            m_pReader->m_pFctReset(audio) returns 0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Initializes an access Unit */
+                    err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+                        pStreamHandler, &pC->ReaderAudioAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4MCS_open():\
+                            m_pReader->m_pFctFillAuStruct(audio) returns 0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Output max AU size is equal to input max AU size (this value
+                    * will be changed if there is audio transcoding) */
+                    pC->uiAudioMaxAuSize = pStreamHandler->m_maxAUSize;
+                }
+                else
+                {
+                    /**< Not AMR-NB, AAC, MP3 nor EVRC (AMR-WB, WAV...) */
+                    M4OSA_TRACE1_1("M4MCS_open(): Found an unsupported audio stream (0x%x) in \
+                                   input 3gpp clip", pStreamHandler->m_streamType);
+
+                    pC->bUnsupportedAudioFound = M4OSA_TRUE;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+                }
+            }
+        }
+    } /**< end of while (M4NO_ERROR == err) */
+
+    /**
+    * Check we found at least one supported stream */
+    if( ( M4OSA_NULL == pC->pReaderVideoStream)
+        && (M4OSA_NULL == pC->pReaderAudioStream) )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_open(): returning M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM");
+        return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM;
+    }
+
+    if( pC->VideoState == M4MCS_kStreamState_STARTED )
+    {
+        err = M4MCS_setCurrentVideoDecoder(pContext,
+            pC->pReaderVideoStream->m_basicProperties.m_streamType);
+        /*FB 2009-02-09: the error is checked and returned only if video codecs are compiled;
+        otherwise only audio is used, which is why the editing process can continue*/
+#ifndef M4MCS_AUDIOONLY
+
+        M4ERR_CHECK_RETURN(err);
+
+#else
+
+        if( ( M4NO_ERROR != err) && (M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED != err) )
+        {
+            M4ERR_CHECK_RETURN(err);
+        }
+
+#endif /*M4MCS_AUDIOONLY*/
+
+    }
+
+    if( pC->AudioState == M4MCS_kStreamState_STARTED )
+    {
+        //EVRC
+        if( M4DA_StreamTypeAudioEvrc
+            != pStreamHandler->
+            m_streamType ) /* decoder not supported yet, but null encoding is allowed */
+        {
+            err = M4MCS_setCurrentAudioDecoder(pContext,
+                pC->pReaderAudioStream->m_basicProperties.m_streamType);
+            M4ERR_CHECK_RETURN(err);
+        }
+    }
+
+    /**
+    * Get the audio and video stream properties */
+    err = M4MCS_intGetInputClipProperties(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_open(): M4MCS_intGetInputClipProperties returns 0x%x", err);
+        return err;
+    }
+
+    /**
+    * Set the begin cut decoding increment according to the input frame rate */
+    if( 0. != pC->InputFileProperties.fAverageFrameRate ) /**< sanity check */
+    {
+        pC->iVideoBeginDecIncr = (M4OSA_Int32)(3000.
+            / pC->InputFileProperties.
+            fAverageFrameRate); /**< about 3 frames */
+    }
+    else
+    {
+        pC->iVideoBeginDecIncr =
+            200; /**< default value: 200 milliseconds (3 frames @ 15fps)*/
+    }
+
+    /**
+    * Update state automaton */
+    pC->State = M4MCS_kState_OPENED;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_open(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
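+
+/* Usage sketch (illustrative only, not part of the library build): opening the
+ * input clip after M4MCS_init(). The file paths are assumptions; mcsCtxt and err
+ * are as in the M4MCS_init() sketch above.
+ *
+ *     err = M4MCS_open(mcsCtxt, (M4OSA_Void *)"/sdcard/input.3gp",
+ *                      M4VIDEOEDITING_kFileType_3GPP,
+ *                      (M4OSA_Void *)"/sdcard/output.3gp",
+ *                      (M4OSA_Void *)"/sdcard/moov.bin");
+ *     // on success the MCS is in the OPENED state, and the input stream
+ *     // properties can be queried with M4MCS_getInputFileProperties()
+ */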
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_step(M4MCS_Context pContext, M4OSA_UInt8 *pProgress);
+ * @brief   Perform one step of transcoding.
+ * @note
+ * @param   pContext            (IN) MCS context
+ * @param   pProgress           (OUT) Progress percentage (0 to 100) of the transcoding
+ * @note    pProgress must be a valid address.
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    One of the parameters is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ * @return  M4MCS_WAR_TRANSCODING_DONE: Transcoding is over, user should now call M4MCS_close()
+ * @return  M4MCS_ERR_AUDIO_CONVERSION_FAILED: The audio conversion (AAC to AMR-NB or MP3) failed
+ * @return  M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY: The input file contains an AAC audio track
+ *                                 with an invalid sampling frequency (should never happen)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_step( M4MCS_Context pContext, M4OSA_UInt8 *pProgress )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+
+    M4OSA_TRACE3_1("M4MCS_step called with pContext=0x%x", pContext);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_step: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pProgress), M4ERR_PARAMETER,
+        "M4MCS_step: pProgress is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    if( pC->m_bIsStillPicture )
+    {
+        /**
+        * Call the still picture MCS functions*/
+        return M4MCS_stillPicStep(pC, pProgress);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /**
+    * Check state automaton */
+
+    switch( pC->State )
+    {
+        case M4MCS_kState_READY:
+            *pProgress = 0;
+            return M4MCS_intStepSet(pC);
+            break;
+
+        case M4MCS_kState_BEGINVIDEOJUMP:
+            *pProgress = pC->uiProgress;
+            return M4MCS_intStepBeginVideoJump(pC);
+            break;
+
+        case M4MCS_kState_BEGINVIDEODECODE:
+            *pProgress = pC->uiProgress;
+            return M4MCS_intStepBeginVideoDecode(pC);
+            break;
+
+        case M4MCS_kState_PROCESSING:
+            {
+                M4OSA_ERR err = M4NO_ERROR;
+                err = M4MCS_intStepEncoding(pC, pProgress);
+                /* Save progress info in case of pause */
+                pC->uiProgress = *pProgress;
+                return err;
+            }
+            break;
+
+        default: /**< State error */
+            M4OSA_TRACE1_1(
+                "M4MCS_step(): Wrong State (%d), returning M4ERR_STATE",
+                pC->State);
+            return M4ERR_STATE;
+    }
+}
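+
+/* Usage sketch (illustrative only, not part of the library build): the typical
+ * transcoding loop once M4MCS_setOutputParams() and the other encoding settings
+ * have been applied; mcsCtxt and err are as in the M4MCS_init() sketch above.
+ *
+ *     M4OSA_UInt8 progress = 0;
+ *
+ *     do
+ *     {
+ *         err = M4MCS_step(mcsCtxt, &progress);
+ *         // progress is 0..100 and can be reported to the application
+ *     } while( M4NO_ERROR == err );
+ *
+ *     if( M4MCS_WAR_TRANSCODING_DONE == err )
+ *     {
+ *         err = M4MCS_close(mcsCtxt);   // finalize the output 3GPP file
+ *     }
+ */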
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_pause(M4MCS_Context pContext);
+ * @brief   Pause the transcoding i.e. release the (external hardware) video decoder.
+ * @note    This function is not needed if no hardware accelerators are used.
+ *          In that case, pausing the MCS is simply achieved by temporarily suspending
+ *          the M4MCS_step function calls.
+ * @param   pContext            (IN) MCS context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_pause( M4MCS_Context pContext )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+    M4OSA_ERR err;
+
+    M4OSA_TRACE2_1("M4MCS_pause called with pContext=0x%x", pContext);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_pause: pContext is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    if( pC->m_bIsStillPicture )
+    {
+        /**
+        * Call the corresponding still picture MCS function*/
+        return M4MCS_stillPicPause(pC);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /**
+    * Check state automaton */
+
+    switch( pC->State )
+    {
+        case M4MCS_kState_BEGINVIDEOJUMP: /**< the video decoder has been created,
+                                            we must destroy it */
+        case M4MCS_kState_BEGINVIDEODECODE: /**< the video decoder is in use, we must destroy it */
+        case M4MCS_kState_PROCESSING: /**< the video decoder is in use, we must destroy it */
+                    /**< OK, nothing to do here */
+            break;
+
+        default: /**< State error */
+            M4OSA_TRACE1_1(
+                "M4MCS_pause(): Wrong State (%d), returning M4ERR_STATE",
+                pC->State);
+            return M4ERR_STATE;
+    }
+
+    /**
+    * Set the CTS at which we will resume the decoding */
+    if( pC->dViDecCurrentCts > pC->dViDecStartingCts )
+    {
+        /**
+        * We passed the starting CTS, so the resume target is the current CTS */
+        pC->dViDecStartingCts = pC->dViDecCurrentCts;
+    }
+    else {
+        /**
+        * We haven't passed the starting CTS yet, so the resume target is still the starting CTS
+        * --> nothing to do in the else block */
+    }
+
+    /**
+    * Free video decoder stuff */
+    if( M4OSA_NULL != pC->pViDecCtxt )
+    {
+        err = pC->m_pVideoDecoder->m_pFctDestroy(pC->pViDecCtxt);
+        pC->pViDecCtxt = M4OSA_NULL;
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_pause: m_pVideoDecoder->pFctDestroy returns 0x%x", err);
+            return err;
+        }
+    }
+
+    /**
+    * State transition */
+    pC->State = M4MCS_kState_PAUSED;
+
+    M4OSA_TRACE3_0("M4MCS_pause(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_resume(M4MCS_Context pContext);
+ * @brief   Resume the transcoding after a pause (see M4MCS_pause).
+ * @note    This function is not needed if no hardware accelerators are used.
+ *          In that case, resuming the MCS is simply achieved by calling
+ *          the M4MCS_step function.
+ * @param   pContext            (IN) MCS context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_resume( M4MCS_Context pContext )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+    M4OSA_ERR err;
+
+    M4OSA_TRACE2_1("M4MCS_resume called with pContext=0x%x", pContext);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_resume: pContext is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    if( pC->m_bIsStillPicture )
+    {
+        /**
+        * Call the corresponding still picture MCS function*/
+        return M4MCS_stillPicResume(pC);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /**
+    * Check state automaton */
+
+    switch( pC->State )
+    {
+        case M4MCS_kState_PAUSED: /**< OK, nothing to do here */
+            break;
+
+        default:                  /**< State error */
+            M4OSA_TRACE1_1(
+                "M4MCS_resume(): Wrong State (%d), returning M4ERR_STATE",
+                pC->State);
+            return M4ERR_STATE;
+            break;
+    }
+
+    /**
+    * Prepare the video decoder */
+    err = M4MCS_intPrepareVideoDecoder(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_resume(): M4MCS_intPrepareVideoDecoder() returns 0x%x", err);
+        return err;
+    }
+
+    /**
+    * State transition */
+    if( 0.0 == pC->dViDecStartingCts )
+    {
+        /**
+        * We are still at the beginning of the decoded stream, no need to jump, we can proceed */
+        pC->State = M4MCS_kState_PROCESSING;
+    }
+    else
+    {
+        /**
+        * Jumping */
+        pC->State = M4MCS_kState_BEGINVIDEOJUMP;
+    }
+
+    M4OSA_TRACE3_0("M4MCS_resume(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
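+
+/* Usage sketch (illustrative only, not part of the library build): releasing the
+ * (external hardware) video decoder during a pause, then resuming; mcsCtxt and
+ * err are as in the M4MCS_init() sketch above.
+ *
+ *     err = M4MCS_pause(mcsCtxt);    // valid while the MCS is transcoding
+ *     // ... the decoder is now released ...
+ *     err = M4MCS_resume(mcsCtxt);   // re-creates the decoder; M4MCS_step()
+ *                                    // can then be called again
+ */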
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_close(M4MCS_Context pContext);
+ * @brief    Finish the MCS transcoding.
+ * @note The output 3GPP file is ready to be played after this call
+ * @param    pContext            (IN) MCS context
+ * @return   M4NO_ERROR:         No error
+ * @return   M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
+ * @return   M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_close( M4MCS_Context pContext )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+    M4ENCODER_Header *encHeader;
+    M4SYS_StreamIDmemAddr streamHeader;
+
+    M4OSA_ERR err = M4NO_ERROR, err2;
+
+    M4OSA_TRACE2_1("M4MCS_close called with pContext=0x%x", pContext);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_close: pContext is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    if( pC->m_bIsStillPicture )
+    {
+        /**
+        * Indicate that current file is no longer a still picture*/
+        pC->m_bIsStillPicture = M4OSA_FALSE;
+
+        /**
+        * Call the corresponding still picture MCS function*/
+        return M4MCS_stillPicClose(pC);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /**
+    * Check state automaton */
+
+    if( M4MCS_kState_FINISHED != pC->State )
+    {
+        M4OSA_TRACE1_1("M4MCS_close(): Wrong State (%d), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /* Close the encoder before the writer to be certain all the AUs have been written and we can
+    get the DSI. */
+
+    /* Has the encoder actually been started? Don't stop it if that's not the case. */
+    if( M4MCS_kEncoderRunning == pC->encoderState )
+    {
+        if( pC->pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
+        {
+            err = pC->pVideoEncoderGlobalFcts->pFctStop(pC->pViEncCtxt);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_close: pVideoEncoderGlobalFcts->pFctStop returns 0x%x",
+                    err);
+                /* Well... how the heck do you handle a failed cleanup? */
+            }
+        }
+
+        pC->encoderState = M4MCS_kEncoderStopped;
+    }
+
+    /* Has the encoder actually been opened? Don't close it if that's not the case. */
+    if( M4MCS_kEncoderStopped == pC->encoderState )
+    {
+        err = pC->pVideoEncoderGlobalFcts->pFctClose(pC->pViEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_close: pVideoEncoderGlobalFcts->pFctClose returns 0x%x",
+                err);
+            /* Well... how the heck do you handle a failed cleanup? */
+        }
+
+        pC->encoderState = M4MCS_kEncoderClosed;
+    }
+
+    /**********************************/
+    /******** Close the writer ********/
+    /**********************************/
+    if( M4OSA_NULL != pC->pWriterContext ) /* happens in state _SET */
+    {
+        /* HW encoder: fetch the DSI from the shell video encoder, and feed it to the writer before
+        closing it. */
+
+        if( pC->novideo != M4OSA_TRUE )
+        {
+            if( ( M4ENCODER_kMPEG4 == pC->EncodingVideoFormat)
+                || (M4ENCODER_kH264 == pC->EncodingVideoFormat) )
+            {
+                err = pC->pVideoEncoderGlobalFcts->pFctGetOption(pC->pViEncCtxt,
+                    M4ENCODER_kOptionID_EncoderHeader,
+                    (M4OSA_DataOption) &encHeader);
+
+                if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4MCS_close: failed to get the encoder header (err 0x%x)",
+                        err);
+                    /**< no return here, we still have stuff to deallocate after close, even
+                     if it fails. */
+                }
+                else
+                {
+                    /* Set this header (DSI) in the writer, only if it was
+                    successfully retrieved from the encoder */
+                    streamHeader.streamID = M4MCS_WRITER_VIDEO_STREAM_ID;
+                    streamHeader.size = encHeader->Size;
+                    streamHeader.addr = (M4OSA_MemAddr32)encHeader->pBuf;
+
+                    M4OSA_TRACE1_0("calling set option");
+                    err = pC->pWriterGlobalFcts->pFctSetOption(
+                        pC->pWriterContext, M4WRITER_kDSI, &streamHeader);
+                    M4OSA_TRACE1_0("set option done");
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4MCS_close: failed to set the DSI in the writer (err 0x%x)",
+                            err);
+                    }
+                }
+            }
+
+            if( ( M4OSA_TRUE == pC->bH264Trim)
+                && (M4ENCODER_kNULL == pC->EncodingVideoFormat) )
+            {
+                if(pC->uiBeginCutTime == 0)
+                {
+                    M4OSA_TRACE1_1("Decoder specific info size = %d",
+                        pC->m_pInstance->m_decoderSpecificInfoSize);
+                    pC->m_pInstance->m_pFinalDSISize =
+                        pC->m_pInstance->m_decoderSpecificInfoSize;
+                    M4OSA_TRACE1_1("Decoder specific info pointer = 0x%x",
+                        (M4OSA_MemAddr8)pC->m_pInstance->m_pDecoderSpecificInfo);
+
+                    pC->m_pInstance->m_pFinalDSI =
+                        (M4OSA_UInt8 *)M4OSA_malloc(pC->m_pInstance-> \
+                        m_decoderSpecificInfoSize, M4MCS,
+                        (M4OSA_Char *)"instance->m_pFinalDSI");
+
+                    if( pC->m_pInstance->m_pFinalDSI == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0("instance->m_pFinalDSI: allocation error");
+                        return M4ERR_ALLOC;
+                    }
+                    M4OSA_memcpy((M4OSA_MemAddr8)pC->m_pInstance->m_pFinalDSI,
+                        (M4OSA_MemAddr8)pC-> \
+                        m_pInstance->m_pDecoderSpecificInfo,
+                        pC->m_pInstance->m_decoderSpecificInfoSize);
+                }
+                streamHeader.streamID = M4MCS_WRITER_VIDEO_STREAM_ID;
+                streamHeader.size = pC->m_pInstance->m_pFinalDSISize;
+                streamHeader.addr =
+                    (M4OSA_MemAddr32)pC->m_pInstance->m_pFinalDSI;
+                M4OSA_TRACE1_0("calling set option");
+                err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+                    M4WRITER_kDSI, &streamHeader);
+                M4OSA_TRACE1_0("set option done");
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4MCS_close: failed to set the DSI in the writer (err 0x%x)",
+                        err);
+                }
+            }
+        }
+        /* Write and close the 3GP output file */
+        err2 = pC->pWriterGlobalFcts->pFctCloseWrite(pC->pWriterContext);
+        pC->pWriterContext = M4OSA_NULL;
+
+        if( M4NO_ERROR != err2 )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_close: pWriterGlobalFcts->pFctCloseWrite returns 0x%x",
+                err2);
+
+            if( M4NO_ERROR == err )
+                err = err2;
+            /**< no return here, we still have stuff to deallocate after close, even if it fails.*/
+        }
+    }
+
+    /* Close output PCM file if needed */
+    if( pC->pOutputPCMfile != M4OSA_NULL )
+    {
+        pC->pOsaFileWritPtr->closeWrite(pC->pOutputPCMfile);
+        pC->pOutputPCMfile = M4OSA_NULL;
+    }
+
+    /*FlB 2009.03.04: add audio effects,
+    free effects list*/
+    if( M4OSA_NULL != pC->pEffects )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pEffects);
+        pC->pEffects = M4OSA_NULL;
+    }
+    pC->nbEffects = 0;
+    pC->pActiveEffectNumber = -1;
+
+    /**
+    * State transition */
+    pC->State = M4MCS_kState_CLOSED;
+
+    if( M4OSA_NULL != pC->H264MCSTempBuffer )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->H264MCSTempBuffer);
+        pC->H264MCSTempBuffer = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->m_pInstance )
+    {
+        err = H264MCS_Freeinstance(pC->m_pInstance);
+        pC->m_pInstance = M4OSA_NULL;
+    }
+
+    M4OSA_TRACE3_0("M4MCS_close(): returning M4NO_ERROR");
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_cleanUp(M4MCS_Context pContext);
+ * @brief    Free all resources used by the MCS.
+ * @note The context is no longer valid after this call
+ * @param    pContext            (IN) MCS context
+ * @return   M4NO_ERROR:         No error
+ * @return   M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
+ * @return   M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_cleanUp( M4MCS_Context pContext )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+
+    M4OSA_TRACE3_1("M4MCS_cleanUp called with pContext=0x%x", pContext);
+
+#ifdef MCS_DUMP_PCM_TO_FILE
+
+    if( file_au_reader )
+    {
+        fclose(file_au_reader);
+        file_au_reader = NULL;
+    }
+
+    if( file_pcm_decoder )
+    {
+        fclose(file_pcm_decoder);
+        file_pcm_decoder = NULL;
+    }
+
+    if( file_pcm_encoder )
+    {
+        fclose(file_pcm_encoder);
+        file_pcm_encoder = NULL;
+    }
+
+#endif
+
+    /**
+    * Check input parameter */
+
+    if( M4OSA_NULL == pContext )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_cleanUp: pContext is M4OSA_NULL, returning M4ERR_PARAMETER");
+        return M4ERR_PARAMETER;
+    }
+
+    /**
+    * Check state automaton */
+    if( M4MCS_kState_CLOSED != pC->State )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_cleanUp(): Wrong State (%d), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /* ----- Free video encoder stuff, if needed ----- */
+
+    if( ( M4OSA_NULL != pC->pViEncCtxt)
+        && (M4OSA_NULL != pC->pVideoEncoderGlobalFcts) )
+    {
+        err = pC->pVideoEncoderGlobalFcts->pFctCleanup(pC->pViEncCtxt);
+        pC->pViEncCtxt = M4OSA_NULL;
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_cleanUp: pVideoEncoderGlobalFcts->pFctCleanup returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pC->encoderState = M4MCS_kNoEncoder;
+    }
+
+    /**
+    * In the H263 case, we allocated our own DSI buffer */
+    if( ( M4ENCODER_kH263 == pC->EncodingVideoFormat)
+        && (M4OSA_NULL != pC->WriterVideoStreamInfo.Header.pBuf) )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->WriterVideoStreamInfo.Header.pBuf);
+        pC->WriterVideoStreamInfo.Header.pBuf = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pPreResizeFrame )
+    {
+        if( M4OSA_NULL != pC->pPreResizeFrame[0].pac_data )
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->pPreResizeFrame[0].pac_data);
+            pC->pPreResizeFrame[0].pac_data = M4OSA_NULL;
+        }
+
+        if( M4OSA_NULL != pC->pPreResizeFrame[1].pac_data )
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->pPreResizeFrame[1].pac_data);
+            pC->pPreResizeFrame[1].pac_data = M4OSA_NULL;
+        }
+
+        if( M4OSA_NULL != pC->pPreResizeFrame[2].pac_data )
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->pPreResizeFrame[2].pac_data);
+            pC->pPreResizeFrame[2].pac_data = M4OSA_NULL;
+        }
+        M4OSA_free((M4OSA_MemAddr32)pC->pPreResizeFrame);
+        pC->pPreResizeFrame = M4OSA_NULL;
+    }
+
+    /* ----- Free the ssrc stuff ----- */
+
+    if( M4OSA_NULL != pC->SsrcScratch )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->SsrcScratch);
+        pC->SsrcScratch = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pSsrcBufferIn )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pSsrcBufferIn);
+        pC->pSsrcBufferIn = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pSsrcBufferOut )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pSsrcBufferOut);
+        pC->pSsrcBufferOut = M4OSA_NULL;
+    }
+
+    /* ----- Free the audio encoder stuff ----- */
+
+    if( M4OSA_NULL != pC->pAudioEncCtxt )
+    {
+        err = pC->pAudioEncoderGlobalFcts->pFctClose(pC->pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_cleanUp: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        err = pC->pAudioEncoderGlobalFcts->pFctCleanUp(pC->pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_cleanUp: pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pC->pAudioEncCtxt = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pAudioEncoderBuffer )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pAudioEncoderBuffer);
+        pC->pAudioEncoderBuffer = M4OSA_NULL;
+    }
+
+    /* ----- Free all other stuff ----- */
+
+    /**
+    * Free the readers and the decoders */
+    M4MCS_intCleanUp_ReadersDecoders(pC);
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+    /**
+    * Free the still picture resources */
+
+    M4MCS_stillPicCleanUp(pC);
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /**
+    * Free the shells interfaces */
+
+    M4MCS_unRegisterAllWriters(pContext);
+    M4MCS_unRegisterAllEncoders(pContext);
+    M4MCS_unRegisterAllReaders(pContext);
+    M4MCS_unRegisterAllDecoders(pContext);
+
+    /**
+    * Free the context itself */
+    M4OSA_free((M4OSA_MemAddr32)pC);
+    pC = M4OSA_NULL;
+
+    M4OSA_TRACE3_0("M4MCS_cleanUp(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
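+
+/* Usage sketch (illustrative only, not part of the library build): the normal
+ * teardown order once transcoding is finished; mcsCtxt and err are as in the
+ * M4MCS_init() sketch above.
+ *
+ *     err = M4MCS_close(mcsCtxt);    // requires the FINISHED state
+ *     err = M4MCS_cleanUp(mcsCtxt);  // requires the CLOSED state, frees the context
+ *     mcsCtxt = M4OSA_NULL;          // the context is no longer valid
+ */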
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_abort(M4MCS_Context pContext);
+ * @brief    Finish the MCS transcoding and free all resources used by the MCS
+ *          whatever the state is.
+ * @note    The context is no longer valid after this call
+ * @param    pContext            (IN) MCS context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_abort( M4MCS_Context pContext )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_ERR err1 = M4NO_ERROR;
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+
+    if( M4OSA_NULL == pContext )
+    {
+        return M4NO_ERROR;
+    }
+
+    if( ( pC->State == M4MCS_kState_CREATED)
+        || (pC->State == M4MCS_kState_CLOSED) )
+    {
+        pC->State = M4MCS_kState_CLOSED;
+
+        err = M4MCS_cleanUp(pContext);
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1("M4MCS_abort : M4MCS_cleanUp fails err = 0x%x", err);
+        }
+    }
+    else
+    {
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+        if( pC->m_bIsStillPicture )
+        {
+            /**
+            * Cancel the ongoing processes if any*/
+            err = M4MCS_stillPicCancel(pC);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_abort : M4MCS_stillPicCancel fails err = 0x%x", err);
+            }
+            /*Still picture process is now stopped; Carry on with close and cleanup*/
+        }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+        pC->State = M4MCS_kState_FINISHED;
+
+        err = M4MCS_close(pContext);
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1("M4MCS_abort : M4MCS_close fails err = 0x%x", err);
+            err1 = err;
+        }
+
+        err = M4MCS_cleanUp(pContext);
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1("M4MCS_abort : M4MCS_cleanUp fails err = 0x%x", err);
+        }
+    }
+    err = (err1 == M4NO_ERROR) ? err : err1;
+    return err;
+}
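+
+/* Usage sketch (illustrative only, not part of the library build): aborting an
+ * ongoing transcoding (e.g. on user cancel), whatever the current state is;
+ * mcsCtxt and err are as in the M4MCS_init() sketch above.
+ *
+ *     err = M4MCS_abort(mcsCtxt);    // closes and cleans up in a single call
+ *     mcsCtxt = M4OSA_NULL;          // the context is no longer valid
+ */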
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getInputFileProperties(M4MCS_Context pContext,
+ *                                         M4VIDEOEDITING_ClipProperties* pFileProperties);
+ * @brief   Retrieves the properties of the audio and video streams from the input file.
+ * @param   pContext            (IN) MCS context
+ * @param   pFileProperties     (OUT) Pointer to an allocated M4VIDEOEDITING_ClipProperties
+ *                              structure which is filled with the input stream properties.
+ * @note    The structure pFileProperties must be allocated and later de-allocated
+ *          by the application. The function must be called in the opened state.
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getInputFileProperties( M4MCS_Context pContext,
+                                       M4VIDEOEDITING_ClipProperties *pFileProperties )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+
+    M4OSA_TRACE2_2("M4MCS_getInputFileProperties called with pContext=0x%x, \
+                   pFileProperties=0x%x", pContext, pFileProperties);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_getInputFileProperties: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileProperties), M4ERR_PARAMETER,
+        "M4MCS_getInputFileProperties: pFileProperties is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    if( pC->m_bIsStillPicture )
+    {
+        /**
+        * Call the corresponding still picture MCS function*/
+        return M4MCS_stillPicGetInputFileProperties(pC, pFileProperties);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /**
+    * Check state automaton */
+
+    if( M4MCS_kState_OPENED != pC->State )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_getInputFileProperties(): Wrong State (%d), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /**
+    * Copy previously computed properties into given structure */
+    M4OSA_memcpy((M4OSA_MemAddr8)pFileProperties,
+        (M4OSA_MemAddr8) &pC->InputFileProperties,
+        sizeof(M4VIDEOEDITING_ClipProperties));
+
+    return M4NO_ERROR;
+}
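+
+/* Usage sketch (illustrative only, not part of the library build): inspecting the
+ * input clip once the MCS is in the OPENED state; mcsCtxt and err are as in the
+ * M4MCS_init() sketch above.
+ *
+ *     M4VIDEOEDITING_ClipProperties clipProps;
+ *
+ *     err = M4MCS_getInputFileProperties(mcsCtxt, &clipProps);
+ *     if( M4NO_ERROR == err )
+ *     {
+ *         // e.g. clipProps.uiVideoWidth, clipProps.uiVideoHeight and
+ *         // clipProps.VideoStreamType describe the input video stream
+ *     }
+ */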
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_setOutputParams(M4MCS_Context pContext, M4MCS_OutputParams* pParams);
+ * @brief   Set the MCS video output parameters.
+ * @note    Must be called after M4MCS_open. Must be called before M4MCS_step.
+ * @param   pContext            (IN) MCS context
+ * @param   pParams             (IN/OUT) Transcoding parameters
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ * @return  M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263 : Output video frame size parameter is
+ *                                                        incompatible with H263 encoding
+ * @return  M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263 : Output video frame rate parameter is
+ *                                                        incompatible with H263 encoding
+ * @return  M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT     : Undefined output video format parameter
+ * @return  M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE : Undefined output video frame size
+ * @return  M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE : Undefined output video frame rate
+ * @return  M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT : Undefined output audio format parameter
+ * @return  M4MCS_ERR_DURATION_IS_NULL : Specified output parameters define a null duration stream
+ *                                         (no audio and video)
+ ******************************************************************************
+ */
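+
+/* Usage sketch (illustrative only, not part of the library build): a minimal
+ * output configuration for M4MCS_setOutputParams() below. Only a few of the
+ * M4MCS_OutputParams fields are shown and the chosen constants are illustrative;
+ * the remaining fields (frame rate, audio settings, ...) must be set as well.
+ * mcsCtxt and err are as in the M4MCS_init() sketch above.
+ *
+ *     M4MCS_OutputParams params;
+ *
+ *     memset(&params, 0, sizeof(params));
+ *     params.OutputFileType       = M4VIDEOEDITING_kFileType_3GPP;
+ *     params.OutputVideoFormat    = M4VIDEOEDITING_kMPEG4;
+ *     params.OutputVideoFrameSize = M4VIDEOEDITING_kQCIF;
+ *     params.OutputAudioFormat    = M4VIDEOEDITING_kAAC;
+ *     err = M4MCS_setOutputParams(mcsCtxt, &params);
+ */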
+M4OSA_ERR M4MCS_setOutputParams( M4MCS_Context pContext,
+                                M4MCS_OutputParams *pParams )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+    M4OSA_UInt32 uiFrameWidth;
+    M4OSA_UInt32 uiFrameHeight;
+    M4OSA_ERR err;
+
+    M4OSA_TRACE2_2(
+        "M4MCS_setOutputParams called with pContext=0x%x, pParams=0x%x",
+        pContext, pParams);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_setOutputParams: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams), M4ERR_PARAMETER,
+        "M4MCS_setOutputParams: pParams is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    if( pC->m_bIsStillPicture )
+    {
+        /**
+        * Call the corresponding still picture MCS function*/
+        return M4MCS_stillPicSetOutputParams(pC, pParams);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /**
+    * Check state automaton */
+
+    if( M4MCS_kState_OPENED != pC->State )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_setOutputParams(): Wrong State (%d), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /* Ignore the audio or video stream if the output does not need it, */
+    /* or if the input file does not have any audio or video stream */
+    /*FlB 26.02.2009: add mp3 as mcs output format*/
+    if( ( pParams->OutputVideoFormat == M4VIDEOEDITING_kNoneVideo)
+        || (pC->VideoState == M4MCS_kStreamState_NOSTREAM)
+        || (pParams->OutputFileType == M4VIDEOEDITING_kFileType_AMR)
+        || (pParams->OutputFileType == M4VIDEOEDITING_kFileType_MP3) )
+    {
+        pC->novideo = M4OSA_TRUE;
+    }
+
+    if( ( pParams->OutputAudioFormat == M4VIDEOEDITING_kNoneAudio)
+        || (pC->AudioState == M4MCS_kStreamState_NOSTREAM) )
+    {
+        pC->noaudio = M4OSA_TRUE;
+    }
+
+    if( pC->noaudio && pC->novideo )
+    {
+        M4OSA_TRACE1_0(
+            "!!! M4MCS_setOutputParams : clip is NULL, there is no audio, no video");
+        return M4MCS_ERR_DURATION_IS_NULL;
+    }
+
+    /* Set writer */
+    err = M4MCS_setCurrentWriter(pContext, pParams->OutputFileType);
+    M4ERR_CHECK_RETURN(err);
+
+    /* Set video parameters */
+    if( pC->novideo == M4OSA_FALSE )
+    {
+#ifdef TIMESCALE_BUG
+        /* Check if we are in timescale modification */
+
+        if( pParams->OutputVideoTimescale != 0 )
+        {
+            pC->uiVideoTimescale = pParams->OutputVideoTimescale;
+
+            /* If timescale modification mode is on, we force NULL video encoding ... */
+            pParams->OutputVideoFormat = M4VIDEOEDITING_kNullVideo;
+        }
+
+#endif
+
+        /**
+        * Check Video Format correctness */
+
+        switch( pParams->OutputVideoFormat )
+        {
+            case M4VIDEOEDITING_kH263:
+                if( pParams->OutputFileType == M4VIDEOEDITING_kFileType_MP4 )
+                    return M4MCS_ERR_H263_FORBIDDEN_IN_MP4_FILE;
+
+                pC->EncodingVideoFormat = M4ENCODER_kH263;
+                err = M4MCS_setCurrentVideoEncoder(pContext,
+                    pParams->OutputVideoFormat);
+                M4ERR_CHECK_RETURN(err);
+                break;
+
+            case M4VIDEOEDITING_kMPEG4_EMP:
+                pC->bActivateEmp = M4OSA_TRUE;
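+                /* no break: EMP intentionally falls through to the MPEG-4 case */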
+
+            case M4VIDEOEDITING_kMPEG4:
+
+                pC->EncodingVideoFormat = M4ENCODER_kMPEG4;
+                err = M4MCS_setCurrentVideoEncoder(pContext,
+                    pParams->OutputVideoFormat);
+                M4ERR_CHECK_RETURN(err);
+                break;
+
+            case M4VIDEOEDITING_kH264:
+
+                pC->EncodingVideoFormat = M4ENCODER_kH264;
+                err = M4MCS_setCurrentVideoEncoder(pContext,
+                    pParams->OutputVideoFormat);
+                M4ERR_CHECK_RETURN(err);
+                break;
+
+            case M4VIDEOEDITING_kNullVideo:
+                if( ( pParams->OutputFileType == M4VIDEOEDITING_kFileType_MP4)
+                    && (pC->InputFileProperties.VideoStreamType
+                    == M4VIDEOEDITING_kH263) )
+                    return M4MCS_ERR_H263_FORBIDDEN_IN_MP4_FILE;
+
+
+                /* If input file is EMP, output file will be too */
+
+                if( pC->InputFileProperties.VideoStreamType
+                    == M4VIDEOEDITING_kMPEG4_EMP )
+                    pC->bActivateEmp = M4OSA_TRUE;
+
+                /* Encoder needed for begin cut to generate an I-frame */
+                pC->EncodingVideoFormat = M4ENCODER_kNULL;
+                err = M4MCS_setCurrentVideoEncoder(pContext,
+                    pC->InputFileProperties.VideoStreamType);
+                M4ERR_CHECK_RETURN(err);
+                break;
+
+            default:
+                M4OSA_TRACE1_1("M4MCS_setOutputParams: Undefined output video format (%d),\
+                               returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+                               pParams->OutputVideoFormat);
+                return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+        }
+
+        /**
+        * Check Video frame size correctness */
+        if( M4VIDEOEDITING_kNullVideo == pParams->OutputVideoFormat )
+        {
+            uiFrameWidth =
+                pC->EncodingWidth = pC->InputFileProperties.uiVideoWidth;
+            uiFrameHeight =
+                pC->EncodingHeight = pC->InputFileProperties.uiVideoHeight;
+        }
+        else
+        {
+            switch( pParams->OutputVideoFrameSize )
+            {
+                case M4VIDEOEDITING_kSQCIF:
+                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_SQCIF_Width;
+                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_SQCIF_Height;
+                    break;
+
+                case M4VIDEOEDITING_kQQVGA:
+                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_QQVGA_Width;
+                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_QQVGA_Height;
+                    break;
+
+                case M4VIDEOEDITING_kQCIF:
+                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_QCIF_Width;
+                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_QCIF_Height;
+                    break;
+
+                case M4VIDEOEDITING_kQVGA:
+                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_QVGA_Width;
+                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_QVGA_Height;
+                    break;
+
+                case M4VIDEOEDITING_kCIF:
+                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_CIF_Width;
+                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_CIF_Height;
+                    break;
+
+                case M4VIDEOEDITING_kVGA:
+                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_VGA_Width;
+                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_VGA_Height;
+                    break;
+                    /* +PR LV5807 */
+                case M4VIDEOEDITING_kWVGA:
+                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_WVGA_Width;
+                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_WVGA_Height;
+                    break;
+
+                case M4VIDEOEDITING_kNTSC:
+                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_NTSC_Width;
+                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_NTSC_Height;
+                    break;
+                    /* -PR LV5807*/
+                    /* +CR Google */
+                case M4VIDEOEDITING_k640_360:
+                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_640_360_Width;
+                    uiFrameHeight =
+                        pC->EncodingHeight = M4ENCODER_640_360_Height;
+                    break;
+
+                case M4VIDEOEDITING_k854_480:
+                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_854_480_Width;
+                    uiFrameHeight =
+                        pC->EncodingHeight = M4ENCODER_854_480_Height;
+                    break;
+
+                case M4VIDEOEDITING_kHD1280:
+                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_HD1280_Width;
+                    uiFrameHeight =
+                        pC->EncodingHeight = M4ENCODER_HD1280_Height;
+                    break;
+
+                case M4VIDEOEDITING_kHD1080:
+                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_HD1080_Width;
+                    uiFrameHeight =
+                        pC->EncodingHeight = M4ENCODER_HD1080_Height;
+                    break;
+
+                case M4VIDEOEDITING_kHD960:
+                    uiFrameWidth = pC->EncodingWidth = M4ENCODER_HD960_Width;
+                    uiFrameHeight = pC->EncodingHeight = M4ENCODER_HD960_Height;
+                    break;
+                    /* -CR Google */
+                default:
+                    M4OSA_TRACE1_1(
+                        "M4MCS_setOutputParams: Undefined output video frame size \
+                        (%d), returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE",
+                        pParams->OutputVideoFrameSize);
+                    return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
+            }
+        }
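+        /* Note: uiFrameWidth/uiFrameHeight are local copies used below for the
+        max AU size estimation, while pC->EncodingWidth/EncodingHeight are kept
+        in the context for the encoder configuration; both are assigned from
+        the same M4ENCODER_xxx_Width/Height constants, or from the input clip
+        dimensions in the null video encoding case. */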
+
+        /**
+        * Compute the video max AU size and max chunk size.
+        * We do it here because it depends only on the frame size, and
+        * because we need it for the file size/video bitrate estimations */
+        pC->uiVideoMaxAuSize =
+            (M4OSA_UInt32)(1.5F * (M4OSA_Float)(uiFrameWidth * uiFrameHeight)
+            * M4MCS_VIDEO_MIN_COMPRESSION_RATIO);
+        pC->uiVideoMaxChunckSize =
+            (M4OSA_UInt32)(pC->uiVideoMaxAuSize
+            * M4MCS_VIDEO_CHUNK_AU_SIZE_RATIO); /**< from max AU size to max chunk size */
+
+        if( 0 == pC->uiVideoMaxAuSize )
+        {
+            /* Size may be zero in case of null encoding with unrecognized stream */
+            M4OSA_TRACE1_0("M4MCS_setOutputParams: video frame size is 0 returning\
+                           M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE");
+            return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
+        }
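+        /* Illustrative example (assuming a QVGA output, i.e. 320x240; the
+        actual values of M4MCS_VIDEO_MIN_COMPRESSION_RATIO and
+        M4MCS_VIDEO_CHUNK_AU_SIZE_RATIO are not restated here):
+        uiVideoMaxAuSize     = 1.5 * (320 * 240) * M4MCS_VIDEO_MIN_COMPRESSION_RATIO
+                             = 1.5 * 76800 * M4MCS_VIDEO_MIN_COMPRESSION_RATIO
+        uiVideoMaxChunckSize = uiVideoMaxAuSize * M4MCS_VIDEO_CHUNK_AU_SIZE_RATIO */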
+
+
+        /**
+        * Size check for H263 (only valid sizes are CIF, QCIF and SQCIF) */
+
+        if( M4VIDEOEDITING_kH263 == pParams->OutputVideoFormat )
+        {
+            switch( pParams->OutputVideoFrameSize )
+            {
+                case M4VIDEOEDITING_kSQCIF:
+                case M4VIDEOEDITING_kQCIF:
+                case M4VIDEOEDITING_kCIF:
+                    /* OK */
+                    break;
+
+                default:
+                    M4OSA_TRACE1_0(
+                        "M4MCS_setOutputParams():\
+                        returning M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263");
+                    return M4MCS_ERR_INVALID_VIDEO_FRAME_SIZE_FOR_H263;
+            }
+        }
+
+        /**
+        * Check Video Frame rate correctness */
+        if( M4VIDEOEDITING_kNullVideo != pParams->OutputVideoFormat )
+        {
+            switch( pParams->OutputVideoFrameRate )
+            {
+                case M4VIDEOEDITING_k5_FPS:
+                    pC->EncodingVideoFramerate = M4ENCODER_k5_FPS;
+                    break;
+
+                case M4VIDEOEDITING_k7_5_FPS:
+                    pC->EncodingVideoFramerate = M4ENCODER_k7_5_FPS;
+                    break;
+
+                case M4VIDEOEDITING_k10_FPS:
+                    pC->EncodingVideoFramerate = M4ENCODER_k10_FPS;
+                    break;
+
+                case M4VIDEOEDITING_k12_5_FPS:
+                    pC->EncodingVideoFramerate = M4ENCODER_k12_5_FPS;
+                    break;
+
+                case M4VIDEOEDITING_k15_FPS:
+                    pC->EncodingVideoFramerate = M4ENCODER_k15_FPS;
+                    break;
+
+                case M4VIDEOEDITING_k20_FPS:
+                    pC->EncodingVideoFramerate = M4ENCODER_k20_FPS;
+                    break;
+
+                case M4VIDEOEDITING_k25_FPS:
+                    pC->EncodingVideoFramerate = M4ENCODER_k25_FPS;
+                    break;
+
+                case M4VIDEOEDITING_k30_FPS:
+                    pC->EncodingVideoFramerate = M4ENCODER_k30_FPS;
+                    break;
+
+                default:
+                    M4OSA_TRACE1_1(
+                        "M4MCS_setOutputParams: Undefined output video frame rate\
+                        (%d), returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE",
+                        pParams->OutputVideoFrameRate);
+                    return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE;
+            }
+        }
+
+        /**
+        * Frame rate check for H263 (only divisors of 30 fps (29.97 actually) are allowed) */
+        if( M4VIDEOEDITING_kH263 == pParams->OutputVideoFormat )
+        {
+            switch( pC->EncodingVideoFramerate )
+            {
+                case M4ENCODER_k5_FPS:
+                case M4ENCODER_k7_5_FPS:
+                case M4ENCODER_k10_FPS:
+                case M4ENCODER_k15_FPS:
+                case M4ENCODER_k30_FPS:
+                    /* OK */
+                    break;
+
+                default:
+                    M4OSA_TRACE1_0(
+                        "M4MCS_setOutputParams():\
+                        returning M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263");
+                    return M4MCS_ERR_INVALID_VIDEO_FRAME_RATE_FOR_H263;
+            }
+        }
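+        /* Background: H.263 timestamps are expressed against a fixed
+        30000/1001 Hz (~29.97 Hz) picture clock, so only the nominal rates that
+        divide that clock (30, 15, 10, 7.5, 5 fps) are accepted here, on top of
+        the SQCIF/QCIF/CIF picture size restriction above. */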
+    }
+
+    /* Set audio parameters */
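+    /* Overview of the selection below: the requested output audio codec picks
+    the encoder to register; AMR-NB is forced to 8 kHz mono, while AAC and MP3
+    use the requested sampling frequency (16 kHz when left to default). For
+    kNullAudio without effects no encoder is needed (pass-through); with
+    effects, the encoder parameters are copied from the input stream so the
+    audio can be re-encoded with its original codec, sampling frequency and
+    channel count after the effects are applied. */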
+    if( pC->noaudio == M4OSA_FALSE )
+    {
+        /**
+        * Check Audio Format correctness */
+        switch( pParams->OutputAudioFormat )
+        {
+            case M4VIDEOEDITING_kAMR_NB:
+
+                err = M4MCS_setCurrentAudioEncoder(pContext,
+                    pParams->OutputAudioFormat);
+                M4ERR_CHECK_RETURN(err);
+
+                pC->AudioEncParams.Format = M4ENCODER_kAMRNB;
+                pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+                pC->AudioEncParams.ChannelNum = M4ENCODER_kMono;
+                pC->AudioEncParams.SpecifParam.AmrSID = M4ENCODER_kAmrNoSID;
+                break;
+
+            case M4VIDEOEDITING_kAAC:
+
+                err = M4MCS_setCurrentAudioEncoder(pContext,
+                    pParams->OutputAudioFormat);
+                M4ERR_CHECK_RETURN(err);
+
+                pC->AudioEncParams.Format = M4ENCODER_kAAC;
+                pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+
+                switch( pParams->OutputAudioSamplingFrequency )
+                {
+                    case M4VIDEOEDITING_k8000_ASF:
+                        pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+                        break;
+
+                    case M4VIDEOEDITING_k16000_ASF:
+                        pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+                        break;
+
+                    case M4VIDEOEDITING_k22050_ASF:
+                        pC->AudioEncParams.Frequency = M4ENCODER_k22050Hz;
+                        break;
+
+                    case M4VIDEOEDITING_k24000_ASF:
+                        pC->AudioEncParams.Frequency = M4ENCODER_k24000Hz;
+                        break;
+
+                    case M4VIDEOEDITING_k32000_ASF:
+                        pC->AudioEncParams.Frequency = M4ENCODER_k32000Hz;
+                        break;
+
+                    case M4VIDEOEDITING_k44100_ASF:
+                        pC->AudioEncParams.Frequency = M4ENCODER_k44100Hz;
+                        break;
+
+                    case M4VIDEOEDITING_k48000_ASF:
+                        pC->AudioEncParams.Frequency = M4ENCODER_k48000Hz;
+                        break;
+
+                    case M4VIDEOEDITING_k11025_ASF:
+                    case M4VIDEOEDITING_k12000_ASF:
+                    case M4VIDEOEDITING_kDefault_ASF:
+                        break;
+                }
+                    pC->AudioEncParams.ChannelNum =
+                        (pParams->bAudioMono == M4OSA_TRUE) ? \
+                        M4ENCODER_kMono : M4ENCODER_kStereo;
+                    pC->AudioEncParams.SpecifParam.AacParam.Regulation =
+                        M4ENCODER_kAacRegulNone; //M4ENCODER_kAacBitReservoir
+                    /* unused */
+                    pC->AudioEncParams.SpecifParam.AacParam.bIS = M4OSA_FALSE;
+                    pC->AudioEncParams.SpecifParam.AacParam.bMS = M4OSA_FALSE;
+                    pC->AudioEncParams.SpecifParam.AacParam.bPNS = M4OSA_FALSE;
+                    pC->AudioEncParams.SpecifParam.AacParam.bTNS = M4OSA_FALSE;
+                    /* TODO change into highspeed asap */
+                    pC->AudioEncParams.SpecifParam.AacParam.bHighSpeed =
+                        M4OSA_FALSE;
+                    break;
+
+                    /*FlB 26.02.2009: add mp3 as mcs output format, add mp3 encoder*/
+                case M4VIDEOEDITING_kMP3:
+                    err = M4MCS_setCurrentAudioEncoder(pContext,
+                        pParams->OutputAudioFormat);
+                    M4ERR_CHECK_RETURN(err);
+
+                    pC->AudioEncParams.Format = M4ENCODER_kMP3;
+                    pC->AudioEncParams.ChannelNum =
+                        (pParams->bAudioMono == M4OSA_TRUE) ? \
+                        M4ENCODER_kMono : M4ENCODER_kStereo;
+
+                    pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+
+                    switch( pParams->OutputAudioSamplingFrequency )
+                    {
+                        case M4VIDEOEDITING_k8000_ASF:
+                            pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+                            break;
+
+                        case M4VIDEOEDITING_k11025_ASF:
+                            pC->AudioEncParams.Frequency = M4ENCODER_k11025Hz;
+                            break;
+
+                        case M4VIDEOEDITING_k12000_ASF:
+                            pC->AudioEncParams.Frequency = M4ENCODER_k12000Hz;
+                            break;
+
+                        case M4VIDEOEDITING_k16000_ASF:
+                            pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+                            break;
+
+                        case M4VIDEOEDITING_k22050_ASF:
+                            pC->AudioEncParams.Frequency = M4ENCODER_k22050Hz;
+                            break;
+
+                        case M4VIDEOEDITING_k24000_ASF:
+                            pC->AudioEncParams.Frequency = M4ENCODER_k24000Hz;
+                            break;
+
+                        case M4VIDEOEDITING_k32000_ASF:
+                            pC->AudioEncParams.Frequency = M4ENCODER_k32000Hz;
+                            break;
+
+                        case M4VIDEOEDITING_k44100_ASF:
+                            pC->AudioEncParams.Frequency = M4ENCODER_k44100Hz;
+                            break;
+
+                        case M4VIDEOEDITING_k48000_ASF:
+                            pC->AudioEncParams.Frequency = M4ENCODER_k48000Hz;
+                            break;
+
+                        case M4VIDEOEDITING_kDefault_ASF:
+                            break;
+                    }
+
+                    break;
+
+                case M4VIDEOEDITING_kNullAudio:
+                    if( pParams->pEffects == M4OSA_NULL || pParams->nbEffects == 0 )
+                    {
+                        /* no encoder needed */
+                        pC->AudioEncParams.Format = M4ENCODER_kAudioNULL;
+                        pC->AudioEncParams.Frequency =
+                            pC->pReaderAudioStream->m_samplingFrequency;
+                        pC->AudioEncParams.ChannelNum =
+                            (pC->pReaderAudioStream->m_nbChannels == 1) ? \
+                            M4ENCODER_kMono : M4ENCODER_kStereo;
+                    }
+                    else
+                    {
+                        pC->AudioEncParams.Frequency =
+                            pC->pReaderAudioStream->m_samplingFrequency;
+                        pC->AudioEncParams.ChannelNum =
+                            (pC->pReaderAudioStream->m_nbChannels == 1) ? \
+                            M4ENCODER_kMono : M4ENCODER_kStereo;
+
+                        switch( pC->InputFileProperties.AudioStreamType )
+                        {
+                            case M4VIDEOEDITING_kAMR_NB:
+                                M4OSA_TRACE3_0(
+                                    "M4MCS_setOutputParams calling \
+                                    M4MCS_setCurrentAudioEncoder M4VIDEOEDITING_kNull, AMR");
+                                err = M4MCS_setCurrentAudioEncoder(pContext,
+                                    pC->InputFileProperties.AudioStreamType);
+                                M4ERR_CHECK_RETURN(err);
+
+                                pC->AudioEncParams.Format = M4ENCODER_kAMRNB;
+                                pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+                                pC->AudioEncParams.ChannelNum = M4ENCODER_kMono;
+
+                                if( pC->pReaderAudioStream->m_samplingFrequency
+                                    != 8000 )
+                                {
+                                    pC->AudioEncParams.Format = M4ENCODER_kAMRNB;
+                                }
+                                pC->AudioEncParams.SpecifParam.AmrSID =
+                                    M4ENCODER_kAmrNoSID;
+                                break;
+
+                            case M4VIDEOEDITING_kAAC:
+                                M4OSA_TRACE3_0(
+                                    "M4MCS_setOutputParams calling \
+                                    M4MCS_setCurrentAudioEncoder M4VIDEOEDITING_kNull, AAC");
+                                err = M4MCS_setCurrentAudioEncoder(pContext,
+                                    pC->InputFileProperties.AudioStreamType);
+                                M4ERR_CHECK_RETURN(err);
+
+                                pC->AudioEncParams.Format = M4ENCODER_kAAC;
+                                pC->AudioEncParams.SpecifParam.AacParam.Regulation =
+                                    M4ENCODER_kAacRegulNone; //M4ENCODER_kAacBitReservoir
+                                pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+
+                                switch( pC->pReaderAudioStream->
+                                    m_samplingFrequency )
+                                {
+                                case 16000:
+                                    pC->AudioEncParams.Frequency =
+                                        M4ENCODER_k16000Hz;
+                                    break;
+
+                                case 22050:
+                                    pC->AudioEncParams.Frequency =
+                                        M4ENCODER_k22050Hz;
+                                    break;
+
+                                case 24000:
+                                    pC->AudioEncParams.Frequency =
+                                        M4ENCODER_k24000Hz;
+                                    break;
+
+                                case 32000:
+                                    pC->AudioEncParams.Frequency =
+                                        M4ENCODER_k32000Hz;
+                                    break;
+
+                                case 44100:
+                                    pC->AudioEncParams.Frequency =
+                                        M4ENCODER_k44100Hz;
+                                    break;
+
+                                case 48000:
+                                    pC->AudioEncParams.Frequency =
+                                        M4ENCODER_k48000Hz;
+                                    break;
+
+                                default:
+                                    pC->AudioEncParams.Format = M4ENCODER_kAAC;
+                                    break;
+                            }
+                            /* unused */
+                            pC->AudioEncParams.SpecifParam.AacParam.bIS =
+                                M4OSA_FALSE;
+                            pC->AudioEncParams.SpecifParam.AacParam.bMS =
+                                M4OSA_FALSE;
+                            pC->AudioEncParams.SpecifParam.AacParam.bPNS =
+                                M4OSA_FALSE;
+                            pC->AudioEncParams.SpecifParam.AacParam.bTNS =
+                                M4OSA_FALSE;
+                            /* TODO change into highspeed asap */
+                            pC->AudioEncParams.SpecifParam.AacParam.bHighSpeed =
+                                M4OSA_FALSE;
+                            break;
+
+                        case M4VIDEOEDITING_kMP3:
+                            M4OSA_TRACE3_0(
+                                "M4MCS_setOutputParams calling\
+                                M4MCS_setCurrentAudioEncoder M4VIDEOEDITING_kNull, MP3");
+                            err = M4MCS_setCurrentAudioEncoder(pContext,
+                                pC->InputFileProperties.AudioStreamType);
+                            M4ERR_CHECK_RETURN(err);
+
+                            pC->AudioEncParams.Format = M4ENCODER_kMP3;
+                            pC->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+
+                            switch( pC->pReaderAudioStream->
+                                m_samplingFrequency )
+                            {
+                                case 8000:
+                                    pC->AudioEncParams.Frequency =
+                                        M4ENCODER_k8000Hz;
+                                    break;
+
+                                case 16000:
+                                    pC->AudioEncParams.Frequency =
+                                        M4ENCODER_k16000Hz;
+                                    break;
+
+                                case 22050:
+                                    pC->AudioEncParams.Frequency =
+                                        M4ENCODER_k22050Hz;
+                                    break;
+
+                                case 24000:
+                                    pC->AudioEncParams.Frequency =
+                                        M4ENCODER_k24000Hz;
+                                    break;
+
+                                case 32000:
+                                    pC->AudioEncParams.Frequency =
+                                        M4ENCODER_k32000Hz;
+                                    break;
+
+                                case 44100:
+                                    pC->AudioEncParams.Frequency =
+                                        M4ENCODER_k44100Hz;
+                                    break;
+
+                                case 48000:
+                                    pC->AudioEncParams.Frequency =
+                                        M4ENCODER_k48000Hz;
+                                    break;
+
+                                default:
+                                    pC->AudioEncParams.Format = M4ENCODER_kMP3;
+                                    break;
+                            }
+                            break;
+
+                        case M4VIDEOEDITING_kEVRC:
+                        case M4VIDEOEDITING_kUnsupportedAudio:
+                        default:
+                            M4OSA_TRACE1_1(
+                                "M4MCS_setOutputParams: Output audio format (%d) is\
+                                incompatible with audio effects, returning \
+                                M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT",
+                                pC->InputFileProperties.AudioStreamType);
+                            return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT;
+                        }
+                    }
+                    break;
+                    /* EVRC
+                    //            case M4VIDEOEDITING_kEVRC:
+                    //
+                    //                err = M4MCS_setCurrentAudioEncoder(pContext, pParams->\
+                    //                    OutputAudioFormat);
+                    //                M4ERR_CHECK_RETURN(err);
+                    //
+                    //                pC->AudioEncParams.Format = M4ENCODER_kEVRC;
+                    //                pC->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+                    //                pC->AudioEncParams.ChannelNum = M4ENCODER_kMono;
+                    //                break; */
+
+                default:
+                    M4OSA_TRACE1_1("M4MCS_setOutputParams: Undefined output audio format (%d),\
+                                   returning M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT",
+                                   pParams->OutputAudioFormat);
+                    return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT;
+        }
+    }
+
+    if( pParams->pOutputPCMfile != M4OSA_NULL )
+    {
+        pC->pOutputPCMfile = pParams->pOutputPCMfile;
+
+        /* Open output PCM file */
+        pC->pOsaFileWritPtr->openWrite(&(pC->pOutputPCMfile),
+            pParams->pOutputPCMfile, M4OSA_kFileWrite);
+    }
+    else
+    {
+        pC->pOutputPCMfile = M4OSA_NULL;
+    }
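+    /* Note: when a PCM output URL is given, pC->pOutputPCMfile is first set to
+    the URL and is then overwritten by openWrite() with the file-writer
+    context; the return code of openWrite() is not checked here. */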
+
+    /*Store media rendering parameter into the internal context*/
+    pC->MediaRendering = pParams->MediaRendering;
+
+    /* Add audio effects*/
+    /*Copy MCS effects structure into internal context*/
+    if( pParams->nbEffects > 0 )
+    {
+        M4OSA_UInt32 j = 0;
+        pC->nbEffects = pParams->nbEffects;
+        pC->pEffects = (M4MCS_EffectSettings *)M4OSA_malloc(pC->nbEffects \
+            *sizeof(M4MCS_EffectSettings), M4MCS,
+            (M4OSA_Char *)"Allocation of effects list");
+
+        if( pC->pEffects == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("M4MCS_setOutputParams(): allocation error");
+            return M4ERR_ALLOC;
+        }
+
+        for ( j = 0; j < pC->nbEffects; j++ )
+        {
+            /* Copy effect to "local" structure */
+            M4OSA_memcpy((M4OSA_MemAddr8) &(pC->pEffects[j]),
+                (M4OSA_MemAddr8) &(pParams->pEffects[j]),
+                sizeof(M4MCS_EffectSettings));
+
+            switch( pC->pEffects[j].AudioEffectType )
+            {
+                case M4MCS_kAudioEffectType_None:
+                    M4OSA_TRACE3_1(
+                        "M4MCS_setOutputParams(): effect type %i is None", j);
+                    pC->pEffects[j].pExtAudioEffectFctCtxt = M4OSA_NULL;
+                    pC->pEffects[j].ExtAudioEffectFct = M4OSA_NULL;
+                    break;
+
+                case M4MCS_kAudioEffectType_FadeIn:
+                    M4OSA_TRACE3_1(
+                        "M4MCS_setOutputParams(): effect type %i is FadeIn", j);
+                    pC->pEffects[j].pExtAudioEffectFctCtxt = M4OSA_NULL;
+                    pC->pEffects[j].ExtAudioEffectFct =
+                        M4MCS_editAudioEffectFct_FadeIn;
+                    break;
+
+                case M4MCS_kAudioEffectType_FadeOut:
+                    M4OSA_TRACE3_1(
+                        "M4MCS_setOutputParams(): effect type %i is FadeOut",
+                        j);
+                    pC->pEffects[j].pExtAudioEffectFctCtxt = M4OSA_NULL;
+                    pC->pEffects[j].ExtAudioEffectFct =
+                        M4MCS_editAudioEffectFct_FadeOut;
+                    break;
+
+                case M4MCS_kAudioEffectType_External:
+                    M4OSA_TRACE3_1(
+                        "M4MCS_setOutputParams(): effect type %i is External",
+                        j);
+
+                    if( pParams->pEffects != M4OSA_NULL )
+                    {
+                        if( pParams->pEffects[j].ExtAudioEffectFct
+                            == M4OSA_NULL )
+                        {
+                            M4OSA_TRACE1_1("M4MCS_setOutputParams(): no external effect function\
+                                           associated to external effect number %i", j);
+                            return M4ERR_PARAMETER;
+                        }
+                        pC->pEffects[j].pExtAudioEffectFctCtxt =
+                            pParams->pEffects[j].pExtAudioEffectFctCtxt;
+
+                        pC->pEffects[j].ExtAudioEffectFct =
+                            pParams->pEffects[j].ExtAudioEffectFct;
+                    }
+
+                    break;
+
+                default:
+                    M4OSA_TRACE1_0(
+                        "M4MCS_setOutputParams(): effect type not recognized");
+                    return M4ERR_PARAMETER;
+            }
+        }
+    }
+    else
+    {
+        pC->nbEffects = 0;
+        pC->pEffects = M4OSA_NULL;
+    }
+
+    /**
+    * Update state automaton */
+    pC->State = M4MCS_kState_SET;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_setOutputParams(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
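+/* Typical call sequence around this API (informal sketch, not a normative
+ * example; error handling and the exact open/step entry points of the MCS are
+ * not restated here):
+ *
+ *     M4MCS_OutputParams   params;   // output container, codecs, frame size...
+ *     M4MCS_EncodingParams rates;    // bitrates, cut times, max file size
+ *
+ *     // ... open the input clip so that the MCS reaches the OPENED state ...
+ *     err = M4MCS_setOutputParams(context, &params);          // OPENED -> SET
+ *     err = M4MCS_setEncodingParams(context, &rates);
+ *     err = M4MCS_getExtendedEncodingParams(context, &rates); // optional: read back defaults
+ *     err = M4MCS_checkParamsAndStart(context);
+ *     // ... then step the transcoding until completion and close ...
+ */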
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_setEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates)
+ * @brief   Set the values of the encoding parameters
+ * @note    Must be called before M4MCS_checkParamsAndStart().
+ * @param   pContext           (IN) MCS context
+ * @param   pRates             (IN) Transcoding parameters
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ * @return  M4MCS_ERR_AUDIOBITRATE_TOO_HIGH: Audio bitrate too high (we limit to 96 kbps)
+ * @return  M4MCS_ERR_AUDIOBITRATE_TOO_LOW: Audio bitrate is too low (16 kbps min for aac, 12.2
+ *                                            for amr, 8 for mp3)
+ * @return  M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Begin cut and End cut are equal
+ * @return  M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION: Begin cut time is larger than the input clip
+ *                                                     duration
+ * @return  M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT: End cut time is smaller than begin cut time
+ * @return  M4MCS_ERR_MAXFILESIZE_TOO_SMALL: Not enough space to store whole output file at given
+ *                                             bitrates
+ * @return  M4MCS_ERR_VIDEOBITRATE_TOO_HIGH: Video bitrate too high (we limit to 8 Mbps)
+ * @return  M4MCS_ERR_VIDEOBITRATE_TOO_LOW:  Video bitrate too low
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_setEncodingParams( M4MCS_Context pContext,
+                                  M4MCS_EncodingParams *pRates )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+    M4OSA_UInt32 j = 0;
+
+    M4OSA_TRACE2_2(
+        "M4MCS_setEncodingParams called with pContext=0x%x, pRates=0x%x",
+        pContext, pRates);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_setEncodingParams: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pRates), M4ERR_PARAMETER,
+        "M4MCS_setEncodingParams: pRates is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    if( pC->m_bIsStillPicture )
+    {
+        /**
+        * Call the corresponding still picture MCS function*/
+        return M4MCS_stillPicSetEncodingParams(pC, pRates);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /**
+    * Check state automaton */
+
+    if( M4MCS_kState_SET != pC->State )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_setEncodingParams(): Wrong State (%d), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /* Set given values */
+    pC->uiVideoBitrate = pRates->OutputVideoBitrate;
+    pC->uiAudioBitrate = pRates->OutputAudioBitrate;
+    pC->uiBeginCutTime = pRates->BeginCutTime;
+    pC->uiEndCutTime = pRates->EndCutTime;
+    pC->uiMaxFileSize = pRates->OutputFileSize;
+
+    /**
+    * Check begin cut time validity */
+    if( pC->uiBeginCutTime >= pC->InputFileProperties.uiClipDuration )
+    {
+        M4OSA_TRACE1_2("M4MCS_setEncodingParams(): Begin cut larger than duration (%d>%d),\
+                       returning M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION",
+                       pC->uiBeginCutTime, pC->InputFileProperties.uiClipDuration);
+        return M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION;
+    }
+
+    /**
+    * If end cut time is too large, we set it to the clip duration */
+    if( pC->uiEndCutTime > pC->InputFileProperties.uiClipDuration )
+    {
+        pC->uiEndCutTime = pC->InputFileProperties.uiClipDuration;
+    }
+
+    /**
+    * Check end cut time validity */
+    if( pC->uiEndCutTime > 0 )
+    {
+        if( pC->uiEndCutTime < pC->uiBeginCutTime )
+        {
+            M4OSA_TRACE1_2("M4MCS_setEncodingParams(): Begin cut greater than end cut (%d,%d), \
+                           returning M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT",
+                           pC->uiBeginCutTime, pC->uiEndCutTime);
+            return M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT;
+        }
+
+        if( pC->uiEndCutTime == pC->uiBeginCutTime )
+        {
+            M4OSA_TRACE1_2("M4MCS_setEncodingParams(): Begin and End cuts are equal (%d,%d),\
+                           returning M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT",
+                           pC->uiBeginCutTime, pC->uiEndCutTime);
+            return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+        }
+    }
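+    /* Example: for a 20000 ms input clip, BeginCutTime = 5000 with
+    EndCutTime = 5000 returns M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT, while
+    EndCutTime = 30000 has already been clamped to 20000 above, and
+    EndCutTime = 0 means "up to the end of the clip". */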
+
+    /**
+    * FlB 2009.03.04: check audio effects start time and duration validity*/
+    for ( j = 0; j < pC->nbEffects; j++ )
+    {
+        M4OSA_UInt32 outputEndCut = pC->uiEndCutTime;
+
+        if( pC->uiEndCutTime == 0 )
+        {
+            outputEndCut = pC->InputFileProperties.uiClipDuration;
+        }
+
+        if( pC->pEffects[j].uiStartTime > (outputEndCut - pC->uiBeginCutTime) )
+        {
+            M4OSA_TRACE1_2("M4MCS_setEncodingParams(): Effect start time is larger than the\
+                           output duration (%d,%d), returning M4ERR_PARAMETER",
+                           pC->pEffects[j].uiStartTime,
+                           (outputEndCut - pC->uiBeginCutTime));
+            return M4ERR_PARAMETER;
+        }
+
+        if( pC->pEffects[j].uiStartTime + pC->pEffects[j].uiDuration > \
+            (outputEndCut - pC->uiBeginCutTime) )
+        {
+            /* Clamp the effect duration so that it ends with the output clip */
+            pC->pEffects[j].uiDuration = (outputEndCut - pC->uiBeginCutTime) - \
+                pC->pEffects[j].uiStartTime;
+        }
+    }
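+    /* Worked example: with BeginCutTime = 2000 and EndCutTime = 10000 the
+    output spans 8000 ms; an effect with uiStartTime = 6000 and
+    uiDuration = 5000 is accepted but its duration is clamped to
+    8000 - 6000 = 2000 ms, whereas uiStartTime = 9000 would be rejected. */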
+
+    /* Check audio bitrate consistency */
+    if( ( pC->noaudio == M4OSA_FALSE)
+        && (pC->AudioEncParams.Format != M4ENCODER_kAudioNULL) )
+    {
+        if( pC->uiAudioBitrate != M4VIDEOEDITING_kUndefinedBitrate )
+        {
+            if( pC->AudioEncParams.Format == M4ENCODER_kAMRNB )
+            {
+                if( pC->uiAudioBitrate > M4VIDEOEDITING_k12_2_KBPS )
+                    return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
+
+                if( pC->uiAudioBitrate < M4VIDEOEDITING_k12_2_KBPS )
+                    return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+            }
+            //EVRC
+            //            else if(pC->AudioEncParams.Format == M4ENCODER_kEVRC)
+            //            {
+            //                if(pC->uiAudioBitrate > M4VIDEOEDITING_k9_2_KBPS)
+            //                    return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
+            //                if(pC->uiAudioBitrate < M4VIDEOEDITING_k9_2_KBPS)
+            //                     return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+            //            }
+            /*FlB 26.02.2009: add mp3 as mcs output format, add mp3 encoder*/
+            else if( pC->AudioEncParams.Format == M4ENCODER_kMP3 )
+            {
+                if( pC->AudioEncParams.Frequency >= M4ENCODER_k32000Hz )
+                {
+                    /* MPEG-1 layer III */
+                    if( pC->uiAudioBitrate > 320000 )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
+
+                    if( pC->uiAudioBitrate < 32000 )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+                }
+                else if( pC->AudioEncParams.Frequency >= M4ENCODER_k16000Hz )
+                {
+                    /* MPEG-2 layer III */
+                    if( pC->uiAudioBitrate > 160000 )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
+
+                    if( ( pC->uiAudioBitrate < 8000
+                        && pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
+                        || (pC->uiAudioBitrate < 16000
+                        && pC->AudioEncParams.ChannelNum
+                        == M4ENCODER_kStereo) )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+                }
+                else if( pC->AudioEncParams.Frequency == M4ENCODER_k8000Hz
+                    || pC->AudioEncParams.Frequency == M4ENCODER_k11025Hz
+                    || pC->AudioEncParams.Frequency == M4ENCODER_k12000Hz )
+                {
+                    /* MPEG-2.5 layer III */
+                    if( pC->uiAudioBitrate > 64000 )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
+
+                    if( ( pC->uiAudioBitrate < 8000
+                        && pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
+                        || (pC->uiAudioBitrate < 16000
+                        && pC->AudioEncParams.ChannelNum
+                        == M4ENCODER_kStereo) )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+                }
+                else
+                {
+                    M4OSA_TRACE1_1("M4MCS_setEncodingParams: MP3 audio sampling frequency error\
+                                   (%d)", pC->AudioEncParams.Frequency);
+                    return M4ERR_PARAMETER;
+                }
+            }
+            else
+            {
+                if( pC->uiAudioBitrate > M4VIDEOEDITING_k192_KBPS )
+                    return M4MCS_ERR_AUDIOBITRATE_TOO_HIGH;
+
+                if( pC->AudioEncParams.ChannelNum == M4ENCODER_kMono )
+                {
+                    if( pC->uiAudioBitrate < M4VIDEOEDITING_k16_KBPS )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+                }
+                else
+                {
+                    if( pC->uiAudioBitrate < M4VIDEOEDITING_k32_KBPS )
+                        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+                }
+            }
+        }
+    }
+    else
+    {
+        /* NULL audio : copy input file bitrate */
+        pC->uiAudioBitrate = pC->InputFileProperties.uiAudioBitrate;
+    }
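+    /* Accepted audio bitrate ranges enforced above, for reference:
+    - AMR-NB: exactly 12.2 kbps;
+    - MP3:    32..320 kbps at 32 kHz and above (MPEG-1), up to 160 kbps for
+              16..24 kHz (MPEG-2), up to 64 kbps for 8..12 kHz (MPEG-2.5),
+              with an 8 kbps (mono) / 16 kbps (stereo) floor;
+    - AAC and other formats: 16 kbps (mono) or 32 kbps (stereo) up to 192 kbps;
+    - null audio encoding: the input bitrate is copied as is. */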
+
+    /* Check video bitrate consistency */
+    if( ( pC->novideo == M4OSA_FALSE)
+        && (pC->EncodingVideoFormat != M4ENCODER_kNULL) )
+    {
+        if( pC->uiVideoBitrate != M4VIDEOEDITING_kUndefinedBitrate )
+        {
+            if( pC->uiVideoBitrate > M4VIDEOEDITING_k8_MBPS )
+                return M4MCS_ERR_VIDEOBITRATE_TOO_HIGH;
+
+            if( pC->uiVideoBitrate < M4VIDEOEDITING_k16_KBPS )
+                return M4MCS_ERR_VIDEOBITRATE_TOO_LOW;
+        }
+    }
+    else
+    {
+        /* NULL video : copy input file bitrate */
+        pC->uiVideoBitrate = pC->InputFileProperties.uiVideoBitrate;
+    }
+
+    if( pRates->OutputVideoTimescale <= 30000
+        && pRates->OutputVideoTimescale > 0 )
+    {
+        pC->outputVideoTimescale = pRates->OutputVideoTimescale;
+    }
+
+    /* Check file size */
+    return M4MCS_intCheckMaxFileSize(pC);
+}
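+/* Informal example of filling M4MCS_EncodingParams before the call above
+ * (values are only illustrative; only the fields read by
+ * M4MCS_setEncodingParams are shown):
+ *
+ *     M4MCS_EncodingParams rates;
+ *     rates.OutputVideoBitrate   = M4VIDEOEDITING_kUndefinedBitrate; // let the MCS choose
+ *     rates.OutputAudioBitrate   = M4VIDEOEDITING_k96_KBPS;
+ *     rates.BeginCutTime         = 0;   // ms, from the start of the clip
+ *     rates.EndCutTime           = 0;   // 0 = up to the end of the clip
+ *     rates.OutputFileSize       = 0;   // 0 = no file size constraint
+ *     rates.OutputVideoTimescale = 0;   // 0 = keep the default timescale
+ *     err = M4MCS_setEncodingParams(context, &rates);
+ */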
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getExtendedEncodingParams(M4MCS_Context pContext, M4MCS_EncodingParams* pRates)
+ * @brief   Get the extended values of the encoding parameters
+ * @note    Can be called after M4MCS_setEncodingParams().
+ * @param   pContext           (IN) MCS context
+ * @param   pRates             (OUT) Transcoding parameters
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ * @return  M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT: Encoding settings would produce a null duration
+ *                                             clip, so encoding is impossible
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getExtendedEncodingParams( M4MCS_Context pContext,
+                                          M4MCS_EncodingParams *pRates )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+
+    M4OSA_Int32 minaudiobitrate;
+    M4OSA_Int32 minvideobitrate;
+    M4OSA_Int32 maxcombinedbitrate;
+
+    M4OSA_Int32 calcbitrate;
+
+    M4OSA_UInt32 maxduration;
+    M4OSA_UInt32 calcduration;
+
+    M4OSA_Bool fixed_audio = M4OSA_FALSE;
+    M4OSA_Bool fixed_video = M4OSA_FALSE;
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    if( pC->m_bIsStillPicture )
+    {
+        /**
+        * Call the corresponding still picture MCS function*/
+        return M4MCS_stillPicGetExtendedEncodingParams(pC, pRates);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    pRates->OutputVideoBitrate =
+        M4MCS_intGetNearestBitrate(pC->uiVideoBitrate, 0);
+    pRates->OutputAudioBitrate =
+        M4MCS_intGetNearestBitrate(pC->uiAudioBitrate, 0);
+    pRates->BeginCutTime = pC->uiBeginCutTime;
+    pRates->EndCutTime = pC->uiEndCutTime;
+    pRates->OutputFileSize = pC->uiMaxFileSize;
+
+    /**
+    * Check state automaton */
+    if( M4MCS_kState_SET != pC->State )
+    {
+        M4OSA_TRACE1_1("M4MCS_getExtendedEncodingParams(): Wrong State (%d),\
+                       returning M4ERR_STATE", pC->State);
+        return M4ERR_STATE;
+    }
+
+    /* Compute min audio bitrate */
+    if( pC->noaudio )
+    {
+        fixed_audio = M4OSA_TRUE;
+        pRates->OutputAudioBitrate = 0;
+        minaudiobitrate = 0;
+    }
+    else if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
+    {
+        fixed_audio = M4OSA_TRUE;
+        pRates->OutputAudioBitrate = pC->InputFileProperties.uiAudioBitrate;
+        minaudiobitrate = pC->InputFileProperties.uiAudioBitrate;
+    }
+    else
+    {
+        if( pC->AudioEncParams.Format == M4ENCODER_kAMRNB )
+        {
+            fixed_audio = M4OSA_TRUE;
+            pRates->OutputAudioBitrate = M4VIDEOEDITING_k12_2_KBPS;
+            minaudiobitrate = M4VIDEOEDITING_k12_2_KBPS;
+        }
+        //EVRC
+        //        if(pC->AudioEncParams.Format == M4ENCODER_kEVRC)
+        //        {
+        //            fixed_audio = M4OSA_TRUE;
+        //            pRates->OutputAudioBitrate = M4VIDEOEDITING_k9_2_KBPS;
+        //            minaudiobitrate = M4VIDEOEDITING_k9_2_KBPS;
+        //        }
+        /*FlB 26.02.2009: add mp3 as mcs output format*/
+        else if( pC->AudioEncParams.Format == M4ENCODER_kMP3 )
+        {
+            minaudiobitrate =
+                M4VIDEOEDITING_k32_KBPS; /* Default min audio bitrate for MPEG-1 layer III,
+                                            for both mono and stereo channels */
+        }
+        else
+        {
+            minaudiobitrate = (pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
+                ? M4VIDEOEDITING_k16_KBPS : M4VIDEOEDITING_k32_KBPS;
+        }
+    }
+
+    /* Check audio bitrate is in the correct range */
+    if( fixed_audio == M4OSA_FALSE )
+    {
+        if( ( pC->uiAudioBitrate > 0)
+            && (pRates->OutputAudioBitrate < minaudiobitrate) )
+        {
+            pRates->OutputAudioBitrate = minaudiobitrate;
+        }
+
+        if( pRates->OutputAudioBitrate > M4VIDEOEDITING_k96_KBPS )
+        {
+            pRates->OutputAudioBitrate = M4VIDEOEDITING_k96_KBPS;
+        }
+    }
+
+    /* Compute min video bitrate */
+    if( pC->novideo )
+    {
+        fixed_video = M4OSA_TRUE;
+        pRates->OutputVideoBitrate = 0;
+        minvideobitrate = 0;
+    }
+    else if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
+    {
+        fixed_video = M4OSA_TRUE;
+        pRates->OutputVideoBitrate = pC->InputFileProperties.uiVideoBitrate;
+        minvideobitrate = pC->InputFileProperties.uiVideoBitrate;
+    }
+    else
+    {
+        minvideobitrate = M4VIDEOEDITING_k16_KBPS;
+    }
+
+    /* Check video bitrate is in the correct range */
+    if( fixed_video == M4OSA_FALSE )
+    {
+        if( ( pC->uiVideoBitrate > 0)
+            && (pRates->OutputVideoBitrate < minvideobitrate) )
+        {
+            pRates->OutputVideoBitrate = minvideobitrate;
+        }
+        /*+ New Encoder bitrates */
+        if( pRates->OutputVideoBitrate > M4VIDEOEDITING_k8_MBPS )
+        {
+            pRates->OutputVideoBitrate = M4VIDEOEDITING_k8_MBPS;
+        }
+        /*- New Encoder bitrates */
+    }
+
+    /* Check cut times are in correct range */
+    if( ( pRates->BeginCutTime >= pC->InputFileProperties.uiClipDuration)
+        || (( pRates->BeginCutTime >= pRates->EndCutTime)
+        && (pRates->EndCutTime > 0)) )
+    {
+        pRates->BeginCutTime = 0;
+        pRates->EndCutTime = 0;
+    }
+
+    if( pRates->EndCutTime == 0 )
+        calcduration =
+        pC->InputFileProperties.uiClipDuration - pRates->BeginCutTime;
+    else
+        calcduration = pRates->EndCutTime - pRates->BeginCutTime;
+
+    /* priority 1 : max file size */
+    if( pRates->OutputFileSize == 0 )
+    {
+        /* we can put maximum values for all undefined parameters */
+        if( pRates->EndCutTime == 0 )
+        {
+            pRates->EndCutTime = pC->InputFileProperties.uiClipDuration;
+        }
+
+        if( ( pRates->OutputAudioBitrate == M4VIDEOEDITING_kUndefinedBitrate)
+            && (fixed_audio == M4OSA_FALSE) )
+        {
+            pRates->OutputAudioBitrate = M4VIDEOEDITING_k96_KBPS;
+        }
+
+        if( ( pRates->OutputVideoBitrate == M4VIDEOEDITING_kUndefinedBitrate)
+            && (fixed_video == M4OSA_FALSE) )
+        {
+            /*+ New Encoder bitrates */
+            pRates->OutputVideoBitrate = M4VIDEOEDITING_k8_MBPS;
+            /*- New Encoder bitrates */
+        }
+    }
+    else
+    {
+        /* compute max duration */
+        maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
+            / M4MCS_MOOV_OVER_FILESIZE_RATIO
+            / (minvideobitrate + minaudiobitrate) * 8000.0);
+
+        if( maxduration
+            + pRates->BeginCutTime > pC->InputFileProperties.uiClipDuration )
+        {
+            maxduration =
+                pC->InputFileProperties.uiClipDuration - pRates->BeginCutTime;
+        }
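+        /* In other words: duration_ms = (file_size_bytes * 8 / bitrate_bps) * 1000
+        = file_size / bitrate * 8000, further reduced by the
+        M4MCS_MOOV_OVER_FILESIZE_RATIO overhead factor and capped so that
+        BeginCutTime + maxduration does not exceed the input clip duration.
+        Example: a 1,000,000 byte budget at a combined 100 kbps allows roughly
+        1,000,000 / 100,000 * 8000 = 80,000 ms before the moov overhead is
+        taken into account. */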
+
+        /* priority 2 : cut times */
+        if( ( pRates->BeginCutTime > 0) || (pRates->EndCutTime > 0) )
+        {
+            if( calcduration > maxduration )
+            {
+                calcduration = maxduration;
+            }
+
+            if( calcduration == 0 )
+            {
+                return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+            }
+
+            maxcombinedbitrate = (M4OSA_UInt32)(pRates->OutputFileSize
+                / M4MCS_MOOV_OVER_FILESIZE_RATIO / (calcduration / 8000.0));
+
+            /* audio and video bitrates */
+            if( ( pRates->OutputAudioBitrate
+                == M4VIDEOEDITING_kUndefinedBitrate)
+                && (pRates->OutputVideoBitrate
+                == M4VIDEOEDITING_kUndefinedBitrate) )
+            {
+                /* set audio = 1/3 and video = 2/3 */
+                if( fixed_audio == M4OSA_FALSE )
+                {
+                    if( pC->novideo )
+                        pRates->OutputAudioBitrate =
+                        M4MCS_intGetNearestBitrate(maxcombinedbitrate, 0);
+                    else
+                        pRates->OutputAudioBitrate =
+                        M4MCS_intGetNearestBitrate(maxcombinedbitrate / 3,
+                        0);
+
+                    if( pRates->OutputAudioBitrate < minaudiobitrate )
+                        pRates->OutputAudioBitrate = minaudiobitrate;
+
+                    if( pRates->OutputAudioBitrate > M4VIDEOEDITING_k96_KBPS )
+                        pRates->OutputAudioBitrate = M4VIDEOEDITING_k96_KBPS;
+                }
+
+                if( fixed_video == M4OSA_FALSE )
+                {
+                    pRates->OutputVideoBitrate =
+                        M4MCS_intGetNearestBitrate(maxcombinedbitrate
+                        - pRates->OutputAudioBitrate, 0);
+
+                    if( pRates->OutputVideoBitrate < minvideobitrate )
+                        pRates->OutputVideoBitrate = minvideobitrate;
+
+                    if( pRates->OutputVideoBitrate > M4VIDEOEDITING_k8_MBPS )
+                        pRates->OutputVideoBitrate =
+                            M4VIDEOEDITING_k8_MBPS; /*+ New Encoder bitrates */
+                }
+            }
+            else
+            {
+                /* priority 3 : audio bitrate */
+                if( pRates->OutputAudioBitrate
+                    != M4VIDEOEDITING_kUndefinedBitrate )
+                {
+                    while( ( fixed_audio == M4OSA_FALSE)
+                        && (pRates->OutputAudioBitrate >= minaudiobitrate)
+                        && (pRates->OutputAudioBitrate
+                        + minvideobitrate > maxcombinedbitrate) )
+                    {
+                        pRates->OutputAudioBitrate =
+                            M4MCS_intGetNearestBitrate(
+                            pRates->OutputAudioBitrate, -1);
+                    }
+
+                    if( ( fixed_audio == M4OSA_FALSE)
+                        && (pRates->OutputAudioBitrate < minaudiobitrate) )
+                    {
+                        pRates->OutputAudioBitrate = minaudiobitrate;
+                    }
+
+                    calcbitrate = M4MCS_intGetNearestBitrate(
+                                    maxcombinedbitrate
+                                    - pRates->OutputAudioBitrate, 0);
+
+                    if( calcbitrate < minvideobitrate )
+                        calcbitrate = minvideobitrate;
+
+                    if( calcbitrate > M4VIDEOEDITING_k8_MBPS )
+                        calcbitrate = M4VIDEOEDITING_k8_MBPS;
+
+                    if( ( fixed_video == M4OSA_FALSE)
+                        && (( pRates->OutputVideoBitrate
+                        == M4VIDEOEDITING_kUndefinedBitrate)
+                        || (pRates->OutputVideoBitrate > calcbitrate)) )
+                    {
+                        pRates->OutputVideoBitrate = calcbitrate;
+                    }
+                }
+                else
+                {
+                    /* priority 4 : video bitrate */
+                    if( pRates->OutputVideoBitrate
+                        != M4VIDEOEDITING_kUndefinedBitrate )
+                    {
+                        while( ( fixed_video == M4OSA_FALSE)
+                            && (pRates->OutputVideoBitrate >= minvideobitrate)
+                            && (pRates->OutputVideoBitrate
+                            + minaudiobitrate > maxcombinedbitrate) )
+                        {
+                            pRates->OutputVideoBitrate =
+                                M4MCS_intGetNearestBitrate(
+                                pRates->OutputVideoBitrate, -1);
+                        }
+
+                        if( ( fixed_video == M4OSA_FALSE)
+                            && (pRates->OutputVideoBitrate < minvideobitrate) )
+                        {
+                            pRates->OutputVideoBitrate = minvideobitrate;
+                        }
+
+                        calcbitrate =
+                            M4MCS_intGetNearestBitrate(maxcombinedbitrate
+                            - pRates->OutputVideoBitrate, 0);
+
+                        if( calcbitrate < minaudiobitrate )
+                            calcbitrate = minaudiobitrate;
+
+                        if( calcbitrate > M4VIDEOEDITING_k96_KBPS )
+                            calcbitrate = M4VIDEOEDITING_k96_KBPS;
+
+                        if( ( fixed_audio == M4OSA_FALSE)
+                            && (( pRates->OutputAudioBitrate
+                            == M4VIDEOEDITING_kUndefinedBitrate)
+                            || (pRates->OutputAudioBitrate > calcbitrate)) )
+                        {
+                            pRates->OutputAudioBitrate = calcbitrate;
+                        }
+                    }
+                }
+            }
+        }
+        else
+        {
+            /* priority 3 : audio bitrate */
+            if( pRates->OutputAudioBitrate != M4VIDEOEDITING_kUndefinedBitrate )
+            {
+                /* priority 4 : video bitrate */
+                if( pRates->OutputVideoBitrate
+                    != M4VIDEOEDITING_kUndefinedBitrate )
+                {
+                    /* compute max duration */
+                    maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (pRates->OutputVideoBitrate
+                        + pRates->OutputAudioBitrate) * 8000.0);
+
+                    if( maxduration + pRates->BeginCutTime
+                        > pC->InputFileProperties.uiClipDuration )
+                    {
+                        maxduration = pC->InputFileProperties.uiClipDuration
+                            - pRates->BeginCutTime;
+                    }
+
+                    if( calcduration > maxduration )
+                    {
+                        calcduration = maxduration;
+                    }
+
+                    if( calcduration == 0 )
+                    {
+                        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+                    }
+                }
+                else
+                {
+                    /* start with min video bitrate */
+                    pRates->OutputVideoBitrate = minvideobitrate;
+
+                    /* compute max duration */
+                    maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (pRates->OutputVideoBitrate
+                        + pRates->OutputAudioBitrate) * 8000.0);
+
+                    if( maxduration + pRates->BeginCutTime
+                        > pC->InputFileProperties.uiClipDuration )
+                    {
+                        maxduration = pC->InputFileProperties.uiClipDuration
+                            - pRates->BeginCutTime;
+                    }
+
+                    if( calcduration > maxduration )
+                    {
+                        calcduration = maxduration;
+                    }
+
+                    if( calcduration == 0 )
+                    {
+                        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+                    }
+
+                    /* search max possible video bitrate */
+                    maxcombinedbitrate = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (calcduration / 8000.0));
+
+                    while( ( fixed_video == M4OSA_FALSE)
+                        && (pRates->OutputVideoBitrate
+                        < M4VIDEOEDITING_k8_MBPS) ) /*+ New Encoder bitrates */
+                    {
+                        calcbitrate = M4MCS_intGetNearestBitrate(
+                            pRates->OutputVideoBitrate, +1);
+
+                        if( calcbitrate
+                            + pRates->OutputAudioBitrate <= maxcombinedbitrate )
+                            pRates->OutputVideoBitrate = calcbitrate;
+                        else
+                            break;
+                    }
+                }
+            }
+            else
+            {
+                /* priority 4 : video bitrate */
+                if( pRates->OutputVideoBitrate
+                    != M4VIDEOEDITING_kUndefinedBitrate )
+                {
+                    /* start with min audio bitrate */
+                    pRates->OutputAudioBitrate = minaudiobitrate;
+
+                    /* compute max duration */
+                    maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (pRates->OutputVideoBitrate
+                        + pRates->OutputAudioBitrate) * 8000.0);
+
+                    if( maxduration + pRates->BeginCutTime
+                        > pC->InputFileProperties.uiClipDuration )
+                    {
+                        maxduration = pC->InputFileProperties.uiClipDuration
+                            - pRates->BeginCutTime;
+                    }
+
+                    if( calcduration > maxduration )
+                    {
+                        calcduration = maxduration;
+                    }
+
+                    if( calcduration == 0 )
+                    {
+                        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+                    }
+
+                    /* search max possible audio bitrate */
+                    maxcombinedbitrate = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (calcduration / 8000.0));
+
+                    while( ( fixed_audio == M4OSA_FALSE)
+                        && (pRates->OutputAudioBitrate
+                        < M4VIDEOEDITING_k96_KBPS) )
+                    {
+                        calcbitrate = M4MCS_intGetNearestBitrate(
+                            pRates->OutputAudioBitrate, +1);
+
+                        if( calcbitrate
+                            + pRates->OutputVideoBitrate <= maxcombinedbitrate )
+                            pRates->OutputAudioBitrate = calcbitrate;
+                        else
+                            break;
+                    }
+                }
+                else
+                {
+                    /* compute max duration */
+                    maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (minvideobitrate + minaudiobitrate) * 8000.0);
+
+                    if( maxduration + pRates->BeginCutTime
+                        > pC->InputFileProperties.uiClipDuration )
+                    {
+                        maxduration = pC->InputFileProperties.uiClipDuration
+                            - pRates->BeginCutTime;
+                    }
+
+                    if( calcduration > maxduration )
+                    {
+                        calcduration = maxduration;
+                    }
+
+                    if( calcduration == 0 )
+                    {
+                        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+                    }
+
+                    /* set audio = 1/3 and video = 2/3 */
+                    maxcombinedbitrate = (M4OSA_UInt32)(pRates->OutputFileSize
+                        / M4MCS_MOOV_OVER_FILESIZE_RATIO
+                        / (calcduration / 8000.0));
+
+                    if( fixed_audio == M4OSA_FALSE )
+                    {
+                        if( pC->novideo )
+                            pRates->OutputAudioBitrate =
+                            M4MCS_intGetNearestBitrate(maxcombinedbitrate,
+                            0);
+                        else
+                            pRates->OutputAudioBitrate =
+                            M4MCS_intGetNearestBitrate(maxcombinedbitrate
+                            / 3, 0);
+
+                        if( pRates->OutputAudioBitrate < minaudiobitrate )
+                            pRates->OutputAudioBitrate = minaudiobitrate;
+
+                        if( pRates->OutputAudioBitrate
+                        > M4VIDEOEDITING_k96_KBPS )
+                        pRates->OutputAudioBitrate =
+                        M4VIDEOEDITING_k96_KBPS;
+                    }
+
+                    if( fixed_video == M4OSA_FALSE )
+                    {
+                        pRates->OutputVideoBitrate =
+                            M4MCS_intGetNearestBitrate(maxcombinedbitrate
+                            - pRates->OutputAudioBitrate, 0);
+
+                        if( pRates->OutputVideoBitrate < minvideobitrate )
+                            pRates->OutputVideoBitrate = minvideobitrate;
+
+                        if( pRates->OutputVideoBitrate
+                        > M4VIDEOEDITING_k8_MBPS )
+                        pRates->OutputVideoBitrate =
+                        M4VIDEOEDITING_k8_MBPS; /*+ New Encoder
+                                                bitrates */
+                    }
+                }
+            }
+        }
+    }
+
+    /* recompute max duration with final bitrates */
+    if( pRates->OutputFileSize > 0 )
+    {
+        maxduration = (M4OSA_UInt32)(pRates->OutputFileSize
+            / M4MCS_MOOV_OVER_FILESIZE_RATIO
+            / (pRates->OutputVideoBitrate + pRates->OutputAudioBitrate)
+            * 8000.0);
+    }
+    else
+    {
+        maxduration = pC->InputFileProperties.uiClipDuration;
+    }
+
+    if( maxduration
+        + pRates->BeginCutTime > pC->InputFileProperties.uiClipDuration )
+    {
+        maxduration =
+            pC->InputFileProperties.uiClipDuration - pRates->BeginCutTime;
+    }
+
+    if( pRates->EndCutTime == 0 )
+    {
+        pRates->EndCutTime = pRates->BeginCutTime + maxduration;
+    }
+    else
+    {
+        calcduration = pRates->EndCutTime - pRates->BeginCutTime;
+
+        if( calcduration > maxduration )
+        {
+            pRates->EndCutTime = pRates->BeginCutTime + maxduration;
+        }
+    }
+
+    /* Should never happen : constraints are too strong */
+    if( pRates->EndCutTime == pRates->BeginCutTime )
+    {
+        return M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT;
+    }
+
+    /* estimated resulting file size */
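+    /* Illustration: 512 kbps video + 96 kbps audio over a 20 s cut gives
+       (512000 + 96000) * 20000 / 8000 = 1 520 000 bytes of media data,
+       scaled up by the moov overhead ratio */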
+    pRates->OutputFileSize = (M4OSA_UInt32)(M4MCS_MOOV_OVER_FILESIZE_RATIO
+        * (pRates->OutputVideoBitrate + pRates->OutputAudioBitrate)
+        * (( pRates->EndCutTime - pRates->BeginCutTime) / 8000.0));
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_checkParamsAndStart(M4MCS_Context pContext)
+ * @brief   Check parameters to start
+ * @note
+ * @param   pContext           (IN) MCS context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for
+ *                              this function to be called
+ * @return  M4MCS_ERR_AUDIOBITRATE_TOO_HIGH:
+ *                              Audio bitrate too high (we limit to 96 kbps)
+ * @return  M4MCS_ERR_AUDIOBITRATE_TOO_LOW:
+ *                              Audio bitrate is too low (16 kbps min for aac,
+ *                              12.2 for amr, 8 for mp3)
+ * @return  M4MCS_ERR_BEGIN_CUT_EQUALS_END_CUT:
+ *                              Begin cut and End cut are equal
+ * @return  M4MCS_ERR_BEGIN_CUT_LARGER_THAN_DURATION:
+ *                              Begin cut time is larger than the input
+ *                              clip duration
+ * @return  M4MCS_ERR_END_CUT_SMALLER_THAN_BEGIN_CUT:
+ *                              End cut time is smaller than begin cut time
+ * @return  M4MCS_ERR_MAXFILESIZE_TOO_SMALL:
+ *                              Not enough space to store whole output
+ *                              file at given bitrates
+ * @return  M4MCS_ERR_VIDEOBITRATE_TOO_HIGH:
+ *                              Video bitrate too high (we limit to 800 kbps)
+ * @return  M4MCS_ERR_VIDEOBITRATE_TOO_LOW:
+ *                              Video bitrate too low
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_checkParamsAndStart( M4MCS_Context pContext )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+    M4MCS_EncodingParams VerifyRates;
+    M4OSA_ERR err;
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_checkParamsAndStart: pContext is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    if( pC->m_bIsStillPicture )
+    {
+        /**
+        * Call the corresponding still picture MCS function*/
+        return M4MCS_stillPicCheckParamsAndStart(pC);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    /**
+    * Check state automaton */
+
+    if( M4MCS_kState_SET != pC->State )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_checkParamsAndStart(): Wrong State (%d), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /* Audio bitrate should not stay undefined at this point */
+    if( ( pC->noaudio == M4OSA_FALSE)
+        && (pC->AudioEncParams.Format != M4ENCODER_kAudioNULL)
+        && (pC->uiAudioBitrate == M4VIDEOEDITING_kUndefinedBitrate) )
+    {
+        M4OSA_TRACE1_0("M4MCS_checkParamsAndStart : undefined audio bitrate");
+        return M4MCS_ERR_AUDIOBITRATE_TOO_LOW;
+    }
+
+    /* Video bitrate should not stay undefined at this point */
+    if( ( pC->novideo == M4OSA_FALSE)
+        && (pC->EncodingVideoFormat != M4ENCODER_kNULL)
+        && (pC->uiVideoBitrate == M4VIDEOEDITING_kUndefinedBitrate) )
+    {
+        M4OSA_TRACE1_0("M4MCS_checkParamsAndStart : undefined video bitrate");
+        return M4MCS_ERR_VIDEOBITRATE_TOO_LOW;
+    }
+
+    /* Set end cut time if necessary (not an error) */
+    if( pC->uiEndCutTime == 0 )
+    {
+        pC->uiEndCutTime = pC->InputFileProperties.uiClipDuration;
+    }
+
+    /* Force a re-set to check validity of parameters */
+    VerifyRates.OutputVideoBitrate = pC->uiVideoBitrate;
+    VerifyRates.OutputAudioBitrate = pC->uiAudioBitrate;
+    VerifyRates.BeginCutTime = pC->uiBeginCutTime;
+    VerifyRates.EndCutTime = pC->uiEndCutTime;
+    VerifyRates.OutputFileSize = pC->uiMaxFileSize;
+    VerifyRates.OutputVideoTimescale = pC->outputVideoTimescale;
+
+    err = M4MCS_setEncodingParams(pContext, &VerifyRates);
+
+    /**
+    * Check parameters consistency */
+    if( err != M4NO_ERROR )
+    {
+        M4OSA_TRACE1_0("M4MCS_checkParamsAndStart : invalid parameter found");
+        return err;
+    }
+
+    /**
+    * All is OK : update state automaton */
+    pC->uiEncVideoBitrate = pC->uiVideoBitrate;
+    pC->AudioEncParams.Bitrate = pC->uiAudioBitrate;
+
+#ifdef M4MCS_WITH_FAST_OPEN
+    /**
+    * Remake the open if it was done in fast mode */
+
+    if( M4OSA_TRUE == pC->bFileOpenedInFastMode )
+    {
+        /* Close the file opened in fast mode */
+        M4MCS_intCleanUp_ReadersDecoders(pC);
+
+        pC->State = M4MCS_kState_CREATED;
+
+        /* Reopen it in normal mode */
+        err = M4MCS_open(pContext, pC->pInputFile, pC->InputFileType,
+            pC->pOutputFile, pC->pTemporaryFile);
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_checkParamsAndStart : M4MCS_Open returns 0x%x", err);
+            return err;
+        }
+    }
+
+#endif /* M4MCS_WITH_FAST_OPEN */
+
+    pC->State = M4MCS_kState_READY;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intStepSet(M4MCS_InternalContext* pC)
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intStepSet( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+    M4ENCODER_Header *encHeader;
+
+    /**
+    * Prepare the video decoder */
+    err = M4MCS_intPrepareVideoDecoder(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepSet(): M4MCS_intPrepareVideoDecoder() returns 0x%x",
+            err);
+        return err;
+    }
+
+    if( ( pC->InputFileProperties.VideoStreamType == M4VIDEOEDITING_kH264)
+        && (pC->EncodingVideoFormat == M4ENCODER_kNULL) )
+    {
+        pC->bH264Trim = M4OSA_TRUE;
+    }
+
+    /**
+    * Prepare the video encoder */
+    err = M4MCS_intPrepareVideoEncoder(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepSet(): M4MCS_intPrepareVideoEncoder() returns 0x%x",
+            err);
+        return err;
+    }
+
+    if( ( pC->uiBeginCutTime != 0)
+        && (pC->InputFileProperties.VideoStreamType == M4VIDEOEDITING_kH264)
+        && (pC->EncodingVideoFormat == M4ENCODER_kNULL) )
+    {
+
+        err = pC->pVideoEncoderGlobalFcts->pFctSetOption(pC->pViEncCtxt,
+            M4ENCODER_kOptionID_H264ProcessNALUContext,
+            (M4OSA_DataOption)pC->m_pInstance);
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1("M4MCS_intStetSet :pFctSetOption failed  (err 0x%x)",
+                err);
+            return err;
+        }
+
+        err = pC->pVideoEncoderGlobalFcts->pFctSetOption(pC->pViEncCtxt,
+            M4ENCODER_kOptionID_SetH264ProcessNALUfctsPtr,
+            (M4OSA_DataOption) &H264MCS_ProcessEncodedNALU);
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1("M4MCS_intStetSet :pFctSetOption failed  (err 0x%x)",
+                err);
+            return err;
+        }
+
+        err = pC->pVideoEncoderGlobalFcts->pFctGetOption(pC->pViEncCtxt,
+            M4ENCODER_kOptionID_EncoderHeader,
+            (M4OSA_DataOption) &encHeader);
+
+        if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_close: failed to get the encoder header (err 0x%x)",
+                err);
+            /**< no return here, we still have stuff to deallocate after close, even if it fails.*/
+        }
+        else
+        {
+            // Handle DSI first bits
+#define SPS_START_POS 6
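+            /* The encoder header is expected to follow the avcC
+               (AVCDecoderConfigurationRecord) layout: 5 configuration bytes,
+               1 byte SPS count, a 2-byte SPS length followed by the SPS,
+               then 1 byte PPS count and a 2-byte PPS length followed by the
+               PPS; SPS_START_POS points at the SPS length field */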
+
+            pC->m_pInstance->m_encoderSPSSize =
+                ( encHeader->pBuf[SPS_START_POS] << 8)
+                + encHeader->pBuf[SPS_START_POS + 1];
+            pC->m_pInstance->m_pEncoderSPS =
+                (M4OSA_UInt8 *)(encHeader->pBuf) + SPS_START_POS + 2;
+
+            pC->m_pInstance->m_encoderPPSSize =
+                ( encHeader->pBuf[SPS_START_POS + 3
+                + pC->m_pInstance->m_encoderSPSSize] << 8)
+                + encHeader->pBuf[SPS_START_POS + 4
+                + pC->m_pInstance->m_encoderSPSSize];
+            pC->m_pInstance->m_pEncoderPPS = (M4OSA_UInt8 *)encHeader->pBuf + SPS_START_POS + 5
+                + pC->m_pInstance->m_encoderSPSSize;
+
+            /* Check the DSI integrity */
+            if( encHeader->Size != (pC->m_pInstance->m_encoderSPSSize
+                + pC->m_pInstance->m_encoderPPSSize + 5 + SPS_START_POS) )
+            {
+                M4OSA_TRACE1_3(
+                    "!!! M4MCS_intStepSet ERROR : invalid SPS / PPS %d %d %d",
+                    encHeader->Size, pC->m_pInstance->m_encoderSPSSize,
+                    pC->m_pInstance->m_encoderPPSSize);
+                return M4ERR_PARAMETER;
+            }
+        }
+    }
+
+    /**
+    * Prepare audio processing */
+    err = M4MCS_intPrepareAudioProcessing(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepSet(): M4MCS_intPrepareAudioProcessing() returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Prepare the writer */
+    err = M4MCS_intPrepareWriter(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepSet(): M4MCS_intPrepareWriter() returns 0x%x", err);
+        return err;
+    }
+
+    /**
+    * Jump the audio stream to the begin cut time (all AUs are RAP)
+    * Must be done after the 3gpp writer init, because it may write the first
+    * audio AU in some cases */
+    err = M4MCS_intPrepareAudioBeginCut(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepSet(): M4MCS_intPrepareAudioBeginCut() returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Update state automaton */
+    if( 0 == pC->uiBeginCutTime )
+    {
+        pC->dViDecStartingCts = 0.0;
+        /**
+        * No begin cut, do the encoding */
+        pC->State = M4MCS_kState_PROCESSING;
+    }
+    else
+    {
+        /**
+        * Remember that we must start the decode/encode process at the begin cut time */
+        pC->dViDecStartingCts = (M4OSA_Double)pC->uiBeginCutTime;
+
+        /**
+        * Jumping */
+        pC->State = M4MCS_kState_BEGINVIDEOJUMP;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intStepSet(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intPrepareVideoDecoder(M4MCS_InternalContext* pC);
+ * @brief    Prepare the video decoder.
+ * @param    pC          (IN) MCS private context
+ * @return   M4NO_ERROR  No error
+ * @return   M4MCS_ERR_H263_PROFILE_NOT_SUPPORTED
+ * @return   Any error returned by an underlying module
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intPrepareVideoDecoder( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_Void *decoderUserData;
+    M4DECODER_OutputFilter FilterOption;
+
+    if( pC->novideo )
+        return M4NO_ERROR;
+
+    /**
+    * Create the decoder, if it has not been created yet (to get video properties for example) */
+    if( M4OSA_NULL == pC->pViDecCtxt )
+    {
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+        decoderUserData = pC->m_pCurrentVideoDecoderUserData;
+
+#else
+
+        decoderUserData = M4OSA_NULL;
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS ? */
+
+        err = pC->m_pVideoDecoder->m_pFctCreate(&pC->pViDecCtxt,
+            &pC->pReaderVideoStream->m_basicProperties, pC->m_pReaderDataIt,
+            &pC->ReaderVideoAU, decoderUserData);
+
+        if( (M4OSA_UInt32)(M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED) == err )
+        {
+            /**
+            * Our decoder is not compatible with H263 profile other than 0.
+            * So it returns this internal error code.
+            * We translate it to our own error code */
+            M4OSA_TRACE1_0("M4MCS_intPrepareVideoDecoder:\
+                           returning M4MCS_ERR_H263_PROFILE_NOT_SUPPORTED");
+            return M4MCS_ERR_H263_PROFILE_NOT_SUPPORTED;
+        }
+        else if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1("M4MCS_intPrepareVideoDecoder:\
+                           m_pVideoDecoder->m_pFctCreate returns 0x%x", err);
+            return err;
+        }
+
+        if( M4VIDEOEDITING_kH264 == pC->InputFileProperties.VideoStreamType )
+        {
+            FilterOption.m_pFilterFunction =
+                (M4OSA_Void *) &M4VIFI_ResizeBilinearYUV420toYUV420;
+            FilterOption.m_pFilterUserData = M4OSA_NULL;
+            err = pC->m_pVideoDecoder->m_pFctSetOption(pC->pViDecCtxt,
+                M4DECODER_kOptionID_OutputFilter,
+                (M4OSA_DataOption) &FilterOption);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1("M4MCS_intPrepareVideoDecoder:\
+                               m_pVideoDecoder->m_pFctSetOption returns 0x%x", err);
+                return err;
+            }
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intPrepareVideoDecoder(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intPrepareVideoEncoder(M4MCS_InternalContext* pC);
+ * @brief    Prepare the video encoder.
+ * @param    pC          (IN) MCS private context
+ * @return   M4NO_ERROR  No error
+ * @return   Any error returned by an underlying module
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intPrepareVideoEncoder( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+    M4ENCODER_AdvancedParams EncParams; /**< Encoder advanced parameters */
+    M4ENCODER_Params EncParams1;
+    M4OSA_Double dFrameRate;            /**< tmp variable */
+
+    if( pC->novideo )
+        return M4NO_ERROR;
+
+    if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
+    {
+        /* Approximate CTS increment */
+        pC->dCtsIncrement = 1000.0 / pC->pReaderVideoStream->m_averageFrameRate;
+
+        if( pC->uiBeginCutTime == 0 )
+        {
+            M4OSA_TRACE3_0(
+                "M4MCS_intPrepareVideoEncoder(): Null encoding, do nothing.");
+            return M4NO_ERROR;
+        }
+        else
+        {
+            M4OSA_TRACE3_0(
+                "M4MCS_intPrepareVideoEncoder(): Null encoding, I-frame defaults.");
+
+            /* Set useful parameters to encode the first I-frame */
+            EncParams.InputFormat = M4ENCODER_kIYUV420;
+
+            switch( pC->InputFileProperties.VideoStreamType )
+            {
+                case M4VIDEOEDITING_kH263:
+                    EncParams.Format = M4ENCODER_kH263;
+                    break;
+
+                case M4VIDEOEDITING_kMPEG4:
+                case M4VIDEOEDITING_kMPEG4_EMP:
+                    EncParams.Format = M4ENCODER_kMPEG4;
+                    break;
+
+                case M4VIDEOEDITING_kH264:
+                    EncParams.Format = M4ENCODER_kH264;
+                    break;
+
+                default:
+                    M4OSA_TRACE1_1("M4MCS_intPrepareVideoEncoder: unknown encoding video format\
+                                   (%d), returning M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED",
+                                   pC->InputFileProperties.VideoStreamType);
+                    return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+            }
+
+            EncParams.FrameWidth = pC->EncodingWidth;
+            EncParams.FrameHeight = pC->EncodingHeight;
+            EncParams.Bitrate = pC->uiEncVideoBitrate;
+            EncParams.bInternalRegulation =
+                M4OSA_FALSE; /* do not constrain the I-frame */
+            EncParams.FrameRate = pC->EncodingVideoFramerate;
+
+            /* Other encoding settings (most are dummy values) */
+            EncParams.uiHorizontalSearchRange = 0;    /* use default */
+            EncParams.uiVerticalSearchRange = 0;      /* use default */
+            EncParams.bErrorResilience = M4OSA_FALSE; /* no error resilience */
+            EncParams.uiIVopPeriod = 0;               /* use default */
+            EncParams.uiMotionEstimationTools =
+                0; /* M4V_MOTION_EST_TOOLS_ALL */
+            EncParams.bAcPrediction = M4OSA_TRUE;     /* use AC prediction */
+            EncParams.uiStartingQuantizerValue = 5;   /* initial QP = 5 */
+            EncParams.bDataPartitioning =
+                M4OSA_FALSE; /* no data partitioning */
+
+            /* Rate factor */
+            EncParams.uiTimeScale = pC->InputFileProperties.uiVideoTimeScale;
+            EncParams.uiRateFactor = 1;
+        }
+    }
+    else
+    {
+        M4OSA_TRACE3_0(
+            "M4MCS_intPrepareVideoEncoder(): Normal encoding, set full config.");
+
+        /**
+        * Set encoder shell parameters according to MCS settings */
+        EncParams.Format = pC->EncodingVideoFormat;
+        EncParams.InputFormat = M4ENCODER_kIYUV420;
+
+        /**
+        * Video frame size */
+        EncParams.FrameWidth = pC->EncodingWidth;
+        EncParams.FrameHeight = pC->EncodingHeight;
+
+        /**
+        * Video bitrate has been previously computed */
+        EncParams.Bitrate = pC->uiEncVideoBitrate;
+
+        /**
+        * MCS use the "true" core internal bitrate regulation */
+        EncParams.bInternalRegulation = M4OSA_TRUE;
+
+        /**
+        * Other encoder settings */
+        if( M4OSA_TRUE == pC->bActivateEmp )
+        {
+            EncParams.uiHorizontalSearchRange = 15;   /* set value */
+            EncParams.uiVerticalSearchRange = 15;     /* set value */
+            EncParams.bErrorResilience = M4OSA_FALSE; /* no error resilience */
+            EncParams.uiIVopPeriod = 15; /* one I frame every 15 frames */
+            EncParams.uiMotionEstimationTools =
+                1; /* M4V_MOTION_EST_TOOLS_NO_4MV */
+            EncParams.bAcPrediction = M4OSA_FALSE;    /* no AC prediction */
+            EncParams.uiStartingQuantizerValue = 10;  /* initial QP = 10 */
+            EncParams.bDataPartitioning =
+                M4OSA_FALSE; /* no data partitioning */
+        }
+        else
+        {
+            EncParams.uiHorizontalSearchRange = 0;    /* use default */
+            EncParams.uiVerticalSearchRange = 0;      /* use default */
+            EncParams.bErrorResilience = M4OSA_FALSE; /* no error resilience */
+            EncParams.uiIVopPeriod = 0;               /* use default */
+            EncParams.uiMotionEstimationTools =
+                0; /* M4V_MOTION_EST_TOOLS_ALL */
+            EncParams.bAcPrediction = M4OSA_TRUE;     /* use AC prediction */
+            EncParams.uiStartingQuantizerValue = 10;  /* initial QP = 10 */
+            EncParams.bDataPartitioning =
+                M4OSA_FALSE; /* no data partitioning */
+        }
+
+        /**
+        * Video encoder frame rate and rate factor */
+        EncParams.FrameRate = pC->EncodingVideoFramerate;
+        EncParams.uiTimeScale = pC->outputVideoTimescale;
+
+        switch( pC->EncodingVideoFramerate )
+        {
+            case M4ENCODER_k5_FPS:
+                dFrameRate = 5.0;
+                break;
+
+            case M4ENCODER_k7_5_FPS:
+                dFrameRate = 7.5;
+                break;
+
+            case M4ENCODER_k10_FPS:
+                dFrameRate = 10.0;
+                break;
+
+            case M4ENCODER_k12_5_FPS:
+                dFrameRate = 12.5;
+                break;
+
+            case M4ENCODER_k15_FPS:
+                dFrameRate = 15.0;
+                break;
+
+            case M4ENCODER_k20_FPS: /**< MPEG-4 only */
+                dFrameRate = 20.0;
+                break;
+
+            case M4ENCODER_k25_FPS: /**< MPEG-4 only */
+                dFrameRate = 25.0;
+                break;
+
+            case M4ENCODER_k30_FPS:
+                dFrameRate = 30.0;
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4MCS_intPrepareVideoEncoder: unknown encoding video frame rate\
+                    (0x%x), returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE",
+                    pC->EncodingVideoFramerate);
+                return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FRAME_RATE;
+        }
+
+        /**
+        * Compute the number of milliseconds between two frames */
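+        /* H.263 uses a 1001-based increment, presumably to stay aligned with
+           its 30000/1001 Hz picture clock; MPEG-4 and H.264 use a plain
+           1000 ms/s base */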
+        if( M4ENCODER_kH263 == EncParams.Format )
+        {
+            pC->dCtsIncrement = 1001.0 / dFrameRate;
+        }
+        else /**< MPEG4 or H.264 */
+        {
+            pC->dCtsIncrement = 1000.0 / dFrameRate;
+        }
+    }
+
+    /**
+    * Create video encoder */
+    err = pC->pVideoEncoderGlobalFcts->pFctInit(&pC->pViEncCtxt,
+        pC->pWriterDataFcts, \
+        M4MCS_intApplyVPP, pC, pC->pCurrentVideoEncoderExternalAPI, \
+        pC->pCurrentVideoEncoderUserData);
+
+    /**< We put the MCS context in place of the VPP context */
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareVideoEncoder: EncoderInt->pFctInit returns 0x%x",
+            err);
+        return err;
+    }
+
+    pC->encoderState = M4MCS_kEncoderClosed;
+
+    if( M4OSA_TRUE == pC->bH264Trim )
+        //if((M4ENCODER_kNULL == pC->EncodingVideoFormat)
+        //    && (M4VIDEOEDITING_kH264 == pC->InputFileProperties.VideoStreamType))
+    {
+        EncParams1.InputFormat = EncParams.InputFormat;
+        //EncParams1.InputFrameWidth = EncParams.InputFrameWidth;
+        //EncParams1.InputFrameHeight = EncParams.InputFrameHeight;
+        EncParams1.FrameWidth = EncParams.FrameWidth;
+        EncParams1.FrameHeight = EncParams.FrameHeight;
+        EncParams1.Bitrate = EncParams.Bitrate;
+        EncParams1.FrameRate = EncParams.FrameRate;
+        EncParams1.Format = M4ENCODER_kH264; //EncParams.Format;
+
+        err = pC->pVideoEncoderGlobalFcts->pFctOpen(pC->pViEncCtxt,
+            &pC->WriterVideoAU, &EncParams1);
+    }
+    else
+    {
+        err = pC->pVideoEncoderGlobalFcts->pFctOpen(pC->pViEncCtxt,
+            &pC->WriterVideoAU, &EncParams);
+    }
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareVideoEncoder: EncoderInt->pFctOpen returns 0x%x",
+            err);
+        return err;
+    }
+
+    pC->encoderState = M4MCS_kEncoderStopped;
+
+    if( M4OSA_NULL != pC->pVideoEncoderGlobalFcts->pFctStart )
+    {
+        err = pC->pVideoEncoderGlobalFcts->pFctStart(pC->pViEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareVideoEncoder: EncoderInt->pFctStart returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    pC->encoderState = M4MCS_kEncoderRunning;
+
+    /******************************/
+    /* Video resize management    */
+    /******************************/
+    /**
+    * Compare video input size and video output size to check if resize is needed */
+    if( ( (M4OSA_UInt32)EncParams.FrameWidth
+        != pC->pReaderVideoStream->m_videoWidth)
+        || ((M4OSA_UInt32)EncParams.FrameHeight
+        != pC->pReaderVideoStream->m_videoHeight) )
+    {
+        /**
+        * Allocate the intermediate video plane that will receive the decoded image before
+         resizing */
+        pC->pPreResizeFrame =
+            (M4VIFI_ImagePlane *)M4OSA_malloc(3 * sizeof(M4VIFI_ImagePlane),
+            M4MCS, (M4OSA_Char *)"m_pPreResizeFrame");
+
+        if( M4OSA_NULL == pC->pPreResizeFrame )
+        {
+            M4OSA_TRACE1_0("M4MCS_intPrepareVideoEncoder():\
+                           unable to allocate m_pPreResizeFrame, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+        pC->pPreResizeFrame[0].pac_data = M4OSA_NULL;
+        pC->pPreResizeFrame[1].pac_data = M4OSA_NULL;
+        pC->pPreResizeFrame[2].pac_data = M4OSA_NULL;
+
+        /**
+        * Allocate the Y plane */
+        pC->pPreResizeFrame[0].u_topleft = 0;
+        pC->pPreResizeFrame[0].u_width = pC->pReaderVideoStream->
+            m_videoWidth; /**< input width */
+        pC->pPreResizeFrame[0].u_height = pC->pReaderVideoStream->
+            m_videoHeight; /**< input height */
+        pC->pPreResizeFrame[0].u_stride = pC->
+            pPreResizeFrame[0].u_width; /**< simple case: stride equals width */
+
+        pC->pPreResizeFrame[0].pac_data =
+            (M4VIFI_UInt8 *)M4OSA_malloc(pC->pPreResizeFrame[0].u_stride \
+            *pC->pPreResizeFrame[0].u_height, M4MCS,
+            (M4OSA_Char *)"m_pPreResizeFrame[0].pac_data");
+
+        if( M4OSA_NULL == pC->pPreResizeFrame[0].pac_data )
+        {
+            M4OSA_TRACE1_0(
+                "M4MCS_intPrepareVideoEncoder():\
+                     unable to allocate m_pPreResizeFrame[0].pac_data, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+        /**
+        * Allocate the U plane */
+        pC->pPreResizeFrame[1].u_topleft = 0;
+        pC->pPreResizeFrame[1].u_width = pC->pPreResizeFrame[0].u_width
+            >> 1; /**< U width is half the Y width */
+        pC->pPreResizeFrame[1].u_height = pC->pPreResizeFrame[0].u_height
+            >> 1; /**< U height is half the Y height */
+        pC->pPreResizeFrame[1].u_stride = pC->
+            pPreResizeFrame[1].u_width; /**< simple case: stride equals width */
+
+        pC->pPreResizeFrame[1].pac_data =
+            (M4VIFI_UInt8 *)M4OSA_malloc(pC->pPreResizeFrame[1].u_stride \
+            *pC->pPreResizeFrame[1].u_height, M4MCS,
+            (M4OSA_Char *)"m_pPreResizeFrame[1].pac_data");
+
+        if( M4OSA_NULL == pC->pPreResizeFrame[1].pac_data )
+        {
+            M4OSA_TRACE1_0(
+                "M4MCS_intPrepareVideoEncoder():\
+                 unable to allocate m_pPreResizeFrame[1].pac_data, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+        /**
+        * Allocate the V plane */
+        pC->pPreResizeFrame[2].u_topleft = 0;
+        pC->pPreResizeFrame[2].u_width = pC->
+            pPreResizeFrame[1].u_width; /**< V width equals U width */
+        pC->pPreResizeFrame[2].u_height = pC->
+            pPreResizeFrame[1].u_height; /**< V height equals U height */
+        pC->pPreResizeFrame[2].u_stride = pC->
+            pPreResizeFrame[2].u_width; /**< simple case: stride equals width */
+
+        pC->pPreResizeFrame[2].pac_data =
+            (M4VIFI_UInt8 *)M4OSA_malloc(pC->pPreResizeFrame[2].u_stride \
+            *pC->pPreResizeFrame[2].u_height, M4MCS,
+            (M4OSA_Char *)"m_pPreResizeFrame[1].pac_data");
+
+        if( M4OSA_NULL == pC->pPreResizeFrame[2].pac_data )
+        {
+            M4OSA_TRACE1_0(
+                "M4MCS_intPrepareVideoEncoder():\
+                 unable to allocate m_pPreResizeFrame[2].pac_data, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intPrepareVideoEncoder(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intPrepareAudioProcessing(M4MCS_InternalContext* pC);
+ * @brief    Prepare the audio decoder, the sample rate converter (SSRC) and the audio encoder.
+ * @param    pC          (IN) MCS private context
+ * @return   M4NO_ERROR  No error
+ * @return   Any error returned by an underlying module
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intPrepareAudioProcessing( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+
+    SSRC_ReturnStatus_en
+        ReturnStatus; /* Function return status                       */
+    LVM_INT16 NrSamplesMin =
+        0; /* Minimal number of samples on the input or on the output */
+    LVM_INT32 ScratchSize; /* The size of the scratch memory               */
+    LVM_INT16
+        *pInputInScratch; /* Pointer to input in the scratch buffer       */
+    LVM_INT16
+        *pOutputInScratch; /* Pointer to the output in the scratch buffer  */
+    SSRC_Params_t ssrcParams;          /* Memory for init parameters                    */
+
+#ifdef MCS_DUMP_PCM_TO_FILE
+
+    file_au_reader = fopen("mcs_ReaderOutput.raw", "wb");
+    file_pcm_decoder = fopen("mcs_DecoderOutput.pcm", "wb");
+    file_pcm_encoder = fopen("mcs_EncoderInput.pcm", "wb");
+
+#endif
+
+    if( pC->noaudio )
+        return M4NO_ERROR;
+
+    if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
+    {
+        M4OSA_TRACE3_0(
+            "M4MCS_intPrepareAudioProcessing(): Null encoding, do nothing.");
+        return M4NO_ERROR;
+    }
+
+    /* ________________________________ */
+    /*|                                |*/
+    /*| Create and "start" the decoder |*/
+    /*|________________________________|*/
+
+    if( M4OSA_NULL == pC->m_pAudioDecoder )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_intPrepareAudioProcessing(): Fails to initiate the audio decoder.");
+        return M4MCS_ERR_AUDIO_CONVERSION_FAILED;
+    }
+
+    if( M4OSA_NULL == pC->pAudioDecCtxt )
+    {
+        err = pC->m_pAudioDecoder->m_pFctCreateAudioDec(&pC->pAudioDecCtxt,
+            pC->pReaderAudioStream, pC->m_pCurrentAudioDecoderUserData);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareVideoDecoder: m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    if( M4VIDEOEDITING_kAMR_NB == pC->InputFileProperties.AudioStreamType ) {
+        /* AMR DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else if( M4VIDEOEDITING_kEVRC == pC->InputFileProperties.AudioStreamType ) {
+        /* EVRC DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else if( M4VIDEOEDITING_kMP3 == pC->InputFileProperties.AudioStreamType ) {
+        /* MP3 DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else
+    {
+        /* AAC DECODER CONFIGURATION */
+        M4_AacDecoderConfig AacDecParam;
+
+        AacDecParam.m_AACDecoderProfile = AAC_kAAC;
+        AacDecParam.m_DownSamplingMode = AAC_kDS_OFF;
+
+        if( pC->AudioEncParams.Format == M4ENCODER_kAMRNB )
+        {
+            AacDecParam.m_OutputMode = AAC_kMono;
+        }
+        else
+        {
+            /* For this version, we encode only in AAC */
+            if( M4ENCODER_kMono == pC->AudioEncParams.ChannelNum )
+            {
+                AacDecParam.m_OutputMode = AAC_kMono;
+            }
+            else
+            {
+                AacDecParam.m_OutputMode = AAC_kStereo;
+            }
+        }
+
+        pC->m_pAudioDecoder->m_pFctSetOptionAudioDec(pC->pAudioDecCtxt,
+            M4AD_kOptionID_UserParam, (M4OSA_DataOption) &AacDecParam);
+    }
+
+    if( pC->m_pAudioDecoder->m_pFctStartAudioDec != M4OSA_NULL )
+    {
+        /* Not implemented in all decoders */
+        err = pC->m_pAudioDecoder->m_pFctStartAudioDec(pC->pAudioDecCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareVideoDecoder: m_pAudioDecoder->m_pFctStartAudioDec returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Allocate output buffer for the audio decoder */
+    pC->InputFileProperties.uiDecodedPcmSize =
+        pC->pReaderAudioStream->m_byteFrameLength
+        * pC->pReaderAudioStream->m_byteSampleSize
+        * pC->pReaderAudioStream->m_nbChannels;
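+    /* size of one decoded frame: frame length x sample size x channel count */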
+
+    if( pC->InputFileProperties.uiDecodedPcmSize > 0 )
+    {
+        pC->AudioDecBufferOut.m_bufferSize =
+            pC->InputFileProperties.uiDecodedPcmSize;
+        pC->AudioDecBufferOut.m_dataAddress =
+            (M4OSA_MemAddr8)M4OSA_malloc(pC->AudioDecBufferOut.m_bufferSize \
+            *sizeof(short), M4MCS, (M4OSA_Char *)"AudioDecBufferOut.m_bufferSize");
+    }
+
+    if( M4OSA_NULL == pC->AudioDecBufferOut.m_dataAddress )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_intPrepareVideoDecoder():\
+             unable to allocate AudioDecBufferOut.m_dataAddress, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /* _________________________ */
+    /*|                         |*/
+    /*| Set the SSRC parameters |*/
+    /*|_________________________|*/
+
+    switch( pC->pReaderAudioStream->m_samplingFrequency )
+    {
+        case 8000:
+            ssrcParams.SSRC_Fs_In = LVM_FS_8000;
+            break;
+
+        case 11025:
+            ssrcParams.SSRC_Fs_In = LVM_FS_11025;
+            break;
+
+        case 12000:
+            ssrcParams.SSRC_Fs_In = LVM_FS_12000;
+            break;
+
+        case 16000:
+            ssrcParams.SSRC_Fs_In = LVM_FS_16000;
+            break;
+
+        case 22050:
+            ssrcParams.SSRC_Fs_In = LVM_FS_22050;
+            break;
+
+        case 24000:
+            ssrcParams.SSRC_Fs_In = LVM_FS_24000;
+            break;
+
+        case 32000:
+            ssrcParams.SSRC_Fs_In = LVM_FS_32000;
+            break;
+
+        case 44100:
+            ssrcParams.SSRC_Fs_In = LVM_FS_44100;
+            break;
+
+        case 48000:
+            ssrcParams.SSRC_Fs_In = LVM_FS_48000;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareVideoDecoder: invalid input AAC sampling frequency (%d Hz),\
+                 returning M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY",
+                pC->pReaderAudioStream->m_samplingFrequency);
+            return M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY;
+    }
+
+    if( 1 == pC->pReaderAudioStream->m_nbChannels )
+    {
+        ssrcParams.SSRC_NrOfChannels = LVM_MONO;
+    }
+    else
+    {
+        ssrcParams.SSRC_NrOfChannels = LVM_STEREO;
+    }
+
+    /*FlB 26.02.2009: add mp3 as output format*/
+    if( pC->AudioEncParams.Format == M4ENCODER_kAAC
+        || pC->AudioEncParams.Format == M4ENCODER_kMP3 )
+    {
+        switch( pC->AudioEncParams.Frequency )
+        {
+            case M4ENCODER_k8000Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_8000;
+                break;
+
+            case M4ENCODER_k11025Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_11025;
+                break;
+
+            case M4ENCODER_k12000Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_12000;
+                break;
+
+            case M4ENCODER_k16000Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_16000;
+                break;
+
+            case M4ENCODER_k22050Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_22050;
+                break;
+
+            case M4ENCODER_k24000Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_24000;
+                break;
+
+            case M4ENCODER_k32000Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_32000;
+                break;
+
+            case M4ENCODER_k44100Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_44100;
+                break;
+
+            case M4ENCODER_k48000Hz:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_48000;
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4MCS_intPrepareAudioProcessing: invalid output AAC sampling frequency \
+                    (%d Hz), returning M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY",
+                    pC->AudioEncParams.Frequency);
+                return M4MCS_ERR_INVALID_AAC_SAMPLING_FREQUENCY;
+                break;
+        }
+    }
+    else
+    {
+        ssrcParams.SSRC_Fs_Out = LVM_FS_8000;
+    }
+
+
+
+    ReturnStatus = 0;
+
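+    /* The default SSRC block sizes below each correspond to 40 ms of audio
+       at the given sampling rate (e.g. 320 samples at 8 kHz) */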
+    switch( ssrcParams.SSRC_Fs_In )
+    {
+        case LVM_FS_8000:
+            ssrcParams.NrSamplesIn = 320;
+            break;
+
+        case LVM_FS_11025:
+            ssrcParams.NrSamplesIn = 441;
+            break;
+
+        case LVM_FS_12000:
+            ssrcParams.NrSamplesIn = 480;
+            break;
+
+        case LVM_FS_16000:
+            ssrcParams.NrSamplesIn = 640;
+            break;
+
+        case LVM_FS_22050:
+            ssrcParams.NrSamplesIn = 882;
+            break;
+
+        case LVM_FS_24000:
+            ssrcParams.NrSamplesIn = 960;
+            break;
+
+        case LVM_FS_32000:
+            ssrcParams.NrSamplesIn = 1280;
+            break;
+
+        case LVM_FS_44100:
+            ssrcParams.NrSamplesIn = 1764;
+            break;
+
+        case LVM_FS_48000:
+            ssrcParams.NrSamplesIn = 1920;
+            break;
+
+        default:
+            ReturnStatus = -1;
+            break;
+    }
+
+    switch( ssrcParams.SSRC_Fs_Out )
+    {
+        case LVM_FS_8000:
+            ssrcParams.NrSamplesOut = 320;
+            break;
+
+        case LVM_FS_11025:
+            ssrcParams.NrSamplesOut = 441;
+            break;
+
+        case LVM_FS_12000:
+            ssrcParams.NrSamplesOut = 480;
+            break;
+
+        case LVM_FS_16000:
+            ssrcParams.NrSamplesOut = 640;
+            break;
+
+        case LVM_FS_22050:
+            ssrcParams.NrSamplesOut = 882;
+            break;
+
+        case LVM_FS_24000:
+            ssrcParams.NrSamplesOut = 960;
+            break;
+
+        case LVM_FS_32000:
+            ssrcParams.NrSamplesOut = 1280;
+            break;
+
+        case LVM_FS_44100:
+            ssrcParams.NrSamplesOut = 1764;
+            break;
+
+        case LVM_FS_48000:
+            ssrcParams.NrSamplesOut = 1920;
+            break;
+
+        default:
+            ReturnStatus = -1;
+            break;
+    }
+
+
+
+    if( ReturnStatus != SSRC_OK )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareAudioProcessing:\
+             Error code %d returned by the SSRC_GetNrSamples function",
+            ReturnStatus);
+        return M4MCS_ERR_AUDIO_CONVERSION_FAILED;
+    }
+
+    NrSamplesMin =
+        (LVM_INT16)((ssrcParams.NrSamplesIn > ssrcParams.NrSamplesOut)
+        ? ssrcParams.NrSamplesOut : ssrcParams.NrSamplesIn);
+
+    while( NrSamplesMin < M4MCS_SSRC_MINBLOCKSIZE )
+    { /* Don't take blocks smaller than the minimal block size */
+        ssrcParams.NrSamplesIn = (LVM_INT16)(ssrcParams.NrSamplesIn << 1);
+        ssrcParams.NrSamplesOut = (LVM_INT16)(ssrcParams.NrSamplesOut << 1);
+        NrSamplesMin = (LVM_INT16)(NrSamplesMin << 1);
+    }
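+    /* Doubling both sides preserves the input/output sample ratio while
+       reaching the minimal SSRC block size */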
+
+
+    pC->iSsrcNbSamplIn = (LVM_INT16)(
+        ssrcParams.
+        NrSamplesIn); /* multiplication by NrOfChannels is done below */
+    pC->iSsrcNbSamplOut = (LVM_INT16)(ssrcParams.NrSamplesOut);
+
+    /**
+    * Allocate buffer for the input of the SSRC */
+    pC->pSsrcBufferIn =
+        (M4OSA_MemAddr8)M4OSA_malloc(pC->iSsrcNbSamplIn * sizeof(short) \
+        *pC->pReaderAudioStream->m_nbChannels, M4MCS,
+        (M4OSA_Char *)"pSsrcBufferIn");
+
+    if( M4OSA_NULL == pC->pSsrcBufferIn )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_intPrepareVideoDecoder():\
+             unable to allocate pSsrcBufferIn, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+    pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
+
+    /**
+    * Allocate buffer for the output of the SSRC */
+    pC->pSsrcBufferOut =
+        (M4OSA_MemAddr8)M4OSA_malloc(pC->iSsrcNbSamplOut * sizeof(short) \
+        *pC->pReaderAudioStream->m_nbChannels, M4MCS,
+        (M4OSA_Char *)"pSsrcBufferOut");
+
+    if( M4OSA_NULL == pC->pSsrcBufferOut )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_intPrepareVideoDecoder():\
+             unable to allocate pSsrcBufferOut, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+
+    pC->pLVAudioResampler = (M4OSA_Int32)LVAudioResamplerCreate(
+        16, /*gInputParams.lvBTChannelCount*/
+        /*pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels*/
+        (M4OSA_Int16)pC->InputFileProperties.uiNbChannels/*ssrcParams.SSRC_NrOfChannels*/,
+        /* gInputParams.lvOutSampleRate*//*pSettings->outputASF*/
+        pC->AudioEncParams.Frequency/*ssrcParams.SSRC_Fs_Out*/, 1);
+    LVAudiosetSampleRate((M4OSA_Int32)pC->pLVAudioResampler,
+        /*gInputParams.lvInSampleRate*/
+        /*pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency*/
+        pC->InputFileProperties.uiSamplingFrequency/*ssrcParams.SSRC_Fs_In*/);
+
+    LVAudiosetVolume((M4OSA_Int32)pC->pLVAudioResampler, (M4OSA_Int16)(0x1000 /* 0x7fff */),
+        (M4OSA_Int16)(0x1000/*0x7fff*/));
+
+
+    /* ________________________ */
+    /*|                        |*/
+    /*| Init the audio encoder |*/
+    /*|________________________|*/
+
+    /* Initialise the audio encoder */
+
+    err = pC->pAudioEncoderGlobalFcts->pFctInit(&pC->pAudioEncCtxt,
+        pC->pCurrentAudioEncoderUserData);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareAudioProcessing: pAudioEncoderGlobalFcts->pFctInit returns 0x%x",
+            err);
+        return err;
+    }
+
+    /* Open the audio encoder */
+    err = pC->pAudioEncoderGlobalFcts->pFctOpen(pC->pAudioEncCtxt,
+        &pC->AudioEncParams, &pC->pAudioEncDSI,
+        M4OSA_NULL /* no grabbing */);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareAudioProcessing: pAudioEncoderGlobalFcts->pFctOpen returns 0x%x",
+            err);
+        return err;
+    }
+
+    /* Allocate the input buffer for the audio encoder */
+    switch( pC->AudioEncParams.Format )
+    {
+        case M4ENCODER_kAMRNB:
+            pC->audioEncoderGranularity = M4MCS_PCM_AMR_GRANULARITY_SAMPLES;
+            break;
+
+        case M4ENCODER_kAAC:
+            pC->audioEncoderGranularity = M4MCS_PCM_AAC_GRANULARITY_SAMPLES;
+            break;
+
+            /*FlB 26.02.2009: add mp3 as output format*/
+        case M4ENCODER_kMP3:
+            pC->audioEncoderGranularity = M4MCS_PCM_MP3_GRANULARITY_SAMPLES;
+            break;
+
+        default:
+            break;
+    }
+
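+    /* The granularity above is a number of samples; convert it to bytes of
+       16-bit PCM (two channels when stereo) */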
+    if( M4ENCODER_kMono == pC->AudioEncParams.ChannelNum )
+        pC->audioEncoderGranularity *= sizeof(short);
+    else
+        pC->audioEncoderGranularity *= sizeof(short) * 2;
+
+    pC->pPosInAudioEncoderBuffer = M4OSA_NULL;
+    pC->pAudioEncoderBuffer =
+        (M4OSA_MemAddr8)M4OSA_malloc(pC->audioEncoderGranularity, M4MCS,
+        (M4OSA_Char *)"pC->pAudioEncoderBuffer");
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intPrepareAudioProcessing(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intPrepareWriter(M4MCS_InternalContext* pC);
+ * @brief    Prepare the writer.
+ * @param    pC          (IN) MCS private context
+ * @return   M4NO_ERROR  No error
+ * @return   Any error returned by an underlying module
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intPrepareWriter( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_UInt32 uiVersion; /**< To write component version in 3gp writer */
+    M4OSA_MemAddr8 pDSI = M4OSA_NULL; /**< To create the Decoder Specific Info */
+    M4SYS_StreamIDValue optionValue; /**< For the setoption calls */
+    M4OSA_UInt32 TargetedFileSize;
+    M4OSA_Bool bMULPPSSPS = M4OSA_FALSE;
+
+    /**
+    * Init the writer */
+    err = pC->pWriterGlobalFcts->pFctOpen(&pC->pWriterContext, pC->pOutputFile,
+        pC->pOsaFileWritPtr, pC->pTemporaryFile, pC->pOsaFileReadPtr);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctOpen returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Link to the writer context in the writer interface */
+    pC->pWriterDataFcts->pWriterContext = pC->pWriterContext;
+
+    /**
+    * Set the product description string in the written file */
+    err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+        M4WRITER_kEmbeddedString, (M4OSA_DataOption)"NXP-SW : MCS    ");
+
+    if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+        != err) ) /* this option may not be implemented by some writers */
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareWriter:\
+             pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedString) returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Set the product version in the written file */
+    uiVersion =
+        M4VIDEOEDITING_VERSION_MAJOR * 100 + M4VIDEOEDITING_VERSION_MINOR * 10
+        + M4VIDEOEDITING_VERSION_REVISION;
+    err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+        M4WRITER_kEmbeddedVersion, (M4OSA_DataOption) &uiVersion);
+
+    if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+        != err) ) /* this option may not be implemented by some writers */
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareWriter: \
+            pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedVersion) returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * In case of EMP, we have to explicitly give an EMP ftyp to the writer */
+    if( M4OSA_TRUE == pC->bActivateEmp )
+    {
+        M4VIDEOEDITING_FtypBox ftyp;
+
+        ftyp.major_brand = M4VIDEOEDITING_BRAND_3GP4;
+        ftyp.minor_version = M4VIDEOEDITING_BRAND_0000;
+        ftyp.nbCompatibleBrands = 2;
+        ftyp.compatible_brands[0] = M4VIDEOEDITING_BRAND_3GP4;
+        ftyp.compatible_brands[1] = M4VIDEOEDITING_BRAND_EMP;
+
+        err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+            (M4OSA_UInt32)M4WRITER_kSetFtypBox, (M4OSA_DataOption) &ftyp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareWriter:\
+                 pWriterGlobalFcts->pFctSetOption(M4WRITER_kSetFtypBox) returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * If there is a video input, allocate and fill the video stream structures for the writer */
+    if( pC->novideo == M4OSA_FALSE )
+    {
+        /**
+        * Fill Video properties structure for the AddStream method */
+        pC->WriterVideoStreamInfo.height = pC->EncodingHeight;
+        pC->WriterVideoStreamInfo.width = pC->EncodingWidth;
+        pC->WriterVideoStreamInfo.fps =
+            0; /**< Not used by the shell/core writer */
+        pC->WriterVideoStreamInfo.Header.pBuf =
+            M4OSA_NULL; /**< Will be updated later */
+        pC->WriterVideoStreamInfo.Header.Size = 0; /**< Will be updated later */
+
+        /**
+        * Fill Video stream description structure for the AddStream method */
+        switch( pC->EncodingVideoFormat )
+        {
+            case M4ENCODER_kMPEG4:
+                pC->WriterVideoStream.streamType = M4SYS_kMPEG_4;
+                break;
+
+            case M4ENCODER_kH263:
+                pC->WriterVideoStream.streamType = M4SYS_kH263;
+                break;
+
+            case M4ENCODER_kH264:
+                pC->WriterVideoStream.streamType = M4SYS_kH264;
+                break;
+
+            case M4ENCODER_kNULL:
+                switch( pC->InputFileProperties.VideoStreamType )
+                {
+                    case M4VIDEOEDITING_kMPEG4:
+                    case M4VIDEOEDITING_kMPEG4_EMP: /* RC */
+                        pC->WriterVideoStream.streamType = M4SYS_kMPEG_4;
+                        break;
+
+                    case M4VIDEOEDITING_kH263:
+                        pC->WriterVideoStream.streamType = M4SYS_kH263;
+                        break;
+
+                    case M4VIDEOEDITING_kH264:
+                        pC->WriterVideoStream.streamType = M4SYS_kH264;
+                        break;
+
+                    default:
+                        M4OSA_TRACE1_1(
+                            "M4MCS_intPrepareWriter: case input=M4ENCODER_kNULL, \
+                            unknown format (0x%x),\
+                             returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+                            pC->EncodingVideoFormat);
+                        return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+                }
+                break;
+
+            default: /**< It should never happen, already tested */
+                M4OSA_TRACE1_1(
+                    "M4MCS_intPrepareWriter: unknown format (0x%x),\
+                     returning M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+                    pC->EncodingVideoFormat);
+                return M4MCS_ERR_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+        }
+
+        /**
+        * Video bitrate value will be the real value */
+        pC->WriterVideoStream.averageBitrate =
+            (M4OSA_Int32)pC->uiEncVideoBitrate;
+        pC->WriterVideoStream.maxBitrate = (M4OSA_Int32)pC->uiEncVideoBitrate;
+
+        /**
+        * most other parameters are "dummy" */
+        pC->WriterVideoStream.streamID = M4MCS_WRITER_VIDEO_STREAM_ID;
+        pC->WriterVideoStream.timeScale =
+            0; /**< Not used by the shell/core writer */
+        pC->WriterVideoStream.profileLevel =
+            0; /**< Not used by the shell/core writer */
+        pC->WriterVideoStream.duration =
+            0; /**< Not used by the shell/core writer */
+        pC->WriterVideoStream.decoderSpecificInfoSize =
+            sizeof(M4WRITER_StreamVideoInfos);
+        pC->WriterVideoStream.decoderSpecificInfo =
+            (M4OSA_MemAddr32) &(pC->WriterVideoStreamInfo);
+
+        /**
+        * Update Encoder Header properties for Video stream if needed */
+        if( M4ENCODER_kH263 == pC->EncodingVideoFormat )
+        {
+            /**
+            * Creates the H263 DSI */
+            pC->WriterVideoStreamInfo.Header.Size =
+                7; /**< H263 output DSI is always 7 bytes */
+            pDSI = (M4OSA_MemAddr8)M4OSA_malloc(7, M4MCS, (M4OSA_Char
+                *)"pC->WriterVideoStreamInfo.Header.pBuf (DSI H263)");
+
+            if( M4OSA_NULL == pDSI )
+            {
+                M4OSA_TRACE1_0("M4MCS_intPrepareWriter(): unable to allocate pDSI (H263),\
+                               returning M4ERR_ALLOC");
+                return M4ERR_ALLOC;
+            }
+
+            /**
+            * Vendor is NXP Software: N, X, P, S. */
+            pDSI[0] = 'N';
+            pDSI[1] = 'X';
+            pDSI[2] = 'P';
+            pDSI[3] = 'S';
+
+            /**
+            * Decoder version is 0 */
+            pDSI[4] = 0;
+
+            /**
+            * Level is the sixth byte of the DSI. */
+            switch( pC->EncodingWidth )
+            {
+                case M4ENCODER_SQCIF_Width:
+                case M4ENCODER_QCIF_Width:
+                    if( ( pC->uiEncVideoBitrate <= M4ENCODER_k64_KBPS)
+                        && (pC->EncodingVideoFramerate <= M4ENCODER_k15_FPS) )
+                    {
+                        pDSI[5] = 10;
+                    }
+                    else if( ( pC->uiEncVideoBitrate <= M4ENCODER_k128_KBPS)
+                        && (pC->EncodingVideoFramerate <= M4ENCODER_k15_FPS) )
+                    {
+                        pDSI[5] = 45;
+                    }
+                    else if( ( pC->uiEncVideoBitrate <= M4ENCODER_k128_KBPS)
+                        && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
+                    {
+                        pDSI[5] = 20;
+                    }
+                    else if( ( pC->uiEncVideoBitrate <= M4ENCODER_k384_KBPS)
+                        && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
+                    {
+                        pDSI[5] = 30;
+                    }
+                    else if( ( pC->uiEncVideoBitrate
+                        <= M4ENCODER_k800_KBPS/*2048*/)
+                        && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
+                    {
+                        pDSI[5] = 40;
+                    }
+                    break;
+
+                case M4ENCODER_CIF_Width:
+                    if( ( pC->uiEncVideoBitrate <= M4ENCODER_k128_KBPS)
+                        && (pC->EncodingVideoFramerate <= M4ENCODER_k15_FPS) )
+                    {
+                        pDSI[5] = 20;
+                    }
+                    else if( ( pC->uiEncVideoBitrate <= M4ENCODER_k384_KBPS)
+                        && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
+                    {
+                        pDSI[5] = 30;
+                    }
+                    else if( ( pC->uiEncVideoBitrate
+                        <= M4ENCODER_k800_KBPS/*2048*/)
+                        && (pC->EncodingVideoFramerate <= M4ENCODER_k30_FPS) )
+                    {
+                        pDSI[5] = 40;
+                    }
+                    break;
+
+                default:
+                    break;
+            }
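+
+            /* For illustration: a QCIF encoding at 100 kbps / 15 fps falls into the
+             * "<= 128 kbps, <= 15 fps" bucket above and gets the level byte 45, while a CIF
+             * encoding at 300 kbps / 25 fps gets 30. If none of the thresholds match (e.g. a
+             * bitrate above 800 kbps), pDSI[5] is not written by this switch. */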
+
+            /**
+            * Profile is the seventh byte of the DSI. */
+            pDSI[6] = 0;
+
+            pC->WriterVideoStreamInfo.Header.pBuf = pDSI;
+        }
+        else if( M4ENCODER_kNULL == pC->EncodingVideoFormat )
+        {
+#ifdef TIMESCALE_BUG
+            /* If we are in "timescale mode", we need to know how many bits the
+            vop_time_increment is coded on, and to change the DSI accordingly */
+
+            if( pC->uiVideoTimescale == 0 )
+            {
+                /* If we copy the stream from the input, we copy its DSI */
+                pC->WriterVideoStreamInfo.Header.Size = pC->pReaderVideoStream->
+                    m_basicProperties.m_decoderSpecificInfoSize;
+                pC->WriterVideoStreamInfo.Header.pBuf =
+                    (M4OSA_MemAddr8)pC->pReaderVideoStream->
+                    m_basicProperties.m_pDecoderSpecificInfo;
+            }
+            else
+            {
+                /* Allocate a new DSI */
+                pC->WriterVideoStreamInfo.Header.Size = pC->pReaderVideoStream->
+                    m_basicProperties.m_decoderSpecificInfoSize;
+                pC->WriterVideoStreamInfo.Header.pBuf =
+                    (M4OSA_Void
+                    *)M4OSA_malloc(pC->WriterVideoStreamInfo.Header.Size,
+                    M4MCS,
+                    (M4OSA_Char
+                    *)
+                    "New decoder specific info for timescale modification");
+
+                if( pC->WriterVideoStreamInfo.Header.pBuf == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("M4MCS_intPrepareWriter: Allocation error\
+                                   pC->WriterVideoStreamInfo.Header.pBuf");
+                    return M4ERR_ALLOC;
+                }
+
+                /* Copy Reading DSI to new DSI */
+                M4OSA_memcpy(pC->WriterVideoStreamInfo.Header.pBuf,
+                    pC->pReaderVideoStream->
+                    m_basicProperties.m_pDecoderSpecificInfo,
+                    pC->WriterVideoStreamInfo.Header.Size);
+
+                /* Call a function to change DSI and to get the nb of bits on which the
+                vop_time_increment is coded */
+                err = M4MCS_intParseVideoDSI(pC);
+            }
+
+#else
+            /* If we copy the stream from the input, we copy its DSI */
+
+            pC->WriterVideoStreamInfo.Header.Size = pC->pReaderVideoStream->
+                m_basicProperties.m_decoderSpecificInfoSize;
+            pC->WriterVideoStreamInfo.Header.pBuf =
+                (M4OSA_MemAddr8)pC->pReaderVideoStream->
+                m_basicProperties.m_pDecoderSpecificInfo;
+
+#endif
+
+        }
+        /* otherwise (MPEG4), the DSI will be recovered from the encoder later on. */
+
+        /*+CRLV6775 - H.264 Trimming  */
+        if( pC->bH264Trim == M4OSA_TRUE )
+        {
+            bMULPPSSPS = M4OSA_TRUE;
+            err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+                (M4OSA_UInt32)M4WRITER_kMUL_PPS_SPS,
+                (M4OSA_DataOption) &bMULPPSSPS);
+
+            if( ( M4NO_ERROR != err)
+                && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+                != err) ) /* this option may not be implemented by some writers */
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intPrepareWriter:\
+                     pWriterGlobalFcts->pFctSetOption(M4WRITER_kMUL_PPS_SPS) returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+        /*-CRLV6775 - H.264 Trimming  */
+        /**
+        * Add the video stream */
+        err = pC->pWriterGlobalFcts->pFctAddStream(pC->pWriterContext,
+            &pC->WriterVideoStream);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctAddStream(video) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Update AU properties for video stream */
+        pC->WriterVideoAU.stream = &(pC->WriterVideoStream);
+        pC->WriterVideoAU.dataAddress = M4OSA_NULL;
+        pC->WriterVideoAU.size = 0;
+        pC->WriterVideoAU.CTS = 0; /** Reset time */
+        pC->WriterVideoAU.DTS = 0;
+        pC->WriterVideoAU.attribute = AU_RAP;
+        pC->WriterVideoAU.nbFrag = 0; /** No fragment */
+        pC->WriterVideoAU.frag = M4OSA_NULL;
+
+        /**
+        * Set the writer max video AU size */
+        optionValue.streamID = M4MCS_WRITER_VIDEO_STREAM_ID;
+        optionValue.value = pC->uiVideoMaxAuSize;
+        err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxAUSize,
+            (M4OSA_DataOption) &optionValue);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareWriter: \
+                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, video) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Set the writer max video chunk size */
+        optionValue.value = pC->uiVideoMaxChunckSize;
+        err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
+            (M4OSA_DataOption) &optionValue);
+
+        if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+            != err) ) /* this option may not be implemented by some writers */
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareWriter:\
+                 pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxChunckSize, video) returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * If there is an audio input, allocate and fill the audio stream structures for the writer */
+    if( pC->noaudio == M4OSA_FALSE )
+    {
+        M4WRITER_StreamAudioInfos streamAudioInfo;
+
+        streamAudioInfo.nbSamplesPerSec = 0; /**< unused by our shell writer */
+        streamAudioInfo.nbBitsPerSample = 0; /**< unused by our shell writer */
+        streamAudioInfo.nbChannels = 1;      /**< unused by our shell writer */
+
+        pC->WriterAudioStream.averageBitrate =
+            0; /**< It is not used by the shell, the DSI is taken into account instead */
+        pC->WriterAudioStream.maxBitrate =
+            0; /**< Not used by the shell/core writer */
+
+        /**
+        * Fill Audio stream description structure for the AddStream method */
+        switch( pC->AudioEncParams.Format )
+        {
+            case M4ENCODER_kAMRNB:
+                pC->WriterAudioStream.streamType = M4SYS_kAMR;
+                break;
+
+            case M4ENCODER_kAAC:
+                pC->WriterAudioStream.streamType = M4SYS_kAAC;
+                pC->WriterAudioStream.averageBitrate =
+                    pC->AudioEncParams.Bitrate;
+                pC->WriterAudioStream.maxBitrate = pC->AudioEncParams.Bitrate;
+                break;
+
+                /*FlB 26.02.2009: add mp3 as output format*/
+            case M4ENCODER_kMP3:
+                pC->WriterAudioStream.streamType = M4SYS_kMP3;
+                break;
+
+            case M4ENCODER_kAudioNULL:
+                switch( pC->InputFileProperties.AudioStreamType )
+                {
+                case M4VIDEOEDITING_kAMR_NB:
+                    pC->WriterAudioStream.streamType = M4SYS_kAMR;
+                    break;
+                    /*FlB 26.02.2009: add mp3 as output format*/
+                case M4VIDEOEDITING_kMP3:
+                    pC->WriterAudioStream.streamType = M4SYS_kMP3;
+                    break;
+
+                case M4VIDEOEDITING_kAAC:
+                case M4VIDEOEDITING_kAACplus:
+                case M4VIDEOEDITING_keAACplus:
+                    pC->WriterAudioStream.streamType = M4SYS_kAAC;
+                    pC->WriterAudioStream.averageBitrate =
+                        pC->AudioEncParams.Bitrate;
+                    pC->WriterAudioStream.maxBitrate =
+                        pC->AudioEncParams.Bitrate;
+                    break;
+
+                case M4VIDEOEDITING_kEVRC:
+                    pC->WriterAudioStream.streamType = M4SYS_kEVRC;
+                    break;
+
+                case M4VIDEOEDITING_kNoneAudio:
+                case M4VIDEOEDITING_kPCM:
+                case M4VIDEOEDITING_kNullAudio:
+                case M4VIDEOEDITING_kUnsupportedAudio:
+                    break;
+                }
+                break;
+
+            default: /**< It should never happen, already tested */
+                M4OSA_TRACE1_1(
+                    "M4MCS_intPrepareWriter: \
+                    unknown format (0x%x), returning M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT",
+                    pC->AudioEncParams.Format);
+                return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT;
+        }
+
+        /**
+        * Fill the remaining audio stream fields (common to all output audio formats) */
+        pC->WriterAudioStream.streamID = M4MCS_WRITER_AUDIO_STREAM_ID;
+        pC->WriterAudioStream.duration =
+            0; /**< Not used by the shell/core writer */
+        pC->WriterAudioStream.profileLevel =
+            0; /**< Not used by the shell/core writer */
+        pC->WriterAudioStream.timeScale = pC->AudioEncParams.Frequency;
+
+        if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
+        {
+            /* If we copy the stream from the input, we copy its DSI */
+            streamAudioInfo.Header.Size = pC->pReaderAudioStream->
+                m_basicProperties.m_decoderSpecificInfoSize;
+            streamAudioInfo.Header.pBuf =
+                (M4OSA_MemAddr8)pC->pReaderAudioStream->
+                m_basicProperties.m_pDecoderSpecificInfo;
+        }
+        else
+        {
+            if( pC->pAudioEncDSI.pInfo != M4OSA_NULL )
+            {
+                /* Use the DSI given by the encoder open() */
+                streamAudioInfo.Header.Size = pC->pAudioEncDSI.infoSize;
+                streamAudioInfo.Header.pBuf = pC->pAudioEncDSI.pInfo;
+            }
+            else
+            {
+                /* Writer will put a default Philips DSI */
+                streamAudioInfo.Header.Size = 0;
+                streamAudioInfo.Header.pBuf = M4OSA_NULL;
+            }
+        }
+
+        /**
+        * Our writer shell interface is a little tricky: we put M4WRITER_StreamAudioInfos
+         in the DSI pointer... */
+        pC->WriterAudioStream.decoderSpecificInfo =
+            (M4OSA_MemAddr32) &streamAudioInfo;
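+
+        /* Note: streamAudioInfo is a local variable, so the pointer stored in
+         * decoderSpecificInfo is only valid during the pFctAddStream() call below; the
+         * shell writer is assumed to copy whatever it needs at that point. */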
+
+        /**
+        * Add the audio stream to the writer */
+        err = pC->pWriterGlobalFcts->pFctAddStream(pC->pWriterContext,
+            &pC->WriterAudioStream);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctAddStream(audio) returns 0x%x",
+                err);
+            return err;
+        }
+
+        /**
+        * Link the AU and the stream */
+        pC->WriterAudioAU.stream = &(pC->WriterAudioStream);
+        pC->WriterAudioAU.dataAddress = M4OSA_NULL;
+        pC->WriterAudioAU.size = 0;
+        pC->WriterAudioAU.CTS = 0; /** Reset time */
+        pC->WriterAudioAU.DTS = 0;
+        pC->WriterAudioAU.attribute = 0;
+        pC->WriterAudioAU.nbFrag = 0; /** No fragment */
+        pC->WriterAudioAU.frag = M4OSA_NULL;
+
+        /**
+        * Set the writer audio max AU size */
+        /* As the max bitrate is now 320 kbps instead of 128 kbps, the max AU size has to be
+         * increased; adapt it according to the stream type and the number of channels */
+        /* After tests, a margin of 3 is taken (a margin of 2 was not enough and led to
+         * memory overwrites) */
+        //pC->uiAudioMaxAuSize = M4MCS_AUDIO_MAX_AU_SIZE;
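+        /* Worked example (indicative values; the granularity constants are defined elsewhere
+         * in the MCS): assuming M4MCS_PCM_AAC_GRANULARITY_SAMPLES is 1024 and a stereo input,
+         * the AAC case below gives 1024 * ((2 * sizeof(short)) + 3) = 1024 * 7 = 7168 bytes
+         * for uiAudioMaxAuSize. */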
+        switch( pC->WriterAudioStream.streamType )
+        {
+            case M4SYS_kAMR:
+                pC->uiAudioMaxAuSize = M4MCS_PCM_AMR_GRANULARITY_SAMPLES
+                    * (( pC->InputFileProperties.uiNbChannels
+                    * sizeof(short)) + 3);
+                break;
+
+            case M4SYS_kMP3:
+                pC->uiAudioMaxAuSize = M4MCS_PCM_MP3_GRANULARITY_SAMPLES
+                    * (( pC->InputFileProperties.uiNbChannels
+                    * sizeof(short)) + 3);
+                break;
+
+            case M4SYS_kAAC:
+                pC->uiAudioMaxAuSize = M4MCS_PCM_AAC_GRANULARITY_SAMPLES
+                    * (( pC->InputFileProperties.uiNbChannels
+                    * sizeof(short)) + 3);
+                break;
+                /*case M4SYS_kEVRC:
+                pC->uiAudioMaxAuSize = M4MCS_PCM_EVRC_GRANULARITY_SAMPLES*
+                ((pC->InputFileProperties.uiNbChannels * sizeof(short))+3);
+                break;*/
+            default: /**< It should never happen, already tested */
+                M4OSA_TRACE1_1(
+                    "M4MCS_intPrepareWriter: unknown format (0x%x),\
+                     returning M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT",
+                    pC->WriterAudioStream.streamType);
+                return M4MCS_ERR_UNDEFINED_OUTPUT_AUDIO_FORMAT;
+        }
+
+        optionValue.streamID = M4MCS_WRITER_AUDIO_STREAM_ID;
+        optionValue.value = pC->uiAudioMaxAuSize;
+        err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxAUSize,
+            (M4OSA_DataOption) &optionValue);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctSetOption(audio,\
+                M4WRITER_kMaxAUSize) returns 0x%x",
+                err);
+            return err;
+        }
+
+        optionValue.value = M4MCS_AUDIO_MAX_CHUNK_SIZE;
+        err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
+            (M4OSA_DataOption) &optionValue);
+
+        if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+            != err) ) /* this option may not be implemented by some writers */
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctSetOption(audio,\
+                M4WRITER_kMaxChunckSize) returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /*
+    * Set the output file size limit on the writer */
+    TargetedFileSize = pC->uiMaxFileSize;
+    /* add 1 kB margin */
+    if( TargetedFileSize > 8192 )
+        TargetedFileSize -= 1024;
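+    /* For example, a 4 MB limit (uiMaxFileSize = 4194304) is passed to the writer as
+     * 4193280 bytes; limits of 8 kB or less are passed unchanged. */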
+
+    err = pC->pWriterGlobalFcts->pFctSetOption(pC->pWriterContext,
+        (M4OSA_UInt32)M4WRITER_kMaxFileSize,
+        (M4OSA_DataOption) &TargetedFileSize);
+
+    if( ( M4NO_ERROR != err) && (( (M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+        != err) ) /* this option may not be implemented by some writers */
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctSetOption\
+            (M4WRITER_kMaxFileSize) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Close the stream registration so the writer is ready to write data */
+    err = pC->pWriterGlobalFcts->pFctStartWriting(pC->pWriterContext);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareWriter: pWriterGlobalFcts->pFctStartWriting returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intPrepareWriter(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intPrepareAudioBeginCut(M4MCS_InternalContext* pC);
+ * @brief    Do the audio begin cut.
+ * @param    pC          (IN) MCS private context
+ * @return   M4NO_ERROR  No error
+ * @return   Any error returned by an underlying module
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intPrepareAudioBeginCut( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_Int32 iCts;
+    M4OSA_UInt32 uiFrameSize;
+
+    if( pC->noaudio )
+        return M4NO_ERROR;
+
+    /**
+    * Check if an audio begin cut is needed */
+    if( ( M4OSA_NULL == pC->pReaderAudioStream) || (0 == pC->uiBeginCutTime) )
+    {
+        /**
+        * Return with no error */
+        M4OSA_TRACE3_0(
+            "M4MCS_intPrepareAudioBeginCut(): returning M4NO_ERROR (a)");
+        return M4NO_ERROR;
+    }
+
+    /**
+    * Jump at the begin cut time */
+    iCts = pC->uiBeginCutTime;
+    err = pC->m_pReader->m_pFctJump(pC->pReaderContext,
+        (M4_StreamHandler *)pC->pReaderAudioStream, &iCts);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intPrepareAudioBeginCut: m_pFctJump(Audio) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Remember audio begin cut offset */
+    pC->iAudioCtsOffset = iCts;
+
+    /**
+    * AMR-NB & EVRC: there may be many frames per AU.
+    * In that case we need to slice the first AU to keep the 20 ms cut precision */
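+    /* For example, if the jump lands at iCts = 0 ms and uiBeginCutTime is 100 ms, the loop
+     * below skips five 20 ms frames inside the first AU, leaving iCts (and thus
+     * iAudioCtsOffset) at 100 ms. */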
+    if( ( M4DA_StreamTypeAudioAmrNarrowBand
+        == pC->pReaderAudioStream->m_basicProperties.m_streamType)
+        || (M4DA_StreamTypeAudioEvrc
+        == pC->pReaderAudioStream->m_basicProperties.m_streamType) )
+    {
+        /**
+        * If the next frame CTS is lower than the begin cut time,
+        * we must read the AU and parse its frames to reach the one
+        * nearest to the begin cut time */
+        if( ( iCts + 20) < (M4OSA_Int32)pC->uiBeginCutTime )
+        {
+            /**
+            * Read the first audio AU after the jump */
+            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderAudioStream,
+                &pC->ReaderAudioAU);
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                M4OSA_TRACE1_0(
+                    "M4MCS_intPrepareAudioBeginCut(): m_pReaderDataIt->m_pFctGetNextAu(audio)\
+                     returns M4WAR_NO_MORE_AU! Returning M4NO_ERROR");
+                return
+                    M4NO_ERROR; /**< no fatal error here, we should be able to pursue */
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intPrepareAudioBeginCut(): m_pReaderDataIt->m_pFctGetNextAu(Audio)\
+                     returns 0x%x",
+                    err);
+                return err;
+            }
+
+            /**
+            * While the next frame has a lower CTS than the begin cut time, advance to
+            the next frame */
+            while( ( iCts + 20) <= (M4OSA_Int32)pC->uiBeginCutTime )
+            {
+                /**
+                * Get the size of the frame */
+                switch( pC->pReaderAudioStream->m_basicProperties.m_streamType )
+                {
+                    case M4DA_StreamTypeAudioAmrNarrowBand:
+                        uiFrameSize = M4MCS_intGetFrameSize_AMRNB(
+                            pC->ReaderAudioAU.m_dataAddress);
+                        break;
+
+                    case M4DA_StreamTypeAudioEvrc:
+                        uiFrameSize = M4MCS_intGetFrameSize_EVRC(
+                            pC->ReaderAudioAU.m_dataAddress);
+                        break;
+
+                    default:
+                        uiFrameSize = 0;
+                        break;
+                }
+
+                if( 0 == uiFrameSize )
+                {
+                    /**
+                    * Corrupted frame! We get out of this mess!
+                    * We don't want to crash here... */
+                    M4OSA_TRACE1_0(
+                        "M4MCS_intPrepareAudioBeginCut(): \
+                        M4MCS_intGetFrameSize_xxx returns 0! Returning M4NO_ERROR");
+                    return
+                        M4NO_ERROR; /**< no fatal error here, we should be able to pursue */
+                }
+
+                /**
+                * Go to the next frame */
+                pC->ReaderAudioAU.m_dataAddress += uiFrameSize;
+                pC->ReaderAudioAU.m_size -= uiFrameSize;
+
+                /**
+                * Get the CTS of the next frame */
+                iCts += 20; /**< AMR, EVRC frame duration is always 20 ms */
+                pC->ReaderAudioAU.m_CTS = iCts;
+                pC->ReaderAudioAU.m_DTS = iCts;
+            }
+
+            /**
+            * Update the audio begin cut offset */
+            pC->iAudioCtsOffset = iCts;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intPrepareAudioBeginCut(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intStepEncoding(M4MCS_InternalContext* pC, M4OSA_UInt8* pProgress)
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intStepEncoding( M4MCS_InternalContext *pC,
+                                       M4OSA_UInt8 *pProgress )
+{
+    M4OSA_ERR err;
+    M4OSA_UInt32 uiAudioStepCount = 0;
+
+    /* ---------- VIDEO TRANSCODING ---------- */
+
+    if( ( pC->novideo == M4OSA_FALSE) && (M4MCS_kStreamState_STARTED
+        == pC->VideoState) ) /**< If the video encoding is going on */
+    {
+        if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
+        {
+            err = M4MCS_intVideoNullEncoding(pC);
+        }
+        else
+        {
+            err = M4MCS_intVideoTranscoding(pC);
+        }
+
+        /**
+        * No more space, quit properly */
+        if( M4WAR_WRITER_STOP_REQ == err )
+        {
+            *pProgress = (M4OSA_UInt8)(( ( (M4OSA_UInt32)pC->dViDecCurrentCts
+                - pC->uiBeginCutTime) * 100)
+                / (pC->uiEndCutTime - pC->uiBeginCutTime));
+
+            pC->State = M4MCS_kState_FINISHED;
+
+            /* A bad output file would be produced for a very short 3gp input clip */
+            if( pC->dViDecCurrentCts - pC->uiBeginCutTime == 0 )
+            {
+                /* Nothing has been encoded -> bad produced file -> error returned */
+                M4OSA_TRACE2_0(
+                    "M4MCS_intStepEncoding(): video transcoding returns\
+                     M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL");
+                return M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL;
+            }
+            else
+            {
+#ifndef M4MCS_AUDIOONLY
+                /* clean AIR context needed to keep media aspect ratio*/
+
+                if( M4OSA_NULL != pC->m_air_context )
+                {
+                    err = M4AIR_cleanUp(pC->m_air_context);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4MCS_intStepEncoding: Error when cleaning AIR: 0x%x",
+                            err);
+                        return err;
+                    }
+                    pC->m_air_context = M4OSA_NULL;
+                }
+
+#endif /*M4MCS_AUDIOONLY*/
+
+                M4OSA_TRACE2_0(
+                    "M4MCS_intStepEncoding(): video transcoding returns M4MCS_ERR_NOMORE_SPACE");
+                return M4MCS_ERR_NOMORE_SPACE;
+            }
+        }
+
+        /**< The input plane is null because the input image will be obtained by the
+        VPP filter from the context */
+        if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intStepEncoding(): video transcoding returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /* ---------- AUDIO TRANSCODING ---------- */
+
+    if( ( pC->noaudio == M4OSA_FALSE) && (M4MCS_kStreamState_STARTED
+        == pC->AudioState) ) /**< If there is an audio stream */
+    {
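+        /* Audio is processed in a loop so that its CTS catches up with the current video
+         * CTS, which keeps the two streams roughly interleaved in the output file. When
+         * there is no video (or the video stream is finished), exactly one audio AU is
+         * processed per call. */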
+        while(
+            /**< If the video encoding is running, encode audio until we reach video time */
+            ( ( pC->novideo == M4OSA_FALSE)
+            && (M4MCS_kStreamState_STARTED == pC->VideoState)
+            && (pC->ReaderAudioAU.m_CTS
+            + pC->m_audioAUDuration < pC->ReaderVideoAU.m_CTS)) ||
+            /**< If the video encoding is not running, perform 1 step of audio encoding */
+            (( M4MCS_kStreamState_STARTED == pC->AudioState)
+            && (uiAudioStepCount < 1)) )
+        {
+            uiAudioStepCount++;
+
+            /**< Check if an audio effect has to be applied */
+            err = M4MCS_intCheckAudioEffects(pC);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intStepEncoding(): M4MCS_intCheckAudioEffects returns err: 0x%x",
+                    err);
+                return err;
+            }
+
+            if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
+            {
+                err = M4MCS_intAudioNullEncoding(pC);
+            }
+            else /**< Audio transcoding */
+            {
+                err = M4MCS_intAudioTranscoding(pC);
+            }
+
+            /**
+            * No more space, quit properly */
+            if( M4WAR_WRITER_STOP_REQ == err )
+            {
+                *pProgress =
+                    (M4OSA_UInt8)(( ( (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS
+                    - pC->uiBeginCutTime) * 100)
+                    / (pC->uiEndCutTime - pC->uiBeginCutTime));
+
+                pC->State = M4MCS_kState_FINISHED;
+
+                /* A bad output file would be produced for a very short 3gp input clip */
+                if( pC->ReaderAudioAU.m_CTS - pC->uiBeginCutTime == 0 )
+                {
+                    /* Nothing has been encoded -> bad produced file -> error returned */
+                    M4OSA_TRACE2_0(
+                        "M4MCS_intStepEncoding():\
+                         audio transcoding returns M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL");
+                    return M4MCS_ERR_OUTPUT_FILE_SIZE_TOO_SMALL;
+                }
+                else
+                {
+#ifndef M4MCS_AUDIOONLY
+                    /* clean AIR context needed to keep media aspect ratio*/
+
+                    if( M4OSA_NULL != pC->m_air_context )
+                    {
+                        err = M4AIR_cleanUp(pC->m_air_context);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4MCS_intStepEncoding: Error when cleaning AIR: 0x%x",
+                                err);
+                            return err;
+                        }
+                        pC->m_air_context = M4OSA_NULL;
+                    }
+
+#endif /*M4MCS_AUDIOONLY*/
+
+                    M4OSA_TRACE2_0(
+                        "M4MCS_intStepEncoding(): \
+                        audio transcoding returns M4MCS_ERR_NOMORE_SPACE");
+                    return M4MCS_ERR_NOMORE_SPACE;
+                }
+            }
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                pC->AudioState = M4MCS_kStreamState_FINISHED;
+                M4OSA_TRACE3_0(
+                    "M4MCS_intStepEncoding(): audio transcoding returns M4WAR_NO_MORE_AU");
+                break;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intStepEncoding(): audio transcoding returns 0x%x",
+                    err);
+                return err;
+            }
+
+            /**
+            * Check for end cut */
+            /* We want the audio duration to be less than or equal to the video duration,
+            hence the (2*pC->m_audioAUDuration) margin */
+            if( (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS
+                + (2 *pC->m_audioAUDuration) > pC->uiEndCutTime )
+            {
+                pC->AudioState = M4MCS_kStreamState_FINISHED;
+                break;
+            }
+        }
+    }
+
+    /* ---------- PROGRESS MANAGEMENT ---------- */
+
+    /**
+    * Compute progress */
+    if( pC->novideo )
+    {
+        if( pC->ReaderAudioAU.m_CTS < pC->uiBeginCutTime )
+        {
+            *pProgress = 0;
+        }
+        else
+        {
+            *pProgress = (M4OSA_UInt8)(( ( (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS
+                - pC->uiBeginCutTime) * 100)
+                / (pC->uiEndCutTime - pC->uiBeginCutTime));
+        }
+        //printf(": %6.0f\b\b\b\b\b\b\b\b", pC->ReaderAudioAU.m_CTS);
+
+    }
+    else
+    {
+        if( pC->dViDecCurrentCts < pC->uiBeginCutTime )
+        {
+            *pProgress = 0;
+        }
+        else
+        {
+            *pProgress = (M4OSA_UInt8)(( ( (M4OSA_UInt32)pC->dViDecCurrentCts
+                - pC->uiBeginCutTime) * 100)
+                / (pC->uiEndCutTime - pC->uiBeginCutTime));
+        }
+        //printf(": %6.0f\b\b\b\b\b\b\b\b", pC->dViDecCurrentCts);
+    }
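+    /* Example: with uiBeginCutTime = 2000 ms, uiEndCutTime = 12000 ms and a current CTS of
+     * 7000 ms, the reported progress is (7000 - 2000) * 100 / (12000 - 2000) = 50. */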
+
+    /**
+    * Sanity check */
+    if( *pProgress > 99 )
+    {
+        *pProgress = 99;
+    }
+
+    /**
+    * Increment CTS for next step */
+    if( pC->novideo == M4OSA_FALSE )
+    {
+        if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
+        {
+           pC->dViDecCurrentCts +=  1;
+        }
+        else
+        {
+            pC->dViDecCurrentCts += pC->dCtsIncrement;
+        }
+    }
+
+    /**
+    * The transcoding is finished when no stream is being encoded anymore */
+    if( ( ( pC->novideo) || (M4MCS_kStreamState_FINISHED == pC->VideoState))
+        && (( pC->noaudio) || (M4MCS_kStreamState_FINISHED == pC->AudioState)) )
+    {
+        /* the AIR part can only be used when video codecs are compiled*/
+#ifndef M4MCS_AUDIOONLY
+        /* clean AIR context needed to keep media aspect ratio*/
+
+        if( M4OSA_NULL != pC->m_air_context )
+        {
+            err = M4AIR_cleanUp(pC->m_air_context);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intStepEncoding: Error when cleaning AIR: 0x%x",
+                    err);
+                return err;
+            }
+            pC->m_air_context = M4OSA_NULL;
+        }
+
+#endif /*M4MCS_AUDIOONLY*/
+        /**/
+
+        *pProgress = 100;
+        pC->State = M4MCS_kState_FINISHED;
+        M4OSA_TRACE2_0(
+            "M4MCS_intStepEncoding(): transcoding finished, returning M4MCS_WAR_TRANSCODING_DONE");
+        return M4MCS_WAR_TRANSCODING_DONE;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intStepEncoding(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intStepBeginVideoJump(M4MCS_InternalContext* pC)
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intStepBeginVideoJump( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_Int32 iCts;
+
+    if( pC->novideo )
+    {
+        pC->State = M4MCS_kState_BEGINVIDEODECODE;
+        return M4NO_ERROR;
+    }
+
+    /**
+    * Jump to the previous RAP in the clip (first get the time, then jump) */
+    iCts = (M4OSA_Int32)pC->dViDecStartingCts;
+    err = pC->m_pReader->m_pFctGetPrevRapTime(pC->pReaderContext,
+        (M4_StreamHandler *)pC->pReaderVideoStream, &iCts);
+
+    if( M4WAR_READER_INFORMATION_NOT_PRESENT == err )
+    {
+        /* No RAP table, jump backward and predecode */
+        iCts = (M4OSA_Int32)pC->dViDecStartingCts - M4MCS_NO_STSS_JUMP_POINT;
+
+        if( iCts < 0 )
+            iCts = 0;
+    }
+    else if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepBeginVideoJump: m_pFctGetPrevRapTime returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /* + CRLV6775 -H.264 Trimming */
+
+    if( M4OSA_TRUE == pC->bH264Trim )
+    {
+
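+        /* When H.264 trimming is enabled, the first video AU after the jump is read once and
+         * passed to H264MCS_ProcessSPS_PPS() (presumably so that the SPS/PPS of the input
+         * stream are known to the MCS H.264 context before processing starts); the original
+         * jump time is then restored and the real jump is performed again below. */
+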
+        // Save jump time for safety, this fix should be generic
+
+        M4OSA_Int32 iCtsOri = iCts;
+
+
+        err = pC->m_pReader->m_pFctJump(pC->pReaderContext,
+            (M4_StreamHandler *)pC->pReaderVideoStream, &iCts);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intStepBeginVideoJump: m_pFctJump(V) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        if( pC->ReaderVideoAU1.m_structSize == 0 )
+        {
+            /**
+            * Initializes an access Unit */
+            err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderVideoStream,
+                &pC->ReaderVideoAU1);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intStepBeginVideoJump(): m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
+                    err);
+                return err;
+            }
+            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderVideoStream,
+                &pC->ReaderVideoAU1);
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                M4OSA_TRACE2_0(
+                    "M4MCS_intStepBeginVideoJump(): \
+                    m_pReaderDataIt->m_pFctGetNextAu(video) returns M4WAR_NO_MORE_AU");
+                /* The video transcoding is finished */
+                pC->VideoState = M4MCS_kStreamState_FINISHED;
+                return err;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intStepBeginVideoJump():\
+                     m_pReaderDataIt->m_pFctGetNextAu(video) returns 0x%x",
+                    err);
+                return err;
+            }
+
+            pC->ReaderVideoAU1.m_structSize = 0;
+        }
+
+        err = H264MCS_ProcessSPS_PPS(pC->m_pInstance,
+            (M4OSA_UInt8 *)pC->ReaderVideoAU1.m_dataAddress, pC->ReaderVideoAU1.m_size);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intStepBeginVideoJump: H264MCS_ProcessSPS_PPS returns 0x%x!",
+                err);
+            return err;
+        }
+
+
+        // Restore jump time for safety, this fix should be generic
+
+        iCts = iCtsOri;
+
+
+    }
+    /* - CRLV6775 -H.264 Trimming */
+    err = pC->m_pReader->m_pFctJump(pC->pReaderContext,
+        (M4_StreamHandler *)pC->pReaderVideoStream, &iCts);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepBeginVideoJump: m_pFctJump(V) returns 0x%x!", err);
+        return err;
+    }
+
+    /**
+    * Decode one step */
+    pC->dViDecCurrentCts = (M4OSA_Double)(iCts + pC->iVideoBeginDecIncr);
+
+    /**
+    * Be sure we don't decode too far */
+    if( pC->dViDecCurrentCts > pC->dViDecStartingCts )
+    {
+        pC->dViDecCurrentCts = pC->dViDecStartingCts;
+    }
+
+    /**
+    * Decode at least once with the bJump flag to true */
+    M4OSA_TRACE3_1(
+        "M4MCS_intStepBeginVideoJump: Decoding upTo CTS %.3f",
+        pC->dViDecCurrentCts);
+    pC->isRenderDup = M4OSA_FALSE;
+    err =
+        pC->m_pVideoDecoder->m_pFctDecode(pC->pViDecCtxt, &pC->dViDecCurrentCts,
+        M4OSA_TRUE);
+
+    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err)
+        && (err != M4WAR_VIDEORENDERER_NO_NEW_FRAME) )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepBeginVideoJump: m_pFctDecode returns 0x%x!", err);
+        return err;
+    }
+
+    if( err == M4WAR_VIDEORENDERER_NO_NEW_FRAME )
+    {
+        M4OSA_TRACE2_0("Decoding output the same frame as before 1");
+        pC->isRenderDup = M4OSA_TRUE;
+    }
+
+    /**
+    * Increment decoding cts for the next step */
+    pC->dViDecCurrentCts += (M4OSA_Double)pC->iVideoBeginDecIncr;
+
+    /**
+    * Update state automaton */
+    if( pC->dViDecCurrentCts > pC->dViDecStartingCts )
+    {
+        /**
+        * Be sure we don't decode too far */
+        pC->dViDecCurrentCts = pC->dViDecStartingCts;
+        pC->State = M4MCS_kState_PROCESSING;
+    }
+    else
+    {
+        pC->State = M4MCS_kState_BEGINVIDEODECODE;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intStepBeginVideoJump(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intStepBeginVideoDecode(M4MCS_InternalContext* pC)
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intStepBeginVideoDecode( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+    M4_MediaTime dDecTarget;
+
+    if( pC->novideo )
+    {
+        pC->State = M4MCS_kState_PROCESSING;
+        return M4NO_ERROR;
+    }
+
+    /**
+    * Decode */
+    dDecTarget = pC->dViDecCurrentCts;
+    M4OSA_TRACE3_1("M4MCS_intStepBeginVideoDecode: Decoding upTo CTS %.3f",
+        pC->dViDecCurrentCts);
+    pC->isRenderDup = M4OSA_FALSE;
+    err = pC->m_pVideoDecoder->m_pFctDecode(pC->pViDecCtxt, &dDecTarget,
+        M4OSA_FALSE);
+
+    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err)
+        && (err != M4WAR_VIDEORENDERER_NO_NEW_FRAME) )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intStepBeginVideoDecode: m_pFctDecode returns 0x%x!", err);
+        return err;
+    }
+
+    if( err == M4WAR_VIDEORENDERER_NO_NEW_FRAME )
+    {
+        M4OSA_TRACE2_0("Decoding output the same frame as before 2");
+        pC->isRenderDup = M4OSA_TRUE;
+    }
+
+    /**
+    * Increment decoding cts for the next step */
+    pC->dViDecCurrentCts += (M4OSA_Double)pC->iVideoBeginDecIncr;
+
+    /**
+    * Update state automaton, if needed */
+    if( ( (M4OSA_UInt32)pC->dViDecCurrentCts > pC->dViDecStartingCts)
+        || (M4WAR_NO_MORE_AU == err) )
+    {
+        /**
+        * Be sure we don't decode too far */
+        pC->dViDecCurrentCts = (M4OSA_Double)pC->dViDecStartingCts;
+        pC->State = M4MCS_kState_PROCESSING;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intStepBeginVideoDecode(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/*****************************/
+/* define AMR silence frames */
+/*****************************/
+
+#define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE 13
+#define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION 160
+
+#ifdef M4VSS3GPP_SILENCE_FRAMES
+
+const M4OSA_UInt8 M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[
+    M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE] =
+    {
+        0x04, 0xFF, 0x18, 0xC7, 0xF0, 0x0D, 0x04, 0x33, 0xFF, 0xE0, 0x00, 0x00, 0x00
+    };
+#else
+
+extern
+const
+M4OSA_UInt8
+M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE];
+
+#endif
+
+/*****************************/
+/* define AAC silence frames */
+/*****************************/
+
+#define M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE      4
+
+#ifdef M4VSS3GPP_SILENCE_FRAMES
+
+const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_MONO[
+    M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE] =
+    {
+        0x00, 0xC8, 0x20, 0x07
+    };
+#else
+
+extern const M4OSA_UInt8
+M4VSS3GPP_AAC_AU_SILENCE_MONO[M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE];
+
+#endif
+
+#define M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE        6
+
+#ifdef M4VSS3GPP_SILENCE_FRAMES
+
+const M4OSA_UInt8 M4VSS3GPP_AAC_AU_SILENCE_STEREO[
+    M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE] =
+    {
+        0x21, 0x10, 0x03, 0x20, 0x54, 0x1C
+    };
+#else
+
+extern const
+M4OSA_UInt8
+M4VSS3GPP_AAC_AU_SILENCE_STEREO[M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE];
+
+#endif
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intAudioNullEncoding(M4MCS_InternalContext* pC)
+ * @return   M4NO_ERROR:         No error
+ ******************************************************************************
+ */
+
+static M4OSA_ERR M4MCS_intAudioNullEncoding( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;
+
+    if( pC->noaudio )
+        return M4NO_ERROR;
+
+    /* Check whether the whole audio AU has been written (happens at begin cut) */
+    if( pC->ReaderAudioAU.m_size == 0 )
+    {
+        /**
+        * Initializes a new AU if needed */
+        if( pC->ReaderAudioAU1.m_structSize == 0 )
+        {
+            /**
+            * Initializes an access Unit */
+            err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderAudioStream,
+                &pC->ReaderAudioAU1);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intAudioNullEncoding(): m_pReader->m_pFctFillAuStruct(audio) returns 0x%x",
+                    err);
+                return err;
+            }
+
+            pC->m_pDataAddress1 =
+                (M4OSA_MemAddr8)M4OSA_malloc(pC->ReaderAudioAU1.m_maxsize,
+                M4MCS, (M4OSA_Char *)"Temporary AU1 buffer");
+
+            if( pC->m_pDataAddress1 == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0(
+                    "M4MCS_intAudioNullEncoding(): allocation error");
+                return M4ERR_ALLOC;
+            }
+
+            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderAudioStream,
+                &pC->ReaderAudioAU1);
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                M4OSA_TRACE2_0(
+                    "M4MCS_intAudioNullEncoding():\
+                     m_pReaderDataIt->m_pFctGetNextAu(audio) returns M4WAR_NO_MORE_AU");
+                /* The audio transcoding is finished */
+                pC->AudioState = M4MCS_kStreamState_FINISHED;
+                return err;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intAudioNullEncoding(): \
+                    m_pReaderDataIt->m_pFctGetNextAu(Audio) returns 0x%x",
+                    err);
+                return err;
+            }
+            /*FB 2009.04.02: PR surnxp#616: Crash in MCS while copying the audio AU,
+             constant memory reader case*/
+            if( pC->ReaderAudioAU1.m_maxsize
+        > pC->pReaderAudioStream->m_basicProperties.m_maxAUSize )
+            {
+                /* Constant memory reader case, we need to reallocate the temporary buffers */
+                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                    *) &(pC->m_pDataAddress1), pC->ReaderAudioAU1.m_maxsize);
+                /* pC->m_pDataAddress1 and
+                pC->m_pDataAddress2 must be reallocated at the same time */
+                /* because pC->pReaderAudioStream->m_basicProperties.m_maxAUSize takes the
+                 maximum value. Then the test "if(pC->ReaderAudioAU?.m_maxsize >
+                  pC->pReaderAudioStream->m_basicProperties.m_maxAUSize)" is never true */
+                /* and the size of the second buffer is never changed. */
+                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                    *) &(pC->m_pDataAddress2), pC->ReaderAudioAU1.m_maxsize);
+                /* pC->m_pDataAddress1 and
+                pC->m_pDataAddress2 must be reallocated at the same time */
+                /* Update stream properties */
+                pC->pReaderAudioStream->m_basicProperties.m_maxAUSize =
+                    pC->ReaderAudioAU1.m_maxsize;
+            }
+            /**/
+            M4OSA_memcpy((M4OSA_MemAddr8)pC->m_pDataAddress1,
+                (M4OSA_MemAddr8)pC->ReaderAudioAU1.m_dataAddress,
+                pC->ReaderAudioAU1.m_size);
+        }
+
+        if( pC->ReaderAudioAU2.m_structSize == 0 )
+        {
+            /**
+            * Initializes an access Unit */
+            err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderAudioStream,
+                &pC->ReaderAudioAU2);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intAudioNullEncoding(): m_pReader->m_pFctFillAuStruct(audio) returns 0x%x",
+                    err);
+                return err;
+            }
+            pC->m_pDataAddress2 =
+                (M4OSA_MemAddr8)M4OSA_malloc(pC->ReaderAudioAU2.m_maxsize,
+                M4MCS, (M4OSA_Char *)"Temporary AU buffer");
+
+            if( pC->m_pDataAddress2 == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0(
+                    "M4MCS_intAudioNullEncoding(): allocation error");
+                return M4ERR_ALLOC;
+            }
+        }
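+        /* The two reader AUs are used as a one-AU look-ahead: the most recently read AU
+         * (the one with the higher CTS) is the one written out on this call, while the next
+         * AU from the file is read into the other slot, so that m_audioAUDuration can be
+         * computed as the CTS difference between consecutive AUs. The payloads are kept in
+         * the temporary buffers m_pDataAddress1/2 so that they remain valid until written,
+         * since the reader may recycle its own AU buffers (see the constant memory reader
+         * handling below). */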
+        /**
+        * Read the next audio AU in the input file */
+        if( pC->ReaderAudioAU2.m_CTS > pC->ReaderAudioAU1.m_CTS )
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8) &pC->ReaderAudioAU,
+                (M4OSA_MemAddr8) &pC->ReaderAudioAU2, sizeof(M4_AccessUnit));
+            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderAudioStream,
+                &pC->ReaderAudioAU1);
+
+            if( pC->ReaderAudioAU1.m_maxsize
+                > pC->pReaderAudioStream->m_basicProperties.m_maxAUSize )
+            {
+                /* Constant memory reader case, we need to reallocate the temporary buffers */
+                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                    *) &(pC->m_pDataAddress1), pC->ReaderAudioAU1.m_maxsize);
+                /*   pC->m_pDataAddress1
+                 * and pC->m_pDataAddress2 must be reallocated at the same time *
+                 * because pC->pReaderAudioStream->m_basicProperties.m_maxAUSize takes the
+                 * maximum value. Then the test "if(pC->ReaderAudioAU?.m_maxsize >
+                 * pC->pReaderAudioStream->m_basicProperties.m_maxAUSize)" is never true *
+                 * and the size of the second buffer is never changed.
+                 */
+                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                    *) &(pC->m_pDataAddress2), pC->ReaderAudioAU1.m_maxsize);
+                /* pC->m_pDataAddress1 and
+                 * pC->m_pDataAddress2 must be reallocated at the same time
+                 * Update stream properties
+                 */
+                pC->pReaderAudioStream->m_basicProperties.m_maxAUSize =
+                    pC->ReaderAudioAU1.m_maxsize;
+            }
+            /**/
+            M4OSA_memcpy((M4OSA_MemAddr8)pC->m_pDataAddress1,
+                (M4OSA_MemAddr8)pC->ReaderAudioAU1.m_dataAddress,
+                pC->ReaderAudioAU1.m_size);
+            pC->m_audioAUDuration =
+                pC->ReaderAudioAU1.m_CTS - pC->ReaderAudioAU2.m_CTS;
+            pC->ReaderAudioAU.m_dataAddress = pC->m_pDataAddress2;
+        }
+        else
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8) &pC->ReaderAudioAU,
+                (M4OSA_MemAddr8) &pC->ReaderAudioAU1, sizeof(M4_AccessUnit));
+            err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+                (M4_StreamHandler *)pC->pReaderAudioStream,
+                &pC->ReaderAudioAU2);
+            /* Crash in MCS while copying the audio AU,
+             * constant memory reader case
+             */
+            if( pC->ReaderAudioAU2.m_maxsize
+                > pC->pReaderAudioStream->m_basicProperties.m_maxAUSize )
+            {
+                /* Constant memory reader case, we need to reallocate the temporary buffers */
+                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                    *) &(pC->m_pDataAddress2), pC->ReaderAudioAU2.m_maxsize);
+                /* pC->m_pDataAddress1 and
+                 * pC->m_pDataAddress2 must be reallocated at the same time
+                 * because pC->pReaderAudioStream->m_basicProperties.m_maxAUSize takes the maximum
+                 * value. Then the test "if(pC->ReaderAudioAU?.m_maxsize > pC->pReaderAudioStream->
+                 * m_basicProperties.m_maxAUSize)" is never true
+                 * and the size of the second buffer is never changed.
+                 */
+                M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                    *) &(pC->m_pDataAddress1), pC->ReaderAudioAU2.m_maxsize);
+                /* [ END ] 20091008  JFV PR fix surnxpsw#1071: pC->m_pDataAddress1 and
+                 pC->m_pDataAddress2 must be reallocated at the same time */
+                /* Update stream properties */
+                pC->pReaderAudioStream->m_basicProperties.m_maxAUSize =
+                    pC->ReaderAudioAU2.m_maxsize;
+            }
+            /**/
+            M4OSA_memcpy((M4OSA_MemAddr8)pC->m_pDataAddress2,
+                (M4OSA_MemAddr8)pC->ReaderAudioAU2.m_dataAddress,
+                pC->ReaderAudioAU2.m_size);
+            pC->m_audioAUDuration =
+                pC->ReaderAudioAU2.m_CTS - pC->ReaderAudioAU1.m_CTS;
+            pC->ReaderAudioAU.m_dataAddress = pC->m_pDataAddress1;
+        }
+
+        if( M4WAR_NO_MORE_AU == err )
+        {
+            M4OSA_TRACE2_0(
+                "M4MCS_intAudioNullEncoding(): \
+                m_pReaderDataIt->m_pFctGetNextAu(audio) returns M4WAR_NO_MORE_AU");
+            /* The audio transcoding is finished */
+            pC->AudioState = M4MCS_kStreamState_FINISHED;
+            return err;
+        }
+        else if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intAudioNullEncoding(): \
+                m_pReaderDataIt->m_pFctGetNextAu(Audio) returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Prepare the writer AU */
+    err = pC->pWriterDataFcts->pStartAU(pC->pWriterContext,
+        M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intAudioNullEncoding(): pWriterDataFcts->pStartAU(Audio) returns 0x%x",
+            err);
+        return err;
+    }
+
+    if( pC->uiAudioAUCount
+        == 0 ) /* If it is the first AU, we set it to silence
+        (else, errors 0x3841, 0x3847 in our AAC decoder) */
+    {
+        if( pC->InputFileProperties.AudioStreamType == M4VIDEOEDITING_kAAC
+            || pC->InputFileProperties.AudioStreamType
+            == M4VIDEOEDITING_kAACplus
+            || pC->InputFileProperties.AudioStreamType
+            == M4VIDEOEDITING_keAACplus )
+        {
+            if( pC->InputFileProperties.uiNbChannels == 1 )
+            {
+                pC->WriterAudioAU.size = M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+                M4OSA_memcpy((M4OSA_MemAddr8)pC->WriterAudioAU.dataAddress,
+                    (M4OSA_MemAddr8)M4VSS3GPP_AAC_AU_SILENCE_MONO,
+                    pC->WriterAudioAU.size);
+            }
+            else if( pC->InputFileProperties.uiNbChannels == 2 )
+            {
+                pC->WriterAudioAU.size = M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+                M4OSA_memcpy((M4OSA_MemAddr8)pC->WriterAudioAU.dataAddress,
+                    (M4OSA_MemAddr8)M4VSS3GPP_AAC_AU_SILENCE_STEREO,
+                    pC->WriterAudioAU.size);
+            }
+            else
+            {
+                /* Must never happen ...*/
+                M4OSA_TRACE1_0(
+                    "M4MCS_intAudioNullEncoding: Bad number of channels in audio input");
+                return M4MCS_ERR_INVALID_INPUT_FILE;
+            }
+        }
+        else if( pC->InputFileProperties.AudioStreamType
+            == M4VIDEOEDITING_kAMR_NB )
+        {
+            pC->WriterAudioAU.size = M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+            M4OSA_memcpy((M4OSA_MemAddr8)pC->WriterAudioAU.dataAddress,
+                (M4OSA_MemAddr8)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048,
+                pC->WriterAudioAU.size);
+            /* Some remaining AMR AU needs to be copied */
+            if( pC->ReaderAudioAU.m_size != 0 )
+            {
+                /* Update Writer AU */
+                pC->WriterAudioAU.size += pC->ReaderAudioAU.m_size;
+                M4OSA_memcpy((M4OSA_MemAddr8)pC->WriterAudioAU.dataAddress
+                    + M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE,
+                    (M4OSA_MemAddr8)pC->ReaderAudioAU.m_dataAddress,
+                    pC->ReaderAudioAU.m_size);
+            }
+        }
+        else
+        {
+            /*MP3 case: copy the AU*/
+            M4OSA_TRACE3_1(
+                "M4MCS_intAudioNullEncoding(): Copying audio AU: size=%d",
+                pC->ReaderAudioAU.m_size);
+            M4OSA_memcpy((M4OSA_MemAddr8)pC->WriterAudioAU.dataAddress,
+                (M4OSA_MemAddr8)pC->ReaderAudioAU.m_dataAddress,
+                pC->ReaderAudioAU.m_size);
+            pC->WriterAudioAU.size = pC->ReaderAudioAU.m_size;
+        }
+    }
+    else
+    {
+        /**
+        * Copy audio data from reader AU to writer AU */
+        M4OSA_TRACE3_1(
+            "M4MCS_intAudioNullEncoding(): Copying audio AU: size=%d",
+            pC->ReaderAudioAU.m_size);
+        M4OSA_memcpy((M4OSA_MemAddr8)pC->WriterAudioAU.dataAddress,
+            (M4OSA_MemAddr8)pC->ReaderAudioAU.m_dataAddress,
+            pC->ReaderAudioAU.m_size);
+        pC->WriterAudioAU.size = pC->ReaderAudioAU.m_size;
+    }
+
+    /**
+    * Convert CTS unit from milliseconds to timescale */
+    pC->WriterAudioAU.CTS =
+        (M4OSA_Time)((( pC->ReaderAudioAU.m_CTS - pC->iAudioCtsOffset)
+        * (pC->WriterAudioStream.timeScale / 1000.0)));
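+    /* Example: the writer time scale is pC->AudioEncParams.Frequency (typically 8000 for an
+     * AMR-NB output), so a reader CTS of 1000 ms with a zero offset becomes
+     * 1000 * (8000 / 1000.0) = 8000 ticks. */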
+
+    if( pC->InputFileProperties.AudioStreamType == M4VIDEOEDITING_kAMR_NB
+        && pC->uiAudioAUCount == 0 )
+    {
+        pC->iAudioCtsOffset -=
+            20; /* Duration of a silence AMR AU, to handle the duration of the added
+                silence frame */
+    }
+    pC->WriterAudioAU.nbFrag = 0;
+    M4OSA_TRACE3_1("M4MCS_intAudioNullEncoding(): audio AU: CTS=%d ms",
+        pC->WriterAudioAU.CTS);
+
+    /**
+    * Write it to the output file */
+    pC->uiAudioAUCount++;
+    err = pC->pWriterDataFcts->pProcessAU(pC->pWriterContext,
+        M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intAudioNullEncoding(): pWriterDataFcts->pProcessAU(Audio) returns 0x%x",
+            err);
+        return err;
+    }
+
+    /* All the audio has been written */
+    pC->ReaderAudioAU.m_size = 0;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intAudioNullEncoding(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * @brief    Init Audio Transcoding
+ * @return   M4NO_ERROR:         No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intAudioTranscoding( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err;                        /**< General error */
+
+    M4OSA_UInt32
+        uiBytesDec; /**< Nb of bytes available in the decoder OUT buffer */
+    M4OSA_UInt32
+        uiDecoder2Ssrc_NbBytes; /**< Nb of bytes copied into the ssrc IN buffer */
+
+    int ssrcErr;                          /**< Error while ssrc processing */
+    M4OSA_UInt32 uiSsrcInSize; /**< Size in bytes of the ssrc input buffer */
+    M4OSA_UInt32
+        uiSsrcInRoom; /**< Nb of bytes available in the ssrc IN buffer */
+    M4OSA_MemAddr8
+        pSsrcInput; /**< Pointer to the correct buffer location for the ssrc input */
+    M4OSA_UInt32 uiSsrcOutSize; /**< Size in bytes of ssrc output buffer */
+    M4OSA_UInt32
+        uiBytesSsrc; /**< Nb of bytes available in the ssrc OUT buffer */
+
+    M4OSA_UInt8
+        needChannelConversion; /**< Flag to indicate if a stereo <-> mono conversion is needed */
+    M4OSA_UInt32
+        uiChannelConvertorCoeff; /**< Multiplicative coefficient if stereo
+                                    <-> mono conversion is applied */
+    M4OSA_MemAddr8 pChannelConvertorInput =
+        M4OSA_NULL; /**< Pointer to the correct buffer location for channel convertor input */
+    M4OSA_UInt32 uiChannelConvertorNbSamples =
+        0; /**< Nb of pcm samples to convert in channel convertor */
+    M4OSA_MemAddr8 pChannelConvertorOutput =
+        M4OSA_NULL; /**< Pointer to the correct buffer location for channel convertor output */
+
+    M4OSA_Time
+        frameTimeDelta; /**< Duration of the encoded (then written) data */
+    M4OSA_UInt32
+        uiEncoderInRoom; /**< Nb of bytes available in the encoder IN buffer */
+    M4OSA_UInt32
+        uiSsrc2Encoder_NbBytes; /**< Nb of bytes copied from the ssrc OUT buffer */
+    M4OSA_MemAddr8
+        pEncoderInput; /**< Pointer to the correct buffer location for the encoder input */
+    M4ENCODER_AudioBuffer pEncInBuffer;   /**< Encoder input buffer for api */
+    M4ENCODER_AudioBuffer pEncOutBuffer;  /**< Encoder output buffer for api */
+
+    M4OSA_Int16 *tempBuffOut = M4OSA_NULL;
+    /*FlB 2009.03.04: apply audio effects if an effect is active*/
+    M4OSA_Int8 *pActiveEffectNumber = &(pC->pActiveEffectNumber);
+
+    if( pC->noaudio )
+        return M4NO_ERROR;
+
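+    /* Overview of one pass of this function: decode a chunk of the input AU, feed the
+     * resampler (ssrc), optionally convert between mono and stereo, then encode and write
+     * one AU once the encoder input buffer is full. The pPosIn... pointers track partially
+     * consumed/filled buffers, so a pass resumes exactly where the previous one stopped
+     * (for most of them, a M4OSA_NULL position means the corresponding buffer is empty). */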
+    /* _________________ */
+    /*|                 |*/
+    /*| READ AND DECODE |*/
+    /*|_________________|*/
+
+    /* Check if we have to empty the decoder out buffer first */
+    if( M4OSA_NULL != pC->pPosInDecBufferOut )
+    {
+        goto m4mcs_intaudiotranscoding_feed_resampler;
+    }
+
+    /* Check if the current audio AU has been entirely decoded */
+    if( pC->ReaderAudioAU.m_size == 0 )
+    {
+        /**
+        * Read the next audio AU in the input file */
+        err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+            (M4_StreamHandler *)pC->pReaderAudioStream, &pC->ReaderAudioAU);
+
+#ifdef MCS_DUMP_PCM_TO_FILE
+
+        fwrite(pC->ReaderAudioAU.m_dataAddress, pC->ReaderAudioAU.m_size, 1,
+            file_au_reader);
+        fwrite("____", 4, 1, file_au_reader);
+
+#endif
+
+        if( M4WAR_NO_MORE_AU == err ) /**< The audio transcoding is finished */
+        {
+            pC->AudioState = M4MCS_kStreamState_FINISHED;
+            M4OSA_TRACE2_0(
+                "M4MCS_intAudioTranscoding():\
+                 m_pReaderDataIt->m_pFctGetNextAu(audio) returns M4WAR_NO_MORE_AU");
+            return err;
+        }
+        else if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intAudioTranscoding():\
+                 m_pReaderDataIt->m_pFctGetNextAu(Audio) returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Decode the AU */
+    pC->AudioDecBufferIn.m_dataAddress = pC->ReaderAudioAU.m_dataAddress;
+    pC->AudioDecBufferIn.m_bufferSize = pC->ReaderAudioAU.m_size;
+
+    err = pC->m_pAudioDecoder->m_pFctStepAudioDec(pC->pAudioDecCtxt,
+        &pC->AudioDecBufferIn, &pC->AudioDecBufferOut, M4OSA_FALSE);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intAudioTranscoding(): m_pAudioDecoder->m_pFctStepAudio returns 0x%x",
+            err);
+        return err;
+    }
+
+#ifdef MCS_DUMP_PCM_TO_FILE
+
+    fwrite(pC->AudioDecBufferOut.m_dataAddress,
+        pC->AudioDecBufferOut.m_bufferSize, 1, file_pcm_decoder);
+
+#endif
+
+    /* Update the reader AU to skip the part that has just been decoded */
+
+    pC->ReaderAudioAU.m_dataAddress += pC->AudioDecBufferIn.m_bufferSize;
+    pC->ReaderAudioAU.m_size -= pC->AudioDecBufferIn.m_bufferSize;
+
+    /* Set the current position in the decoder out buffer */
+    pC->pPosInDecBufferOut = pC->AudioDecBufferOut.m_dataAddress;
+
+    /* ________________ */
+    /*|                |*/
+    /*| FEED RESAMPLER |*/
+    /*|________________|*/
+
+m4mcs_intaudiotranscoding_feed_resampler:
+
+    /* Check if we have to empty the ssrc out buffer first */
+    if( M4OSA_NULL != pC->pPosInSsrcBufferOut )
+    {
+        goto m4mcs_intaudiotranscoding_prepare_input_buffer;
+    }
+
+    /* Compute number of bytes remaining in the decoder buffer */
+    uiSsrcInSize = pC->iSsrcNbSamplIn * sizeof(short)
+        * pC->pReaderAudioStream->m_nbChannels;
+    uiBytesDec = ( pC->AudioDecBufferOut.m_dataAddress
+        + pC->AudioDecBufferOut.m_bufferSize) - pC->pPosInDecBufferOut;
+
+    /* Check if we can feed directly the Ssrc with the decoder out buffer */
+    if( ( pC->pPosInSsrcBufferIn == pC->pSsrcBufferIn)
+        && (uiBytesDec >= uiSsrcInSize) )
+    {
+        pSsrcInput = pC->pPosInDecBufferOut;
+
+        /* update data consumed into decoder buffer after resampling */
+        if( uiBytesDec == uiSsrcInSize )
+            pC->pPosInDecBufferOut = M4OSA_NULL;
+        else
+            pC->pPosInDecBufferOut += uiSsrcInSize;
+
+        goto m4mcs_intaudiotranscoding_do_resampling;
+    }
+
+    /**
+    * Compute remaining space in Ssrc buffer in */
+    uiSsrcInRoom = ( pC->pSsrcBufferIn + uiSsrcInSize) - pC->pPosInSsrcBufferIn;
+
+    /**
+    * Nb of bytes copied is the minimum between nb of bytes remaining in
+    * decoder out buffer and space remaining in ssrc in buffer */
+    uiDecoder2Ssrc_NbBytes =
+        (uiSsrcInRoom < uiBytesDec) ? uiSsrcInRoom : uiBytesDec;
+
+    /**
+    * Copy from the decoder out buffer into the Ssrc in buffer */
+    M4OSA_memcpy(pC->pPosInSsrcBufferIn, pC->pPosInDecBufferOut,
+        uiDecoder2Ssrc_NbBytes);
+
+    /**
+    * Update the position in the decoder out buffer */
+    pC->pPosInDecBufferOut += uiDecoder2Ssrc_NbBytes;
+
+    /**
+    * Update the position in the Ssrc in buffer */
+    pC->pPosInSsrcBufferIn += uiDecoder2Ssrc_NbBytes;
+
+    /**
+    * Check if the decoder buffer out is empty */
+    if( ( pC->pPosInDecBufferOut - pC->AudioDecBufferOut.m_dataAddress)
+        == (M4OSA_Int32)pC->AudioDecBufferOut.m_bufferSize )
+    {
+        pC->pPosInDecBufferOut = M4OSA_NULL;
+    }
+
+    /* Check if the Ssrc in buffer is ready (= full) */
+    if( ( pC->pPosInSsrcBufferIn - pC->pSsrcBufferIn)
+        < (M4OSA_Int32)uiSsrcInSize )
+    {
+        goto m4mcs_intaudiotranscoding_end;
+    }
+
+    pSsrcInput = pC->pSsrcBufferIn;
+
+    /* update data consumed into ssrc buffer in after resampling (empty) */
+    pC->pPosInSsrcBufferIn = pC->pSsrcBufferIn;
+
+    /* ___________________ */
+    /*|                   |*/
+    /*| DO THE RESAMPLING |*/
+    /*|___________________|*/
+
+m4mcs_intaudiotranscoding_do_resampling:
+
+    /**
+    * No need for memcopy, we can feed Ssrc directly with the data in the audio
+    decoder out buffer*/
+
+    ssrcErr = 0;
+
+    if( pC->pReaderAudioStream->m_nbChannels == 1 )
+    {
+        tempBuffOut =
+            (short *)M4OSA_malloc((pC->iSsrcNbSamplOut * sizeof(short) * 2
+            * ((*pC).InputFileProperties).uiNbChannels),
+            M4VSS3GPP,(M4OSA_Char *) "tempBuffOut");
+        M4OSA_memset((M4OSA_MemAddr8)tempBuffOut, (pC->iSsrcNbSamplOut * sizeof(short) * 2
+            * ((*pC).InputFileProperties).uiNbChannels), 0);
+
+        LVAudioresample_LowQuality((short *)tempBuffOut, (short *)pSsrcInput,
+            pC->iSsrcNbSamplOut, (M4OSA_Int32)pC->pLVAudioResampler);
+    }
+    else
+    {
+        M4OSA_memset(pC->pSsrcBufferOut, (pC->iSsrcNbSamplOut * sizeof(short)
+            * ((*pC).InputFileProperties).uiNbChannels), 0);
+
+        LVAudioresample_LowQuality((short *)pC->pSsrcBufferOut,
+            (short *)pSsrcInput, pC->iSsrcNbSamplOut, (M4OSA_Int32)pC->pLVAudioResampler);
+    }
+
+    if( pC->pReaderAudioStream->m_nbChannels == 1 )
+    {
+        From2iToMono_16((short *)tempBuffOut, (short *)pC->pSsrcBufferOut,
+            (short)pC->iSsrcNbSamplOut);
+        M4OSA_free((M4OSA_MemAddr32)tempBuffOut);
+    }
+
+
+    if( 0 != ssrcErr )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intAudioTranscoding: SSRC_Process returns 0x%x, \
+            returning M4MCS_ERR_AUDIO_CONVERSION_FAILED",
+            ssrcErr);
+        return M4MCS_ERR_AUDIO_CONVERSION_FAILED;
+    }
+
+    pC->pPosInSsrcBufferOut = pC->pSsrcBufferOut;
+
+    /* ______________________ */
+    /*|                      |*/
+    /*| PREPARE INPUT BUFFER |*/
+    /*|______________________|*/
+
+m4mcs_intaudiotranscoding_prepare_input_buffer:
+
+    /* Set the flag for channel conversion requirement */
+    if( ( pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
+        && (pC->pReaderAudioStream->m_nbChannels == 2) )
+    {
+        needChannelConversion = 1;
+        uiChannelConvertorCoeff = 4;
+    }
+    else if( ( pC->AudioEncParams.ChannelNum == M4ENCODER_kStereo)
+        && (pC->pReaderAudioStream->m_nbChannels == 1) )
+    {
+        needChannelConversion = 2;
+        uiChannelConvertorCoeff = 1;
+    }
+    else
+    {
+        needChannelConversion = 0;
+        uiChannelConvertorCoeff = 2;
+    }
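+    /* uiChannelConvertorCoeff / 2 is the ratio between bytes in the ssrc (input channel)
+     * domain and bytes in the encoder (output channel) domain: stereo->mono needs twice
+     * as many input bytes per encoder byte (coeff = 4), mono->stereo needs half
+     * (coeff = 1), and no conversion keeps a 1:1 ratio (coeff = 2). */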
+
+    /* Compute number of bytes remaining in the Ssrc buffer */
+    uiSsrcOutSize = pC->iSsrcNbSamplOut * sizeof(short)
+        * pC->pReaderAudioStream->m_nbChannels;
+    uiBytesSsrc =
+        ( pC->pSsrcBufferOut + uiSsrcOutSize) - pC->pPosInSsrcBufferOut;
+
+    /* Check if the ssrc buffer is full */
+    if( pC->pPosInSsrcBufferOut == pC->pSsrcBufferOut )
+    {
+        uiSsrc2Encoder_NbBytes =
+            pC->audioEncoderGranularity * uiChannelConvertorCoeff / 2;
+
+        /* Check if we can feed directly the encoder with the ssrc out buffer */
+        if( ( pC->pPosInAudioEncoderBuffer == M4OSA_NULL)
+            && (uiBytesSsrc >= uiSsrc2Encoder_NbBytes) )
+        {
+            /* update position in ssrc out buffer after encoding */
+            if( uiBytesSsrc == uiSsrc2Encoder_NbBytes )
+                pC->pPosInSsrcBufferOut = M4OSA_NULL;
+            else
+                pC->pPosInSsrcBufferOut += uiSsrc2Encoder_NbBytes;
+
+            /* mark the encoder buffer ready (= full) */
+            pC->pPosInAudioEncoderBuffer =
+                pC->pAudioEncoderBuffer + pC->audioEncoderGranularity;
+
+            if( needChannelConversion > 0 )
+            {
+                /* channel convertor writes directly into encoder buffer */
+                pEncoderInput = pC->pAudioEncoderBuffer;
+
+                pChannelConvertorInput = pC->pSsrcBufferOut;
+                pChannelConvertorOutput = pC->pAudioEncoderBuffer;
+                uiChannelConvertorNbSamples =
+                    uiSsrc2Encoder_NbBytes / sizeof(short);
+
+                goto m4mcs_intaudiotranscoding_channel_convertor;
+            }
+            else
+            {
+                /* encode directly from ssrc out buffer */
+                pEncoderInput = pC->pSsrcBufferOut;
+
+                goto m4mcs_intaudiotranscoding_encode_and_write;
+            }
+        }
+    }
+
+    /**
+    * Compute remaining space in encoder buffer in */
+    if( pC->pPosInAudioEncoderBuffer == M4OSA_NULL )
+    {
+        pC->pPosInAudioEncoderBuffer = pC->pAudioEncoderBuffer;
+    }
+
+    uiEncoderInRoom = ( pC->pAudioEncoderBuffer + pC->audioEncoderGranularity)
+        - pC->pPosInAudioEncoderBuffer;
+    pEncoderInput = pC->pAudioEncoderBuffer;
+
+    /**
+    * Nb of bytes copied is the minimum between nb of bytes remaining in the
+    * ssrc out buffer and the space remaining in the encoder in buffer */
+    uiSsrc2Encoder_NbBytes =
+        (( uiEncoderInRoom * uiChannelConvertorCoeff / 2) < uiBytesSsrc)
+        ? (uiEncoderInRoom * uiChannelConvertorCoeff / 2) : uiBytesSsrc;
+
+    if( needChannelConversion > 0 )
+    {
+        /* channel convertor writes directly into encoder buffer */
+        pChannelConvertorInput = pC->pPosInSsrcBufferOut;
+        pChannelConvertorOutput = pC->pPosInAudioEncoderBuffer;
+        uiChannelConvertorNbSamples = uiSsrc2Encoder_NbBytes / sizeof(short);
+    }
+    else
+    {
+        /* copy from the ssrc out buffer into the encoder in buffer */
+        M4OSA_memcpy(pC->pPosInAudioEncoderBuffer, pC->pPosInSsrcBufferOut,
+            uiSsrc2Encoder_NbBytes);
+    }
+
+    /* Update position in ssrc out buffer after encoding */
+    pC->pPosInSsrcBufferOut += uiSsrc2Encoder_NbBytes;
+
+    /* Update the position in the encoder in buffer */
+    pC->pPosInAudioEncoderBuffer +=
+        uiSsrc2Encoder_NbBytes * 2 / uiChannelConvertorCoeff;
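+    /* uiSsrc2Encoder_NbBytes is expressed in the ssrc (input channel) domain; multiplying
+     * by 2 / uiChannelConvertorCoeff converts it back to the encoder (output channel)
+     * domain before advancing the encoder buffer position. */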
+
+    /* Check if the ssrc buffer out is empty */
+    if( ( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut)
+        == (M4OSA_Int32)uiSsrcOutSize )
+    {
+        pC->pPosInSsrcBufferOut = M4OSA_NULL;
+    }
+
+    /* go to next statement */
+    if( needChannelConversion > 0 )
+        goto m4mcs_intaudiotranscoding_channel_convertor;
+    else
+        goto m4mcs_intaudiotranscoding_encode_and_write;
+
+    /* _________________ */
+    /*|                 |*/
+    /*| STEREO <-> MONO |*/
+    /*|_________________|*/
+
+m4mcs_intaudiotranscoding_channel_convertor:
+
+    /* convert the input pcm stream to mono or to stereo */
+    switch( needChannelConversion )
+    {
+        case 1: /* stereo to mono */
+            From2iToMono_16((short *)pChannelConvertorInput,
+                (short *)pChannelConvertorOutput,
+                (short)(uiChannelConvertorNbSamples / 2));
+            break;
+
+        case 2: /* mono to stereo */
+            MonoTo2I_16((short *)pChannelConvertorInput,
+                (short *)pChannelConvertorOutput,
+                (short)uiChannelConvertorNbSamples);
+            break;
+    }
+
+    /* __________________ */
+    /*|                  |*/
+    /*| ENCODE AND WRITE |*/
+    /*|__________________|*/
+
+m4mcs_intaudiotranscoding_encode_and_write:
+
+    /* Check if the encoder in buffer is ready (= full) */
+    if( ( pC->pPosInAudioEncoderBuffer - pC->pAudioEncoderBuffer)
+        < (M4OSA_Int32)pC->audioEncoderGranularity )
+    {
+        goto m4mcs_intaudiotranscoding_end;
+    }
+
+    /* [Mono] or [Stereo interleaved] : all is in one buffer */
+    pEncInBuffer.pTableBuffer[0] = pEncoderInput;
+    pEncInBuffer.pTableBufferSize[0] = pC->audioEncoderGranularity;
+    pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+    pEncInBuffer.pTableBufferSize[1] = 0;
+
+    /* Duration covered by this buffer, derived from the data size (raw PCM16 samples) */
+    frameTimeDelta =
+        ( pEncInBuffer.pTableBufferSize[0] * uiChannelConvertorCoeff / 2)
+        / sizeof(short) / pC->pReaderAudioStream->m_nbChannels;
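+    /* frameTimeDelta is therefore a number of PCM sample frames: encoder-domain bytes are
+     * first converted to input-channel-domain bytes (x coeff/2), then divided by the sample
+     * size and the input channel count. It is used directly as a CTS increment, which
+     * presumably assumes the writer audio timescale equals the output sampling rate. */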
+
+    /**
+    * Prepare the writer AU */
+    err = pC->pWriterDataFcts->pStartAU(pC->pWriterContext,
+        M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intAudioTranscoding(): pWriterDataFcts->pStartAU(Audio) returns 0x%x",
+            err);
+        return err;
+    }
+
+    /*FlB 2009.03.04: apply audio effects if an effect is active*/
+    if( *pActiveEffectNumber >= 0 && *pActiveEffectNumber < pC->nbEffects )
+    {
+        if( pC->pEffects[*pActiveEffectNumber].ExtAudioEffectFct != M4OSA_NULL )
+        {
+            M4MCS_ExternalProgress pProgress;
+            M4OSA_UInt64 tempProgress = 0;
+            pProgress.uiClipTime = (M4OSA_UInt32)pC->ReaderAudioAU.m_CTS;
+
+            pProgress.uiOutputTime = ( pC->WriterAudioAU.CTS * 1000)
+                / pC->WriterAudioStream.timeScale;
+            tempProgress = ( (M4OSA_UInt64)pC->ReaderAudioAU.m_CTS
+                - pC->pEffects[*pActiveEffectNumber].uiStartTime
+                - pC->uiBeginCutTime) * 1000;
+            pProgress.uiProgress =
+                (M4OSA_UInt32)(tempProgress / (M4OSA_UInt64)pC->pEffects[
+                    *pActiveEffectNumber].uiDuration);
+
+                    err = pC->pEffects[*pActiveEffectNumber].ExtAudioEffectFct(
+                        pC->pEffects[*pActiveEffectNumber].pExtAudioEffectFctCtxt,
+                        (M4OSA_Int16 *)pEncInBuffer.pTableBuffer[0],
+                        pEncInBuffer.pTableBufferSize[0], &pProgress);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4MCS_intAudioTranscoding(): ExtAudioEffectFct() returns 0x%x",
+                            err);
+                        return err;
+                    }
+        }
+    }
+
+    /**
+    * Prepare output buffer */
+    pEncOutBuffer.pTableBuffer[0] =
+        (M4OSA_MemAddr8)pC->WriterAudioAU.dataAddress;
+    pEncOutBuffer.pTableBufferSize[0] = 0;
+
+#ifdef MCS_DUMP_PCM_TO_FILE
+
+    fwrite(pEncInBuffer.pTableBuffer[0], pEncInBuffer.pTableBufferSize[0], 1,
+        file_pcm_encoder);
+
+#endif
+
+    if( M4OSA_FALSE == pC->b_isRawWriter )
+    {
+        /* This allows writing the PCM data to a file and encoding the data (e.g. AMR)
+         when the output file is not RAW */
+        if( pC->pOutputPCMfile != M4OSA_NULL )
+        {
+            pC->pOsaFileWritPtr->writeData(pC->pOutputPCMfile,
+                pEncInBuffer.pTableBuffer[0], pEncInBuffer.pTableBufferSize[0]);
+        }
+
+        /**
+        * Encode the PCM audio */
+        err = pC->pAudioEncoderGlobalFcts->pFctStep(pC->pAudioEncCtxt,
+            &pEncInBuffer, &pEncOutBuffer);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intAudioTranscoding(): pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                err);
+            return err;
+        }
+
+        /* update data consumed into encoder buffer in after encoding (empty) */
+        pC->pPosInAudioEncoderBuffer = M4OSA_NULL;
+
+        /**
+        * Set AU cts and size */
+        pC->WriterAudioAU.size =
+            pEncOutBuffer.
+            pTableBufferSize[0]; /**< Get the size of encoded data */
+        pC->WriterAudioAU.CTS += frameTimeDelta;
+
+        /**
+        * Update duration of the encoded AU */
+        pC->m_audioAUDuration =
+            ( frameTimeDelta * 1000) / pC->WriterAudioStream.timeScale;
+
+        /**
+        * Write the encoded AU to the output file */
+        pC->uiAudioAUCount++;
+        err = pC->pWriterDataFcts->pProcessAU(pC->pWriterContext,
+            M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intAudioTranscoding(): pWriterDataFcts->pProcessAU(Audio) returns 0x%x",
+                err);
+            return err;
+        }
+    }
+    else
+    {
+        /* update data consumed into encoder buffer in after encoding (empty) */
+        pC->pPosInAudioEncoderBuffer = M4OSA_NULL;
+
+        pC->WriterAudioAU.dataAddress =
+            (M4OSA_MemAddr32)
+            pEncoderInput; /* will be converted back to u8* in file write */
+        pC->WriterAudioAU.size = pC->audioEncoderGranularity;
+        pC->uiAudioAUCount++;
+
+        err = pC->pWriterDataFcts->pProcessAU(pC->pWriterContext,
+            M4MCS_WRITER_AUDIO_STREAM_ID, &pC->WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intAudioTranscoding(): pWriterDataFcts->pProcessAU(Audio) returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /* _______________ */
+    /*|               |*/
+    /*| ONE PASS DONE |*/
+    /*|_______________|*/
+
+m4mcs_intaudiotranscoding_end:
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intAudioTranscoding(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intReallocTemporaryAU(M4OSA_MemAddr8* addr, M4OSA_UInt32 newSize)
+ * Used only in case of 3GP constant memory reader, to be able to realloc temporary AU
+ * because max AU size can be reevaluated during reading
+ * @return   M4NO_ERROR:         No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intReallocTemporaryAU( M4OSA_MemAddr8 *addr,
+                                             M4OSA_UInt32 newSize )
+{
+    if( *addr != M4OSA_NULL )
+    {
+        M4OSA_free(( M4OSA_MemAddr32) * addr);
+        *addr = (M4OSA_MemAddr8)M4OSA_malloc(newSize, M4MCS,
+            (M4OSA_Char *)"Reallocation of temporary AU buffer");
+
+        if( *addr == M4OSA_NULL )
+        {
+            return M4ERR_ALLOC;
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intVideoNullEncoding(M4MCS_InternalContext* pC)
+ * @author   Alexis Vapillon (NXP Software Vision)
+ * @return   M4NO_ERROR:         No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intVideoNullEncoding( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    /* Duration of the AU (find the next AU duration
+     * to obtain a more precise video end cut)
+     */
+    M4OSA_UInt32 videoAUDuration = 0;
+
+    M4OSA_MemAddr8 WritebufferAdd = M4OSA_NULL;
+    M4OSA_Int32 lastdecodedCTS = 0;
+    M4_AccessUnit lReaderVideoAU; /**< Read video access unit */
+
+    if( pC->novideo )
+        return M4NO_ERROR;
+
+    /* H.264 Trimming */
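+    /* When trimming H.264 with a begin cut, the first AUs (up to num_ref_frames) are
+     * re-encoded via M4MCS_intVideoTranscoding, presumably because they may reference
+     * frames located before the cut point and thus cannot simply be copied. */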
+    if( ( ( pC->bH264Trim == M4OSA_TRUE)
+        && (pC->uiVideoAUCount < pC->m_pInstance->clip_sps.num_ref_frames)
+        && (pC->uiBeginCutTime > 0))
+        || (( pC->uiVideoAUCount == 0) && (pC->uiBeginCutTime > 0)) )
+    {
+        err = M4MCS_intVideoTranscoding(pC);
+        return err;
+    }
+
+
+    if((pC->bLastDecodedFrameCTS == M4OSA_FALSE) && (pC->uiBeginCutTime > 0))
+    {
+        // The StageFright encoder prefetches: the one frame we requested will not be written
+        // until the encoder is closed, so close it now rather than in MCS_close
+        if( ( M4NO_ERROR != err)
+            || (M4MCS_kEncoderRunning != pC->encoderState) )
+        {
+            M4OSA_TRACE1_2(
+                "!!! M4MCS_intVideoNullEncoding ERROR : M4MCS_intVideoTranscoding "
+                "returns 0x%X w/ encState=%d", err, pC->encoderState);
+
+            return err;
+        }
+
+        /* Stop and close the encoder now to flush the frame (prefetch) */
+        if( pC->pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
+        {
+            err = pC->pVideoEncoderGlobalFcts->pFctStop(pC->pViEncCtxt);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "!!! M4MCS_intVideoNullEncoding ERROR : encoder stop returns 0x%X",
+                    err);
+                return err;
+            }
+        }
+        pC->encoderState = M4MCS_kEncoderStopped;
+        err = pC->pVideoEncoderGlobalFcts->pFctClose(pC->pViEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "!!! M4MCS_intVideoNullEncoding ERROR : encoder close returns 0x%X",
+                err);
+            return err;
+        }
+        pC->encoderState = M4MCS_kEncoderClosed;
+    }
+
+
+    if( ( pC->bH264Trim == M4OSA_TRUE)
+        && (pC->bLastDecodedFrameCTS == M4OSA_FALSE)
+        && (pC->uiBeginCutTime > 0) )
+    {
+
+        pC->bLastDecodedFrameCTS = M4OSA_TRUE;
+        err = pC->m_pVideoDecoder->m_pFctGetOption(pC->pViDecCtxt,
+            M4DECODER_kOptionID_AVCLastDecodedFrameCTS, &lastdecodedCTS);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intVideoNullEncoding: m_pVideoDecoder->m_pFctGetOption returns 0x%x!",
+                err);
+            return err;
+        }
+
+        err = pC->m_pReader->m_pFctJump(pC->pReaderContext,
+            (M4_StreamHandler *)pC->pReaderVideoStream, &lastdecodedCTS);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intStepBeginVideoJump: m_pFctJump(V) returns 0x%x!",
+                err);
+            return err;
+        }
+
+
+        /* Initializes an access Unit */
+
+        err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+            (M4_StreamHandler *)pC->pReaderVideoStream, &lReaderVideoAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_open(): m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
+                err);
+            return err;
+        }
+
+        err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+            (M4_StreamHandler *)pC->pReaderVideoStream, &lReaderVideoAU);
+
+        if( M4WAR_NO_MORE_AU == err )
+        {
+            M4OSA_TRACE2_0(
+                "M4MCS_intVideoNullEncoding():\
+                 m_pReaderDataIt->m_pFctGetNextAu(video) returns M4WAR_NO_MORE_AU");
+            /* The video transcoding is finished */
+            pC->VideoState = M4MCS_kStreamState_FINISHED;
+            return err;
+        }
+        else if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intVideoNullEncoding():\
+                 m_pReaderDataIt->m_pFctGetNextAu(video) returns 0x%x",
+                err);
+            return err;
+        }
+
+        M4OSA_TRACE1_1(
+            "### [TS_CHECK] M4MCS_intVideoNullEncoding  video AU CTS: %d ",
+            lReaderVideoAU.m_CTS);
+
+
+    }
+
+
+    pC->bLastDecodedFrameCTS = M4OSA_TRUE;
+
+    /* - CRLV6775 -H.264 Trimming */
+
+    /* Commented out: this part is done further down, on one of the temporary video AUs (video AU1 or video AU2) */
+#if 0
+    /**
+    * Read the next video AU in the input file */
+
+    err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+        (M4_StreamHandler *)pC->pReaderVideoStream, &pC->ReaderVideoAU);
+#endif
+
+    /* Find the next AU duration to obtain a more precise video end cut*/
+    /**
+    * Initializes a new AU if needed */
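+    /* Two reader AU slots (ReaderVideoAU1 / ReaderVideoAU2) are used in a ping-pong manner:
+     * the AU fetched on the previous pass is written while the next AU is prefetched into
+     * the other slot; the CTS difference between the two gives videoAUDuration, which is
+     * used below for a more precise end cut. */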
+
+    if( pC->ReaderVideoAU1.m_structSize == 0 )
+    {
+        /**
+        * Initializes an access Unit */
+        err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+            (M4_StreamHandler *)pC->pReaderVideoStream,
+            &pC->ReaderVideoAU1);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_open(): m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
+                err);
+            return err;
+        }
+
+        pC->m_pDataVideoAddress1 =
+            (M4OSA_MemAddr8)M4OSA_malloc(pC->ReaderVideoAU1.m_maxsize, M4MCS,
+            (M4OSA_Char *)"Temporary video AU1 buffer");
+
+        if( pC->m_pDataVideoAddress1 == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("M4MCS_intVideoNullEncoding(): allocation error");
+            return M4ERR_ALLOC;
+        }
+
+        err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+            (M4_StreamHandler *)pC->pReaderVideoStream,
+            &pC->ReaderVideoAU1);
+
+        if( M4WAR_NO_MORE_AU == err )
+        {
+            M4OSA_TRACE2_0(
+                "M4MCS_intVideoNullEncoding():\
+                 m_pReaderDataIt->m_pFctGetNextAu(video) returns M4WAR_NO_MORE_AU");
+            /* The video transcoding is finished */
+            pC->VideoState = M4MCS_kStreamState_FINISHED;
+            return err;
+        }
+        else if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intVideoNullEncoding(): m_pReaderDataIt->m_pFctGetNextAu(video)\
+                 returns 0x%x", err);
+            return err;
+        }
+
+        if( pC->ReaderVideoAU1.m_maxsize
+            > pC->pReaderVideoStream->m_basicProperties.m_maxAUSize )
+        {
+            /* Constant memory reader case, we need to reallocate the temporary buffers */
+            M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                *) &(pC->m_pDataVideoAddress1), pC->ReaderVideoAU1.m_maxsize);
+            /* pC->m_pDataVideoAddress1
+            and pC->m_pDataVideoAddress2 must be reallocated at the same time */
+            /* because pC->pReaderVideoStream->m_basicProperties.m_maxAUSize takes its
+             maximum value here; the test "if(pC->ReaderVideoAU?.m_maxsize > pC->pReaderVideoStream->
+             m_basicProperties.m_maxAUSize)" would then never be true again */
+            /* and the size of the second buffer would never be updated. */
+            M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                *) &(pC->m_pDataVideoAddress2), pC->ReaderVideoAU1.m_maxsize);
+            /* pC->m_pDataVideoAddress1 and
+            pC->m_pDataVideoAddress2 must be reallocated at the same time */
+            /* Update stream properties */
+            pC->pReaderVideoStream->m_basicProperties.m_maxAUSize =
+                pC->ReaderVideoAU1.m_maxsize;
+        }
+        M4OSA_memcpy((M4OSA_MemAddr8)pC->m_pDataVideoAddress1,
+            (M4OSA_MemAddr8)pC->ReaderVideoAU1.m_dataAddress,
+            pC->ReaderVideoAU1.m_size);
+    }
+
+    if( pC->ReaderVideoAU2.m_structSize == 0 )
+    {
+        /**
+        * Initializes an access Unit */
+        err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext,
+            (M4_StreamHandler *)pC->pReaderVideoStream,
+            &pC->ReaderVideoAU2);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_open(): m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
+                err);
+            return err;
+        }
+        pC->m_pDataVideoAddress2 =
+            (M4OSA_MemAddr8)M4OSA_malloc(pC->ReaderVideoAU2.m_maxsize, M4MCS,
+            (M4OSA_Char *)"Temporary video AU buffer");
+
+        if( pC->m_pDataVideoAddress2 == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("M4MCS_intVideoNullEncoding(): allocation error");
+            return M4ERR_ALLOC;
+        }
+    }
+    /**
+    * Read the next video AU in the input file */
+    if( pC->ReaderVideoAU2.m_CTS > pC->ReaderVideoAU1.m_CTS )
+    {
+        M4OSA_memcpy((M4OSA_MemAddr8) &pC->ReaderVideoAU,
+            (M4OSA_MemAddr8) &pC->ReaderVideoAU2, sizeof(M4_AccessUnit));
+        err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+            (M4_StreamHandler *)pC->pReaderVideoStream,
+            &pC->ReaderVideoAU1);
+
+        if( pC->ReaderVideoAU1.m_maxsize
+            > pC->pReaderVideoStream->m_basicProperties.m_maxAUSize )
+        {
+            /* Constant memory reader case, we need to reallocate the temporary buffers */
+            M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                *) &(pC->m_pDataVideoAddress1), pC->ReaderVideoAU1.m_maxsize);
+            /* pC->m_pDataVideoAddress1 and
+             pC->m_pDataVideoAddress2 must be reallocated at the same time */
+            /* because pC->pReaderVideoStream->m_basicProperties.m_maxAUSize takes its
+             maximum value here; the test "if(pC->ReaderVideoAU?.m_maxsize > pC->pReaderVideoStream->
+             m_basicProperties.m_maxAUSize)" would then never be true again */
+            /* and the size of the second buffer would never be updated. */
+            M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                *) &(pC->m_pDataVideoAddress2), pC->ReaderVideoAU1.m_maxsize);
+            /* pC->m_pDataVideoAddress1 and
+            pC->m_pDataVideoAddress2 must be reallocated at the same time */
+            /* Update stream properties */
+            pC->pReaderVideoStream->m_basicProperties.m_maxAUSize =
+                pC->ReaderVideoAU1.m_maxsize;
+        }
+        M4OSA_memcpy((M4OSA_MemAddr8)pC->m_pDataVideoAddress1,
+            (M4OSA_MemAddr8)pC->ReaderVideoAU1.m_dataAddress,
+            pC->ReaderVideoAU1.m_size);
+        videoAUDuration = pC->ReaderVideoAU1.m_CTS - pC->ReaderVideoAU2.m_CTS;
+        pC->ReaderVideoAU.m_dataAddress = pC->m_pDataVideoAddress2;
+    }
+    else
+    {
+        M4OSA_memcpy((M4OSA_MemAddr8) &pC->ReaderVideoAU,
+            (M4OSA_MemAddr8) &pC->ReaderVideoAU1, sizeof(M4_AccessUnit));
+        err = pC->m_pReaderDataIt->m_pFctGetNextAu(pC->pReaderContext,
+            (M4_StreamHandler *)pC->pReaderVideoStream,
+            &pC->ReaderVideoAU2);
+
+        if( pC->ReaderVideoAU2.m_maxsize
+            > pC->pReaderVideoStream->m_basicProperties.m_maxAUSize )
+        {
+            /* Constant memory reader case, we need to reallocate the temporary buffers */
+            M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                *) &(pC->m_pDataVideoAddress2), pC->ReaderVideoAU2.m_maxsize);
+            /* pC->m_pDataVideoAddress1 and
+             pC->m_pDataVideoAddress2 must be reallocated at the same time */
+            /* because pC->pReaderVideoStream->m_basicProperties.m_maxAUSize takes its
+             maximum value here; the test "if(pC->ReaderVideoAU?.m_maxsize > pC->pReaderVideoStream->
+             m_basicProperties.m_maxAUSize)" would then never be true again */
+            /* and the size of the second buffer would never be updated. */
+            M4MCS_intReallocTemporaryAU((M4OSA_MemAddr8
+                *) &(pC->m_pDataVideoAddress1), pC->ReaderVideoAU2.m_maxsize);
+            /* pC->m_pDataVideoAddress1 and
+            pC->m_pDataVideoAddress2 must be reallocated at the same time */
+            /* Update stream properties */
+            pC->pReaderVideoStream->m_basicProperties.m_maxAUSize =
+                pC->ReaderVideoAU2.m_maxsize;
+        }
+        M4OSA_memcpy((M4OSA_MemAddr8)pC->m_pDataVideoAddress2,
+            (M4OSA_MemAddr8)pC->ReaderVideoAU2.m_dataAddress,
+            pC->ReaderVideoAU2.m_size);
+        videoAUDuration = pC->ReaderVideoAU2.m_CTS - pC->ReaderVideoAU1.m_CTS;
+        pC->ReaderVideoAU.m_dataAddress = pC->m_pDataVideoAddress1;
+    }
+
+    if( M4WAR_NO_MORE_AU == err )
+    {
+        M4OSA_TRACE2_0(
+            "M4MCS_intVideoNullEncoding():\
+             m_pReaderDataIt->m_pFctGetNextAu(video) returns M4WAR_NO_MORE_AU");
+        /* The video transcoding is finished */
+        pC->VideoState = M4MCS_kStreamState_FINISHED;
+        return err;
+    }
+    else if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intVideoNullEncoding(): m_pReaderDataIt->m_pFctGetNextAu(Video) returns 0x%x",
+            err);
+        return err;
+    }
+    else
+    {
+        /**
+        * Prepare the writer AU */
+        err = pC->pWriterDataFcts->pStartAU(pC->pWriterContext,
+            M4MCS_WRITER_VIDEO_STREAM_ID, &pC->WriterVideoAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intVideoNullEncoding(): pWriterDataFcts->pStartAU(Video) returns 0x%x",
+                err);
+            return err;
+        }
+#ifdef TIMESCALE_BUG
+        /* If we are in timescale modification mode or classic copy mode */
+
+        if( pC->uiVideoTimescale != 0 )
+        {
+            /**
+            * Copy video data from reader AU to writer AU */
+            //M4OSA_memcpy((M4OSA_MemAddr8)pC->WriterVideoAU.dataAddress,
+            //(M4OSA_MemAddr8)pC->ReaderVideoAU.m_dataAddress, pC->ReaderVideoAU.m_size);
+            pC->WriterVideoAU.size = pC->ReaderVideoAU.m_size;
+
+            /* Call internal function to change AU timescale */
+            err = M4MCS_intChangeAUVideoTimescale(pC);
+
+            /**
+            * Convert CTS unit from milliseconds to timescale */
+            pC->WriterVideoAU.CTS =
+                (M4OSA_Time)((( pC->ReaderVideoAU.m_CTS - pC->dViDecStartingCts)
+                * (pC->WriterVideoStream.timeScale / 1000.0)));
+            pC->WriterVideoAU.nbFrag = 0;
+            pC->WriterVideoAU.attribute = pC->ReaderVideoAU.m_attribute;
+        }
+        else
+
+#endif
+
+        {
+            /**
+            * Copy video data from reader AU to writer AU */
+            M4OSA_TRACE3_1(
+                "M4MCS_intVideoNullEncoding(): Copying video AU: size=%d",
+                pC->ReaderVideoAU.m_size);
+            /* + CRLV6775 -H.264 Trimming */
+            if( M4OSA_TRUE == pC->bH264Trim )
+            {
+                if( pC->H264MCSTempBufferSize
+                    < (pC->ReaderVideoAU.m_size + 2048) )
+                {
+                    pC->H264MCSTempBufferSize =
+                        (pC->ReaderVideoAU.m_size + 2048);
+
+                    if( pC->H264MCSTempBuffer != M4OSA_NULL )
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pC->H264MCSTempBuffer);
+                    }
+                    pC->H264MCSTempBuffer =
+                        (M4OSA_UInt8 *)M4OSA_malloc(pC->H264MCSTempBufferSize,
+                        M4MCS, (M4OSA_Char *)"pC->H264MCSTempBuffer");
+
+                    if( pC->H264MCSTempBuffer == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4MCS_intVideoNullEncoding(): allocation error");
+                        return M4ERR_ALLOC;
+                    }
+                }
+
+                pC->H264MCSTempBufferDataSize = pC->H264MCSTempBufferSize;
+
+                err = H264MCS_ProcessNALU(pC->m_pInstance,
+                    (M4OSA_UInt8 *)pC->ReaderVideoAU.m_dataAddress,
+                    pC->ReaderVideoAU.m_size, pC->H264MCSTempBuffer,
+                    (M4OSA_Int32 *)&pC->H264MCSTempBufferDataSize);
+
+                if( pC->m_pInstance->is_done == 1 )
+                {
+                    M4MCS_convetFromByteStreamtoNALStream(
+                        (M4OSA_UInt8 *)pC->ReaderVideoAU.m_dataAddress ,
+                        pC->ReaderVideoAU.m_size);
+
+                    M4OSA_memcpy((M4OSA_MemAddr8)pC->WriterVideoAU.dataAddress,
+                        (M4OSA_MemAddr8)(pC->ReaderVideoAU.m_dataAddress + 4),
+                        pC->ReaderVideoAU.m_size - 4);
+                    pC->WriterVideoAU.size = pC->ReaderVideoAU.m_size - 4;
+                    WritebufferAdd =
+                        (M4OSA_MemAddr8)pC->WriterVideoAU.dataAddress;
+                }
+                else
+                {
+                    M4OSA_memcpy((M4OSA_MemAddr8)pC->WriterVideoAU.dataAddress,
+                        (M4OSA_MemAddr8)(pC->H264MCSTempBuffer + 4),
+                        pC->H264MCSTempBufferDataSize - 4);
+                    pC->WriterVideoAU.size = pC->H264MCSTempBufferDataSize - 4;
+                    WritebufferAdd =
+                        (M4OSA_MemAddr8)pC->WriterVideoAU.dataAddress;
+                }
+            }
+            /* H.264 Trimming */
+            else
+            {
+                M4OSA_memcpy((M4OSA_MemAddr8)pC->WriterVideoAU.dataAddress,
+                    (M4OSA_MemAddr8)pC->ReaderVideoAU.m_dataAddress,
+                    pC->ReaderVideoAU.m_size);
+                pC->WriterVideoAU.size = pC->ReaderVideoAU.m_size;
+            }
+            /**
+            * Convert CTS unit from milliseconds to timescale */
+            pC->WriterVideoAU.CTS =
+                (M4OSA_Time)((( pC->ReaderVideoAU.m_CTS - pC->dViDecStartingCts)
+                * (pC->WriterVideoStream.timeScale / 1000.0)));
+            pC->WriterVideoAU.nbFrag = 0;
+            pC->WriterVideoAU.attribute = pC->ReaderVideoAU.m_attribute;
+
+            M4OSA_TRACE3_1("M4MCS_intVideoNullEncoding(): video AU: CTS=%d ms",
+                pC->WriterVideoAU.CTS);
+        }
+
+        /**
+        * Write it to the output file */
+        pC->uiVideoAUCount++;
+        err = pC->pWriterDataFcts->pProcessAU(pC->pWriterContext,
+            M4MCS_WRITER_VIDEO_STREAM_ID, &pC->WriterVideoAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_intVideoNullEncoding(): pWriterDataFcts->pProcessAU(Video) returns 0x%x",
+                err);
+            return err;
+        }
+        /* + CRLV6775 -H.264 Trimming */
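+        /* The AU payload was copied above without its first 4 bytes; after pProcessAU has
+         * written it, those 4 bytes (presumably the 4-byte NALU length / start-code field)
+         * are copied back just in front of the written payload, which assumes the writer
+         * exposes 4 writable bytes before dataAddress. */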
+        if( M4OSA_TRUE == pC->bH264Trim )
+        {
+            if( pC->m_pInstance->is_done == 1 )
+            {
+                M4OSA_memcpy((M4OSA_MemAddr8)(WritebufferAdd - 4),
+                    (M4OSA_MemAddr8)(pC->ReaderVideoAU.m_dataAddress), 4);
+            }
+            else
+            {
+                M4OSA_memcpy((M4OSA_MemAddr8)(WritebufferAdd - 4),
+                    (M4OSA_MemAddr8)(pC->H264MCSTempBuffer), 4);
+            }
+        } /* H.264 Trimming */
+    }
+    /**
+    * Check for end cut. */
+    /* Bug fix 11/12/2008: the output video duration must be less than or equal to the
+    requested one; the (2*videoAUDuration) margin gives a more precise end cut */
+    if( pC->ReaderVideoAU.m_CTS + (2 *videoAUDuration) > pC->uiEndCutTime )
+    {
+        pC->VideoState = M4MCS_kStreamState_FINISHED;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_intVideoNullEncoding(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intVideoTranscoding(M4MCS_InternalContext* pC)
+ * @author   Alexis Vapillon (NXP Software Vision)
+ * @return   M4NO_ERROR:         No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intVideoTranscoding( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4_MediaTime mtTranscodedTime = 0.0;
+    M4ENCODER_FrameMode FrameMode;
+    M4OSA_Int32 derive = 0;
+
+    /**
+    * Get video CTS to decode */
+    mtTranscodedTime = pC->dViDecCurrentCts;
+    FrameMode = M4ENCODER_kNormalFrame;
+
+    /**
+    * Decode video */
+    M4OSA_TRACE3_1(
+        "M4MCS_intVideoTranscoding(): Calling m_pVideoDecoder->m_pFctDecode(%.2f)",
+        mtTranscodedTime);
+    pC->isRenderDup = M4OSA_FALSE;
+    err = pC->m_pVideoDecoder->m_pFctDecode(pC->pViDecCtxt, &mtTranscodedTime,
+        M4OSA_FALSE);
+
+    if( M4WAR_NO_MORE_AU == err )
+    {
+        FrameMode =
+            M4ENCODER_kLastFrame; /**< We will give this value to the encoder to
+            ask for the end of the encoding */
+        pC->VideoState = M4MCS_kStreamState_FINISHED;
+    }
+    else if( err == M4WAR_VIDEORENDERER_NO_NEW_FRAME )
+    {
+        M4OSA_TRACE2_0("Decoding output the same frame as before 3");
+        pC->isRenderDup = M4OSA_TRUE;
+    }
+    else if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_intVideoTranscoding(): m_pVideoDecoder->m_pFctDecode returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Check for end cut.
+    * We must check here if the end cut is reached, because in that case we must
+    * call the last encode step (-> bLastFrame set to true) */
+    if( ( pC->dViDecCurrentCts + pC->dCtsIncrement + 0.5) >= (pC->uiEndCutTime
+        + M4MCS_ABS(pC->dViDecStartingCts - pC->uiBeginCutTime)) )
+    {
+        FrameMode =
+            M4ENCODER_kLastFrame; /**< We will give this value to the encoder to
+            ask for the end of the encoding */
+        pC->VideoState = M4MCS_kStreamState_FINISHED;
+        derive = (M4OSA_Int32)(( pC->dViDecCurrentCts + pC->dCtsIncrement + 0.5)
+            - (pC->uiEndCutTime
+            + M4MCS_ABS(pC->dViDecStartingCts - pC->uiBeginCutTime)));
+    }
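+    /* 'derive' measures how far the last frame overshoots the end cut; half of it is
+     * subtracted from the duration passed to the encoder below, presumably to keep the
+     * encoded clip duration closer to the requested end cut. */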
+
+    /* Update the starting CTS to a more precise value
+    (the begin cut is not a real CTS) */
+    if( pC->uiVideoAUCount == 0 )
+    {
+        pC->dViDecStartingCts = mtTranscodedTime;
+        pC->dViDecCurrentCts = pC->dViDecStartingCts;
+    }
+
+    /**
+    * Encode video */
+    M4OSA_TRACE3_1(
+        "M4MCS_intVideoTranscoding(): Calling pVideoEncoderGlobalFcts->pFctEncode with videoCts\
+         = %.2f",pC->ReaderVideoAU.m_CTS);
+    pC->uiVideoAUCount++;
+    /* update the given duration (the begin cut is not a real CTS)*/
+    err = pC->pVideoEncoderGlobalFcts->pFctEncode(pC->pViEncCtxt, M4OSA_NULL,
+        (pC->dViDecCurrentCts - pC->dViDecStartingCts - (derive >> 1)),
+        FrameMode);
+
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intGetInputClipProperties(M4MCS_InternalContext* pContext)
+ * @author   Dounya Manai (NXP Software Vision)
+ * @brief    Retrieve the properties of the audio and video streams from the input file.
+ * @param    pContext            (IN) MCS context
+ * @return   M4NO_ERROR:         No error
+ * @return   M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intGetInputClipProperties( M4MCS_InternalContext *pC )
+{
+    M4DECODER_MPEG4_DecoderConfigInfo DecConfInfo;
+    M4READER_3GP_H263Properties H263prop;
+    M4OSA_ERR err;
+    M4OSA_UInt32 videoBitrate;
+    M4DECODER_AVCProfileLevel AVCProfle;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    M4DECODER_VideoSize videoSize;
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    M4_AACType iAacType = 0;
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2(M4OSA_NULL == pC, M4ERR_PARAMETER,
+        "M4MCS_intGetInputClipProperties: pC is M4OSA_NULL");
+
+    /**
+    * Reset common characteristics */
+    pC->InputFileProperties.bAnalysed = M4OSA_FALSE;
+    pC->InputFileProperties.FileType = 0;
+    pC->InputFileProperties.Version[0] = M4VIDEOEDITING_VERSION_MAJOR;
+    pC->InputFileProperties.Version[1] = M4VIDEOEDITING_VERSION_MINOR;
+    pC->InputFileProperties.Version[2] = M4VIDEOEDITING_VERSION_REVISION;
+    pC->InputFileProperties.uiClipDuration = 0;
+
+    M4OSA_memset((M4OSA_MemAddr8) &pC->InputFileProperties.ftyp,
+        sizeof(M4VIDEOEDITING_FtypBox), 0);
+
+    /**
+    * Reset video characteristics */
+    pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kNoneVideo;
+    pC->InputFileProperties.uiClipVideoDuration = 0;
+    pC->InputFileProperties.uiVideoBitrate = 0;
+    pC->InputFileProperties.uiVideoMaxAuSize = 0;
+    pC->InputFileProperties.uiVideoWidth = 0;
+    pC->InputFileProperties.uiVideoHeight = 0;
+    pC->InputFileProperties.uiVideoTimeScale = 0;
+    pC->InputFileProperties.fAverageFrameRate = 0.0;
+    pC->InputFileProperties.ProfileAndLevel =
+        M4VIDEOEDITING_kProfile_and_Level_Out_Of_Range;
+    pC->InputFileProperties.uiH263level = 0;
+    pC->InputFileProperties.uiVideoProfile = 0;
+    pC->InputFileProperties.bMPEG4dataPartition = M4OSA_FALSE;
+    pC->InputFileProperties.bMPEG4rvlc = M4OSA_FALSE;
+    pC->InputFileProperties.bMPEG4resynchMarker = M4OSA_FALSE;
+
+    /**
+    * Reset audio characteristics */
+    pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
+    pC->InputFileProperties.uiClipAudioDuration = 0;
+    pC->InputFileProperties.uiAudioBitrate = 0;
+    pC->InputFileProperties.uiAudioMaxAuSize = 0;
+    pC->InputFileProperties.uiNbChannels = 0;
+    pC->InputFileProperties.uiSamplingFrequency = 0;
+    pC->InputFileProperties.uiExtendedSamplingFrequency = 0;
+    pC->InputFileProperties.uiDecodedPcmSize = 0;
+
+    /* Reset compatibility chart (not used in MCS) */
+    pC->InputFileProperties.bVideoIsEditable = M4OSA_FALSE;
+    pC->InputFileProperties.bAudioIsEditable = M4OSA_FALSE;
+    pC->InputFileProperties.bVideoIsCompatibleWithMasterClip = M4OSA_FALSE;
+    pC->InputFileProperties.bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
+
+    /**
+    * Video stream properties */
+    if( M4OSA_NULL != pC->pReaderVideoStream )
+    {
+        switch( pC->pReaderVideoStream->m_basicProperties.m_streamType )
+        {
+            case M4DA_StreamTypeVideoMpeg4:
+                pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kMPEG4;
+                break;
+
+            case M4DA_StreamTypeVideoH263:
+                pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kH263;
+                break;
+
+            case M4DA_StreamTypeVideoMpeg4Avc:
+                pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kH264;
+                break;
+
+            case M4DA_StreamTypeUnknown:
+            default:
+                pC->InputFileProperties.VideoStreamType =
+                    M4VIDEOEDITING_kUnsupportedVideo;
+                break;
+        }
+
+        /* If the bitrate is not available, derive an estimate from the overall bitrate */
+        pC->InputFileProperties.uiVideoBitrate =
+            pC->pReaderVideoStream->m_basicProperties.m_averageBitRate;
+
+        if( 0 == pC->InputFileProperties.uiVideoBitrate )
+        {
+            pC->m_pReader->m_pFctGetOption(pC->pReaderContext,
+                M4READER_kOptionID_Bitrate, &videoBitrate);
+
+            if( M4OSA_NULL != pC->pReaderAudioStream )
+            {
+                /* we get the overall bitrate; subtract the audio bitrate, if any */
+                videoBitrate -=
+                    pC->pReaderAudioStream->m_basicProperties.m_averageBitRate;
+            }
+            pC->InputFileProperties.uiVideoBitrate = videoBitrate;
+        }
+
+        /**
+        * Retrieve the Profile & Level */
+        if( ( M4VIDEOEDITING_kH263 != pC->InputFileProperties.VideoStreamType)
+            && (M4VIDEOEDITING_kH264
+            != pC->InputFileProperties.VideoStreamType) )
+        {
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+            /* Use the DSI parsing function from the external video shell decoder.
+            See the comments in M4VSS3GPP_ClipAnalysis.c, it's pretty much the
+            same issue. */
+
+            err = M4DECODER_EXTERNAL_ParseVideoDSI(pC->pReaderVideoStream->
+                m_basicProperties.m_pDecoderSpecificInfo,
+                pC->pReaderVideoStream->
+                m_basicProperties.m_decoderSpecificInfoSize,
+                &DecConfInfo, &videoSize);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intGetInputClipProperties():\
+                     M4DECODER_EXTERNAL_ParseVideoDSI returns 0x%08X",
+                    err);
+                return err;
+            }
+
+            pC->pReaderVideoStream->m_videoWidth = videoSize.m_uiWidth;
+            pC->pReaderVideoStream->m_videoHeight = videoSize.m_uiHeight;
+
+#else
+            /*FB 2009-02-09: add a check on the video decoder context to
+            avoid a crash when the MCS is compiled with audio codecs only*/
+
+            if( pC->m_pVideoDecoder != M4OSA_NULL )
+            {
+                if( M4OSA_NULL == pC->pViDecCtxt )
+                {
+                    err = pC->m_pVideoDecoder->m_pFctCreate(&pC->pViDecCtxt,
+                        &pC->pReaderVideoStream->m_basicProperties,
+                        pC->m_pReaderDataIt, &pC->ReaderVideoAU,
+                        M4OSA_NULL);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4MCS_intGetInputClipProperties:\
+                             m_pVideoDecoder->m_pFctCreate returns 0x%x!",
+                            err);
+                        return err;
+                    }
+                }
+
+                err = pC->m_pVideoDecoder->m_pFctGetOption(pC->pViDecCtxt,
+                    M4DECODER_MPEG4_kOptionID_DecoderConfigInfo,
+                    &DecConfInfo);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4MCS_intGetInputClipProperties:\
+                         m_pVideoDecoder->m_pFctGetOption returns 0x%x!",
+                        err);
+                    return err;
+                }
+            }
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+            pC->InputFileProperties.uiVideoProfile = DecConfInfo.uiProfile;
+            pC->InputFileProperties.uiVideoTimeScale = DecConfInfo.uiTimeScale;
+            pC->InputFileProperties.bMPEG4dataPartition =
+                DecConfInfo.bDataPartition;
+            pC->InputFileProperties.bMPEG4rvlc = DecConfInfo.bUseOfRVLC;
+            pC->InputFileProperties.bMPEG4resynchMarker =
+                DecConfInfo.uiUseOfResynchMarker;
+
+            /* Supported enum value for profile and level */
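+            /* These codes appear to be the MPEG-4 Visual profile_and_level_indication
+             * values (Simple Profile levels, ISO/IEC 14496-2). */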
+            switch( pC->InputFileProperties.uiVideoProfile )
+            {
+                case 0x08:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kMPEG4_SP_Level_0;
+                    break;
+
+                case 0x09:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kMPEG4_SP_Level_0b;
+                    break;
+
+                case 0x01:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kMPEG4_SP_Level_1;
+                    break;
+
+                case 0x02:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kMPEG4_SP_Level_2;
+                    break;
+
+                case 0x03:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kMPEG4_SP_Level_3;
+                    break;
+
+                case 0x04:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kMPEG4_SP_Level_4a;
+                    break;
+
+                case 0x05:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kMPEG4_SP_Level_5;
+                    break;
+            }
+        }
+        else if( M4VIDEOEDITING_kH263
+            == pC->InputFileProperties.VideoStreamType )
+        {
+            err = pC->m_pReader->m_pFctGetOption(pC->pReaderContext,
+                M4READER_3GP_kOptionID_H263Properties, &H263prop);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intGetInputClipProperties: m_pReader->m_pFctGetOption returns 0x%x!",
+                    err);
+                return err;
+            }
+
+            pC->InputFileProperties.uiH263level = H263prop.uiLevel;
+            pC->InputFileProperties.uiVideoProfile = H263prop.uiProfile;
+
+            /* Supported enum value for profile and level */
+            if( pC->InputFileProperties.uiVideoProfile == 0 )
+            {
+                switch( pC->InputFileProperties.uiH263level )
+                {
+                    case 10:
+                        pC->InputFileProperties.ProfileAndLevel =
+                            M4VIDEOEDITING_kH263_Profile_0_Level_10;
+                        break;
+
+                    case 20:
+                        pC->InputFileProperties.ProfileAndLevel =
+                            M4VIDEOEDITING_kH263_Profile_0_Level_20;
+                        break;
+
+                    case 30:
+                        pC->InputFileProperties.ProfileAndLevel =
+                            M4VIDEOEDITING_kH263_Profile_0_Level_30;
+                        break;
+
+                    case 40:
+                        pC->InputFileProperties.ProfileAndLevel =
+                            M4VIDEOEDITING_kH263_Profile_0_Level_40;
+                        break;
+
+                    case 45:
+                        pC->InputFileProperties.ProfileAndLevel =
+                            M4VIDEOEDITING_kH263_Profile_0_Level_45;
+                        break;
+                }
+            }
+
+            /* For H.263, set the default timescale: 30000:1001 */
+            pC->InputFileProperties.uiVideoTimeScale = 30000;
+        }
+        else if( M4VIDEOEDITING_kH264
+            == pC->InputFileProperties.VideoStreamType )
+        {
+            AVCProfle = M4DECODER_AVC_kProfile_and_Level_Out_Of_Range;
+            pC->InputFileProperties.uiVideoTimeScale = 30000;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+            err = M4DECODER_EXTERNAL_ParseAVCDSI(pC->pReaderVideoStream->
+                m_basicProperties.m_pDecoderSpecificInfo,
+                pC->pReaderVideoStream->
+                m_basicProperties.m_decoderSpecificInfoSize,
+                &AVCProfle);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intGetInputClipProperties():\
+                     M4DECODER_EXTERNAL_ParseAVCDSI returns 0x%08X",
+                    err);
+                return err;
+            }
+
+#else
+
+            if( pC->m_pVideoDecoder != M4OSA_NULL )
+            {
+                if( M4OSA_NULL == pC->pViDecCtxt )
+                {
+                    err = pC->m_pVideoDecoder->m_pFctCreate(&pC->pViDecCtxt,
+                        &pC->pReaderVideoStream->m_basicProperties,
+                        pC->m_pReaderDataIt, &pC->ReaderVideoAU,
+                        M4OSA_NULL);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4MCS_intGetInputClipProperties:\
+                             m_pVideoDecoder->m_pFctCreate returns 0x%x!",
+                            err);
+                        return err;
+                    }
+                }
+                err = pC->m_pVideoDecoder->m_pFctGetOption(pC->pViDecCtxt,
+                    M4DECODER_kOptionID_AVCProfileAndLevel, &AVCProfle);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4MCS_intGetInputClipProperties:\
+                         m_pVideoDecoder->m_pFctGetOption returns 0x%x!",
+                        err);
+                    return err;
+                }
+            }
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+            switch( AVCProfle )
+            {
+                case M4DECODER_AVC_kProfile_0_Level_1:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_1;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_1b:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_1b;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_1_1:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_1_1;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_1_2:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_1_2;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_1_3:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_1_3;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_2:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_2;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_2_1:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_2_1;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_2_2:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_2_2;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_3:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_3;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_3_1:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_3_1;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_3_2:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_3_2;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_4:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_4;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_4_1:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_4_1;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_4_2:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_4_2;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_5:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_5;
+                    break;
+
+                case M4DECODER_AVC_kProfile_0_Level_5_1:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kH264_Profile_0_Level_5_1;
+                    break;
+
+                case M4DECODER_AVC_kProfile_and_Level_Out_Of_Range:
+                default:
+                    pC->InputFileProperties.ProfileAndLevel =
+                        M4VIDEOEDITING_kProfile_and_Level_Out_Of_Range;
+            }
+        }
+
+        /* Set here because width and height are only correct after DSI parsing
+        (done in the decoder create) */
+        pC->InputFileProperties.uiVideoHeight =
+            pC->pReaderVideoStream->m_videoHeight;
+        pC->InputFileProperties.uiVideoWidth =
+            pC->pReaderVideoStream->m_videoWidth;
+        pC->InputFileProperties.uiClipVideoDuration =
+            (M4OSA_UInt32)pC->pReaderVideoStream->m_basicProperties.m_duration;
+        pC->InputFileProperties.fAverageFrameRate =
+            pC->pReaderVideoStream->m_averageFrameRate;
+        pC->InputFileProperties.uiVideoMaxAuSize =
+            pC->pReaderVideoStream->m_basicProperties.m_maxAUSize;
+    }
+    else
+    {
+        if( M4OSA_TRUE == pC->bUnsupportedVideoFound )
+        {
+            pC->InputFileProperties.VideoStreamType =
+                M4VIDEOEDITING_kUnsupportedVideo;
+        }
+        else
+        {
+            pC->InputFileProperties.VideoStreamType = M4VIDEOEDITING_kNoneVideo;
+        }
+    }
+
+    /**
+    * Audio stream properties */
+    if( M4OSA_NULL != pC->pReaderAudioStream )
+    {
+        switch( pC->pReaderAudioStream->m_basicProperties.m_streamType )
+        {
+            case M4DA_StreamTypeAudioAmrNarrowBand:
+                pC->InputFileProperties.AudioStreamType =
+                    M4VIDEOEDITING_kAMR_NB;
+                break;
+
+            case M4DA_StreamTypeAudioAac:
+                pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kAAC;
+                break;
+
+            case M4DA_StreamTypeAudioMp3:
+                pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kMP3;
+                break;
+
+            case M4DA_StreamTypeAudioEvrc:
+                pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kEVRC;
+                break;
+
+            case M4DA_StreamTypeUnknown:
+            default:
+                pC->InputFileProperties.AudioStreamType =
+                    M4VIDEOEDITING_kUnsupportedAudio;
+                break;
+        }
+
+        if( ( M4OSA_NULL != pC->m_pAudioDecoder)
+            && (M4OSA_NULL == pC->pAudioDecCtxt) )
+        {
+            M4OSA_TRACE3_1(
+                "M4MCS_intGetInputClipProperties: calling CreateAudioDecoder, userData= 0x%x",
+                pC->m_pCurrentAudioDecoderUserData);
+            /* Trick: pUserData is used to retrieve the AAC properties, pending a
+             better implementation... */
+            if( M4DA_StreamTypeAudioAac
+                == pC->pReaderAudioStream->m_basicProperties.m_streamType )
+            {
+                if( M4OSA_FALSE == pC->bExtOMXAudDecoder )
+                    err = pC->m_pAudioDecoder->m_pFctCreateAudioDec(
+                    &pC->pAudioDecCtxt,
+                    pC->pReaderAudioStream, &(pC->AacProperties));
+                else
+                {
+                    err = pC->m_pAudioDecoder->m_pFctCreateAudioDec(
+                        &pC->pAudioDecCtxt, pC->pReaderAudioStream,
+                        pC->m_pCurrentAudioDecoderUserData);
+
+                    if( M4NO_ERROR == err )
+                    {
+                        /* AAC properties*/
+                        //get them from the reader; temporary, until the audio decoder shell API
+                        //can provide the AAC properties
+                        pC->AacProperties.aNumChan =
+                            pC->pReaderAudioStream->m_nbChannels;
+                        pC->AacProperties.aSampFreq =
+                            pC->pReaderAudioStream->m_samplingFrequency;
+
+                        err = pC->m_pAudioDecoder->m_pFctGetOptionAudioDec(
+                            pC->pAudioDecCtxt, M4AD_kOptionID_StreamType,
+                            (M4OSA_DataOption) &iAacType);
+
+                        if( M4NO_ERROR != err )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4MCS_intGetInputClipProperties:\
+                                 m_pAudioDecoder->m_pFctGetOptionAudioDec returns err 0x%x",
+                                err);
+                            iAacType = M4_kAAC; //set to default
+                            err = M4NO_ERROR;
+                        }
+                        else
+                        {
+                            M4OSA_TRACE3_1(
+                                "M4MCS_intGetInputClipProperties:\
+                                 m_pAudioDecoder->m_pFctGetOptionAudioDec returns streamType %d",
+                                iAacType);
+                        }
+
+                        switch( iAacType )
+                        {
+                            case M4_kAAC:
+                                pC->AacProperties.aSBRPresent = 0;
+                                pC->AacProperties.aPSPresent = 0;
+                                break;
+
+                            case M4_kAACplus:
+                                pC->AacProperties.aSBRPresent = 1;
+                                pC->AacProperties.aPSPresent = 0;
+                                pC->AacProperties.aExtensionSampFreq =
+                                    pC->pReaderAudioStream->
+                                    m_samplingFrequency; //TODO
+                                break;
+
+                            case M4_keAACplus:
+                                pC->AacProperties.aSBRPresent = 1;
+                                pC->AacProperties.aPSPresent = 1;
+                                pC->AacProperties.aExtensionSampFreq =
+                                    pC->pReaderAudioStream->
+                                    m_samplingFrequency; //TODO
+                                break;
+                            case M4_kUnknown:
+                                break;
+
+                            default:
+                                break;
+                        }
+
+                        M4OSA_TRACE3_2(
+                            "M4MCS_intGetInputClipProperties: AAC NBChans=%d, SamplFreq=%d",
+                            pC->AacProperties.aNumChan,
+                            pC->AacProperties.aSampFreq);
+                    }
+                }
+            }
+            else
+                err = pC->m_pAudioDecoder->m_pFctCreateAudioDec(
+                &pC->pAudioDecCtxt, pC->pReaderAudioStream,
+                pC->m_pCurrentAudioDecoderUserData);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_intGetInputClipProperties:\
+                     m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+
+        //EVRC
+        if( pC->pReaderAudioStream->m_basicProperties.m_streamType
+            == M4DA_StreamTypeAudioEvrc )
+        {
+            /* decoder not implemented yet, provide some default values for the null encoding */
+            pC->pReaderAudioStream->m_nbChannels = 1;
+            pC->pReaderAudioStream->m_samplingFrequency = 8000;
+        }
+
+        /**
+        * Bugfix P4ME00001128: With some IMTC files, the AMR bit rate is 0 kbps according
+         to the GetProperties function */
+        if( 0 == pC->pReaderAudioStream->m_basicProperties.m_averageBitRate )
+        {
+            if( M4VIDEOEDITING_kAMR_NB
+                == pC->InputFileProperties.AudioStreamType )
+            {
+                /**
+                * Better to return a guessed 12.2 kbps value than a sure-to-be-false
+                0 kbps value! */
+                pC->InputFileProperties.uiAudioBitrate =
+                    M4VIDEOEDITING_k12_2_KBPS;
+            }
+            else if( M4VIDEOEDITING_kEVRC
+                == pC->InputFileProperties.AudioStreamType )
+            {
+                /**
+                * Better to return a guessed 9.2 kbps value than a sure-to-be-false
+                0 kbps value! */
+                pC->InputFileProperties.uiAudioBitrate =
+                    M4VIDEOEDITING_k9_2_KBPS;
+            }
+            else
+            {
+                M4OSA_UInt32 FileBitrate;
+
+                /* This can also happen for AAC; in this case we compute an approximate */
+                /* value from the global bitrate and the video bitrate */
+                err = pC->m_pReader->m_pFctGetOption(pC->pReaderContext,
+                    M4READER_kOptionID_Bitrate,
+                    (M4OSA_DataOption) &FileBitrate);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4MCS_intGetInputClipProperties: M4READER_kOptionID_Bitrate returns 0x%x",
+                        err);
+                    return err;
+                }
+                /* uiVideoBitrate is normally set to 0 if there is no video */
+                pC->InputFileProperties.uiAudioBitrate =
+                    FileBitrate - pC->InputFileProperties.uiVideoBitrate;
+            }
+        }
+        else
+        {
+            pC->InputFileProperties.uiAudioBitrate =
+                pC->pReaderAudioStream->m_basicProperties.m_averageBitRate;
+        }
+
+        pC->InputFileProperties.uiNbChannels =
+            pC->pReaderAudioStream->m_nbChannels;
+        pC->InputFileProperties.uiSamplingFrequency =
+            pC->pReaderAudioStream->m_samplingFrequency;
+        pC->InputFileProperties.uiClipAudioDuration =
+            (M4OSA_UInt32)pC->pReaderAudioStream->m_basicProperties.m_duration;
+        pC->InputFileProperties.uiAudioMaxAuSize =
+            pC->pReaderAudioStream->m_basicProperties.m_maxAUSize;
+
+        /* Bug: with aac, value is 0 until decoder start() is called */
+        pC->InputFileProperties.uiDecodedPcmSize =
+            pC->pReaderAudioStream->m_byteFrameLength
+            * pC->pReaderAudioStream->m_byteSampleSize
+            * pC->pReaderAudioStream->m_nbChannels;
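+
+        /* Illustrative note (assumed figures, not from the original code): for a stereo AAC
+         stream with 1024 samples per frame and 16-bit (2-byte) samples, this computes
+         1024 * 2 * 2 = 4096 bytes of PCM per decoded frame. */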
+
+        /* New aac properties */
+        if( M4DA_StreamTypeAudioAac
+            == pC->pReaderAudioStream->m_basicProperties.m_streamType )
+        {
+            pC->InputFileProperties.uiNbChannels = pC->AacProperties.aNumChan;
+            pC->InputFileProperties.uiSamplingFrequency =
+                pC->AacProperties.aSampFreq;
+
+            if( pC->AacProperties.aSBRPresent )
+            {
+                pC->InputFileProperties.AudioStreamType =
+                    M4VIDEOEDITING_kAACplus;
+                pC->InputFileProperties.uiExtendedSamplingFrequency =
+                    pC->AacProperties.aExtensionSampFreq;
+            }
+
+            if( pC->AacProperties.aPSPresent )
+            {
+                pC->InputFileProperties.AudioStreamType =
+                    M4VIDEOEDITING_keAACplus;
+            }
+        }
+    }
+    else
+    {
+        if( M4OSA_TRUE == pC->bUnsupportedAudioFound )
+        {
+            pC->InputFileProperties.AudioStreamType =
+                M4VIDEOEDITING_kUnsupportedAudio;
+        }
+        else
+        {
+            pC->InputFileProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
+        }
+    }
+
+    /* Get 'ftyp' atom */
+    err = pC->m_pReader->m_pFctGetOption(pC->pReaderContext,
+        M4READER_kOptionID_3gpFtypBox, &pC->InputFileProperties.ftyp);
+
+    if( M4NO_ERROR == err )
+    {
+        M4OSA_UInt8 i;
+
+        for ( i = 0; i < pC->InputFileProperties.ftyp.nbCompatibleBrands; i++ )
+            if( M4VIDEOEDITING_BRAND_EMP
+                == pC->InputFileProperties.ftyp.compatible_brands[i] )
+                pC->InputFileProperties.VideoStreamType =
+                M4VIDEOEDITING_kMPEG4_EMP;
+    }
+
+    /* Analysis is successful */
+    if( pC->InputFileProperties.uiClipVideoDuration
+        > pC->InputFileProperties.uiClipAudioDuration )
+        pC->InputFileProperties.uiClipDuration =
+        pC->InputFileProperties.uiClipVideoDuration;
+    else
+        pC->InputFileProperties.uiClipDuration =
+        pC->InputFileProperties.uiClipAudioDuration;
+
+    pC->InputFileProperties.FileType = pC->InputFileType;
+    pC->InputFileProperties.bAnalysed = M4OSA_TRUE;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4MCS_intGetFrameSize_AMRNB(M4OSA_MemAddr8 pAudioFrame)
+ * @brief   Return the length, in bytes, of the AMR Narrow-Band frame contained in the given buffer
+ * @note
+ * @param   pAudioFrame     (IN) AMR-NB frame
+ * @return  Frame size in bytes (0 if the frame type is unknown or carries no data)
+ ******************************************************************************
+ */
+static M4OSA_UInt32 M4MCS_intGetFrameSize_AMRNB( M4OSA_MemAddr8 pAudioFrame )
+{
+    M4OSA_UInt32 frameSize = 0;
+    M4OSA_UInt32 frameType = ( ( *pAudioFrame) &(0xF << 3)) >> 3;
+
+    switch( frameType )
+    {
+        case 0:
+            frameSize = 95;
+            break; /*  4750 bps */
+
+        case 1:
+            frameSize = 103;
+            break; /*  5150 bps */
+
+        case 2:
+            frameSize = 118;
+            break; /*  5900 bps */
+
+        case 3:
+            frameSize = 134;
+            break; /*  6700 bps */
+
+        case 4:
+            frameSize = 148;
+            break; /*  7400 bps */
+
+        case 5:
+            frameSize = 159;
+            break; /*  7950 bps */
+
+        case 6:
+            frameSize = 204;
+            break; /* 10200 bps */
+
+        case 7:
+            frameSize = 244;
+            break; /* 12200 bps */
+
+        case 8:
+            frameSize = 39;
+            break; /* SID (Silence) */
+
+        case 15:
+            frameSize = 0;
+            break; /* No data */
+
+        default:
+            M4OSA_TRACE3_0(
+                "M4MCS_intGetFrameSize_AMRNB(): Corrupted AMR frame! returning 0.");
+            return 0;
+    }
+
+    return (1 + (( frameSize + 7) / 8));
+}
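+
+/* Illustrative sketch (not part of the original code): a worked example of the computation
+ * above for a hypothetical 12.2 kbps AMR-NB frame. The frame type is read from bits 3..6 of
+ * the first (ToC) byte; type 7 carries 244 payload bits, so the returned length is
+ * 1 ToC byte + ceil(244 / 8) = 32 bytes. */
+#if 0
+static void M4MCS_exampleAmrNbFrameSize( void )
+{
+    M4OSA_UInt8 toc = 0x3C; /* hypothetical ToC byte: frame type 7 (12.2 kbps) */
+    M4OSA_UInt32 size = M4MCS_intGetFrameSize_AMRNB((M4OSA_MemAddr8)&toc);
+    /* size == 32: 1 header byte + (244 + 7) / 8 payload bytes */
+}
+#endif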
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4MCS_intGetFrameSize_EVRC(M4OSA_MemAddr8 pAudioFrame)
+ * @brief   Return the length, in bytes, of the EVRC frame contained in the given buffer
+ * @note
+ *     0 1 2 3
+ *    +-+-+-+-+
+ *    |fr type|              RFC 3558
+ *    +-+-+-+-+
+ *
+ * Frame Type: 4 bits
+ *    The frame type indicates the type of the corresponding codec data
+ *    frame in the RTP packet.
+ *
+ * For EVRC and SMV codecs, the frame type values and size of the
+ * associated codec data frame are described in the table below:
+ *
+ * Value   Rate      Total codec data frame size (in octets)
+ * ---------------------------------------------------------
+ *   0     Blank      0    (0 bit)
+ *   1     1/8        2    (16 bits)
+ *   2     1/4        5    (40 bits; not valid for EVRC)
+ *   3     1/2       10    (80 bits)
+ *   4     1         22    (171 bits; 5 padded at end with zeros)
+ *   5     Erasure    0    (SHOULD NOT be transmitted by sender)
+ *
+ * @param   pAudioFrame     (IN) EVRC frame
+ * @return  Frame size in bytes (0 if the frame type is not valid)
+ ******************************************************************************
+ */
+static M4OSA_UInt32 M4MCS_intGetFrameSize_EVRC( M4OSA_MemAddr8 pAudioFrame )
+{
+    M4OSA_UInt32 frameSize = 0;
+    M4OSA_UInt32 frameType = ( *pAudioFrame) &0x0F;
+
+    switch( frameType )
+    {
+        case 0:
+            frameSize = 0;
+            break; /*  blank */
+
+        case 1:
+            frameSize = 16;
+            break; /*  1/8 */
+
+        case 2:
+            frameSize = 40;
+            break; /*  1/4 */
+
+        case 3:
+            frameSize = 80;
+            break; /*  1/2 */
+
+        case 4:
+            frameSize = 171;
+            break; /*  1 */
+
+        case 5:
+            frameSize = 0;
+            break; /*  erasure */
+
+        default:
+            M4OSA_TRACE3_0(
+                "M4MCS_intGetFrameSize_EVRC(): Corrupted EVRC frame! returning 0.");
+            return 0;
+    }
+
+    return (1 + (( frameSize + 7) / 8));
+}
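+
+/* Illustrative sketch (not part of the original code): for a hypothetical full-rate EVRC frame
+ * (frame type 4 in the low nibble of the first byte), the codec payload is 171 bits, so the
+ * function returns 1 frame-type byte + ceil(171 / 8) = 1 + 22 = 23 bytes; a blank frame
+ * (type 0) returns just the 1 frame-type byte. */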
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intCheckMaxFileSize(M4MCS_Context pContext)
+ * @brief    Check whether the max file size is large enough to encode a file with the
+ *           currently selected bitrates and duration.
+ * @param    pContext            (IN) MCS context
+ * @return   M4NO_ERROR
+ * @return   M4MCS_ERR_MAXFILESIZE_TOO_SMALL
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intCheckMaxFileSize( M4MCS_Context pContext )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+
+    M4OSA_UInt32 duration;
+    M4OSA_UInt32 audiobitrate;
+    M4OSA_UInt32 videobitrate;
+
+    /* free file size : OK */
+    if( pC->uiMaxFileSize == 0 )
+        return M4NO_ERROR;
+
+    /* duration */
+    if( pC->uiEndCutTime == 0 )
+    {
+        duration = pC->InputFileProperties.uiClipDuration - pC->uiBeginCutTime;
+    }
+    else
+    {
+        duration = pC->uiEndCutTime - pC->uiBeginCutTime;
+    }
+
+    /* audio bitrate */
+    if( pC->noaudio )
+    {
+        audiobitrate = 0;
+    }
+    else if( pC->AudioEncParams.Format == M4ENCODER_kAudioNULL )
+    {
+        audiobitrate = pC->InputFileProperties.uiAudioBitrate;
+    }
+    else if( pC->uiAudioBitrate == M4VIDEOEDITING_kUndefinedBitrate )
+    {
+        switch( pC->AudioEncParams.Format )
+        {
+            case M4ENCODER_kAMRNB:
+                audiobitrate = M4VIDEOEDITING_k12_2_KBPS;
+                break;
+                //EVRC
+                //            case M4ENCODER_kEVRC:
+                //                audiobitrate = M4VIDEOEDITING_k9_2_KBPS;
+                //                break;
+
+            default: /* AAC and MP3*/
+                audiobitrate =
+                    (pC->AudioEncParams.ChannelNum == M4ENCODER_kMono)
+                    ? M4VIDEOEDITING_k16_KBPS : M4VIDEOEDITING_k32_KBPS;
+                break;
+        }
+    }
+    else
+    {
+        audiobitrate = pC->uiAudioBitrate;
+    }
+
+    /* video bitrate */
+    if( pC->novideo )
+    {
+        videobitrate = 0;
+    }
+    else if( pC->EncodingVideoFormat == M4ENCODER_kNULL )
+    {
+        videobitrate = pC->InputFileProperties.uiVideoBitrate;
+    }
+    else if( pC->uiVideoBitrate == M4VIDEOEDITING_kUndefinedBitrate )
+    {
+        videobitrate = M4VIDEOEDITING_k16_KBPS;
+    }
+    else
+    {
+        videobitrate = pC->uiVideoBitrate;
+    }
+
+    /* max file size */
+    if( (M4OSA_UInt32)pC->uiMaxFileSize
+        < (M4OSA_UInt32)(M4MCS_MOOV_OVER_FILESIZE_RATIO
+        * (audiobitrate + videobitrate) * (duration / 8000.0)) )
+        return M4MCS_ERR_MAXFILESIZE_TOO_SMALL;
+    else
+        return M4NO_ERROR;
+}
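+
+/* Illustrative sketch (assumed figures, not from the original code): for a 60 s cut
+ * (duration = 60000 ms), a 12.2 kbps audio bitrate and a 512 kbps video bitrate, the estimate
+ * is M4MCS_MOOV_OVER_FILESIZE_RATIO * (12200 + 512000) * (60000 / 8000.0), i.e. roughly
+ * 3.9 MB scaled by the moov overhead ratio; any uiMaxFileSize below that value makes the
+ * function return M4MCS_ERR_MAXFILESIZE_TOO_SMALL. */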
+
+/**
+ ******************************************************************************
+ * M4VIDEOEDITING_Bitrate M4MCS_intGetNearestBitrate(M4OSA_Int32 freebitrate, M4OSA_Int8 mode)
+ * @brief    Returns the closest bitrate value from the enum list of type M4VIDEOEDITING_Bitrate
+ * @param    freebitrate: bitrate value, in bits per second
+ * @param    mode: -1: previous, 0: current, 1: next
+ * @return   bitrate value in enum list M4VIDEOEDITING_Bitrate
+ ******************************************************************************
+ */
+static M4VIDEOEDITING_Bitrate
+M4MCS_intGetNearestBitrate( M4OSA_Int32 freebitrate, M4OSA_Int8 mode )
+{
+    M4OSA_Int32 bitarray [] =
+    {
+        0, M4VIDEOEDITING_k16_KBPS, M4VIDEOEDITING_k24_KBPS,
+        M4VIDEOEDITING_k32_KBPS, M4VIDEOEDITING_k48_KBPS,
+        M4VIDEOEDITING_k64_KBPS, M4VIDEOEDITING_k96_KBPS,
+        M4VIDEOEDITING_k128_KBPS, M4VIDEOEDITING_k192_KBPS,
+        M4VIDEOEDITING_k256_KBPS, M4VIDEOEDITING_k288_KBPS,
+        M4VIDEOEDITING_k384_KBPS, M4VIDEOEDITING_k512_KBPS,
+        M4VIDEOEDITING_k800_KBPS, M4VIDEOEDITING_k2_MBPS,
+        M4VIDEOEDITING_k5_MBPS,
+        M4VIDEOEDITING_k8_MBPS, /*+ New Encoder bitrates */
+        M4OSA_INT32_MAX
+    };
+
+    /* Number of real bitrate entries (excludes the M4OSA_INT32_MAX sentinel) */
+    const M4OSA_UInt32 nbbitrates = sizeof(bitarray) / sizeof(bitarray[0]) - 1;
+    M4OSA_UInt32 i;
+
+    for ( i = 0; freebitrate >= bitarray[i]; i++ );
+
+    switch( mode )
+    {
+        case -1: /* previous */
+            if( i <= 2 )
+                return 0;
+            else
+                return bitarray[i - 2];
+            break;
+
+        case 0: /* current */
+            if( i <= 1 )
+                return 0;
+            else
+                return bitarray[i - 1];
+            break;
+
+        case 1: /* next */
+            if( i >= nbbitrates )
+                return M4OSA_INT32_MAX;
+            else
+                return bitarray[i];
+            break;
+    }
+
+    return 0;
+}
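+
+/* Illustrative sketch (not from the original code, assuming the enum values encode bits per
+ * second): for a freebitrate of 100000 bps the scan stops at the first entry strictly above it
+ * (M4VIDEOEDITING_k128_KBPS), so mode 0 ("current") returns M4VIDEOEDITING_k96_KBPS,
+ * mode -1 ("previous") returns M4VIDEOEDITING_k64_KBPS and mode 1 ("next") returns
+ * M4VIDEOEDITING_k128_KBPS. */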
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intCleanUp_ReadersDecoders(M4MCS_InternalContext* pC);
+ * @brief    Free all resources allocated by M4MCS_open()
+ * @param    pContext            (IN) MCS context
+ * @return   M4NO_ERROR:         No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4MCS_intCleanUp_ReadersDecoders( M4MCS_InternalContext *pC )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE2_1("M4MCS_intCleanUp_ReadersDecoders called with pC=0x%x", pC);
+
+    /* ----- Free reader stuff, if needed ----- */
+
+    /**< pReaderContext may be M4OSA_NULL if M4MCS_open was not called */
+    if( M4OSA_NULL != pC->pReaderContext )
+    {
+        err = pC->m_pReader->m_pFctClose(pC->pReaderContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1("M4MCS_cleanUp: m_pReader->m_pFctClose returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        err = pC->m_pReader->m_pFctDestroy(pC->pReaderContext);
+        pC->pReaderContext = M4OSA_NULL;
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_cleanUp: m_pReader->m_pFctDestroy returns 0x%x", err);
+            /**< don't return, we still have stuff to free */
+        }
+    }
+
+    if( pC->m_pDataAddress1 != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pDataAddress1);
+        pC->m_pDataAddress1 = M4OSA_NULL;
+    }
+
+    if( pC->m_pDataAddress2 != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pDataAddress2);
+        pC->m_pDataAddress2 = M4OSA_NULL;
+    }
+    /*Bug fix 11/12/2008 (to obtain more precise video end cut)*/
+    if( pC->m_pDataVideoAddress1 != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pDataVideoAddress1);
+        pC->m_pDataVideoAddress1 = M4OSA_NULL;
+    }
+
+    if( pC->m_pDataVideoAddress2 != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pDataVideoAddress2);
+        pC->m_pDataVideoAddress2 = M4OSA_NULL;
+    }
+    /**/
+    /* ----- Free video decoder stuff, if needed ----- */
+
+    if( M4OSA_NULL != pC->pViDecCtxt )
+    {
+        err = pC->m_pVideoDecoder->m_pFctDestroy(pC->pViDecCtxt);
+        pC->pViDecCtxt = M4OSA_NULL;
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_cleanUp: m_pVideoDecoder->pFctDestroy returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+    }
+
+    /* ----- Free the audio decoder stuff ----- */
+
+    if( M4OSA_NULL != pC->pAudioDecCtxt )
+    {
+        err = pC->m_pAudioDecoder->m_pFctDestroyAudioDec(pC->pAudioDecCtxt);
+        pC->pAudioDecCtxt = M4OSA_NULL;
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4MCS_cleanUp: m_pAudioDecoder->m_pFctDestroyAudioDec returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+    }
+
+    if( M4OSA_NULL != pC->AudioDecBufferOut.m_dataAddress )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->AudioDecBufferOut.m_dataAddress);
+        pC->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+    }
+
+    return M4NO_ERROR;
+}
+
+
+/**
+
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_open_normalMode(M4MCS_Context pContext, M4OSA_Void* pFileIn,
+ *                                 M4VIDEOEDITING_FileType InputFileType,
+ *                                 M4OSA_Void* pFileOut, M4OSA_Void* pTempFile);
+ * @brief   Set the MCS input and output files. It is the same as M4MCS_open without the
+ *          M4MCS_WITH_FAST_OPEN flag. It is used in VideoArtist.
+ * @note    It opens the input file, but the output file is not created yet.
+ * @param   pContext            (IN) MCS context
+ * @param   pFileIn             (IN) Input file to transcode (The type of this parameter
+ *                                   (URL, pipe...) depends on the OSAL implementation).
+ * @param   InputFileType       (IN) Container type (.3gp, .amr, ...) of the input file.
+ * @param   pFileOut            (IN) Output file to create (The type of this parameter
+ *                                   (URL, pipe...) depends on the OSAL implementation).
+ * @param   pTempFile           (IN) Temporary file for the constant memory writer to store
+ *                                 metadata ("moov.bin").
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ * @return  M4ERR_ALLOC:        There is no more available memory
+ * @return  M4ERR_FILE_NOT_FOUND:   The input file has not been found
+ * @return  M4MCS_ERR_INVALID_INPUT_FILE:   The input file is not a valid file, or is corrupted
+ * @return  M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM:  The input file contains no
+ *                                                         supported audio or video stream
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_open_normalMode(M4MCS_Context pContext, M4OSA_Void* pFileIn,
+                                 M4VIDEOEDITING_FileType InputFileType,
+                                  M4OSA_Void* pFileOut, M4OSA_Void* pTempFile)
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext*)(pContext);
+    M4OSA_ERR err;
+
+    M4READER_MediaFamily mediaFamily;
+    M4_StreamHandler* pStreamHandler;
+
+    M4OSA_TRACE2_3("M4MCS_open_normalMode called with pContext=0x%x, pFileIn=0x%x,\
+     pFileOut=0x%x", pContext, pFileIn, pFileOut);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+     "M4MCS_open_normalMode: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileIn) , M4ERR_PARAMETER,
+     "M4MCS_open_normalMode: pFileIn is M4OSA_NULL");
+
+    if ((InputFileType == M4VIDEOEDITING_kFileType_JPG)
+        ||(InputFileType == M4VIDEOEDITING_kFileType_PNG)
+        ||(InputFileType == M4VIDEOEDITING_kFileType_GIF)
+        ||(InputFileType == M4VIDEOEDITING_kFileType_BMP))
+    {
+        M4OSA_TRACE1_0("M4MCS_open_normalMode: Still picture is not\
+             supported with this function");
+        return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM;
+    }
+
+    /**
+    * Check state automaton */
+    if (M4MCS_kState_CREATED != pC->State)
+    {
+        M4OSA_TRACE1_1("M4MCS_open_normalMode(): Wrong State (%d), returning M4ERR_STATE",
+             pC->State);
+        return M4ERR_STATE;
+    }
+
+    /* Copy function input parameters into our context */
+    pC->pInputFile     = pFileIn;
+    pC->InputFileType  = InputFileType;
+    pC->pOutputFile    = pFileOut;
+    pC->pTemporaryFile = pTempFile;
+
+    /***********************************/
+    /* Open input file with the reader */
+    /***********************************/
+
+    err = M4MCS_setCurrentReader(pContext, pC->InputFileType);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Reset reader related variables */
+    pC->VideoState          = M4MCS_kStreamState_NOSTREAM;
+    pC->AudioState          = M4MCS_kStreamState_NOSTREAM;
+    pC->pReaderVideoStream  = M4OSA_NULL;
+    pC->pReaderAudioStream  = M4OSA_NULL;
+
+    /*******************************************************/
+    /* Initializes the reader shell and open the data file */
+    /*******************************************************/
+    err = pC->m_pReader->m_pFctCreate(&pC->pReaderContext);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4MCS_open_normalMode(): m_pReader->m_pFctCreate returns 0x%x", err);
+        return err;
+    }
+
+    /**
+    * Link the reader interface to the reader context */
+    pC->m_pReaderDataIt->m_readerContext = pC->pReaderContext;
+
+    /**
+    * Set the reader shell file access functions */
+    err = pC->m_pReader->m_pFctSetOption(pC->pReaderContext,
+         M4READER_kOptionID_SetOsaFileReaderFctsPtr,
+        (M4OSA_DataOption)pC->pOsaFileReadPtr);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4MCS_open_normalMode(): m_pReader->m_pFctSetOption returns 0x%x", err);
+        return err;
+    }
+
+    /**
+    * Open the input file */
+    err = pC->m_pReader->m_pFctOpen(pC->pReaderContext, pC->pInputFile);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_UInt32 uiDummy, uiCoreId;
+        M4OSA_TRACE1_1("M4MCS_open_normalMode(): m_pReader->m_pFctOpen returns 0x%x", err);
+
+        /**
+        * If the error is from the core reader, we change it to a public VXS error */
+        M4OSA_ERR_SPLIT(err, uiDummy, uiCoreId, uiDummy);
+        if (M4MP4_READER == uiCoreId)
+        {
+            M4OSA_TRACE1_0("M4MCS_open_normalMode(): returning M4MCS_ERR_INVALID_INPUT_FILE");
+            return M4MCS_ERR_INVALID_INPUT_FILE;
+        }
+        return err;
+    }
+
+    /**
+    * Get the streams from the input file */
+    while (M4NO_ERROR == err)
+    {
+        err = pC->m_pReader->m_pFctGetNextStream(pC->pReaderContext, &mediaFamily,
+            &pStreamHandler);
+
+        /**
+        * In case we found a BIFS stream or something else...*/
+        if((err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE))
+            || (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)))
+        {
+            err = M4NO_ERROR;
+            continue;
+        }
+
+        if (M4NO_ERROR == err) /**< One stream found */
+        {
+            /**
+            * Found the first video stream */
+            if ((M4READER_kMediaFamilyVideo == mediaFamily) \
+                && (M4OSA_NULL == pC->pReaderVideoStream))
+            {
+                if ((M4DA_StreamTypeVideoH263==pStreamHandler->m_streamType) ||
+                    (M4DA_StreamTypeVideoMpeg4==pStreamHandler->m_streamType)
+#ifdef M4VSS_SUPPORT_VIDEO_AVC
+                    ||(M4DA_StreamTypeVideoMpeg4Avc==pStreamHandler->m_streamType))
+#else
+                    ||((M4DA_StreamTypeVideoMpeg4Avc==pStreamHandler->m_streamType)
+                    &&(pC->m_pVideoDecoderItTable[M4DECODER_kVideoTypeAVC] != M4OSA_NULL)))
+#endif
+                {
+                    M4OSA_TRACE3_0("M4MCS_open_normalMode():\
+                     Found a H263 or MPEG-4 video stream in input 3gpp clip");
+
+                    /**
+                    * Keep pointer to the video stream */
+                    pC->pReaderVideoStream = (M4_VideoStreamHandler*)pStreamHandler;
+                    pC->bUnsupportedVideoFound = M4OSA_FALSE;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+
+                    /**
+                    * Init our video stream state variable */
+                    pC->VideoState = M4MCS_kStreamState_STARTED;
+
+                    /**
+                    * Reset the stream reader */
+                    err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
+                         (M4_StreamHandler*)pC->pReaderVideoStream);
+                    if (M4NO_ERROR != err)
+                    {
+                        M4OSA_TRACE1_1("M4MCS_open_normalMode():\
+                             m_pReader->m_pFctReset(video) returns 0x%x", err);
+                        return err;
+                    }
+
+                    /**
+                    * Initializes an access Unit */
+                    err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext, pStreamHandler,
+                         &pC->ReaderVideoAU);
+                    if (M4NO_ERROR != err)
+                    {
+                        M4OSA_TRACE1_1("M4MCS_open_normalMode():\
+                             m_pReader->m_pFctFillAuStruct(video) returns 0x%x", err);
+                        return err;
+                    }
+                }
+                else /**< Not H263 or MPEG-4 (H264, etc.) */
+                {
+                    M4OSA_TRACE1_1("M4MCS_open_normalMode():\
+                         Found an unsupported video stream (0x%x) in input 3gpp clip",
+                             pStreamHandler->m_streamType);
+
+                    pC->bUnsupportedVideoFound = M4OSA_TRUE;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+                }
+            }
+            /**
+            * Found the first audio stream */
+            else if ((M4READER_kMediaFamilyAudio == mediaFamily)
+                && (M4OSA_NULL == pC->pReaderAudioStream))
+            {
+                if ((M4DA_StreamTypeAudioAmrNarrowBand==pStreamHandler->m_streamType) ||
+                    (M4DA_StreamTypeAudioAac==pStreamHandler->m_streamType) ||
+                    (M4DA_StreamTypeAudioMp3==pStreamHandler->m_streamType) ||
+                    (M4DA_StreamTypeAudioEvrc==pStreamHandler->m_streamType) )
+                {
+                    M4OSA_TRACE3_0("M4MCS_open_normalMode(): Found an AMR-NB, AAC, \
+                        MP3 or EVRC audio stream in input clip");
+
+                    /**
+                    * Keep pointer to the audio stream */
+                    pC->pReaderAudioStream = (M4_AudioStreamHandler*)pStreamHandler;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+                    pC->bUnsupportedAudioFound = M4OSA_FALSE;
+
+                    /**
+                    * Init our audio stream state variable */
+                    pC->AudioState = M4MCS_kStreamState_STARTED;
+
+                    /**
+                    * Reset the stream reader */
+                    err = pC->m_pReader->m_pFctReset(pC->pReaderContext,
+                         (M4_StreamHandler*)pC->pReaderAudioStream);
+                    if (M4NO_ERROR != err)
+                    {
+                        M4OSA_TRACE1_1("M4MCS_open_normalMode():\
+                             m_pReader->m_pFctReset(audio) returns 0x%x", err);
+                        return err;
+                    }
+
+                    /**
+                    * Initializes an access Unit */
+                    err = pC->m_pReader->m_pFctFillAuStruct(pC->pReaderContext, pStreamHandler,
+                         &pC->ReaderAudioAU);
+                    if (M4NO_ERROR != err)
+                    {
+                        M4OSA_TRACE1_1("M4MCS_open_normalMode(): \
+                            m_pReader->m_pFctFillAuStruct(audio) returns 0x%x", err);
+                        return err;
+                    }
+
+                    /**
+                    * Output max AU size is equal to input max AU size (this value
+                    * will be changed if there is audio transcoding) */
+                    pC->uiAudioMaxAuSize = pStreamHandler->m_maxAUSize;
+
+                }
+                else
+                {
+                    /**< Not AMR-NB, AAC, MP3 nor EVRC (AMR-WB, WAV...) */
+                    M4OSA_TRACE1_1("M4MCS_open_normalMode(): Found an unsupported audio stream\
+                         (0x%x) in input 3gpp clip", pStreamHandler->m_streamType);
+
+                    pC->bUnsupportedAudioFound = M4OSA_TRUE;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+                }
+            }
+        }
+    } /**< end of while (M4NO_ERROR == err) */
+
+    /**
+    * Check we found at least one supported stream */
+    if((M4OSA_NULL == pC->pReaderVideoStream) && (M4OSA_NULL == pC->pReaderAudioStream))
+    {
+        M4OSA_TRACE1_0("M4MCS_open_normalMode(): returning \
+            M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM");
+        return M4MCS_ERR_INPUT_FILE_CONTAINS_NO_SUPPORTED_STREAM;
+    }
+
+#ifndef M4VSS_ENABLE_EXTERNAL_DECODERS
+    if(pC->VideoState == M4MCS_kStreamState_STARTED)
+    {
+        err = M4MCS_setCurrentVideoDecoder(pContext,
+            pC->pReaderVideoStream->m_basicProperties.m_streamType);
+        M4ERR_CHECK_RETURN(err);
+    }
+#endif
+
+    if(pC->AudioState == M4MCS_kStreamState_STARTED)
+    {
+        //EVRC
+        if(M4DA_StreamTypeAudioEvrc != pStreamHandler->m_streamType)
+         /* decoder not supported yet, but allow to do null encoding */
+        {
+            err = M4MCS_setCurrentAudioDecoder(pContext,
+                 pC->pReaderAudioStream->m_basicProperties.m_streamType);
+            M4ERR_CHECK_RETURN(err);
+        }
+    }
+
+    /**
+    * Get the audio and video stream properties */
+    err = M4MCS_intGetInputClipProperties(pC);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4MCS_open_normalMode():\
+             M4MCS_intGetInputClipProperties returns 0x%x", err);
+        return err;
+    }
+
+    /**
+    * Set the begin cut decoding increment according to the input frame rate */
+    if (0. != pC->InputFileProperties.fAverageFrameRate) /**< sanity check */
+    {
+        pC->iVideoBeginDecIncr = (M4OSA_Int32)(3000. \
+            / pC->InputFileProperties.fAverageFrameRate); /**< about 3 frames */
+    }
+    else
+    {
+        pC->iVideoBeginDecIncr = 200; /**< default value: 200 milliseconds (3 frames @ 15fps)*/
+    }
+
+    /**
+    * Update state automaton */
+    pC->State = M4MCS_kState_OPENED;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4MCS_open_normalMode(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
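+
+/* Illustrative usage sketch (not part of the original code, error handling mostly elided):
+ * a caller would typically pair M4MCS_open_normalMode() with the other MCS API calls roughly
+ * as below. The file names and the surrounding init/step/close sequence are assumptions made
+ * for illustration only. */
+#if 0
+static M4OSA_ERR exampleTranscodeNormalMode( M4OSA_FileReadPointer *pFileReadPtr,
+                                             M4OSA_FileWriterPointer *pFileWritePtr )
+{
+    M4MCS_Context ctxt = M4OSA_NULL;
+    M4OSA_ERR err = M4MCS_init(&ctxt, pFileReadPtr, pFileWritePtr);
+
+    if( M4NO_ERROR == err )
+    {
+        err = M4MCS_open_normalMode(ctxt, (M4OSA_Void *)"/sdcard/in.3gp",
+            M4VIDEOEDITING_kFileType_3GPP,
+            (M4OSA_Void *)"/sdcard/out.3gp", (M4OSA_Void *)"/sdcard/moov.bin");
+    }
+
+    /* ... M4MCS_setOutputParams(), M4MCS_step() loop, M4MCS_close() ... */
+
+    M4MCS_abort(ctxt);
+    return err;
+}
+#endif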
+
+
+M4OSA_ERR M4MCS_registerExternalVideoDecoder( M4MCS_Context pContext,
+                                             M4VD_VideoType decoderType,
+                                             M4VD_Interface *pDecoderInterface,
+                                             M4OSA_Void *pUserData )
+{
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4DECODER_VideoInterface *shellInterface;
+    M4DECODER_VideoType nativeType;
+    M4DECODER_EXTERNAL_UserDataType shellUserData;
+
+    switch( decoderType )
+    {
+        case M4VD_kMpeg4VideoDec:
+        case M4VD_kH263VideoDec:
+            nativeType = M4DECODER_kVideoTypeMPEG4;
+            break;
+
+        case M4VD_kH264VideoDec:
+            nativeType = M4DECODER_kVideoTypeAVC;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4MCS_registerExternalVideoDecoder: unknown decoderType %d",
+                decoderType);
+            return M4ERR_PARAMETER;
+            break;
+    }
+
+    shellUserData =
+        (M4DECODER_EXTERNAL_UserDataType)M4OSA_malloc(sizeof(*shellUserData),
+        M4MCS,
+        (M4OSA_Char *)"userData structure for the external shell decoder");
+
+    if( M4OSA_NULL == shellUserData )
+    {
+        M4OSA_TRACE1_0(
+            "M4MCS_registerExternalVideoDecoder:\
+                 failed to allocate userData structure for the external shell decoder");
+        return M4ERR_ALLOC;
+    }
+
+    shellUserData->externalFuncs = pDecoderInterface;
+    shellUserData->externalUserData = pUserData;
+
+    err = M4DECODER_EXTERNAL_getInterface(&shellInterface);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_registerExternalVideoDecoder:\
+                 M4DECODER_EXTERNAL_getInterface failed with error 0x%08X",
+            err);
+        M4OSA_free((M4OSA_MemAddr32)shellUserData);
+        return err;
+    }
+
+    err = M4MCS_registerVideoDecoder(pContext, nativeType, shellInterface);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_registerExternalVideoDecoder:\
+                 M4MCS_registerVideoDecoder failed with error 0x%08X",
+            err);
+        M4OSA_free((M4OSA_MemAddr32)shellInterface);
+        M4OSA_free((M4OSA_MemAddr32)shellUserData);
+        return err;
+    }
+
+    ((M4MCS_InternalContext *)pContext)->m_pVideoDecoderUserDataTable[nativeType] =
+        shellUserData;
+
+    return M4NO_ERROR;
+#else
+
+    return M4ERR_NOT_IMPLEMENTED;
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+}
+
+M4OSA_ERR M4MCS_registerExternalVideoEncoder( M4MCS_Context pContext,
+                                             M4VE_EncoderType encoderType,
+                                             M4VE_Interface *pEncoderInterface,
+                                             M4OSA_Void *pUserData )
+{
+#ifdef M4VSS_ENABLE_EXTERNAL_ENCODERS
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4ENCODER_GlobalInterface *shellInterface;
+    M4ENCODER_Format nativeType;
+
+    switch( encoderType )
+    {
+        case M4VE_kH263VideoEnc:
+            err = M4EGE_H263_getInterfaces(&nativeType, &shellInterface,
+                M4ENCODER_OPEN_ADVANCED);
+
+            break;
+
+        case M4VE_kMpeg4VideoEnc:
+            err = M4EGE_MPEG4_getInterfaces(&nativeType, &shellInterface,
+                M4ENCODER_OPEN_ADVANCED);
+            break;
+
+        case M4VE_kH264VideoEnc:
+            M4OSA_TRACE1_0(
+                "M4MCS_registerExternalVideoEncoder: H264 encoder type not implemented yet");
+            return M4ERR_NOT_IMPLEMENTED;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4MCS_registerExternalVideoEncoder: unknown encoderType %d",
+                encoderType);
+            return M4ERR_PARAMETER;
+            break;
+    }
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_registerExternalVideoEncoder: M4EGE_getInterfaces failed with error 0x%08X",
+            err);
+        return err;
+    }
+
+    err = M4MCS_registerVideoEncoder(pContext, nativeType, shellInterface);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_registerExternalVideoEncoder:\
+                 M4MCS_registerVideoEncoder failed with error 0x%08X",
+            err);
+        M4OSA_free((M4OSA_MemAddr32)shellInterface);
+        return err;
+    }
+
+    ((M4MCS_InternalContext *)pContext)->pVideoEncoderExternalAPITable[nativeType] =
+        pEncoderInterface;
+    ((M4MCS_InternalContext *)pContext)->pVideoEncoderUserDataTable[nativeType] =
+        pUserData;
+
+    return M4NO_ERROR;
+
+#else
+
+    return M4ERR_NOT_IMPLEMENTED;
+
+#endif
+
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_registerExternalAudioDecoder(M4MCS_Context pContext,
+ *                                    M4AD_Type decoderType,
+ *                                    M4AD_Interface *pDecoderInterface);
+ * @brief    This function will register a specific external audio decoder.
+ * @note    According to the decoderType, this function will store in the internal context the
+ *                decoder interface.
+ * @param    pContext            (IN/OUT) MCS context.
+ * @param    decoderType            (IN) Audio decoder type
+ * @param    pDecoderInterface    (IN) Audio decoder interface.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null, or the decoder type is invalid
+ *                                (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_registerExternalAudioDecoder( M4MCS_Context pContext,
+                                             M4AD_Type decoderType,
+                                             M4AD_Interface *pDecoderInterface )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)pContext;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "M4MCS_registerExternalAudioDecoder: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
+        "M4MCS_registerExternalAudioDecoder: invalid pointer on decoder interface");
+
+    if( M4MCS_kState_CREATED != pC->State )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_registerExternalAudioDecoder(): Wrong State (%d), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    if( decoderType >= M4AD_kType_NB )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "M4MCS_registerExternalAudioDecoder: Invalid audio decoder type");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->m_pAudioDecoderFlagTable[decoderType] == M4OSA_TRUE
+        && pC->m_pAudioDecoderItTable[decoderType] != M4OSA_NULL )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_registerExternalAudioDecoder: error parameter: an external decoder of type\
+                 %i is already registered",
+            decoderType);
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->m_pAudioDecoderItTable[decoderType] != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pAudioDecoderItTable[decoderType]);
+        pC->m_pAudioDecoderItTable[decoderType] = M4OSA_NULL;
+    }
+
+    pC->m_pAudioDecoderItTable[decoderType] = pDecoderInterface;
+    pC->m_pAudioDecoderFlagTable[decoderType] =
+        M4OSA_TRUE; /* external decoder */
+
+    return M4NO_ERROR;
+}
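+
+/* Illustrative sketch (assumption, not from the original code): registration must happen while
+ * the MCS is still in the CREATED state, i.e. after M4MCS_init() and before M4MCS_open(), e.g.
+ *     err = M4MCS_registerExternalAudioDecoder(ctxt, M4AD_kTypeAAC, pMyAacDecoderInterface);
+ * where pMyAacDecoderInterface is a hypothetical, caller-provided M4AD_Interface pointer. */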
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4MCS_registerExternalAudioEncoder(M4MCS_Context pContext,
+ *                                             M4ENCODER_AudioFormat mediaType,
+ *                                             M4ENCODER_AudioGlobalInterface *pEncGlobalInterface)
+ * @brief    This function will register a specific external audio encoder.
+ * @note    According to the MediaType, this function will store the encoder interface in the
+ *             internal context.
+ * @param    pContext:                (IN) Execution context.
+ * @param    MediaType:                (IN) The media type.
+ * @param    pEncGlobalInterface:    (IN) The encoder interface functions.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext or pEncGlobalInterface is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerExternalAudioEncoder( M4MCS_Context pContext,
+                                             M4ENCODER_AudioFormat MediaType,
+                                             M4ENCODER_AudioGlobalInterface *pEncGlobalInterface )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)pContext;
+
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((pC == M4OSA_NULL), M4ERR_PARAMETER,
+        "MCS: context is M4OSA_NULL in M4MCS_registerExternalAudioEncoder");
+    M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL), M4ERR_PARAMETER,
+        "pEncGlobalInterface is M4OSA_NULL in M4MCS_registerExternalAudioEncoder");
+
+    M4OSA_TRACE3_2(
+        "MCS: M4MCS_registerExternalAudioEncoder called with pContext=0x%x, \
+        pEncGlobalInterface=0x%x",
+        pC, pEncGlobalInterface);
+
+    if( M4MCS_kState_CREATED != pC->State )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_registerExternalAudioEncoder(): Wrong State (%d), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    if( MediaType >= M4ENCODER_kAudio_NB )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "M4MCS_registerExternalAudioEncoder(): Invalid audio encoder type");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->pAudioEncoderFlag[MediaType] == M4OSA_TRUE
+        && pC->pAudioEncoderInterface[MediaType] != M4OSA_NULL )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_registerExternalAudioEncoder: error parameter:\
+             an external encoder of type %i is already registered",
+            MediaType);
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->pAudioEncoderInterface[MediaType] != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pAudioEncoderInterface[MediaType]);
+        pC->pAudioEncoderInterface[MediaType] = M4OSA_NULL;
+    }
+
+    /*
+    * Save encoder interface in context */
+    pC->pAudioEncoderInterface[MediaType] = pEncGlobalInterface;
+    pC->pAudioEncoderFlag[MediaType] = M4OSA_TRUE; /* external encoder */
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_getExifInfo(M4MCS_Context pContext, M4MCS_ExifInfos *exifTags);
+ * @brief    Retrieve the EXIF tag information from a still picture
+ * @note    This function fills an EXIF tag structure.
+ *            The exifTags structure must be allocated/deallocated by the user.
+ *            The exifTags members point to internal SPE information; the user should not try
+ *             to modify or deallocate them.
+ * @param    pContext            (IN) MCS context
+ * @param    exifTags            (IN/OUT) Structure to be filled with the EXIF tag information
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_getExifInfo( M4MCS_Context pContext, M4MCS_ExifInfos *exifTags )
+{
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)(pContext);
+    M4OSA_ERR err;
+
+    M4OSA_TRACE2_1("M4MCS_getExifInfo called with pContext=0x%x", pContext);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4MCS_getExifInfo: pContext is M4OSA_NULL");
+
+#ifdef M4MCS_SUPPORT_STILL_PICTURE
+
+    if( pC->m_bIsStillPicture )
+    {
+        /**
+        * Call the corresponding still picture MCS function*/
+        return M4MCS_stillPicGetExifInfo(pC, exifTags);
+    }
+
+#endif /*M4MCS_SUPPORT_STILL_PICTURE*/
+
+    return M4ERR_NOT_IMPLEMENTED;
+}
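+
+/* Illustrative sketch (assumption, not from the original code): the caller owns the
+ * M4MCS_ExifInfos structure itself, e.g.
+ *     M4MCS_ExifInfos exifTags;
+ *     err = M4MCS_getExifInfo(ctxt, &exifTags);
+ * but must not modify or free the internal SPE data its members point to. */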
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerVideoDecoderExtended(M4MCS_Context context,
+ *                                              M4VD_VideoType decoderType,
+ *                                              M4OSA_Context pVidDecoderInterface,
+ *                                              M4OSA_Void* pUserData)
+ * @brief    Registers an external Video decoder
+ * @note    This differs from the regular external video decoder registration, in order to cope
+ *          with the specific requirements of the OMX codec implementation, so
+ *          M4DECODER_VideoInterface is used instead of M4VD_Interface.
+ * @param  context            (IN) MCS context
+ * @param  decoderType        (IN) Type of decoder (MPEG4 ...)
+ * @param  pVidDecoderInterface  (IN) Decoder interface of type 'M4DECODER_VideoInterface'
+ * @param  pUserData          (IN) Pointer on a user data to give to external decoder
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerVideoDecoderExtended( M4MCS_Context context,
+                                             M4VD_VideoType decoderType,
+                                             M4OSA_Context pVidDecoderInterface,
+                                             M4OSA_Void *pUserData )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4DECODER_VideoType nativeType;
+    M4DECODER_EXTERNAL_UserDataType shellUserData;
+    M4DECODER_VideoInterface *pDecoderInterface =
+        (M4DECODER_VideoInterface *)pVidDecoderInterface;
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)context;
+    M4OSA_Bool bResetCurrentVideoDecoder = M4OSA_FALSE;
+    M4_StreamType mediaType = M4DA_StreamTypeUnknown;
+
+    M4OSA_TRACE3_1(
+        "M4MCS_registerVideoDecoderExtended invoked with context = 0x%x",
+        context);
+
+    switch( decoderType )
+    {
+        case M4VD_kMpeg4VideoDec:
+        case M4VD_kH263VideoDec:
+            nativeType = M4DECODER_kVideoTypeMPEG4;
+            mediaType = M4DA_StreamTypeVideoMpeg4;
+            break;
+
+        case M4VD_kH264VideoDec:
+            nativeType = M4DECODER_kVideoTypeAVC;
+            mediaType = M4DA_StreamTypeVideoMpeg4Avc;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4MCS_registerVideoDecoderExtended: unknown decoderType %d",
+                decoderType);
+            return M4ERR_PARAMETER;
+    }
+
+    if( M4OSA_NULL != pC->m_pVideoDecoder )
+    {
+        M4OSA_TRACE3_0(
+            "M4MCS_registerVideoDecoderExtended: pC->m_pVideoDecoder already set to \
+            previous registered dec shell");
+
+        if( ( ( ( pC->pReaderVideoStream->m_basicProperties.m_streamType
+            == M4DA_StreamTypeVideoH263)
+            || (pC->pReaderVideoStream->m_basicProperties.m_streamType
+            == M4DA_StreamTypeVideoMpeg4))
+            && (mediaType == M4DA_StreamTypeVideoMpeg4))
+            || (( pC->pReaderVideoStream->m_basicProperties.m_streamType
+            == M4DA_StreamTypeVideoMpeg4Avc)
+            && (mediaType == M4DA_StreamTypeVideoMpeg4Avc)) )
+            bResetCurrentVideoDecoder = M4OSA_TRUE;
+    }
+
+    err = M4MCS_registerVideoDecoder(context, nativeType, pDecoderInterface);
+
+    /** Provide the application user data back to the interface functions.
+     * For now we do not wrap it in 'M4DECODER_EXTERNAL_UserDataType'. */
+    ( (M4MCS_InternalContext
+        *)context)->m_pVideoDecoderUserDataTable[nativeType] = pUserData;
+
+    if( ( M4NO_ERROR == err) && (M4OSA_TRUE == bResetCurrentVideoDecoder) )
+    {
+        err = M4MCS_setCurrentVideoDecoder(context, mediaType);
+        M4OSA_TRACE3_1(
+            "M4MCS_registerVideoDecoderExtended: M4MCS_setCurrentVideoDecoder returned 0x%x",
+            err);
+    }
+    M4OSA_TRACE1_1(
+        "M4MCS_registerVideoDecoderExtended returning with error  = 0x%x", err);
+    return err;
+}
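+
+/**
+ * Illustrative usage sketch (documentation only, excluded from the build):
+ * how a caller might plug an OMX-based H.264 decoder shell into the MCS.
+ * 'pOmxVideoDecInterface' and 'pOmxCoreCtxt' are hypothetical placeholders
+ * for the interface and user data provided by an external OMX shell.
+ */
+#if 0
+{
+    M4OSA_ERR err;
+
+    err = M4MCS_registerVideoDecoderExtended(pContext, M4VD_kH264VideoDec,
+                                             (M4OSA_Context)pOmxVideoDecInterface,
+                                             (M4OSA_Void *)pOmxCoreCtxt);
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1("registerVideoDecoderExtended failed, err=0x%x", err);
+    }
+}
+#endif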
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerVideoEncoderExtended(M4MCS_Context pContext,
+ *                                              M4VE_EncoderType encoderType,
+ *                                              M4OSA_Context pEncoderInterface,
+ *                                              M4OSA_Void* pUserData)
+ * @brief   Registers an external video encoder
+ * @note    This differs from the standard external video encoder registration
+ *          in order to meet the specific requirements of the OMX codec
+ *          implementation, so M4ENCODER_GlobalInterface is used internally
+ *          instead of M4VE_Interface.
+ * @param  pContext           (IN) MCS context
+ * @param  encoderType        (IN) Type of encoder (MPEG4 ...)
+ * @param  pEncoderInterface  (IN) Encoder interface of type 'M4ENCODER_GlobalInterface'
+ * @param  pUserData          (IN) Pointer to user data passed to the external encoder
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerVideoEncoderExtended( M4MCS_Context pContext,
+                                             M4VE_EncoderType encoderType,
+                                             M4OSA_Context pEncoderInterface,
+                                             M4OSA_Void *pUserData )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4ENCODER_Format nativeType;
+    M4ENCODER_GlobalInterface *pEncShellInterface =
+        (M4ENCODER_GlobalInterface *)pEncoderInterface;
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)pContext;
+    M4OSA_Bool bResetCurrentVideoEncoder = M4OSA_FALSE;
+    M4VIDEOEDITING_VideoFormat mediaType = M4VIDEOEDITING_kNoneVideo;
+
+    M4OSA_TRACE3_1(
+        "M4MCS_registerVideoEncoderExtended invoked with context = 0x%x",
+        pContext);
+
+    switch( encoderType )
+    {
+        case M4VE_kMpeg4VideoEnc:
+            nativeType = M4ENCODER_kMPEG4;
+            mediaType = M4VIDEOEDITING_kMPEG4;
+            break;
+
+        case M4VE_kH263VideoEnc:
+            nativeType = M4ENCODER_kH263;
+            mediaType = M4VIDEOEDITING_kH263;
+            break;
+
+        case M4VE_kH264VideoEnc:
+            nativeType = M4ENCODER_kH264;
+            mediaType = M4VIDEOEDITING_kH264;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4MCS_registerVideoEncoderExtended: unknown encoderType %d",
+                encoderType);
+            return M4ERR_PARAMETER;
+    }
+
+    if( M4OSA_NULL != pC->pVideoEncoderGlobalFcts )
+    {
+        M4OSA_TRACE3_0(
+            "M4MCS_registerVideoEncoderExtended:\
+                 pC->pVideoEncoderGlobalFcts already set to previous registered Enc shell");
+
+        if( pC->EncodingVideoFormat == nativeType )
+            bResetCurrentVideoEncoder = M4OSA_TRUE;
+    }
+
+    err = M4MCS_registerVideoEncoder(pContext, nativeType, pEncShellInterface);
+
+    ( (M4MCS_InternalContext
+        *)pContext)->pVideoEncoderExternalAPITable[nativeType]
+    = pEncoderInterface;
+    ( (M4MCS_InternalContext
+        *)pContext)->pVideoEncoderUserDataTable[nativeType] = pUserData;
+
+    if( ( M4NO_ERROR == err) && (M4OSA_TRUE == bResetCurrentVideoEncoder) )
+    {
+        err = M4MCS_setCurrentVideoEncoder(pContext, mediaType);
+        M4OSA_TRACE3_1(
+            "M4MCS_registerVideoEncoderExtended: M4MCS_setCurrentVideoEncoder returned 0x%x",
+            err);
+    }
+    M4OSA_TRACE1_1(
+        "M4MCS_registerVideoEncoderExtended returning with error  = 0x%x", err);
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerAudioEncoderExtended(M4MCS_Context pContext,
+ *                                              M4ENCODER_AudioFormat encoderType,
+ *                                              M4ENCODER_AudioGlobalInterface *pEncoderInterface,
+ *                                              M4OSA_Void* pUserData)
+ * @brief   Registers an external audio encoder
+ * @note    This differs from the standard external audio encoder registration
+ *          in order to meet the specific requirements of the OMX codec
+ *          implementation.
+ * @param  pContext           (IN) MCS context
+ * @param  encoderType        (IN) Type of encoder
+ * @param  pEncoderInterface  (IN) Encoder interface to the OMX shell functions
+ * @param  pUserData          (IN) Pointer to user data passed to the external encoder
+ *                                 (OMX core context)
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerAudioEncoderExtended( M4MCS_Context pContext,
+                                             M4ENCODER_AudioFormat encoderType,
+                                             M4ENCODER_AudioGlobalInterface *pEncoderInterface,
+                                             M4OSA_Void *pUserData )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)pContext;
+    M4OSA_Bool bResetCurrentAudioEncoder = M4OSA_FALSE;
+    M4VIDEOEDITING_AudioFormat mediaType = M4VIDEOEDITING_kNoneAudio;
+
+    switch( encoderType )
+    {
+        case M4ENCODER_kAMRNB:
+            mediaType = M4VIDEOEDITING_kAMR_NB;
+            break;
+
+        case M4ENCODER_kAAC:
+            mediaType = M4VIDEOEDITING_kAAC;
+            break;
+
+        case M4ENCODER_MP3:
+            mediaType = M4VIDEOEDITING_kMP3;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4MCS_registerAudioEncoderExtended: unknown encoderType %d",
+                encoderType);
+            return M4ERR_PARAMETER;
+    }
+
+    if( M4OSA_NULL != pC->pAudioEncoderGlobalFcts )
+    {
+        M4OSA_TRACE3_0(
+            "M4MCS_registerAudioEncoderExtended: pC->pAudioEncoderGlobalFcts already set to \
+                previous registered Enc shell");
+
+        if( pC->AudioEncParams.Format == encoderType )
+            bResetCurrentAudioEncoder = M4OSA_TRUE;
+    }
+
+    err = M4MCS_registerAudioEncoder(pContext, encoderType, pEncoderInterface);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_registerAudioEncoderExtended: \
+                M4MCS_registerAudioEncoder failed with error 0x%08X",
+            err);
+        M4OSA_free((M4OSA_MemAddr32)pEncoderInterface);
+        return err;
+    }
+
+    ( (M4MCS_InternalContext
+        *)pContext)->pAudioEncoderInterface[encoderType] = pEncoderInterface;
+    ( (M4MCS_InternalContext
+        *)pContext)->pAudioEncoderUserDataTable[encoderType] = pUserData;
+
+    if( ( M4NO_ERROR == err) && (M4OSA_TRUE == bResetCurrentAudioEncoder) )
+    {
+        err = M4MCS_setCurrentAudioEncoder(pContext, mediaType);
+        M4OSA_TRACE3_1(
+            "M4MCS_registerAudioEncoderExtended: M4MCS_setCurrentAudioEncoder returned 0x%x",
+            err);
+    }
+    return err;
+}
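+
+/**
+ * Illustrative usage sketch (documentation only, excluded from the build):
+ * registering an external OMX AAC encoder shell. 'pOmxAacEncInterface' and
+ * 'pOmxCoreCtxt' are hypothetical placeholders for the
+ * M4ENCODER_AudioGlobalInterface and user data returned by the external shell.
+ */
+#if 0
+{
+    M4OSA_ERR err;
+
+    err = M4MCS_registerAudioEncoderExtended(pContext, M4ENCODER_kAAC,
+                                             pOmxAacEncInterface,
+                                             (M4OSA_Void *)pOmxCoreCtxt);
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1("registerAudioEncoderExtended failed, err=0x%x", err);
+    }
+}
+#endif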
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_registerAudioDecoderExtended(M4MCS_Context pContext,
+ *                                              M4AD_Type decoderType,
+ *                                              M4AD_Interface *pDecoderInterface,
+ *                                              M4OSA_Void* pUserData)
+ * @brief   Registers an external audio decoder
+ * @note    This differs from the standard external audio decoder registration
+ *          in order to meet the specific requirements of the OMX codec
+ *          implementation.
+ * @param  pContext           (IN) MCS context
+ * @param  decoderType        (IN) Type of decoder
+ * @param  pDecoderInterface  (IN) Decoder interface to the OMX shell functions
+ * @param  pUserData          (IN) Pointer to user data passed to the external decoder
+ *                                 (OMX core context)
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:        MCS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_registerAudioDecoderExtended( M4MCS_Context pContext,
+                                             M4AD_Type decoderType,
+                                             M4AD_Interface *pDecoderInterface,
+                                             M4OSA_Void *pUserData )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext *)pContext;
+    M4OSA_Bool bResetCurrentAudioDecoder = M4OSA_FALSE;
+    M4_StreamType mediaType = M4DA_StreamTypeUnknown;
+
+    switch( decoderType )
+    {
+        case M4AD_kTypeAMRNB:
+            mediaType = M4DA_StreamTypeAudioAmrNarrowBand;
+            break;
+
+        case M4AD_kTypeAAC:
+            mediaType = M4DA_StreamTypeAudioAac;
+            break;
+
+        case M4AD_kTypeMP3:
+            mediaType = M4DA_StreamTypeAudioMp3;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4MCS_registerAudioDecoderExtended: unknown decoder type %d",
+                decoderType);
+            return M4ERR_PARAMETER;
+    }
+
+    if( M4OSA_NULL != pC->m_pAudioDecoder )
+    {
+        M4OSA_TRACE3_0(
+            "M4MCS_registerAudioDecoderExtended:\
+                 pC->m_pAudioDecoder already set to previous registered Dec shell");
+
+        if( pC->pReaderAudioStream->m_basicProperties.m_streamType
+            == mediaType )
+            bResetCurrentAudioDecoder = M4OSA_TRUE;
+
+        /* The audio decoder may have been created to get the input clip
+         * properties. In that case, the previous audio decoder context must
+         * be destroyed before the new decoder shell is registered. */
+        if( M4OSA_NULL != pC->pAudioDecCtxt )
+        {
+            err = pC->m_pAudioDecoder->m_pFctDestroyAudioDec(pC->pAudioDecCtxt);
+            pC->pAudioDecCtxt = M4OSA_NULL;
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4MCS_registerAudioDecoderExtended:\
+                         m_pAudioDecoder->m_pFctDestroyAudioDec returns 0x%x",
+                    err);
+            }
+        }
+    }
+
+    err = M4MCS_registerAudioDecoder(pContext, decoderType, pDecoderInterface);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4MCS_registerAudioDecoderExtended:\
+                 M4MCS_registerAudioDecoder failed with error 0x%08X",
+            err);
+        M4OSA_free((M4OSA_MemAddr32)pDecoderInterface);
+        return err;
+    }
+
+    ( (M4MCS_InternalContext
+        *)pContext)->m_pAudioDecoderItTable[decoderType] = pDecoderInterface;
+    ( (M4MCS_InternalContext
+        *)pContext)->m_pAudioDecoderUserDataTable[decoderType] = pUserData;
+
+    ( (M4MCS_InternalContext *)pContext)->bExtOMXAudDecoder = M4OSA_TRUE;
+
+    if( ( M4NO_ERROR == err) && (M4OSA_TRUE == bResetCurrentAudioDecoder) )
+    {
+        err = M4MCS_setCurrentAudioDecoder(pContext, mediaType);
+        M4OSA_TRACE3_1(
+            "M4MCS_registerAudioDecoderExtended: M4MCS_setCurrentAudioDecoder returned 0x%x",
+            err);
+    }
+    return err;
+}
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_AudioEffects.c b/libvideoeditor/vss/mcs/src/M4MCS_AudioEffects.c
new file mode 100755
index 0000000..f2e0373
--- /dev/null
+++ b/libvideoeditor/vss/mcs/src/M4MCS_AudioEffects.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file   M4MCS_AudioEffects.c
+ * @brief  MCS implementation (Video Compressor Service)
+ * @note   This file implements the MCS audio effects (fade-in and fade-out)
+ *************************************************************************
+ **/
+
+/****************/
+/*** Includes ***/
+/****************/
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h"   /**< OSAL memory management */
+#include "M4OSA_Debug.h"    /**< OSAL debug management */
+
+/* Our headers */
+#include "M4MCS_API.h"
+#include "M4MCS_ErrorCodes.h"
+#include "M4MCS_InternalTypes.h"
+#include "M4MCS_InternalConfig.h"
+#include "M4MCS_InternalFunctions.h"
+
+/* Common headers (for aac) */
+#include "M4_Common.h"
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#include "M4VD_EXTERNAL_Interface.h"
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intCheckAudioEffects(M4MCS_InternalContext* pC)
+ * @brief   Check whether an audio effect has to be applied at the current time
+ * @note    It is called by the step-encoding function
+ * @param   pC          (IN) MCS internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_intCheckAudioEffects(M4MCS_InternalContext* pC)
+{
+    M4OSA_Int8 *pActiveEffectNumber = &(pC->pActiveEffectNumber);
+
+    *pActiveEffectNumber = -1;
+
+    if(pC->ReaderAudioAU.m_CTS > pC->uiBeginCutTime
+    && pC->ReaderAudioAU.m_CTS < pC->uiEndCutTime)
+    {
+        M4OSA_UInt32 outputRelatedTime = 0;
+        M4OSA_UInt8 uiEffectIndex = 0;
+        outputRelatedTime =
+        (M4OSA_UInt32)(pC->ReaderAudioAU.m_CTS  - pC->uiBeginCutTime + 0.5);
+
+        for(uiEffectIndex=0; uiEffectIndex<pC->nbEffects; uiEffectIndex++)
+        {
+            if ((outputRelatedTime >=
+                (M4OSA_UInt32)(pC->pEffects[uiEffectIndex].uiStartTime)) &&
+                (outputRelatedTime <
+                (M4OSA_UInt32)(pC->pEffects[uiEffectIndex].uiStartTime +\
+                pC->pEffects[uiEffectIndex].uiDuration)))
+            {
+                *pActiveEffectNumber = uiEffectIndex;
+                uiEffectIndex = pC->nbEffects;
+            }
+        }
+    }
+
+    return M4NO_ERROR;
+}
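+
+/**
+ * Worked example of the window test above (illustrative values only): with
+ * uiBeginCutTime = 2000 ms and an audio AU at CTS = 3500 ms, outputRelatedTime
+ * is 1500 ms after the integer cast. An effect declared with
+ * uiStartTime = 1000 ms and uiDuration = 1000 ms covers [1000, 2000) ms of the
+ * output, so it is selected and *pActiveEffectNumber is set to its index.
+ */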
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_editAudioEffectFct_FadeIn()
+ * @brief   Apply the FadeIn audio effect to pPCMdata
+ * @param   pFunctionContext (IN)      Effect function context
+ * @param   pPCMdata         (IN/OUT)  Input and output PCM audio data
+ * @param   uiPCMsize        (IN)      Size of pPCMdata in bytes
+ * @param   pProgress        (IN)      Effect progress
+ * @return    M4NO_ERROR:             No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_editAudioEffectFct_FadeIn(  M4OSA_Void *pFunctionContext,
+                                            M4OSA_Int16 *pPCMdata,
+                                            M4OSA_UInt32 uiPCMsize,
+                                            M4MCS_ExternalProgress *pProgress)
+{
+    /* we will cast each Int16 sample into this Int32 variable */
+    M4OSA_Int32 i32sample;
+
+    /**
+     * Sanity check */
+    if(pProgress->uiProgress > 1000)
+    {
+        pProgress->uiProgress = 1000;
+    }
+
+    /**
+     * From buffer size (bytes) to number of samples (Int16): divide by two */
+    uiPCMsize >>= 1;
+
+    /**
+     * Loop on samples */
+    while (uiPCMsize-->0) /**< decrementing to optimize */
+    {
+        i32sample = *pPCMdata;
+        i32sample *= pProgress->uiProgress;
+        i32sample /= 1000;
+        *pPCMdata++ = (M4OSA_Int16)i32sample;
+    }
+
+    /**
+     *    Return */
+    M4OSA_TRACE3_0("M4MCS_editAudioEffectFct_FadeIn: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
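+
+/**
+ * Worked example of the linear gain above (illustrative values only): at 25%
+ * of the fade-in, pProgress->uiProgress = 250, so an input sample of 20000
+ * becomes 20000 * 250 / 1000 = 5000; at uiProgress = 1000 samples pass
+ * through unchanged.
+ */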
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_editAudioEffectFct_FadeOut()
+ * @brief   Apply the FadeOut audio effect to pPCMdata
+ * @param   pFunctionContext (IN)      Effect function context
+ * @param   pPCMdata         (IN/OUT)  Input and output PCM audio data
+ * @param   uiPCMsize        (IN)      Size of pPCMdata in bytes
+ * @param   pProgress        (IN)      Effect progress
+ * @return   M4NO_ERROR:             No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_editAudioEffectFct_FadeOut( M4OSA_Void *pFunctionContext,
+                                            M4OSA_Int16 *pPCMdata,
+                                            M4OSA_UInt32 uiPCMsize,
+                                            M4MCS_ExternalProgress *pProgress)
+{
+    /* we will cast each Int16 sample into this Int32 variable */
+    M4OSA_Int32 i32sample;
+
+    /**
+     * Sanity check */
+    if(pProgress->uiProgress > 1000)
+    {
+        pProgress->uiProgress = 1000;
+    }
+    pProgress->uiProgress = 1000 - pProgress->uiProgress;
+
+    /**
+     * From buffer size (bytes) to number of samples (Int16): divide by two */
+    uiPCMsize >>= 1;
+
+    /**
+     * Loop on samples */
+    while (uiPCMsize-->0) /**< decrementing to optimize */
+    {
+        i32sample = *pPCMdata;
+        i32sample *= pProgress->uiProgress;
+        i32sample /= 1000;
+        *pPCMdata++ = (M4OSA_Int16)i32sample;
+    }
+
+    /**
+     *    Return */
+    M4OSA_TRACE3_0("M4MCS_editAudioEffectFct_FadeOut: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_BitstreamParser.c b/libvideoeditor/vss/mcs/src/M4MCS_BitstreamParser.c
new file mode 100755
index 0000000..182e476
--- /dev/null
+++ b/libvideoeditor/vss/mcs/src/M4MCS_BitstreamParser.c
@@ -0,0 +1,944 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file   M4MCS_BitstreamParser.c
+ * @brief  MCS implementation (Video Compressor Service)
+ * @note   This file implements the VOL parsing and timescale 'on the fly' modification
+ *************************************************************************
+ **/
+
+/**
+ ********************************************************************
+ * Includes
+ ********************************************************************
+ */
+
+/* Core headers */
+#include "M4MCS_API.h"
+#include "M4MCS_InternalTypes.h"
+#include "M4VD_Tools.h"
+
+
+#ifdef TIMESCALE_BUG
+
+/*typedef struct
+{
+    M4OSA_UInt32 stream_byte;
+    M4OSA_UInt32 stream_index;
+    M4OSA_MemAddr8 in;
+
+} M4MCS_Bitstream_ctxt;*/
+typedef M4VS_Bitstream_ctxt M4MCS_Bitstream_ctxt;
+
+/*
+ ************************************************************************
+ * M4OSA_UInt32 M4MCS_GetBitsFromMemory( )
+ * @brief   Read nb_bits bits from the parsing context
+ *          (wrapper around M4VD_Tools_GetBitsFromMemory)
+ * @return  The value of the bits read
+ ************************************************************************
+ */
+static M4OSA_UInt32 M4MCS_GetBitsFromMemory(
+                                    M4MCS_Bitstream_ctxt* parsingCtxt,
+                                    M4OSA_UInt32 nb_bits)
+{
+    return(M4VD_Tools_GetBitsFromMemory((M4VS_Bitstream_ctxt*) parsingCtxt, nb_bits));
+}
+
+/**
+ ***********************************************************************
+ * M4OSA_ERR M4MCS_WriteBitsToMemory( )
+ * @brief   Write nb_bits bits to dest_bits at the given bit offset
+ *          (wrapper around M4VD_Tools_WriteBitsToMemory)
+ * @return  The error code returned by M4VD_Tools_WriteBitsToMemory
+ ***********************************************************************
+ */
+static M4OSA_ERR M4MCS_WriteBitsToMemory(   M4OSA_UInt32 bitsToWrite,
+                                            M4OSA_MemAddr32 dest_bits,
+                                            M4OSA_UInt8 offset,
+                                            M4OSA_UInt8 nb_bits)
+{
+    return (M4VD_Tools_WriteBitsToMemory(bitsToWrite,
+                                         dest_bits,
+                                         offset, nb_bits));
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_WriteByteToMemory( )
+ * @brief   Write one byte to the destination address
+ * @return  M4NO_ERROR
+ ************************************************************************
+ */
+static M4OSA_ERR M4MCS_WriteByteToMemory(   M4OSA_UInt8 BytesToWrite,
+                                            M4OSA_MemAddr8 dest_bytes)
+{
+    M4OSA_MemAddr8 addr = dest_bytes;
+
+    *addr = BytesToWrite;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_Void M4MCS_intCheckIndex( )
+ * @brief   Update the write bit index after 'a' bits have been written
+ * @note    This function supports writes of up to 15 bits. Depending on the
+ *          resulting bit offset, it advances the output byte pointer by one
+ *          or two bytes. It must be called after each write so that
+ *          successive, non-byte-aligned writes stay consistent.
+ * @return  Nothing
+ ************************************************************************
+ */
+static M4OSA_Void M4MCS_intCheckIndex(  M4OSA_UInt8 *index,
+                                        M4OSA_UInt32 a,
+                                        M4OSA_MemAddr8* in)
+{
+    M4OSA_UInt32 offset = a;
+
+    if(offset > 8 && offset <=16)
+    {
+        offset-=8;
+        (*in)++;
+    }
+    if((*index+offset) >= 8)
+    {
+        *index = (*index+offset)-8;
+        (*in)++;
+    }
+    else
+    {
+        *index += offset;
+    }
+}
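+
+/**
+ * Worked example of the index update above (illustrative values only): with
+ * *index = 6 and a = 5, the sum (11) is >= 8, so *index becomes 3 and the
+ * byte pointer advances by one; with a = 13 the offset is first reduced to 5
+ * and the pointer advanced once more before the same test is applied.
+ */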
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_intParseVideoDSI( )
+ * @brief :  This function parses video DSI and changes writer vop time increment resolution
+ * @note  :  It also calculates the number of bits on which the vop_time_increment is coded in
+ *           the input stream
+ * @return
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_intParseVideoDSI(M4MCS_InternalContext* pC)
+{
+    M4MCS_Bitstream_ctxt parsingCtxt;
+    M4OSA_UInt32 code,j;
+    M4OSA_MemAddr8 start, in;
+    M4OSA_UInt8 i;
+    M4OSA_UInt32 time_incr_length, new_time_incr_length;
+    M4OSA_UInt8 vol_verid=0, b_hierarchy_type;
+
+    /* Fill default values */
+    pC->volParsing.video_object_layer_shape = 0;
+    pC->volParsing.sprite_enable = 0;
+    pC->volParsing.reduced_resolution_vop_enable = 0;
+    pC->volParsing.scalability = 0;
+    pC->volParsing.enhancement_type = 0;
+    pC->volParsing.complexity_estimation_disable = 0;
+    pC->volParsing.interlaced = 0;
+    pC->volParsing.sprite_warping_points = 0;
+    pC->volParsing.sprite_brightness_change = 0;
+    pC->volParsing.quant_precision = 5;
+
+    parsingCtxt.stream_byte = 0;
+    parsingCtxt.stream_index = 8;
+    parsingCtxt.in = pC->WriterVideoStreamInfo.Header.pBuf;
+
+    start = pC->WriterVideoStreamInfo.Header.pBuf;
+
+    while (parsingCtxt.in - start\
+         < pC->pReaderVideoStream->m_basicProperties.m_decoderSpecificInfoSize)
+    {
+        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+        if (code == 0)
+        {
+            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+            if (code == 0)
+            {
+                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+                if (code == 1)
+                {
+                    /* start code found */
+                    code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+                    if(code == 0xB5) /* Visual object start code */
+                    {
+                        /* is object layer identifier */
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                        if (code == 1)
+                        {
+                            /* visual object verid */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 4);
+                            vol_verid = code;
+                            /* visual object layer priority */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 3);
+                        }
+                        else
+                        {
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 7); /* Realign on byte */
+                            vol_verid = 1;
+                        }
+                    }
+                    else if ((code > 0x1F) && (code < 0x30))
+                    { /* find vol start code */
+                        /* random accessible vol */
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+
+                        /* video object type indication */
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+
+                        /* is object layer identifier */
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                        if (code == 1)
+                        {
+                            /* video object layer verid */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 4);
+                            vol_verid = code;
+                            /* video object layer priority */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 3);
+                        }
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 4);/* aspect ratio */
+                        if (code == 15)
+                            /* par_width and par_height (8+8) */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 16);
+                        /* vol control parameters */
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                        if (code == 1)
+                        {
+                            /* chroma format + low delay (3+1) */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 3);
+                            /* vbv parameters */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                            if (code == 1)
+                            {
+                                /* first and latter half bitrate + 2 marker bits
+                                  (15 + 1 + 15 + 1)*/
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 32);
+
+                                /* first and latter half vbv buffer size + first half
+                                   vbv occupancy
+                                + marker bits (15+1+3+11+1)*/
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 31);
+
+                                /* first half vbv occupancy + marker bits (15+1)*/
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 16);
+                            }
+                        }
+                        /* video object layer shape */
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 2);
+
+                        /* Need to save it for vop parsing */
+                        pC->volParsing.video_object_layer_shape = code;
+
+                        if (code != 0) return 0; /* only rectangular case supported */
+                        /* Marker bit */
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                        /* VOP time increment resolution */
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 16);
+
+                        /* Computes time increment length */
+                        j    = code - 1;
+                        for (i = 0; (i < 32) && (j != 0); j >>=1)
+                        {
+                            i++;
+                        }
+                        time_incr_length = (i == 0) ? 1 : i;
+                        /* Save time increment length and original timescale */
+                        pC->uiOrigTimescaleLength = time_incr_length;
+                        pC->uiOrigVideoTimescale = code;
+
+                        /* Compute new time increment length */
+                        j    = pC->uiVideoTimescale - 1;
+                        for (i = 0; (i < 32) && (j != 0); j >>=1)
+                        {
+                            i++;
+                        }
+                        time_incr_length = (i == 0) ? 1 : i;
+                        /* Save new time increment length */
+                        pC->uiTimescaleLength = time_incr_length;
+
+                        /* Write new VOP time increment resolution */
+                        if(parsingCtxt.stream_index == 0)
+                        {
+                            in = parsingCtxt.in - 2;
+                        }
+                        else
+                        {
+                            in = parsingCtxt.in - 3;
+                        }
+                        M4MCS_WriteByteToMemory(pC->uiVideoTimescale, in,
+                            parsingCtxt.stream_index, 16 );
+
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);/* Marker bit */
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);/* Fixed VOP rate */
+                        if (code == 1)
+                        {
+                            /* Fixed VOP time increment resolution */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt,
+                                    time_incr_length);
+                        }
+
+                        if(pC->volParsing.video_object_layer_shape != 1) /* 1 = Binary */
+                        {
+                            if(pC->volParsing.video_object_layer_shape == 0) /* 0 = rectangular */
+                            {
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);/* Marker bit */
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 13);/* Width */
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);/* Marker bit */
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 13);/* Height */
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);/* Marker bit */
+                            }
+                        }
+
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);/* interlaced */
+                        pC->volParsing.interlaced = code;
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);/* OBMC disable */
+
+                        if(vol_verid == 1)
+                        {
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);/* sprite enable */
+                            pC->volParsing.sprite_enable = code;
+                        }
+                        else
+                        {
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 2);/* sprite enable */
+                            pC->volParsing.sprite_enable = code;
+                        }
+                        if ((pC->volParsing.sprite_enable == 1) ||
+                            (pC->volParsing.sprite_enable == 2))
+                            /* Sprite static = 1 and Sprite GMC = 2 */
+                        {
+                            if (pC->volParsing.sprite_enable != 2)
+                            {
+                                /* sprite width */
+                                code = M4MCS_GetBitsFromMemory(
+                                        &parsingCtxt, 13);
+                                code = M4MCS_GetBitsFromMemory(
+                                        &parsingCtxt, 1);/* Marker bit */
+                                /* sprite height */
+                                code = M4MCS_GetBitsFromMemory(
+                                        &parsingCtxt, 13);
+                                code = M4MCS_GetBitsFromMemory(
+                                        &parsingCtxt, 1);/* Marker bit */
+                                /* sprite l coordinate */
+                                code = M4MCS_GetBitsFromMemory(
+                                        &parsingCtxt, 13);
+                                code = M4MCS_GetBitsFromMemory(
+                                        &parsingCtxt, 1);/* Marker bit */
+                                /* sprite top coordinate */
+                                code = M4MCS_GetBitsFromMemory(
+                                        &parsingCtxt, 13);
+                                code = M4MCS_GetBitsFromMemory(
+                                        &parsingCtxt, 1);/* Marker bit */
+                            }
+                            /* sprite warping points */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 6);
+                            pC->volParsing.sprite_warping_points = code;
+                            /* sprite warping accuracy */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 2);
+
+                            /* sprite brightness change */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                            pC->volParsing.sprite_brightness_change = code;
+                            if (pC->volParsing.sprite_enable != 2)
+                            {
+                                /* low latency sprite enable */
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                            }
+                        }
+                        if ((vol_verid != 1) && (pC->volParsing.video_object_layer_shape != 0))
+                        {
+                            code = M4MCS_GetBitsFromMemory(
+                                    &parsingCtxt, 1);/* sadct disable */
+                        }
+
+                        code = M4MCS_GetBitsFromMemory(
+                                &parsingCtxt, 1); /* not 8 bits */
+                        if (code)
+                        {   /* quant precision */
+                            code = M4MCS_GetBitsFromMemory(
+                                    &parsingCtxt, 4);
+                            pC->volParsing.quant_precision = code;
+                            /* bits per pixel */
+                            code = M4MCS_GetBitsFromMemory(
+                                    &parsingCtxt, 4);
+                        }
+
+                        /* greyscale not supported */
+                        if(pC->volParsing.video_object_layer_shape == 3)
+                        {
+                            /* nogray quant update + composition method + linear composition */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 3);
+                        }
+
+                        code = M4MCS_GetBitsFromMemory(
+                                &parsingCtxt, 1);/* quant type */
+                        if (code)
+                        {
+                            /* load intra quant mat */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                            if (code)
+                            {
+                                code = M4MCS_GetBitsFromMemory(
+                                        &parsingCtxt, 8);
+                                i    = 1;
+                                while (i < 64)
+                                {
+                                    code = M4MCS_GetBitsFromMemory(
+                                            &parsingCtxt, 8);
+                                    if (code == 0)
+                                        break;
+                                    i++;
+                                }
+                            }
+                            /* load non intra quant mat */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                            if (code)
+                            {
+                                code = M4MCS_GetBitsFromMemory(
+                                        &parsingCtxt, 8);
+                                i    = 1;
+                                while (i < 64)
+                                {
+                                    code = M4MCS_GetBitsFromMemory(
+                                            &parsingCtxt, 8);
+                                    if (code == 0)
+                                        break;
+                                    i++;
+                                }
+                            }
+                        }
+
+                        if (vol_verid != 1)
+                        {
+                            code = M4MCS_GetBitsFromMemory(
+                                    &parsingCtxt, 1);/* quarter sample */
+                        }
+                        /* complexity estimation disable */
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                        pC->volParsing.complexity_estimation_disable = code;
+                        if (!code)
+                        {
+                            return M4ERR_NOT_IMPLEMENTED;
+                        }
+
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);/* resync marker disable*/
+
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);/* data partitionned */
+                        if (code)
+                        {
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);/* reversible VLC */
+                        }
+
+                        if (vol_verid != 1)
+                        {
+                            code = M4MCS_GetBitsFromMemory(
+                                    &parsingCtxt, 1);/* newpred */
+                            if (code)
+                            {
+                                return M4ERR_PARAMETER;
+                            }
+                            /* reduced resolution vop enable */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                            pC->volParsing.reduced_resolution_vop_enable = code;
+                        }
+
+                        code = M4MCS_GetBitsFromMemory(
+                                &parsingCtxt, 1);/* scalability */
+                        pC->volParsing.scalability = code;
+                        if (code)
+                        {
+                            code = M4MCS_GetBitsFromMemory(
+                                    &parsingCtxt, 1);/* hierarchy type */
+                            b_hierarchy_type = code;
+                            code = M4MCS_GetBitsFromMemory(
+                                    &parsingCtxt, 4);/* ref layer id */
+
+                            /* ref sampling direct */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+
+                            /* hor sampling factor N */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 5);
+
+                            /* hor sampling factor M */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 5);
+
+                            /* vert sampling factor N */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 5);
+
+                            /* vert sampling factor M */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 5);
+
+                            /* enhancement type */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                            pC->volParsing.enhancement_type = code;
+                            if ((!b_hierarchy_type) &&
+                                (pC->volParsing.video_object_layer_shape == 1))
+                            {
+                                /* use ref shape */
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                                /* use ref texture */
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                                /* shape hor sampling factor N */
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 5);
+                                /* shape hor sampling factor M */
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 5);
+                                /* shape vert sampling factor N */
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 5);
+                                /* shape vert sampling factor M */
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 5);
+                            }
+                        }
+                        break;
+                    }
+                }
+                else
+                {
+                    if ((code >> 2) == 0x20)
+                    {
+                        /* H.263 short-header start code -> not an MPEG-4 VOL, stop parsing */
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
+    return M4NO_ERROR;
+}
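+
+/**
+ * Worked example of the time-increment length computation above (illustrative
+ * values only): for a VOP time increment resolution of 30, j starts at 29 and
+ * is shifted right 5 times before reaching 0, so vop_time_increment is coded
+ * on 5 bits; for a resolution of 1000 the loop runs 10 times, giving 10 bits.
+ */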
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4MCS_intChangeAUVideoTimescale( )
+ * @brief
+ * @return
+ ************************************************************************
+ */
+M4OSA_ERR M4MCS_intChangeAUVideoTimescale(M4MCS_InternalContext* pC)
+{
+    M4MCS_Bitstream_ctxt parsingCtxt;
+    M4OSA_UInt32 code;
+    M4OSA_MemAddr8 start, in;
+    M4OSA_MemAddr32 in_temp;
+    M4OSA_UInt8 i, in_index=0; /* in_index is the bit index in the output (writer) buffer */
+    M4OSA_UInt32 new_time_incr;
+    M4OSA_Int32 diff_timescale= 0 ;
+    M4OSA_UInt32 stuffing_byte=0;
+    M4OSA_UInt8 vopCodingType, vop_fcode_forward, vop_fcode_backward, nb_zeros;
+
+    parsingCtxt.stream_byte = 0;
+    parsingCtxt.stream_index = 8;
+    parsingCtxt.in = pC->ReaderVideoAU.m_dataAddress;
+
+    start = pC->ReaderVideoAU.m_dataAddress;
+    in = pC->WriterVideoAU.dataAddress;
+
+    M4OSA_memset(in, pC->ReaderVideoAU.m_size , 0);
+    code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+    M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, 0, 8);
+    in++;
+    if (code == 0)
+    {
+        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+        M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, 0, 8);
+        in++;
+        if (code == 0)
+        {
+            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, 0, 8);
+            in++;
+            if (code == 1)
+            {
+                /* start code found */
+                code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+                M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, 0, 8);
+                in++;
+                if (code == 0xB6)
+                { /* find vop start code */
+                    code = M4MCS_GetBitsFromMemory(&parsingCtxt, 2); /* VOP coding type */
+                    M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in,
+                                            in_index, 2);
+                    M4MCS_intCheckIndex(&in_index,2,&in);
+                    vopCodingType = code; /* Save it before needed further in parsing */
+                    do
+                    {
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1); /* Modulo time base */
+                        M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in,
+                                                in_index, 1);
+                        M4MCS_intCheckIndex(&in_index,1,&in);
+                    } while(code != 0);
+                    code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1); /* Marker bit */
+                    M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in,
+                                            in_index, 1);
+                    M4MCS_intCheckIndex(&in_index,1,&in);
+                    code = M4MCS_GetBitsFromMemory(&parsingCtxt,
+                                            pC->uiOrigTimescaleLength);
+                    /* VOP time increment */
+
+                    /* Calculates new time increment and write it to AU */
+                    new_time_incr = (pC->uiVideoTimescale * code) /
+                                    pC->uiOrigVideoTimescale;
+                    M4MCS_WriteByteToMemory(new_time_incr, in,
+                                    in_index, pC->uiTimescaleLength );
+                    M4MCS_intCheckIndex(&in_index,pC->uiTimescaleLength,
+                                    &in);
+
+                    /* VOP not coded */
+                    code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1); /* Marker bit */
+                    M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in,
+                                    in_index, 1);
+                    M4MCS_intCheckIndex(&in_index,1,&in);
+                    code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1); /* VOP not coded bit */
+                    M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in,
+                                    in_index, 1);
+                    M4MCS_intCheckIndex(&in_index,1,&in);
+                    if(code == 1)
+                    {
+                        //break; /* TODO !!! -> Goto stuffing */
+                    }
+                    /* newpred ignored */
+
+                    if((pC->volParsing.video_object_layer_shape != 2) &&
+                        (vopCodingType == 1 || (vopCodingType == 3 &&
+                        pC->volParsing.sprite_enable == 2)))
+                    {
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1); /* VOP rounding type */
+                        M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in,
+                                    in_index, 1);
+                        M4MCS_intCheckIndex(&in_index,1,&in);
+                    }
+
+                    if(pC->volParsing.reduced_resolution_vop_enable &&
+                        pC->volParsing.video_object_layer_shape == 0 &&
+                        (vopCodingType == 0 || vopCodingType == 1))
+                    {
+                        /* VOP reduced resolution */
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                        M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in,
+                                    in_index, 1);
+                        M4MCS_intCheckIndex(&in_index,1,&in);
+                    }
+
+                    if(pC->volParsing.video_object_layer_shape != 0)
+                    {
+                        if(pC->volParsing.sprite_enable == 1 &&
+                            vopCodingType == 0)
+                        {
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 13); /* VOP width */
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 13);
+                            M4MCS_intCheckIndex(&in_index,13,&in);
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1); /* Marker bit */
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 1);
+                            M4MCS_intCheckIndex(&in_index,1,&in);
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 13); /* VOP height */
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 13);
+                            M4MCS_intCheckIndex(&in_index,13,&in);
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1); /* Marker bit */
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 1);
+                            M4MCS_intCheckIndex(&in_index,1,&in);
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 13); /* VOP horizontal
+                                                                              mc spatial ref */
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 13);
+                            M4MCS_intCheckIndex(&in_index,13,&in);
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1); /* Marker bit */
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 1);
+                            M4MCS_intCheckIndex(&in_index,1,&in);
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 13); /* VOP vertical
+                                                                              mc spatial ref */
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 13);
+                            M4MCS_intCheckIndex(&in_index,13,&in);
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1); /* Marker bit */
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 1);
+                            M4MCS_intCheckIndex(&in_index,1,&in);
+                        }
+                        if(pC->volParsing.video_object_layer_shape != 1 &&
+                            pC->volParsing.scalability &&
+                            pC->volParsing.enhancement_type)
+                        {
+                            /* Background composition */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 1);
+                            M4MCS_intCheckIndex(&in_index,1,&in);
+                        }
+                        /* Change conv ratio disable */
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+
+                        M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 1);
+                        M4MCS_intCheckIndex(&in_index,1,&in);
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1); /* VOP constant alpha */
+                        M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 1);
+                        M4MCS_intCheckIndex(&in_index,1,&in);
+                        if(code)
+                        {
+                            /* VOP constant alpha value */
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 8);
+                            M4MCS_intCheckIndex(&in_index,8,&in);
+                        }
+                    }
+
+                    if(pC->volParsing.video_object_layer_shape != 2)
+                    {
+                        if(!pC->volParsing.complexity_estimation_disable)
+                        {
+                            return M4ERR_NOT_IMPLEMENTED;
+                        }
+                    }
+
+                    if(pC->volParsing.video_object_layer_shape != 2)
+                    {
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 3); /* intra dc vlc thr */
+                        M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 3);
+                        M4MCS_intCheckIndex(&in_index,3,&in);
+                        if(pC->volParsing.interlaced)
+                        {
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1); /* top field first */
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 1);
+                            M4MCS_intCheckIndex(&in_index,1,&in);
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1); /* alternate vertical
+                                                                             scan flag */
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 1);
+                            M4MCS_intCheckIndex(&in_index,1,&in);
+                        }
+                    }
+
+                    if((pC->volParsing.sprite_enable == 1 || pC->volParsing.sprite_enable == 2) &&
+                        vopCodingType == 3)
+                    {
+                        if(pC->volParsing.sprite_warping_points > 0 ||
+                            (pC->volParsing.sprite_brightness_change))
+                        {
+                            return M4ERR_NOT_IMPLEMENTED;
+                        }
+                        if(pC->volParsing.sprite_enable == 1)
+                        {
+                            return M4ERR_NOT_IMPLEMENTED;
+                        }
+                    }
+
+                    if(pC->volParsing.video_object_layer_shape != 2)
+                    {
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt,
+                            pC->volParsing.quant_precision); /* vop_quant */
+                        M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index,
+                            pC->volParsing.quant_precision);
+                        M4MCS_intCheckIndex(&in_index,pC->volParsing.quant_precision,&in);
+                        if(pC->volParsing.video_object_layer_shape == 3)
+                        {
+                            return M4ERR_NOT_IMPLEMENTED;
+                        }
+                        if(vopCodingType != 0) /* P-VOP or S-VOP or B-VOP case */
+                        {
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 3); /* vop fcode forward*/
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 3);
+                            M4MCS_intCheckIndex(&in_index,3,&in);
+                            vop_fcode_forward = code;
+                        }
+                        if(vopCodingType == 2) /* B-VOP */
+                        {
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 3); /* vop fcode backward */
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 3);
+                            M4MCS_intCheckIndex(&in_index,3,&in);
+                            vop_fcode_backward = code;
+                        }
+
+                    }
+
+#if 1
+                    /* Align on read */
+                    code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8-(parsingCtxt.stream_index));
+                    M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in,
+                        in_index, 8-(parsingCtxt.stream_index));
+                    M4MCS_intCheckIndex(&in_index,8-(parsingCtxt.stream_index),&in);
+
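+                    /* Copy the remaining VOP data to the output AU byte by byte, watching
+                       for resync markers (a run of zero bits whose length depends on the
+                       VOP coding type and vop_fcode, terminated by a '1' bit); when one is
+                       found, the video packet header that follows is rewritten with the
+                       new timescale (M4MCS_intChangeVideoPacketVideoTimescale). */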
+                    do
+                    {
+                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+                        if(code == 0)
+                        {
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+                            if(code == 0)
+                            {
+                                nb_zeros = 0;
+                                if((vopCodingType == 1 || vopCodingType == 3)
+                                    && vop_fcode_forward > 1) /* P-VOP or S-VOP case */
+                                {
+                                    code = M4MCS_GetBitsFromMemory(&parsingCtxt,
+                                        vop_fcode_forward-1);
+                                    nb_zeros = vop_fcode_forward-1;
+                                }
+                                else if(vopCodingType == 2 && (vop_fcode_forward > 1 ||
+                                    vop_fcode_backward > 1)) /* B-VOP case */
+                                {
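+                                    /* B-VOP resync marker: max(15 + fcode, 17) zero bits
+                                       followed by a '1'; extra zeros beyond the 16 already
+                                       read are consumed only when 15 + fcode > 17. */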
+                                    if(vop_fcode_forward > vop_fcode_backward)
+                                    {
+                                        if(15+vop_fcode_forward > 17)
+                                        {
+                                            code = M4MCS_GetBitsFromMemory(&parsingCtxt,
+                                                vop_fcode_forward-1);
+                                        }
+                                        else
+                                        {
+                                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                                        }
+                                        nb_zeros = vop_fcode_forward-1;
+                                    }
+                                    else
+                                    {
+                                        if(15+vop_fcode_backward > 17)
+                                        {
+                                            code = M4MCS_GetBitsFromMemory(&parsingCtxt,
+                                                vop_fcode_backward-1);
+                                        }
+                                        else
+                                        {
+                                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                                        }
+                                        nb_zeros = vop_fcode_backward-1;
+                                    }
+                                    if(code == 0)
+                                    {
+                                        code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                                        if(code != 1)
+                                        {
+                                            M4MCS_WriteByteToMemory(0, (M4OSA_MemAddr32)in,
+                                                in_index, 8);
+                                            M4MCS_intCheckIndex(&in_index,8,&in);
+                                            M4MCS_WriteByteToMemory(0, (M4OSA_MemAddr32)in,
+                                                in_index, 8);
+                                            M4MCS_intCheckIndex(&in_index,8,&in);
+                                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in,
+                                                in_index, 1);
+                                            M4MCS_intCheckIndex(&in_index,1,&in);
+                                            goto realign;
+                                        }
+                                        else
+                                        {
+                                            M4MCS_intChangeVideoPacketVideoTimescale(pC );
+                                        }
+                                    }
+                                    else
+                                    {
+
+                                        goto realign;
+                                    }
+                                }
+                                else /* I-VOP case or P-VOP or S-VOP case with
+                                     vop_fcode_forward = 1 */
+                                {
+                                    /* Read next bit that must be one */
+                                    code = M4MCS_GetBitsFromMemory(&parsingCtxt, 1);
+                                    if(code != 1)
+                                    {
+                                        goto realign;
+                                    }
+                                    else
+                                    {
+                                        /* Realign on byte */
+
+                                        /* Write resync marker */
+                                        M4MCS_WriteByteToMemory(0, (M4OSA_MemAddr32)in,
+                                            in_index, 8);
+                                        M4MCS_intCheckIndex(&in_index,8,&in);
+                                        M4MCS_WriteByteToMemory(0, (M4OSA_MemAddr32)in,
+                                            in_index, 8);
+                                        M4MCS_intCheckIndex(&in_index,8,&in);
+                                        M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in,
+                                            in_index, 1);
+                                        M4MCS_intCheckIndex(&in_index,1,&in);
+
+                                        /* Change timescale into video packet header */
+                                        M4MCS_intChangeVideoPacketVideoTimescale(pC );
+                                    }
+
+                                }
+                            }
+                            else
+                            {
+                                M4MCS_WriteByteToMemory(0, (M4OSA_MemAddr32)in, in_index, 8);
+                                M4MCS_intCheckIndex(&in_index,8,&in);
+                                M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 8);
+                                M4MCS_intCheckIndex(&in_index,8,&in);
+realign:
+                                /* Realign on read */
+                                code = M4MCS_GetBitsFromMemory(&parsingCtxt,
+                                    8-(parsingCtxt.stream_index));
+                                M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index,
+                                    8-(parsingCtxt.stream_index));
+                                M4MCS_intCheckIndex(&in_index,8-(parsingCtxt.stream_index),&in);
+                            }
+                        }
+                        else
+                        {
+                            M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 8);
+                            M4MCS_intCheckIndex(&in_index,8,&in);
+                        }
+                    } while(parsingCtxt.in - pC->ReaderVideoAU.m_dataAddress\
+                        < pC->ReaderVideoAU.m_size);
+#else
+                    /* Align on write */
+                    code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8-in_index);
+                    M4MCS_WriteByteToMemory(code, (M4OSA_MemAddr32)in, in_index, 8-in_index);
+                    M4MCS_intCheckIndex(&in_index,8-in_index,&in);
+
+                    /* Read 8 bits words, and write them to the output AU
+                    (write is 8 bits aligned) */
+                    diff_timescale = pC->uiOrigTimescaleLength - pC->uiTimescaleLength;
+                    if(diff_timescale > 0)
+                    {
+                        while (parsingCtxt.in - start <= pC->ReaderVideoAU.m_size)
+                        {
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+                            //WritebyteToMemory(code, in);
+                            *in = code;
+                            in++;
+                        }
+                    }
+                    else
+                    {
+                        while (parsingCtxt.in - start < pC->ReaderVideoAU.m_size)
+                        {
+                            code = M4MCS_GetBitsFromMemory(&parsingCtxt, 8);
+                            //WritebyteToMemory(code, in);
+                            *in = code;
+                            in++;
+                        }
+                    }
+#endif
+                    in--;
+
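+                    /* Byte-align the end of the output AU: fill the remaining
+                       parsingCtxt.stream_index bits of the last byte with '1' stuffing
+                       bits, then update the written AU size. */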
+                    for(i=0;i<parsingCtxt.stream_index;i++)
+                    {
+                        stuffing_byte = stuffing_byte << 1;
+                        stuffing_byte += 1;
+                    }
+                    M4MCS_WriteByteToMemory(stuffing_byte, (M4OSA_MemAddr32)in,
+                        8-parsingCtxt.stream_index, parsingCtxt.stream_index);
+                    pC->WriterVideoAU.size = in + 1 - pC->WriterVideoAU.dataAddress;
+                }
+            }
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+#endif /* TIMESCALE_BUG */
+
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_Codecs.c b/libvideoeditor/vss/mcs/src/M4MCS_Codecs.c
new file mode 100755
index 0000000..d61d2d2
--- /dev/null
+++ b/libvideoeditor/vss/mcs/src/M4MCS_Codecs.c
@@ -0,0 +1,917 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file   M4MCS_Codecs.c
+ * @brief  MCS implementation
+ * @note   This file contains all functions related to audio/video
+ *         codec manipulations.
+ ************************************************************************
+ */
+
+/**
+ ********************************************************************
+ * Includes
+ ********************************************************************
+ */
+#include "NXPSW_CompilerSwitches.h"
+#include "M4OSA_Debug.h"            /* Include for OSAL debug services */
+#include "M4MCS_InternalTypes.h"    /* Internal types of the MCS */
+
+
+#ifdef M4MCS_SUPPORT_VIDEC_3GP
+#include "M4_MPEG4VI_VideoHandler.h"  /*needed for renderer error codes*/
+#endif
+
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_clearInterfaceTables()
+ * @brief    Clear the encoder, decoder, reader and writer interface tables
+ * @param    pContext            (IN/OUT) MCS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    The context is null
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_clearInterfaceTables(M4MCS_Context pContext)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+    M4OSA_UInt8 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+    /* Initialization so that double registration can be detected */
+    pC->pWriterGlobalFcts = M4OSA_NULL;
+    pC->pWriterDataFcts = M4OSA_NULL;
+    pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
+    pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
+
+    pC->pCurrentVideoEncoderExternalAPI = M4OSA_NULL;
+    pC->pCurrentVideoEncoderUserData = M4OSA_NULL;
+
+    for (i = 0; i < M4WRITER_kType_NB; i++ )
+    {
+        pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
+        pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
+    }
+
+    for (i = 0; i < M4ENCODER_kVideo_NB; i++ )
+    {
+        pC->pVideoEncoderInterface[i] = M4OSA_NULL;
+        pC->pVideoEncoderExternalAPITable[i] = M4OSA_NULL;
+        pC->pVideoEncoderUserDataTable[i] = M4OSA_NULL;
+    }
+
+    for (i = 0; i < M4ENCODER_kAudio_NB; i++ )
+    {
+        pC->pAudioEncoderInterface[i] = M4OSA_NULL;
+        pC->pAudioEncoderFlag[i] = M4OSA_FALSE;
+        pC->pAudioEncoderUserDataTable[i] = M4OSA_NULL;
+    }
+
+    /* Initialization so that double registration can be detected */
+    pC->m_pReader = M4OSA_NULL;
+    pC->m_pReaderDataIt   = M4OSA_NULL;
+    pC->m_uiNbRegisteredReaders  = 0;
+
+    for (i = 0; i < M4READER_kMediaType_NB; i++ )
+    {
+        pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
+        pC->m_pReaderDataItTable[i]   = M4OSA_NULL;
+    }
+
+    pC->m_pVideoDecoder = M4OSA_NULL;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+    pC->m_pCurrentVideoDecoderUserData = M4OSA_NULL;
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+    pC->m_uiNbRegisteredVideoDec = 0;
+    for (i = 0; i < M4DECODER_kVideoType_NB; i++ )
+    {
+        pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+        pC->m_pVideoDecoderUserDataTable[i] = M4OSA_NULL;
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+    }
+
+    pC->m_pAudioDecoder = M4OSA_NULL;
+    for (i = 0; i < M4AD_kType_NB; i++ )
+    {
+        pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
+        pC->m_pAudioDecoderFlagTable[i] = M4OSA_FALSE;
+        pC->m_pAudioDecoderUserDataTable[i] = M4OSA_NULL;
+    }
+
+    return M4NO_ERROR;
+}
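+
+/* Hedged usage sketch (illustration only, not part of the original sources):
+ * the call order this module expects, assuming a valid MCS context has
+ * already been allocated by the MCS init code. The helper name below is
+ * hypothetical. */
+#if 0
+static M4OSA_ERR M4MCS_exampleCodecSetup(M4MCS_Context pContext)
+{
+    M4OSA_ERR err;
+
+    /* 1. Reset all interface tables so double registration can be detected */
+    err = M4MCS_clearInterfaceTables(pContext);
+    if (M4NO_ERROR != err) return err;
+
+    /* 2. Register the compiled-in readers, decoders, writers and encoders */
+    err = M4MCS_subscribeMediaAndCodec(pContext);
+    if (M4NO_ERROR != err) return err;
+
+    /* 3. Select the writer and encoders matching the requested output format */
+    err = M4MCS_setCurrentWriter(pContext, M4VIDEOEDITING_kFileType_3GPP);
+    if (M4NO_ERROR != err) return err;
+    err = M4MCS_setCurrentVideoEncoder(pContext, M4VIDEOEDITING_kMPEG4);
+    if (M4NO_ERROR != err) return err;
+    return M4MCS_setCurrentAudioEncoder(pContext, M4VIDEOEDITING_kAAC);
+}
+#endif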
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4MCS_registerWriter()
+ * @brief    This function will register a specific file format writer.
+ * @note    Depending on the media type, this function stores the writer
+ *          interfaces in the internal context.
+ * @param    pContext:    (IN) Execution context.
+ * @return    M4NO_ERROR: there is no error
+ * @return      M4ERR_PARAMETER     pContext,pWtrGlobalInterface or
+ *                                  pWtrDataInterface is M4OSA_NULL
+ *                                  (debug only), or invalid MediaType
+ ******************************************************************************
+ */
+M4OSA_ERR   M4MCS_registerWriter(M4MCS_Context pContext, M4WRITER_OutputFileType MediaType,
+                                 M4WRITER_GlobalInterface* pWtrGlobalInterface,
+                                 M4WRITER_DataInterface* pWtrDataInterface)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+
+    /**
+     *    Check input parameters */
+    M4OSA_DEBUG_IF2((pC == M4OSA_NULL),M4ERR_PARAMETER,
+         "MCS: context is M4OSA_NULL in M4MCS_registerWriter");
+    M4OSA_DEBUG_IF2((pWtrGlobalInterface == M4OSA_NULL),M4ERR_PARAMETER,
+         "pWtrGlobalInterface is M4OSA_NULL in M4MCS_registerWriter");
+    M4OSA_DEBUG_IF2((pWtrDataInterface == M4OSA_NULL),M4ERR_PARAMETER,
+         "pWtrDataInterface is M4OSA_NULL in M4MCS_registerWriter");
+
+    M4OSA_TRACE3_3("MCS: M4MCS_registerWriter called with pContext=0x%x,\
+     pWtrGlobalInterface=0x%x, pWtrDataInterface=0x%x", pC,pWtrGlobalInterface,
+     pWtrDataInterface);
+
+    if((MediaType == M4WRITER_kUnknown) || (MediaType >= M4WRITER_kType_NB))
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
+        return M4ERR_PARAMETER;
+    }
+
+    if (pC->WriterInterface[MediaType].pGlobalFcts != M4OSA_NULL)
+    {
+      /* A writer corresponding to this media type has already been registered! */
+      M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "This media type has already been registered");
+      return M4ERR_PARAMETER;
+    }
+
+    /*
+     * Save writer interface in context */
+    pC->WriterInterface[MediaType].pGlobalFcts = pWtrGlobalInterface;
+    pC->WriterInterface[MediaType].pDataFcts = pWtrDataInterface;
+
+    return M4NO_ERROR;
+}
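+
+/* Hedged sketch (illustration only): demonstrates the double-registration
+ * guard above. pContext, pGlobal and pData are hypothetical placeholders;
+ * in the real flow the interface pointers come from a writer module's
+ * getInterfaces() call (see M4MCS_subscribeMediaAndCodec). */
+#if 0
+static M4OSA_ERR M4MCS_exampleRegisterWriterTwice(M4MCS_Context pContext,
+                                                  M4WRITER_GlobalInterface* pGlobal,
+                                                  M4WRITER_DataInterface* pData)
+{
+    M4OSA_ERR err = M4MCS_registerWriter(pContext, M4WRITER_k3GPP, pGlobal, pData);
+    if (M4NO_ERROR != err) return err;       /* first registration succeeds */
+
+    /* registering the same media type again is rejected */
+    err = M4MCS_registerWriter(pContext, M4WRITER_k3GPP, pGlobal, pData);
+    return err;                              /* err == M4ERR_PARAMETER here */
+}
+#endif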
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4MCS_registerVideoEncoder()
+ * @brief    This function will register a specific video encoder.
+ * @note    Depending on the media type, this function stores the video encoder
+ *          interface in the internal context.
+ * @param    pContext:    (IN) Execution context.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER    pContext or pEncGlobalInterface is M4OSA_NULL (debug only),
+ *                             or invalid MediaType
+ ******************************************************************************
+ */
+M4OSA_ERR   M4MCS_registerVideoEncoder (
+                    M4MCS_Context pContext,
+                    M4ENCODER_Format MediaType,
+                    M4ENCODER_GlobalInterface *pEncGlobalInterface)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+
+    /**
+     *    Check input parameters */
+    M4OSA_DEBUG_IF2((pC == M4OSA_NULL),M4ERR_PARAMETER,
+         "MCS: context is M4OSA_NULL in M4MCS_registerVideoEncoder");
+    M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL),M4ERR_PARAMETER,
+         "pEncGlobalInterface is M4OSA_NULL in M4MCS_registerVideoEncoder");
+
+    M4OSA_TRACE3_2("MCS: M4MCS_registerVideoEncoder called with pContext=0x%x,\
+         pEncGlobalInterface=0x%x", pC, pEncGlobalInterface);
+
+    if (MediaType >= M4ENCODER_kVideo_NB)
+    {
+      M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid video encoder type");
+      return M4ERR_PARAMETER;
+    }
+
+    if (pC->pVideoEncoderInterface[MediaType] != M4OSA_NULL)
+    {
+        /* can be legitimate, in cases where we have one version that can use external encoders
+        but which still has the built-in one to be able to work without an external encoder; in
+        this case the new encoder simply replaces the old one (i.e. we unregister it first). */
+        M4OSA_free((M4OSA_MemAddr32)pC->pVideoEncoderInterface[MediaType]);
+        pC->pVideoEncoderInterface[MediaType] = M4OSA_NULL;
+    }
+
+    /*
+     * Save encoder interface in context */
+    pC->pVideoEncoderInterface[MediaType] = pEncGlobalInterface;
+    /* The actual userData and external API will be set by the registration function in the case
+    of an external encoder (add it as a parameter to this function in the long run?) */
+    pC->pVideoEncoderUserDataTable[MediaType] = M4OSA_NULL;
+    pC->pVideoEncoderExternalAPITable[MediaType] = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4MCS_registerAudioEncoder()
+ * @brief    This function will register a specific audio encoder.
+ * @note    Depending on the media type, this function stores the audio encoder
+ *          interface in the internal context.
+ * @param    pContext:                (IN) Execution context.
+ * @param    MediaType:               (IN) The media type.
+ * @param    pEncGlobalInterface:     (IN) The encoder interface functions.
+ * @return    M4NO_ERROR: there is no error
+ * @return   M4ERR_PARAMETER:   pContext or pEncGlobalInterface is
+ *                              M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR   M4MCS_registerAudioEncoder(
+                    M4MCS_Context pContext,
+                    M4ENCODER_AudioFormat MediaType,
+                    M4ENCODER_AudioGlobalInterface *pEncGlobalInterface)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+
+    /**
+     *    Check input parameters */
+    M4OSA_DEBUG_IF2((pC == M4OSA_NULL),M4ERR_PARAMETER,
+         "MCS: context is M4OSA_NULL in M4MCS_registerAudioEncoder");
+    M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL),M4ERR_PARAMETER,
+         "pEncGlobalInterface is M4OSA_NULL in M4MCS_registerAudioEncoder");
+
+    M4OSA_TRACE3_2("MCS: M4MCS_registerAudioEncoder called with pContext=0x%x,\
+         pEncGlobalInterface=0x%x", pC, pEncGlobalInterface);
+
+    if (MediaType >= M4ENCODER_kAudio_NB)
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid audio encoder type");
+        return M4ERR_PARAMETER;
+    }
+
+    if(M4OSA_NULL != pC->pAudioEncoderInterface[MediaType])
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pAudioEncoderInterface[MediaType]);
+        pC->pAudioEncoderInterface[MediaType] = M4OSA_NULL;
+
+        if(M4OSA_NULL != pC->pAudioEncoderUserDataTable[MediaType])
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->pAudioEncoderUserDataTable[MediaType]);
+            pC->pAudioEncoderUserDataTable[MediaType] = M4OSA_NULL;
+        }
+    }
+
+    /*
+     * Save encoder interface in context */
+    pC->pAudioEncoderInterface[MediaType] = pEncGlobalInterface;
+    pC->pAudioEncoderFlag[MediaType] = M4OSA_FALSE; /* internal encoder */
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_registerReader()
+ * @brief    Register reader.
+ * @param    pContext            (IN/OUT) MCS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_registerReader(
+                        M4MCS_Context pContext,
+                        M4READER_MediaType mediaType,
+                        M4READER_GlobalInterface *pRdrGlobalInterface,
+                        M4READER_DataInterface *pRdrDataInterface)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrGlobalInterface),
+         M4ERR_PARAMETER, "M4MCS_registerReader: invalid pointer on global interface");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrDataInterface),
+         M4ERR_PARAMETER, "M4MCS_registerReader: invalid pointer on data interface");
+
+    if (mediaType == M4READER_kMediaTypeUnknown || mediaType >= M4READER_kMediaType_NB)
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
+        return M4ERR_PARAMETER;
+    }
+
+    if (pC->m_pReaderGlobalItTable[mediaType] != M4OSA_NULL)
+    {
+        /* A reader corresponding to this media type has already been registered! */
+      M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "This media type has already been registered");
+      return M4ERR_PARAMETER;
+    }
+
+    pC->m_pReaderGlobalItTable[mediaType] = pRdrGlobalInterface;
+    pC->m_pReaderDataItTable[mediaType]   = pRdrDataInterface;
+
+    pC->m_uiNbRegisteredReaders++;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_registerVideoDecoder()
+ * @brief    Register video decoder
+ * @param    pContext                (IN/OUT) MCS context.
+ * @param    decoderType            (IN) Decoder type
+ * @param    pDecoderInterface    (IN) Decoder interface.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only), or the decoder
+ *                              type is invalid
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_registerVideoDecoder(
+                            M4MCS_Context pContext,
+                            M4DECODER_VideoType decoderType,
+                            M4DECODER_VideoInterface *pDecoderInterface)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
+         "M4MCS_registerVideoDecoder: invalid pointer on decoder interface");
+
+    if (decoderType >= M4DECODER_kVideoType_NB)
+    {
+      M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid video decoder type");
+      return M4ERR_PARAMETER;
+    }
+
+    if (pC->m_pVideoDecoderItTable[decoderType] != M4OSA_NULL)
+    {
+#ifndef M4VSS_ENABLE_EXTERNAL_DECODERS
+        /* A decoder corresponding to this media type has already been registered! */
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Decoder has already been registered");
+        return M4ERR_PARAMETER;
+#else /* external decoders are possible */
+        /* can be legitimate, in cases where we have one version that can use external decoders
+        but which still has the built-in one to be able to work without an external decoder; in
+        this case the new decoder simply replaces the old one (i.e. we unregister it first). */
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pVideoDecoderItTable[decoderType]);
+        pC->m_pVideoDecoderItTable[decoderType] = M4OSA_NULL;
+        /* oh, and don't forget the user data, too. */
+        if (pC->m_pVideoDecoderUserDataTable[decoderType] != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pVideoDecoderUserDataTable[decoderType]);
+            pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
+        }
+#endif /* are external decoders possible? */
+    }
+
+    pC->m_pVideoDecoderItTable[decoderType] = pDecoderInterface;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+    pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
+    /* The actual userData will be set by the registration function in the case
+    of an external decoder (add it as a parameter to this function in the long run?) */
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+    pC->m_uiNbRegisteredVideoDec++;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_registerAudioDecoder()
+ * @brief    Register audio decoder
+ * @note        This function is used internally by the MCS to
+ *              register audio decoders.
+ * @param    context                (IN/OUT) MCS context.
+ * @param    decoderType            (IN) Audio decoder type
+ * @param    pDecoderInterface    (IN) Audio decoder interface.
+ * @return    M4NO_ERROR:            No error
+ * @return   M4ERR_PARAMETER:    A parameter is null, or the decoder type is invalid(in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_registerAudioDecoder(
+                                    M4MCS_Context pContext,
+                                    M4AD_Type decoderType,
+                                    M4AD_Interface *pDecoderInterface)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
+         "M4MCS_registerAudioDecoder: invalid pointer on decoder interface");
+
+    if (decoderType >= M4AD_kType_NB)
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid audio decoder type");
+        return M4ERR_PARAMETER;
+    }
+
+    if(M4OSA_NULL != pC->m_pAudioDecoderItTable[decoderType])
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pAudioDecoderItTable[decoderType]);
+        pC->m_pAudioDecoderItTable[decoderType] = M4OSA_NULL;
+
+        if(M4OSA_NULL != pC->m_pAudioDecoderUserDataTable[decoderType])
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pAudioDecoderUserDataTable[decoderType]);
+            pC->m_pAudioDecoderUserDataTable[decoderType] = M4OSA_NULL;
+        }
+    }
+    pC->m_pAudioDecoderItTable[decoderType] = pDecoderInterface;
+    pC->m_pAudioDecoderFlagTable[decoderType] = M4OSA_FALSE; /* internal decoder */
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_unRegisterAllWriters()
+ * @brief    Unregister all writers
+ * @param    pContext            (IN/OUT) MCS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_unRegisterAllWriters(M4MCS_Context pContext)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+    M4OSA_Int32 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+    for (i = 0; i < M4WRITER_kType_NB; i++)
+    {
+        if (pC->WriterInterface[i].pGlobalFcts != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->WriterInterface[i].pGlobalFcts );
+            pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
+        }
+        if (pC->WriterInterface[i].pDataFcts != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->WriterInterface[i].pDataFcts );
+            pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
+        }
+    }
+
+    pC->pWriterGlobalFcts = M4OSA_NULL;
+    pC->pWriterDataFcts = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_unRegisterAllEncoders()
+ * @brief    Unregister the encoders
+ * @param    pContext            (IN/OUT) MCS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_unRegisterAllEncoders(M4MCS_Context pContext)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+    M4OSA_Int32 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+    for (i = 0; i < M4ENCODER_kVideo_NB; i++)
+    {
+        if (pC->pVideoEncoderInterface[i] != M4OSA_NULL)
+        {
+            M4OSA_free( (M4OSA_MemAddr32)pC->pVideoEncoderInterface[i] );
+            pC->pVideoEncoderInterface[i] = M4OSA_NULL;
+        }
+    }
+
+    for (i = 0; i < M4ENCODER_kAudio_NB; i++)
+    {
+        if (pC->pAudioEncoderInterface[i] != M4OSA_NULL)
+        {
+            /* Don't free external audio encoder interfaces */
+            if (M4OSA_FALSE == pC->pAudioEncoderFlag[i])
+            {
+                M4OSA_free( (M4OSA_MemAddr32)pC->pAudioEncoderInterface[i] );
+            }
+            pC->pAudioEncoderInterface[i] = M4OSA_NULL;
+        }
+    }
+
+    pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
+    pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_unRegisterAllReaders()
+ * @brief    Unregister all readers
+ * @param    pContext            (IN/OUT) MCS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_unRegisterAllReaders(M4MCS_Context pContext)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+    M4OSA_Int32 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+    for (i = 0; i < M4READER_kMediaType_NB; i++)
+    {
+        if (pC->m_pReaderGlobalItTable[i] != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pReaderGlobalItTable[i] );
+            pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
+        }
+        if (pC->m_pReaderDataItTable[i] != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pReaderDataItTable[i] );
+            pC->m_pReaderDataItTable[i] = M4OSA_NULL;
+        }
+    }
+
+    pC->m_uiNbRegisteredReaders = 0;
+    pC->m_pReader = M4OSA_NULL;
+    pC->m_pReaderDataIt = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_unRegisterAllDecoders()
+ * @brief    Unregister the decoders
+ * @param    pContext            (IN/OUT) MCS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_unRegisterAllDecoders(M4MCS_Context pContext)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+    M4OSA_Int32 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+    for (i = 0; i < M4DECODER_kVideoType_NB; i++)
+    {
+        if (pC->m_pVideoDecoderItTable[i] != M4OSA_NULL)
+        {
+            M4OSA_free( (M4OSA_MemAddr32)pC->m_pVideoDecoderItTable[i] );
+            pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
+        }
+    }
+
+    for (i = 0; i < M4AD_kType_NB; i++)
+    {
+        if (pC->m_pAudioDecoderItTable[i] != M4OSA_NULL)
+        {
+            /* Don't free external audio decoder interfaces */
+            if (M4OSA_FALSE == pC->m_pAudioDecoderFlagTable[i])
+            {
+                M4OSA_free( (M4OSA_MemAddr32)pC->m_pAudioDecoderItTable[i] );
+            }
+            pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
+        }
+    }
+
+    pC->m_uiNbRegisteredVideoDec = 0;
+    pC->m_pVideoDecoder = M4OSA_NULL;
+
+    pC->m_pAudioDecoder = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_setCurrentWriter()
+ * @brief    Set current writer
+ * @param    pContext            (IN/OUT) MCS context.
+ * @param    mediaType            (IN) Media type.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_setCurrentWriter( M4MCS_Context pContext,
+                                    M4VIDEOEDITING_FileType mediaType)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+    M4WRITER_OutputFileType writerType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+    switch (mediaType)
+    {
+        case M4VIDEOEDITING_kFileType_3GPP:
+        case M4VIDEOEDITING_kFileType_MP4:
+            writerType = M4WRITER_k3GPP;
+            break;
+        case M4VIDEOEDITING_kFileType_AMR:
+            writerType = M4WRITER_kAMR;
+            break;
+        case M4VIDEOEDITING_kFileType_MP3:
+            writerType = M4WRITER_kMP3;
+            break;
+        case M4VIDEOEDITING_kFileType_PCM:
+            pC->b_isRawWriter = M4OSA_TRUE;
+            writerType = M4WRITER_kPCM;
+            break;
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+                 "Writer type not supported");
+            return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+    }
+
+    pC->pWriterGlobalFcts = pC->WriterInterface[writerType].pGlobalFcts;
+    pC->pWriterDataFcts = pC->WriterInterface[writerType].pDataFcts;
+
+    if (pC->pWriterGlobalFcts == M4OSA_NULL || pC->pWriterDataFcts == M4OSA_NULL)
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+             "Writer type not supported");
+        return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+    }
+
+    pC->pWriterDataFcts->pWriterContext = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_setCurrentVideoEncoder()
+ * @brief    Set a video encoder
+ * @param    pContext            (IN/OUT) MCS context.
+ * @param    MediaType           (IN) Encoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_setCurrentVideoEncoder(
+                                M4MCS_Context pContext,
+                                M4VIDEOEDITING_VideoFormat mediaType)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+    M4ENCODER_Format encoderType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+    switch (mediaType)
+    {
+        case M4VIDEOEDITING_kH263:
+            encoderType = M4ENCODER_kH263;
+            break;
+        case M4VIDEOEDITING_kMPEG4:
+        case M4VIDEOEDITING_kMPEG4_EMP:
+            encoderType = M4ENCODER_kMPEG4;
+            break;
+        case M4VIDEOEDITING_kH264:
+#ifdef M4VSS_SUPPORT_ENCODER_AVC
+            encoderType = M4ENCODER_kH264;
+            break;
+#endif
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+                 "Video encoder type not supported");
+            return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+    }
+
+    pC->pVideoEncoderGlobalFcts = pC->pVideoEncoderInterface[encoderType];
+    pC->pCurrentVideoEncoderExternalAPI = pC->pVideoEncoderExternalAPITable[encoderType];
+    pC->pCurrentVideoEncoderUserData = pC->pVideoEncoderUserDataTable[encoderType];
+
+    if (pC->pVideoEncoderGlobalFcts == M4OSA_NULL)
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+             "Video encoder type not supported");
+        return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_setCurrentAudioEncoder()
+ * @brief    Set an audio encoder
+ * @param    context            (IN/OUT) MCS context.
+ * @param    MediaType        (IN) Encoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_setCurrentAudioEncoder(
+                                M4MCS_Context pContext,
+                                M4VIDEOEDITING_AudioFormat mediaType)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+    M4ENCODER_AudioFormat encoderType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+    switch (mediaType)
+    {
+        case M4VIDEOEDITING_kAMR_NB:
+            encoderType = M4ENCODER_kAMRNB;
+            break;
+        case M4VIDEOEDITING_kAAC:
+            encoderType = M4ENCODER_kAAC;
+            break;
+        case M4VIDEOEDITING_kMP3:
+            encoderType = M4ENCODER_kMP3;
+            break;
+//EVRC
+//        case M4VIDEOEDITING_kEVRC:
+//            encoderType = M4ENCODER_kEVRC;
+//            break;
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+                 "Audio encoder type not supported");
+            return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+    }
+
+    pC->pAudioEncoderGlobalFcts = pC->pAudioEncoderInterface[encoderType];
+    pC->pCurrentAudioEncoderUserData = pC->pAudioEncoderUserDataTable[encoderType];
+
+    if (pC->pAudioEncoderGlobalFcts == M4OSA_NULL)
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+             "Audio encoder type not supported");
+        return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_setCurrentReader()
+ * @brief    Set current reader
+ * @param    pContext            (IN/OUT) MCS context.
+ * @param    mediaType            (IN) Media type.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_setCurrentReader( M4MCS_Context pContext,
+                                    M4VIDEOEDITING_FileType mediaType)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+    M4READER_MediaType readerType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+    switch (mediaType)
+    {
+        case M4VIDEOEDITING_kFileType_3GPP:
+        case M4VIDEOEDITING_kFileType_MP4:
+            readerType = M4READER_kMediaType3GPP;
+            break;
+        case M4VIDEOEDITING_kFileType_AMR:
+            readerType = M4READER_kMediaTypeAMR;
+            break;
+        case M4VIDEOEDITING_kFileType_MP3:
+            readerType = M4READER_kMediaTypeMP3;
+            break;
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+                 "Reader type not supported");
+            return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+    }
+
+    pC->m_pReader       = pC->m_pReaderGlobalItTable[readerType];
+    pC->m_pReaderDataIt = pC->m_pReaderDataItTable[readerType];
+
+    if (pC->m_pReader == M4OSA_NULL || pC->m_pReaderDataIt == M4OSA_NULL)
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+             "Reader type not supported");
+        return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+    }
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_setCurrentVideoDecoder()
+ * @brief    Set a video decoder
+ * @param    pContext            (IN/OUT) MCS context.
+ * @param    decoderType        (IN) Decoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_setCurrentVideoDecoder(   M4MCS_Context pContext,
+                                            M4_StreamType mediaType)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+    M4DECODER_VideoType decoderType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+    switch (mediaType)
+    {
+        case M4DA_StreamTypeVideoMpeg4:
+        case M4DA_StreamTypeVideoH263:
+            decoderType = M4DECODER_kVideoTypeMPEG4;
+            break;
+        case M4DA_StreamTypeVideoMpeg4Avc:
+            decoderType = M4DECODER_kVideoTypeAVC;
+            break;
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+                 "Video decoder type not supported");
+            return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+    }
+
+    pC->m_pVideoDecoder = pC->m_pVideoDecoderItTable[decoderType];
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+    pC->m_pCurrentVideoDecoderUserData =
+            pC->m_pVideoDecoderUserDataTable[decoderType];
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    if (pC->m_pVideoDecoder == M4OSA_NULL)
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+             "Video decoder type not supported");
+        return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4MCS_setCurrentAudioDecoder()
+ * @brief    Set an audio decoder
+ * @param    context            (IN/OUT) MCS context.
+ * @param    decoderType        (IN) Decoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR   M4MCS_setCurrentAudioDecoder(   M4MCS_Context pContext,
+                                            M4_StreamType mediaType)
+{
+    M4MCS_InternalContext* pC = (M4MCS_InternalContext*)pContext;
+    M4AD_Type decoderType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER, "invalid context pointer");
+
+    switch (mediaType)
+    {
+        case M4DA_StreamTypeAudioAmrNarrowBand:
+            decoderType = M4AD_kTypeAMRNB;
+            break;
+        case M4DA_StreamTypeAudioAac:
+        case M4DA_StreamTypeAudioAacADTS:
+        case M4DA_StreamTypeAudioAacADIF:
+            decoderType = M4AD_kTypeAAC;
+            break;
+        case M4DA_StreamTypeAudioMp3:
+            decoderType = M4AD_kTypeMP3;
+            break;
+//EVRC
+//        case M4DA_StreamTypeAudioEvrc:
+//            decoderType = M4AD_kTypeEVRC;
+//            break;
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+                 "Audio decoder type not supported");
+            return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+    }
+
+    pC->m_pAudioDecoder = pC->m_pAudioDecoderItTable[decoderType];
+    pC->m_pCurrentAudioDecoderUserData =
+                    pC->m_pAudioDecoderUserDataTable[decoderType];
+
+    if (pC->m_pAudioDecoder == M4OSA_NULL)
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED,
+             "Audio decoder type not supported");
+        return M4MCS_WAR_MEDIATYPE_NOT_SUPPORTED;
+    }
+
+    return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_MediaAndCodecSubscription.c b/libvideoeditor/vss/mcs/src/M4MCS_MediaAndCodecSubscription.c
new file mode 100755
index 0000000..d6a9e09
--- /dev/null
+++ b/libvideoeditor/vss/mcs/src/M4MCS_MediaAndCodecSubscription.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file   M4MCS_MediaAndCodecSubscription.c
+ * @brief  Media readers and codecs subscription
+ * @note   This file implements the subscription of supported media
+ *         readers and decoders for the MCS. Potential support can
+ *         be activated or de-activated
+ *         using compilation flags set in the projects settings.
+ ************************************************************************
+ */
+
+/**
+ ********************************************************************
+ * Includes
+ ********************************************************************
+ */
+#include "NXPSW_CompilerSwitches.h"
+
+
+#include "M4OSA_Debug.h"
+#include "M4MCS_InternalTypes.h"                /**< Include for MCS specific types */
+#include "M4MCS_InternalFunctions.h"            /**< Registration module */
+
+/* _______________________ */
+/*|                       |*/
+/*|  reader subscription  |*/
+/*|_______________________|*/
+
+/* Reader registration: at least one reader must be defined */
+#ifndef M4VSS_SUPPORT_READER_3GP
+#ifndef M4VSS_SUPPORT_READER_AMR
+#ifndef M4VSS_SUPPORT_READER_MP3
+#error "no reader registered"
+#endif /* M4VSS_SUPPORT_READER_MP3 */
+#endif /* M4VSS_SUPPORT_READER_AMR */
+#endif /* M4VSS_SUPPORT_READER_3GP */
+
+/* Include files for each reader to subscribe */
+#ifdef M4VSS_SUPPORT_READER_3GP
+#include "VideoEditor3gpReader.h"
+#endif
+
+#ifdef M4VSS_SUPPORT_READER_AMR
+#include "M4READER_Amr.h"
+#endif
+#ifdef M4VSS_SUPPORT_READER_MP3
+#include "VideoEditorMp3Reader.h"
+#endif
+
+/* ______________________________ */
+/*|                              |*/
+/*|  video decoder subscription  |*/
+/*|______________________________|*/
+
+#include "VideoEditorAudioDecoder.h"
+#include "VideoEditorVideoDecoder.h"
+
+
+
+/* _______________________ */
+/*|                       |*/
+/*|  writer subscription  |*/
+/*|_______________________|*/
+
+/* Writer registration: at least one writer must be defined */
+#ifndef M4VSS_SUPPORT_WRITER_AMR
+#ifndef M4VSS_SUPPORT_WRITER_3GPP
+#ifndef M4VSS_SUPPORT_WRITER_PCM
+#ifndef M4VSS_SUPPORT_WRITER_MP3
+#error "no writer registered"
+#endif /* M4VSS_SUPPORT_WRITER_MP3 */
+#endif /* M4VSS_SUPPORT_WRITER_PCM */
+#endif /* M4VSS_SUPPORT_WRITER_3GPP */
+#endif /* M4VSS_SUPPORT_WRITER_AMR */
+
+/* Include files for each writer to subscribe */
+#ifdef M4VSS_SUPPORT_WRITER_AMR
+extern M4OSA_ERR M4WRITER_AMR_getInterfaces( M4WRITER_OutputFileType* Type,
+                                             M4WRITER_GlobalInterface** SrcGlobalInterface,
+                                             M4WRITER_DataInterface** SrcDataInterface);
+#endif
+#ifdef M4VSS_SUPPORT_WRITER_3GPP
+extern M4OSA_ERR M4WRITER_3GP_getInterfaces( M4WRITER_OutputFileType* Type,
+                                             M4WRITER_GlobalInterface** SrcGlobalInterface,
+                                             M4WRITER_DataInterface** SrcDataInterface);
+#endif
+#ifdef M4VSS_SUPPORT_WRITER_PCM
+extern M4OSA_ERR M4WRITER_PCM_getInterfaces( M4WRITER_OutputFileType* Type,
+                                             M4WRITER_GlobalInterface** SrcGlobalInterface,
+                                             M4WRITER_DataInterface** SrcDataInterface);
+#endif
+#ifdef M4VSS_SUPPORT_WRITER_MP3
+extern M4OSA_ERR M4WRITER_MP3_getInterfaces( M4WRITER_OutputFileType* Type,
+                                             M4WRITER_GlobalInterface** SrcGlobalInterface,
+                                             M4WRITER_DataInterface** SrcDataInterface);
+#endif
+
+/* ______________________________ */
+/*|                              |*/
+/*|  video encoder subscription  |*/
+/*|______________________________|*/
+#include "VideoEditorAudioEncoder.h"
+#include "VideoEditorVideoEncoder.h"
+
+
+/* Include files for each video encoder to subscribe */
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+//#include "M4MP4E_interface.h"
+#endif
+
+
+#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer) \
+    if ((pointer) == M4OSA_NULL) return ((M4OSA_ERR)(retval));
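+/* For example, M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) expands to:
+   if ((pContext) == M4OSA_NULL) return ((M4OSA_ERR)(M4ERR_PARAMETER));              */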
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_subscribeMediaAndCodec(M4MCS_Context pContext);
+ * @brief    This function registers the reader, decoders, writers and encoders
+ *          in the MCS.
+ * @note
+ * @param    pContext:    (IN) Execution context.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER    pContext is NULL
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_subscribeMediaAndCodec(M4MCS_Context pContext)
+{
+    M4OSA_ERR                   err = M4NO_ERROR;
+
+    M4READER_MediaType          readerMediaType;
+    M4READER_GlobalInterface*   pReaderGlobalInterface;
+    M4READER_DataInterface*     pReaderDataInterface;
+
+    M4WRITER_OutputFileType     writerMediaType;
+    M4WRITER_GlobalInterface*   pWriterGlobalInterface;
+    M4WRITER_DataInterface*     pWriterDataInterface;
+
+    M4AD_Type                   audioDecoderType;
+    M4ENCODER_AudioFormat       audioCodecType;
+    M4ENCODER_AudioGlobalInterface* pAudioCodecInterface;
+    M4AD_Interface*             pAudioDecoderInterface;
+
+    M4DECODER_VideoType         videoDecoderType;
+    M4ENCODER_Format            videoCodecType;
+    M4ENCODER_GlobalInterface*  pVideoCodecInterface;
+    M4DECODER_VideoInterface*   pVideoDecoderInterface;
+
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext);
+
+    /* _______________________ */
+    /*|                       |*/
+    /*|  reader subscription  |*/
+    /*|_______________________|*/
+
+    /* --- 3GP --- */
+
+#ifdef M4VSS_SUPPORT_READER_3GP
+    err = VideoEditor3gpReader_getInterface(&readerMediaType,
+                                            &pReaderGlobalInterface,
+                                            &pReaderDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4READER_3GP interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerReader( pContext, readerMediaType,
+                                pReaderGlobalInterface,
+                                pReaderDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register 3GP reader");
+#endif /* M4VSS_SUPPORT_READER_3GP */
+
+    /* --- AMR --- */
+
+#ifdef M4VSS_SUPPORT_READER_AMR
+    err = M4READER_AMR_getInterfaces(   &readerMediaType,
+                                        &pReaderGlobalInterface,
+                                        &pReaderDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4READER_AMR interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerReader( pContext, readerMediaType,
+                                pReaderGlobalInterface,
+                                pReaderDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register AMR reader");
+#endif /* M4VSS_SUPPORT_READER_AMR */
+
+    /* --- MP3 --- */
+
+#ifdef M4VSS_SUPPORT_READER_MP3
+
+    err = VideoEditorMp3Reader_getInterface(&readerMediaType,
+                                            &pReaderGlobalInterface,
+                                            &pReaderDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4READER_MP3 interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerReader( pContext, readerMediaType,
+                                pReaderGlobalInterface,
+                                pReaderDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register MP3 reader");
+#endif /* M4VSS_SUPPORT_READER_MP3 */
+
+    /* ______________________________ */
+    /*|                              |*/
+    /*|  video decoder subscription  |*/
+    /*|______________________________|*/
+
+    /* --- MPEG4 & H263 --- */
+
+#ifdef M4VSS_SUPPORT_VIDEC_3GP
+
+    err = VideoEditorVideoDecoder_getInterface_MPEG4( &videoDecoderType,
+                                (M4OSA_Void *)&pVideoDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4DECODER_MPEG4 interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerVideoDecoder(   pContext, videoDecoderType,
+                                        pVideoDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register MPEG4 decoder");
+#endif /* M4VSS_SUPPORT_VIDEC_3GP */
+
+
+#ifdef M4VSS_SUPPORT_VIDEO_AVC
+
+    err = VideoEditorVideoDecoder_getInterface_H264( &videoDecoderType,
+                                (M4OSA_Void *)&pVideoDecoderInterface);
+
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4DECODER_AVC interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerVideoDecoder(   pContext, videoDecoderType,
+                                        pVideoDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register AVC decoder");
+#endif /* M4VSS_SUPPORT_VIDEO_AVC */
+
+
+    /* ______________________________ */
+    /*|                              |*/
+    /*|  audio decoder subscription  |*/
+    /*|______________________________|*/
+
+    /* --- AMRNB --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_AMRNB
+    err = VideoEditorAudioDecoder_getInterface_AMRNB(&audioDecoderType,
+                                                &pAudioDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4AD PHILIPS AMRNB interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerAudioDecoder(   pContext, audioDecoderType,
+                                        pAudioDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register PHILIPS AMRNB decoder");
+#endif /* M4VSS_SUPPORT_AUDEC_AMRNB */
+
+    /* --- AAC --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_AAC
+
+    err = VideoEditorAudioDecoder_getInterface_AAC(&audioDecoderType,
+                                            &pAudioDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4AD PHILIPS AAC interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerAudioDecoder(   pContext, audioDecoderType,
+                                        pAudioDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register PHILIPS AAC decoder");
+#endif /* M4VSS_SUPPORT_AUDEC_AAC */
+
+    /* --- MP3 --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_MP3
+
+    err = VideoEditorAudioDecoder_getInterface_MP3(&audioDecoderType,
+                                            &pAudioDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4AD PHILIPS MP3 interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerAudioDecoder(   pContext, audioDecoderType,
+                                        pAudioDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register PHILIPS MP3 decoder");
+#endif  /* M4VSS_SUPPORT_AUDEC_MP3 */
+
+    /* --- EVRC --- */
+
+
+    /* _______________________ */
+    /*|                       |*/
+    /*|  writer subscription  |*/
+    /*|_______________________|*/
+
+    /* --- PCM --- */
+
+
+    /* --- 3GPP --- */
+
+#ifdef M4VSS_SUPPORT_WRITER_3GPP
+    /* retrieves the 3GPP writer media type and pointer to functions*/
+    err = M4WRITER_3GP_getInterfaces(   &writerMediaType,
+                                        &pWriterGlobalInterface,
+                                        &pWriterDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4WRITER_3GP interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerWriter( pContext, writerMediaType,
+                                pWriterGlobalInterface,
+                                pWriterDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register 3GPP writer");
+#endif /* M4VSS_SUPPORT_WRITER_3GPP */
+
+
+    /* ______________________________ */
+    /*|                              |*/
+    /*|  video encoder subscription  |*/
+    /*|______________________________|*/
+
+    /* --- MPEG4 --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+       /* retrieves the MPEG4 encoder type and pointer to functions*/
+    err = VideoEditorVideoEncoder_getInterface_MPEG4(&videoCodecType,
+                                                &pVideoCodecInterface,
+                                                M4ENCODER_OPEN_ADVANCED);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4MP4E_MPEG4 interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerVideoEncoder(   pContext, videoCodecType,
+                                        pVideoCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register video MPEG4 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
+
+    /* --- H263 --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+    /* retrieves the H263 encoder type and pointer to functions*/
+    err = VideoEditorVideoEncoder_getInterface_H263(&videoCodecType,
+                                                &pVideoCodecInterface,
+                                                M4ENCODER_OPEN_ADVANCED);
+
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4MP4E_H263 interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerVideoEncoder( pContext, videoCodecType,
+                                      pVideoCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register video H263 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AVC
+    /* retrieves the H264 encoder type and pointer to functions*/
+    err = VideoEditorVideoEncoder_getInterface_H264(&videoCodecType,
+                                                &pVideoCodecInterface,
+                                                M4ENCODER_OPEN_ADVANCED);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4H264E interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register video H264 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AVC */
+
+    /* ______________________________ */
+    /*|                              |*/
+    /*|  audio encoder subscription  |*/
+    /*|______________________________|*/
+
+    /* --- AMR --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AMR
+       /* retrieves the AMR encoder type and pointer to functions*/
+    err = VideoEditorAudioEncoder_getInterface_AMRNB(&audioCodecType,
+                                                &pAudioCodecInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4AMR interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerAudioEncoder(   pContext, audioCodecType,
+                                        pAudioCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register audio AMR encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AMR */
+
+    /* --- AAC --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AAC
+    /* retrieves the AAC encoder type and pointer to functions*/
+    err = VideoEditorAudioEncoder_getInterface_AAC(&audioCodecType,
+                                                &pAudioCodecInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4AAC interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerAudioEncoder(   pContext, audioCodecType,
+                                        pAudioCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register audio AAC encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AAC */
+
+
+
+    /* --- MP3 --- */
+#ifdef M4VSS_SUPPORT_ENCODER_MP3
+    /* retrieves the MP3 encoder type and pointer to functions*/
+    err = VideoEditorAudioEncoder_getInterface_MP3(&audioCodecType,
+                                                &pAudioCodecInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4MP3E interface allocation error");
+        return err;
+    }
+    err = M4MCS_registerAudioEncoder( pContext, audioCodecType,
+                                      pAudioCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+         "M4MCS_subscribeMediaAndCodec: can't register audio MP3 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_MP3 */
+
+    return err;
+}
+
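Each subscription block in M4MCS_subscribeMediaAndCodec() above repeats the same three steps: query the codec interface, register it with the MCS context, and trace a debug message if registration fails. A minimal sketch of that pattern as a helper macro, assuming the hypothetical name MCS_SUBSCRIBE_READER and the local variables of the function above (illustration only, not part of this change):

/* Hypothetical helper: expands to the get-interface / register / trace
 * sequence used for each reader above. 'label' must be a string literal. */
#define MCS_SUBSCRIBE_READER(getInterfaceFct, label)                        \
    do {                                                                    \
        err = getInterfaceFct(&readerMediaType,                             \
                              &pReaderGlobalInterface,                      \
                              &pReaderDataInterface);                       \
        if (M4NO_ERROR != err)                                              \
        {                                                                   \
            M4OSA_TRACE1_0(label " interface allocation error");            \
            return err;                                                     \
        }                                                                   \
        err = M4MCS_registerReader(pContext, readerMediaType,               \
                                   pReaderGlobalInterface,                  \
                                   pReaderDataInterface);                   \
        M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,                           \
            "M4MCS_subscribeMediaAndCodec: can't register " label);         \
    } while (0)

/* Usage sketch, equivalent to the 3GP reader block above:
 *     MCS_SUBSCRIBE_READER(VideoEditor3gpReader_getInterface, "3GP reader");
 */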
diff --git a/libvideoeditor/vss/mcs/src/M4MCS_VideoPreProcessing.c b/libvideoeditor/vss/mcs/src/M4MCS_VideoPreProcessing.c
new file mode 100755
index 0000000..f9dffdb
--- /dev/null
+++ b/libvideoeditor/vss/mcs/src/M4MCS_VideoPreProcessing.c
@@ -0,0 +1,462 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file   M4MCS_VideoPreProcessing.c
+ * @brief  MCS implementation
+ * @note   This file implements the encoder callback of the MCS.
+ *************************************************************************
+ **/
+
+/**
+ ********************************************************************
+ * Includes
+ ********************************************************************
+ */
+/* OSAL headers */
+#include "M4OSA_Memory.h"       /* OSAL memory management */
+#include "M4OSA_Debug.h"        /* OSAL debug management */
+
+
+/* Core headers */
+#include "M4MCS_InternalTypes.h"
+#include "M4MCS_ErrorCodes.h"
+
+/**
+ * Video preprocessing interface definition */
+#include "M4VPP_API.h"
+
+/**
+ * Video filters */
+#include "M4VIFI_FiltersAPI.h" /**< for M4VIFI_ResizeBilinearYUV420toYUV420() */
+
+#ifndef M4MCS_AUDIOONLY
+#include "M4AIR_API.h"
+#endif /*M4MCS_AUDIOONLY*/
+/**/
+
+
+
+
+/*
+ ******************************************************************************
+ * M4OSA_ERR M4MCS_intApplyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ *                               M4VIFI_ImagePlane* pPlaneOut)
+ * @brief    Does the video rendering and the resizing (if needed)
+ * @note    It is called by the video encoder
+ * @param    pContext    (IN) VPP context, which actually is the MCS internal context in our case
+ * @param    pPlaneIn    (IN) Contains the image
+ * @param    pPlaneOut    (IN/OUT) Pointer to an array of 3 planes that will contain the output
+ *                                  YUV420 image
+ * @return    M4NO_ERROR:    No error
+ * @return    M4MCS_ERR_VIDEO_DECODE_ERROR: the video decoding failed
+ * @return    M4MCS_ERR_RESIZE_ERROR: the resizing failed
+ * @return    Any error returned by an underlying module
+ ******************************************************************************
+ */
+M4OSA_ERR M4MCS_intApplyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+                             M4VIFI_ImagePlane* pPlaneOut)
+{
+    M4OSA_ERR        err = M4NO_ERROR;
+
+/* This part is used only if video codecs are compiled*/
+#ifndef M4MCS_AUDIOONLY
+    /**
+     * The VPP context is actually the MCS context! */
+    M4MCS_InternalContext *pC = (M4MCS_InternalContext*)(pContext);
+
+    M4_MediaTime mtCts = pC->dViDecCurrentCts;
+
+    /**
+     * When closing after an error occurred, it may happen that pReaderVideoAU->m_dataAddress has
+     * not been allocated yet. When closing in pause mode, the decoder can be null.
+     * We don't want an error to be returned because it would interrupt the close process and
+     * thus some resources would be locked. So we return M4NO_ERROR.
+     */
+    /* Initialize the output plane to black if the media rendering
+     mode is black borders */
+    if(pC->MediaRendering == M4MCS_kBlackBorders)
+    {
+        M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[0].pac_data,
+            (pPlaneOut[0].u_height*pPlaneOut[0].u_stride),
+            Y_PLANE_BORDER_VALUE);
+        M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[1].pac_data,
+            (pPlaneOut[1].u_height*pPlaneOut[1].u_stride),
+            U_PLANE_BORDER_VALUE);
+        M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[2].pac_data,
+            (pPlaneOut[2].u_height*pPlaneOut[2].u_stride),
+            V_PLANE_BORDER_VALUE);
+    }
+    else if ((M4OSA_NULL == pC->ReaderVideoAU.m_dataAddress) ||
+             (M4OSA_NULL == pC->pViDecCtxt))
+    {
+        /**
+         * We must fill the input of the encoder with a dummy image, because
+         * encoding noise leads to a huge video AU, and thus a writer buffer overflow. */
+        M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[0].pac_data,
+             pPlaneOut[0].u_stride * pPlaneOut[0].u_height, 0);
+        M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[1].pac_data,
+             pPlaneOut[1].u_stride * pPlaneOut[1].u_height, 0);
+        M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[2].pac_data,
+             pPlaneOut[2].u_stride * pPlaneOut[2].u_height, 0);
+
+        M4OSA_TRACE1_0("M4MCS_intApplyVPP: pReaderVideoAU->m_dataAddress is M4OSA_NULL,\
+                       returning M4NO_ERROR");
+        return M4NO_ERROR;
+    }
+
+    if(pC->isRenderDup == M4OSA_FALSE)
+    {
+        /**
+         *    pPreResizeFrame different from M4OSA_NULL means that resizing is needed */
+        if (M4OSA_NULL != pC->pPreResizeFrame)
+        {
+            /** FB 2008/10/20:
+            Used for cropping and black borders*/
+            M4AIR_Params Params;
+
+            M4OSA_TRACE3_0("M4MCS_intApplyVPP: Need to resize");
+            err = pC->m_pVideoDecoder->m_pFctRender(pC->pViDecCtxt, &mtCts,
+                pC->pPreResizeFrame, M4OSA_TRUE);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4MCS_intApplyVPP: m_pFctRender returns 0x%x!", err);
+                return err;
+            }
+
+            if(pC->MediaRendering == M4MCS_kResizing)
+            {
+                /*
+                 * Call the resize filter. From the intermediate frame to the encoder
+                 * image plane
+                 */
+                err = M4VIFI_ResizeBilinearYUV420toYUV420(M4OSA_NULL,
+                    pC->pPreResizeFrame, pPlaneOut);
+                if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4MCS_intApplyVPP: M4VIFI_ResizeBilinearYUV420toYUV420\
+                                   returns 0x%x!", err);
+                    return err;
+                }
+            }
+            else
+            {
+                M4VIFI_ImagePlane pImagePlanesTemp[3];
+                M4VIFI_ImagePlane* pPlaneTemp;
+                M4OSA_UInt8* pOutPlaneY = pPlaneOut[0].pac_data +
+                                          pPlaneOut[0].u_topleft;
+                M4OSA_UInt8* pOutPlaneU = pPlaneOut[1].pac_data +
+                                          pPlaneOut[1].u_topleft;
+                M4OSA_UInt8* pOutPlaneV = pPlaneOut[2].pac_data +
+                                          pPlaneOut[2].u_topleft;
+                M4OSA_UInt8* pInPlaneY = M4OSA_NULL;
+                M4OSA_UInt8* pInPlaneU = M4OSA_NULL;
+                M4OSA_UInt8* pInPlaneV = M4OSA_NULL;
+                M4OSA_UInt32 i = 0;
+
+                /*FB 2008/10/20: to keep media aspect ratio*/
+                /*Initialize AIR Params*/
+                Params.m_inputCoord.m_x = 0;
+                Params.m_inputCoord.m_y = 0;
+                Params.m_inputSize.m_height = pC->pPreResizeFrame->u_height;
+                Params.m_inputSize.m_width = pC->pPreResizeFrame->u_width;
+                Params.m_outputSize.m_width = pPlaneOut->u_width;
+                Params.m_outputSize.m_height = pPlaneOut->u_height;
+                Params.m_bOutputStripe = M4OSA_FALSE;
+                Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
+
+                /**
+                Media rendering: Black borders*/
+                if(pC->MediaRendering == M4MCS_kBlackBorders)
+                {
+                    pImagePlanesTemp[0].u_width = pPlaneOut[0].u_width;
+                    pImagePlanesTemp[0].u_height = pPlaneOut[0].u_height;
+                    pImagePlanesTemp[0].u_stride = pPlaneOut[0].u_width;
+                    pImagePlanesTemp[0].u_topleft = 0;
+
+                    pImagePlanesTemp[1].u_width = pPlaneOut[1].u_width;
+                    pImagePlanesTemp[1].u_height = pPlaneOut[1].u_height;
+                    pImagePlanesTemp[1].u_stride = pPlaneOut[1].u_width;
+                    pImagePlanesTemp[1].u_topleft = 0;
+
+                    pImagePlanesTemp[2].u_width = pPlaneOut[2].u_width;
+                    pImagePlanesTemp[2].u_height = pPlaneOut[2].u_height;
+                    pImagePlanesTemp[2].u_stride = pPlaneOut[2].u_width;
+                    pImagePlanesTemp[2].u_topleft = 0;
+
+                    /* Allocates plan in local image plane structure */
+                    pImagePlanesTemp[0].pac_data =
+                        (M4OSA_UInt8*)M4OSA_malloc(pImagePlanesTemp[0]\
+                        .u_width * pImagePlanesTemp[0].u_height, M4VS,
+                        (M4OSA_Char *)"M4MCS_intApplyVPP: temporary plane bufferY") ;
+                    if(pImagePlanesTemp[0].pac_data == M4OSA_NULL)
+                    {
+                        M4OSA_TRACE1_0("Error alloc in M4MCS_intApplyVPP");
+                        return M4ERR_ALLOC;
+                    }
+                    pImagePlanesTemp[1].pac_data =
+                        (M4OSA_UInt8*)M4OSA_malloc(pImagePlanesTemp[1]\
+                        .u_width * pImagePlanesTemp[1].u_height, M4VS,
+                        (M4OSA_Char *)"M4MCS_intApplyVPP: temporary plane bufferU") ;
+                    if(pImagePlanesTemp[1].pac_data == M4OSA_NULL)
+                    {
+                        M4OSA_TRACE1_0("Error alloc in M4MCS_intApplyVPP");
+                        return M4ERR_ALLOC;
+                    }
+                    pImagePlanesTemp[2].pac_data =
+                        (M4OSA_UInt8*)M4OSA_malloc(pImagePlanesTemp[2]\
+                        .u_width * pImagePlanesTemp[2].u_height,
+                        M4VS,(M4OSA_Char *)"M4MCS_intApplyVPP: temporary plane bufferV") ;
+                    if(pImagePlanesTemp[2].pac_data == M4OSA_NULL)
+                    {
+                        M4OSA_TRACE1_0("Error alloc in M4MCS_intApplyVPP");
+                        return M4ERR_ALLOC;
+                    }
+
+                    pInPlaneY = pImagePlanesTemp[0].pac_data ;
+                    pInPlaneU = pImagePlanesTemp[1].pac_data ;
+                    pInPlaneV = pImagePlanesTemp[2].pac_data ;
+
+                    M4OSA_memset((M4OSA_MemAddr8)pImagePlanesTemp[0].pac_data,
+                        (pImagePlanesTemp[0].u_height*pImagePlanesTemp[0].u_stride),
+                        Y_PLANE_BORDER_VALUE);
+                    M4OSA_memset((M4OSA_MemAddr8)pImagePlanesTemp[1].pac_data,
+                        (pImagePlanesTemp[1].u_height*pImagePlanesTemp[1].u_stride),
+                        U_PLANE_BORDER_VALUE);
+                    M4OSA_memset((M4OSA_MemAddr8)pImagePlanesTemp[2].pac_data,
+                        (pImagePlanesTemp[2].u_height*pImagePlanesTemp[2].u_stride),
+                        V_PLANE_BORDER_VALUE);
+
+                    if((M4OSA_UInt32)((pC->pPreResizeFrame->u_height * pPlaneOut->u_width)\
+                         /pC->pPreResizeFrame->u_width) <= pPlaneOut->u_height)
+                         //Params.m_inputSize.m_height < Params.m_inputSize.m_width)
+                    {
+                        /* the height is the limiting dimension, so black borders will be at the top and bottom */
+                        Params.m_outputSize.m_width = pPlaneOut->u_width;
+                        Params.m_outputSize.m_height =
+                             (M4OSA_UInt32)
+                             ((pC->pPreResizeFrame->u_height * pPlaneOut->u_width)\
+                             /pC->pPreResizeFrame->u_width);
+                        /*number of lines at the top*/
+                        pImagePlanesTemp[0].u_topleft =
+                             (M4MCS_ABS((M4OSA_Int32)
+                             (pImagePlanesTemp[0].u_height\
+                             -Params.m_outputSize.m_height)>>1)) *
+                             pImagePlanesTemp[0].u_stride;
+                        pImagePlanesTemp[0].u_height = Params.m_outputSize.m_height;
+                        pImagePlanesTemp[1].u_topleft =
+                             (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_height\
+                             -(Params.m_outputSize.m_height>>1)))>>1)\
+                             * pImagePlanesTemp[1].u_stride;
+                        pImagePlanesTemp[1].u_height = Params.m_outputSize.m_height>>1;
+                        pImagePlanesTemp[2].u_topleft =
+                             (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_height\
+                             -(Params.m_outputSize.m_height>>1)))>>1)\
+                             * pImagePlanesTemp[2].u_stride;
+                        pImagePlanesTemp[2].u_height = Params.m_outputSize.m_height>>1;
+                    }
+                    else
+                    {
+                        /* the width is the limiting dimension, so black borders will be on the left and right */
+                        Params.m_outputSize.m_height = pPlaneOut->u_height;
+                        Params.m_outputSize.m_width =
+                             (M4OSA_UInt32)((pC->pPreResizeFrame->u_width
+                             * pPlaneOut->u_height)\
+                             /pC->pPreResizeFrame->u_height);
+
+                        pImagePlanesTemp[0].u_topleft =
+                             (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[0].u_width-\
+                                Params.m_outputSize.m_width)>>1));
+                        pImagePlanesTemp[0].u_width = Params.m_outputSize.m_width;
+                        pImagePlanesTemp[1].u_topleft =
+                             (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[1].u_width-\
+                                (Params.m_outputSize.m_width>>1)))>>1);
+                        pImagePlanesTemp[1].u_width = Params.m_outputSize.m_width>>1;
+                        pImagePlanesTemp[2].u_topleft =
+                            (M4MCS_ABS((M4OSA_Int32)(pImagePlanesTemp[2].u_width-\
+                                (Params.m_outputSize.m_width>>1)))>>1);
+                        pImagePlanesTemp[2].u_width = Params.m_outputSize.m_width>>1;
+                    }
+
+                    /*Width and height have to be even*/
+                    Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
+                    Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
+                    Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
+                    Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
+                    pImagePlanesTemp[0].u_width = (pImagePlanesTemp[0].u_width>>1)<<1;
+                    pImagePlanesTemp[1].u_width = (pImagePlanesTemp[1].u_width>>1)<<1;
+                    pImagePlanesTemp[2].u_width = (pImagePlanesTemp[2].u_width>>1)<<1;
+                    pImagePlanesTemp[0].u_height = (pImagePlanesTemp[0].u_height>>1)<<1;
+                    pImagePlanesTemp[1].u_height = (pImagePlanesTemp[1].u_height>>1)<<1;
+                    pImagePlanesTemp[2].u_height = (pImagePlanesTemp[2].u_height>>1)<<1;
+
+                    /*Check that values are coherent*/
+                    if(Params.m_inputSize.m_height == Params.m_outputSize.m_height)
+                    {
+                        Params.m_inputSize.m_width = Params.m_outputSize.m_width;
+                    }
+                    else if(Params.m_inputSize.m_width == Params.m_outputSize.m_width)
+                    {
+                        Params.m_inputSize.m_height = Params.m_outputSize.m_height;
+                    }
+                    pPlaneTemp = pImagePlanesTemp;
+                }
+
+                /**
+                Media rendering: Cropping*/
+                if(pC->MediaRendering == M4MCS_kCropping)
+                {
+                    Params.m_outputSize.m_height = pPlaneOut->u_height;
+                    Params.m_outputSize.m_width = pPlaneOut->u_width;
+                    if((Params.m_outputSize.m_height * Params.m_inputSize.m_width)\
+                         /Params.m_outputSize.m_width<Params.m_inputSize.m_height)
+                    {
+                        /*height will be cropped*/
+                        Params.m_inputSize.m_height =
+                             (M4OSA_UInt32)((Params.m_outputSize.m_height \
+                             * Params.m_inputSize.m_width) /
+                             Params.m_outputSize.m_width);
+                        Params.m_inputSize.m_height =
+                            (Params.m_inputSize.m_height>>1)<<1;
+                        Params.m_inputCoord.m_y =
+                            (M4OSA_Int32)((M4OSA_Int32)
+                            ((pC->pPreResizeFrame->u_height\
+                            - Params.m_inputSize.m_height))>>1);
+                    }
+                    else
+                    {
+                        /*width will be cropped*/
+                        Params.m_inputSize.m_width =
+                             (M4OSA_UInt32)((Params.m_outputSize.m_width\
+                                 * Params.m_inputSize.m_height) /
+                                 Params.m_outputSize.m_height);
+                        Params.m_inputSize.m_width =
+                             (Params.m_inputSize.m_width>>1)<<1;
+                        Params.m_inputCoord.m_x =
+                            (M4OSA_Int32)((M4OSA_Int32)
+                            ((pC->pPreResizeFrame->u_width\
+                            - Params.m_inputSize.m_width))>>1);
+                    }
+                    pPlaneTemp = pPlaneOut;
+                }
+                /**
+                 * Call AIR functions */
+                if(M4OSA_NULL == pC->m_air_context)
+                {
+                    err = M4AIR_create(&pC->m_air_context, M4AIR_kYUV420P);
+                    if(err != M4NO_ERROR)
+                    {
+                        M4OSA_TRACE1_1("M4MCS_intApplyVPP:\
+                         Error when initializing AIR: 0x%x", err);
+                        return err;
+                    }
+                }
+
+                err = M4AIR_configure(pC->m_air_context, &Params);
+                if(err != M4NO_ERROR)
+                {
+                    M4OSA_TRACE1_1("M4MCS_intApplyVPP:\
+                     Error when configuring AIR: 0x%x", err);
+                    M4AIR_cleanUp(pC->m_air_context);
+                    return err;
+                }
+
+                err = M4AIR_get(pC->m_air_context, pC->pPreResizeFrame,
+                                pPlaneTemp);
+                if(err != M4NO_ERROR)
+                {
+                    M4OSA_TRACE1_1("M4MCS_intApplyVPP:\
+                     Error when getting AIR plane: 0x%x", err);
+                    M4AIR_cleanUp(pC->m_air_context);
+                    return err;
+                }
+
+                if(pC->MediaRendering == M4MCS_kBlackBorders)
+                {
+                    for(i=0; i<pPlaneOut[0].u_height; i++)
+                    {
+                        M4OSA_memcpy(   (M4OSA_MemAddr8)pOutPlaneY,
+                                        (M4OSA_MemAddr8)pInPlaneY,
+                                        pPlaneOut[0].u_width);
+                        pInPlaneY += pPlaneOut[0].u_width;
+                        pOutPlaneY += pPlaneOut[0].u_stride;
+                    }
+                    for(i=0; i<pPlaneOut[1].u_height; i++)
+                    {
+                        M4OSA_memcpy(   (M4OSA_MemAddr8)pOutPlaneU,
+                                        (M4OSA_MemAddr8)pInPlaneU,
+                                        pPlaneOut[1].u_width);
+                        pInPlaneU += pPlaneOut[1].u_width;
+                        pOutPlaneU += pPlaneOut[1].u_stride;
+                    }
+                    for(i=0; i<pPlaneOut[2].u_height; i++)
+                    {
+                        M4OSA_memcpy(   (M4OSA_MemAddr8)pOutPlaneV,
+                                        (M4OSA_MemAddr8)pInPlaneV,
+                                        pPlaneOut[2].u_width);
+                        pInPlaneV += pPlaneOut[2].u_width;
+                        pOutPlaneV += pPlaneOut[2].u_stride;
+                    }
+
+                    for(i=0; i<3; i++)
+                    {
+                        if(pImagePlanesTemp[i].pac_data != M4OSA_NULL)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)
+                                        pImagePlanesTemp[i].pac_data);
+                            pImagePlanesTemp[i].pac_data = M4OSA_NULL;
+                        }
+                    }
+                }
+            }
+        }
+        else
+        {
+            M4OSA_TRACE3_0("M4MCS_intApplyVPP: Don't need resizing");
+            err = pC->m_pVideoDecoder->m_pFctRender(pC->pViDecCtxt,
+                                                    &mtCts, pPlaneOut,
+                                                    M4OSA_TRUE);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4MCS_intApplyVPP: m_pFctRender returns 0x%x!", err);
+                return err;
+            }
+        }
+        pC->lastDecodedPlane = pPlaneOut;
+    }
+    else
+    {
+        /* Copy last decoded plane to output plane */
+        M4OSA_memcpy((M4OSA_MemAddr8)pPlaneOut[0].pac_data,
+                        (M4OSA_MemAddr8)pC->lastDecodedPlane[0].pac_data,
+                         (pPlaneOut[0].u_height * pPlaneOut[0].u_width));
+        M4OSA_memcpy((M4OSA_MemAddr8)pPlaneOut[1].pac_data,
+                        (M4OSA_MemAddr8)pC->lastDecodedPlane[1].pac_data,
+                          (pPlaneOut[1].u_height * pPlaneOut[1].u_width));
+        M4OSA_memcpy((M4OSA_MemAddr8)pPlaneOut[2].pac_data,
+                        (M4OSA_MemAddr8)pC->lastDecodedPlane[2].pac_data,
+                          (pPlaneOut[2].u_height * pPlaneOut[2].u_width));
+        pC->lastDecodedPlane = pPlaneOut;
+    }
+
+
+#endif /*M4MCS_AUDIOONLY*/
+    M4OSA_TRACE3_0("M4MCS_intApplyVPP: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+
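In the black-borders branch of M4MCS_intApplyVPP() above, the useful picture area is scaled so the source aspect ratio is preserved and then centred in the output planes; the remaining rows or columns keep the border colour. A small standalone sketch of that geometry computation, with a hypothetical helper name and example sizes that are not taken from the source:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the letterbox geometry used by the black-borders branch:
 * scale the source into the destination while keeping its aspect ratio,
 * force even dimensions, and centre the picture in the output plane. */
static void computeLetterbox(uint32_t srcW, uint32_t srcH,
                             uint32_t dstW, uint32_t dstH,
                             uint32_t *picW, uint32_t *picH,
                             uint32_t *offX, uint32_t *offY)
{
    if ((srcH * dstW) / srcW <= dstH)
    {
        /* Height is the limiting dimension: bars at the top and bottom. */
        *picW = dstW;
        *picH = ((srcH * dstW) / srcW) & ~1u;
    }
    else
    {
        /* Width is the limiting dimension: bars on the left and right. */
        *picH = dstH;
        *picW = ((srcW * dstH) / srcH) & ~1u;
    }
    *offX = (dstW - *picW) / 2;
    *offY = (dstH - *picH) / 2;
}

int main(void)
{
    uint32_t w, h, x, y;
    computeLetterbox(352, 288, 320, 240, &w, &h, &x, &y); /* CIF into QVGA */
    printf("picture %ux%u at offset (%u,%u)\n", w, h, x, y);
    return 0;
}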
diff --git a/libvideoeditor/vss/src/Android.mk b/libvideoeditor/vss/src/Android.mk
new file mode 100755
index 0000000..ae0778d
--- /dev/null
+++ b/libvideoeditor/vss/src/Android.mk
@@ -0,0 +1,92 @@
+#
+# Copyright (C) 2011 NXP Software
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+#
+# libvss
+#
+include $(CLEAR_VARS)
+
+LOCAL_MODULE:= libvideoeditor_core
+
+LOCAL_SRC_FILES:=          \
+      M4PTO3GPP_API.c \
+      M4PTO3GPP_VideoPreProcessing.c \
+      M4VIFI_xVSS_RGB565toYUV420.c \
+      M4xVSS_API.c \
+      M4xVSS_internal.c \
+      M4VSS3GPP_AudioMixing.c \
+      M4VSS3GPP_Clip.c \
+      M4VSS3GPP_ClipAnalysis.c \
+      M4VSS3GPP_Codecs.c \
+      M4VSS3GPP_Edit.c \
+      M4VSS3GPP_EditAudio.c \
+      M4VSS3GPP_EditVideo.c \
+      M4VSS3GPP_MediaAndCodecSubscription.c \
+      glvaudioresampler.c \
+      M4ChannelCoverter.c \
+      M4VD_EXTERNAL_BitstreamParser.c \
+      M4VD_EXTERNAL_Interface.c \
+      M4AIR_API.c \
+      M4READER_Pcm.c \
+      M4PCMR_CoreReader.c \
+      M4AD_Null.c \
+      M4AMRR_CoreReader.c \
+      M4READER_Amr.c \
+      M4VD_Tools.c
+
+
+LOCAL_MODULE_TAGS := development
+
+LOCAL_SHARED_LIBRARIES := libcutils libutils
+
+LOCAL_STATIC_LIBRARIES := \
+    libvideoeditor_osal \
+    libvideoeditor_3gpwriter \
+    libvideoeditor_mcs \
+    libvideoeditor_videofilters \
+    libvideoeditor_stagefrightshells
+
+LOCAL_C_INCLUDES += \
+    $(TOP)/frameworks/media/libvideoeditor/osal/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/mcs/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/common/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/stagefrightshells/inc
+
+ifeq ($(TARGET_SIMULATOR),true)
+else
+    LOCAL_SHARED_LIBRARIES += libdl
+endif
+
+# All of the shared libraries we link against.
+LOCAL_LDLIBS := \
+    -lpthread -ldl
+
+LOCAL_CFLAGS += -Wno-multichar \
+    -DM4xVSS_RESERVED_MOOV_DISK_SPACEno \
+    -DDECODE_GIF_ON_SAVING
+
+# Don't prelink this library.  For more efficient code, you may want
+# to add this library to the prelink map and set this to true.
+LOCAL_PRELINK_MODULE := false
+
+
+
+include $(BUILD_STATIC_LIBRARY)
+
diff --git a/libvideoeditor/vss/src/M4AD_Null.c b/libvideoeditor/vss/src/M4AD_Null.c
new file mode 100755
index 0000000..faac43b
--- /dev/null
+++ b/libvideoeditor/vss/src/M4AD_Null.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+ * @file    M4AD_Null.c
+ * @brief   Implementation of the null audio decoder public interface
+ * @note    This file implements a "null" audio decoder, that is, a decoder
+ *          that does nothing except forwarding the AUs obtained from the reader
+*************************************************************************
+*/
+#include "M4OSA_Debug.h"
+#include "M4OSA_Error.h"
+#include "M4TOOL_VersionInfo.h"
+#include "M4AD_Common.h"
+#include "M4AD_Null.h"
+
+#define M4AD_FORCE_16BITS
+
+/**
+ ************************************************************************
+ * NULL Audio Decoder version information
+ ************************************************************************
+*/
+/* CHANGE_VERSION_HERE */
+#define M4AD_NULL_MAJOR    1
+#define M4AD_NULL_MINOR    1
+#define M4AD_NULL_REVISION 4
+
+/**
+ ************************************************************************
+ * structure    M4AD_NullContext
+ * @brief        Internal null decoder context
+ ************************************************************************
+*/
+typedef struct
+{
+    /**< Pointer to the stream handler provided by the user */
+    M4_AudioStreamHandler*    m_pAudioStreamhandler;
+} M4AD_NullContext;
+
+
+/**
+ ************************************************************************
+ * Null audio decoder functions definition
+ ************************************************************************
+*/
+
+/**
+ ************************************************************************
+ * @brief   Creates an instance of the null decoder
+ * @note    Allocates the context
+ *
+ * @param    pContext:        (OUT)    Context of the decoder
+ * @param    pStreamHandler: (IN)    Pointer to an audio stream description
+ * @param    pUserData:        (IN)    Pointer to User data
+ *
+ * @return    M4NO_ERROR              there is no error
+ * @return    M4ERR_STATE             State automaton is not applied
+ * @return    M4ERR_ALLOC             a memory allocation has failed
+ * @return    M4ERR_PARAMETER         at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR    M4AD_NULL_create(  M4AD_Context* pContext,
+                                M4_AudioStreamHandler *pStreamHandler,
+                                void* pUserData)
+{
+    M4AD_NullContext* pC;
+
+    M4OSA_DEBUG_IF1((pContext == 0), M4ERR_PARAMETER,
+                "M4AD_NULL_create: invalid context pointer");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+                "M4AD_NULL_create: invalid pointer pStreamHandler");
+
+    pC = (M4AD_NullContext*)M4OSA_malloc(sizeof(M4AD_NullContext),
+                 M4DECODER_AUDIO, (M4OSA_Char *)"M4AD_NullContext");
+    if (pC == (M4AD_NullContext*)0)
+    {
+        M4OSA_TRACE1_0("Can not allocate null decoder context");
+        return M4ERR_ALLOC;
+    }
+
+    *pContext = pC;
+
+    pC->m_pAudioStreamhandler = pStreamHandler;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief    Destroys the instance of the null decoder
+ * @note     After this call the context is invalid
+ *
+ * @param    context:    (IN)    Context of the decoder
+ *
+ * @return   M4NO_ERROR            There is no error
+ * @return   M4ERR_PARAMETER     The context is invalid (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR    M4AD_NULL_destroy(M4AD_Context context)
+{
+    M4AD_NullContext* pC = (M4AD_NullContext*)context;
+
+    M4OSA_DEBUG_IF1((context == M4OSA_NULL), M4ERR_PARAMETER, "M4AD_NULL_destroy: invalid context");
+
+    M4OSA_free((M4OSA_MemAddr32)pC);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Simply output the given audio data
+ * @note
+ *
+ * @param   context:          (IN)    Context of the decoder
+ * @param   pInputBuffer:     (IN/OUT) Input data buffer. It contains at least one audio frame.
+ *                                    The size of the buffer must be updated inside the function
+ *                                    to reflect the size of the actually decoded data.
+ *                                    (e.g. the first frame in pInputBuffer)
+ * @param   pDecodedPCMBuffer: (OUT)  Output PCM buffer (decoded data).
+ * @param   jumping:           (IN)   M4OSA_TRUE if a jump was just done, M4OSA_FALSE otherwise.
+ * @return    M4NO_ERROR              there is no error
+ * @return    M4ERR_PARAMETER         at least one parameter is not properly set
+ ************************************************************************
+*/
+M4OSA_ERR    M4AD_NULL_step(M4AD_Context context, M4AD_Buffer *pInputBuffer,
+                            M4AD_Buffer *pDecodedPCMBuffer, M4OSA_Bool jumping)
+{
+    M4AD_NullContext* pC = (M4AD_NullContext*)context;
+
+    /*The VPS sends a zero buffer at the end*/
+    if (0 == pInputBuffer->m_bufferSize)
+    {
+        return M4WAR_NO_MORE_AU;
+    }
+
+    if (pInputBuffer->m_bufferSize > pDecodedPCMBuffer->m_bufferSize)
+    {
+        return M4ERR_PARAMETER;
+    }
+#ifdef M4AD_FORCE_16BITS
+    /*if read samples are 8 bits, complete them to 16 bits*/
+    if (pC->m_pAudioStreamhandler->m_byteSampleSize == 1)
+    {
+        M4OSA_UInt32 i;
+        M4OSA_Int16  val;
+
+        for (i = 0; i < pInputBuffer->m_bufferSize; i++)
+        {
+            val = (M4OSA_Int16)((M4OSA_UInt8)(pInputBuffer->m_dataAddress[i]) - 128);
+
+            pDecodedPCMBuffer->m_dataAddress[i*2]   = (M4OSA_Int8)(val>>8);
+            pDecodedPCMBuffer->m_dataAddress[i*2+1] = (M4OSA_Int8)(val&0x00ff);
+        }
+    }
+    else
+    {
+        M4OSA_memcpy(pDecodedPCMBuffer->m_dataAddress, pInputBuffer->m_dataAddress,
+                    pInputBuffer->m_bufferSize );
+    }
+#else /*M4AD_FORCE_16BITS*/
+    M4OSA_memcpy(pDecodedPCMBuffer->m_dataAddress, pInputBuffer->m_dataAddress,
+                    pInputBuffer->m_bufferSize );
+#endif /*M4AD_FORCE_16BITS*/
+
+    return M4NO_ERROR;
+}
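With M4AD_FORCE_16BITS defined, the step function above widens unsigned 8-bit samples to 16 bits by removing the 128 bias and writing the result as two bytes, high byte first; the amplitude itself is not rescaled. A standalone illustration of that widening, using a hypothetical function name:

#include <stddef.h>
#include <stdint.h>

/* Illustration of the widening performed in the M4AD_FORCE_16BITS branch:
 * each unsigned 8-bit sample loses its 128 bias and is written as a signed
 * 16-bit value, high byte first, without any amplitude scaling. */
static void widen8to16(const uint8_t *in, size_t nSamples, int8_t *out)
{
    for (size_t i = 0; i < nSamples; i++)
    {
        int16_t val = (int16_t)(in[i] - 128);   /* now in [-128, 127]  */
        out[i * 2]     = (int8_t)(val >> 8);    /* 0x00 or 0xFF (sign) */
        out[i * 2 + 1] = (int8_t)(val & 0xFF);  /* bias-removed value  */
    }
}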
+
+/**
+ ************************************************************************
+ * @brief   Gets the decoder version
+ * @note    The version is given in a M4_VersionInfo structure
+ *
+ * @param   pValue:     (OUT)       Pointer to the version structure
+ *
+ * @return  M4NO_ERROR              there is no error
+ * @return  M4ERR_PARAMETER         pVersionInfo pointer is null (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR    M4AD_NULL_getVersion(M4_VersionInfo* pVersionInfo)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_DEBUG_IF1((pVersionInfo == 0), M4ERR_PARAMETER,
+        "M4AD_NULL_getVersion: invalid pointer pVersionInfo");
+
+    /* Up until now, the null decoder version is not available */
+
+    /* CHANGE_VERSION_HERE */
+    pVersionInfo->m_major        = M4AD_NULL_MAJOR;      /*major version of the component*/
+    pVersionInfo->m_minor        = M4AD_NULL_MINOR;      /*minor version of the component*/
+    pVersionInfo->m_revision    = M4AD_NULL_REVISION;    /*revision version of the component*/
+    pVersionInfo->m_structSize=sizeof(M4_VersionInfo);
+
+    return err;
+}
+
+
+/**
+ ************************************************************************
+ * getInterface function definition of the null audio decoder
+ ************************************************************************
+*/
+
+/**
+ ************************************************************************
+ * @brief Retrieves the interface implemented by the decoder
+ * @param pDecoderType        : pointer to an M4AD_Type (allocated by the caller)
+ *                              that will be filled with the decoder type supported by
+ *                              this decoder
+ * @param pDecoderInterface   : address of a pointer that will be set to the interface
+ *                              implemented by this decoder. The interface is a structure
+ *                              allocated by the function and must be freed by the
+ *                              caller.
+ *
+ * @return    M4NO_ERROR  if OK
+ * @return    M4ERR_ALLOC if allocation failed
+ ************************************************************************
+*/
+M4OSA_ERR M4AD_NULL_getInterface( M4AD_Type *pDecoderType, M4AD_Interface **pDecoderInterface)
+{
+    *pDecoderInterface = (  M4AD_Interface*)M4OSA_malloc( sizeof(M4AD_Interface),
+                            M4DECODER_AUDIO, (M4OSA_Char *)"M4AD_Interface" );
+    if (M4OSA_NULL == *pDecoderInterface)
+    {
+        return M4ERR_ALLOC;
+    }
+
+    *pDecoderType = M4AD_kTypePCM;
+
+    (*pDecoderInterface)->m_pFctCreateAudioDec       = M4AD_NULL_create;
+    (*pDecoderInterface)->m_pFctDestroyAudioDec      = M4AD_NULL_destroy;
+    (*pDecoderInterface)->m_pFctStepAudioDec         = M4AD_NULL_step;
+    (*pDecoderInterface)->m_pFctGetVersionAudioDec   = M4AD_NULL_getVersion;
+    (*pDecoderInterface)->m_pFctStartAudioDec        = M4OSA_NULL;
+    (*pDecoderInterface)->m_pFctResetAudioDec        = M4OSA_NULL;
+    (*pDecoderInterface)->m_pFctSetOptionAudioDec    = M4OSA_NULL;
+    (*pDecoderInterface)->m_pFctGetOptionAudioDec    = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
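As noted above, M4AD_NULL_getInterface() allocates the interface structure and ownership passes to the caller, which is the pattern the MCS subscription code earlier in this patch relies on. A minimal caller sketch under that assumption (hypothetical function name, error handling trimmed):

#include "M4OSA_Types.h"
#include "M4OSA_Error.h"
#include "M4OSA_Memory.h"
#include "M4AD_Common.h"
#include "M4AD_Null.h"

/* Minimal caller sketch: query the null decoder interface, create and use a
 * decoder instance through it, then free the structure that getInterface
 * allocated on the caller's behalf. */
static M4OSA_ERR useNullDecoder(M4_AudioStreamHandler *pStreamHandler)
{
    M4AD_Type       decoderType;
    M4AD_Interface *pInterface = M4OSA_NULL;
    M4AD_Context    context    = M4OSA_NULL;
    M4OSA_ERR       err        = M4AD_NULL_getInterface(&decoderType, &pInterface);

    if (M4NO_ERROR != err)
    {
        return err;
    }

    err = pInterface->m_pFctCreateAudioDec(&context, pStreamHandler, M4OSA_NULL);
    if (M4NO_ERROR == err)
    {
        /* ... feed access units through pInterface->m_pFctStepAudioDec() ... */
        pInterface->m_pFctDestroyAudioDec(context);
    }

    /* The interface structure itself is owned by the caller. */
    M4OSA_free((M4OSA_MemAddr32)pInterface);
    return err;
}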
diff --git a/libvideoeditor/vss/src/M4AIR_API.c b/libvideoeditor/vss/src/M4AIR_API.c
new file mode 100755
index 0000000..6a3546d
--- /dev/null
+++ b/libvideoeditor/vss/src/M4AIR_API.c
@@ -0,0 +1,969 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file   M4AIR_API.c
+ * @brief  Area of Interest Resizer  API
+ *************************************************************************
+ */
+
+#define M4AIR_YUV420_FORMAT_SUPPORTED
+#define M4AIR_YUV420A_FORMAT_SUPPORTED
+
+/************************* COMPILATION CHECKS ***************************/
+#ifndef M4AIR_YUV420_FORMAT_SUPPORTED
+#ifndef M4AIR_BGR565_FORMAT_SUPPORTED
+#ifndef M4AIR_RGB565_FORMAT_SUPPORTED
+#ifndef M4AIR_BGR888_FORMAT_SUPPORTED
+#ifndef M4AIR_RGB888_FORMAT_SUPPORTED
+#ifndef M4AIR_JPG_FORMAT_SUPPORTED
+
+#error "Please define at least one input format for the AIR component"
+
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+
+/******************************* INCLUDES *******************************/
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Mutex.h"
+#include "M4OSA_Memory.h"
+#include "M4VIFI_FiltersAPI.h"
+#include "M4AIR_API.h"
+
+/************************ M4AIR INTERNAL TYPES DEFINITIONS ***********************/
+
+/**
+ ******************************************************************************
+ * enum         M4AIR_States
+ * @brief       The following enumeration defines the internal states of the AIR.
+ ******************************************************************************
+ */
+typedef enum
+{
+    M4AIR_kCreated,        /**< State after M4AIR_create has been called */
+    M4AIR_kConfigured      /**< State after M4AIR_configure has been called */
+}M4AIR_States;
+
+
+/**
+ ******************************************************************************
+ * struct         M4AIR_InternalContext
+ * @brief         The following structure is the internal context of the AIR.
+ ******************************************************************************
+ */
+typedef struct
+{
+    M4AIR_States            m_state;        /**< Internal state */
+    M4AIR_InputFormatType   m_inputFormat;  /**< Input format like YUV420Planar,
+                                                 RGB565, JPG, etc ... */
+    M4AIR_Params            m_params;       /**< Current input Parameter of  the processing */
+    M4OSA_UInt32            u32_x_inc[4];   /**< ratio between input and output width for YUV */
+    M4OSA_UInt32            u32_y_inc[4];   /**< ratio between input and output height for YUV */
+    M4OSA_UInt32            u32_x_accum_start[4];    /**< horizontal initial accumulator value */
+    M4OSA_UInt32            u32_y_accum_start[4];    /**< Vertical initial accumulator value */
+    M4OSA_UInt32            u32_x_accum[4]; /**< save of horizontal accumulator value */
+    M4OSA_UInt32            u32_y_accum[4]; /**< save of vertical accumulator value */
+    M4OSA_UInt8*            pu8_data_in[4]; /**< Save of input plane pointers
+                                                             in case of stripe mode */
+    M4OSA_UInt32            m_procRows;     /**< Number of processed rows,
+                                                     used in stripe mode only */
+    M4OSA_Bool                m_bOnlyCopy;  /**< Flag to know if we just perform a copy
+                                                        or a bilinear interpolation */
+    M4OSA_Bool                m_bFlipX;     /**< Depends on output orientation, used during
+                                                processing to revert processing order in X
+                                                coordinates */
+    M4OSA_Bool                m_bFlipY;     /**< Depends on output orientation, used during
+                                                processing to revert processing order in Y
+                                                coordinates */
+    M4OSA_Bool                m_bRevertXY;  /**< Depends on output orientation, used during
+                                                processing to revert X and Y processing order
+                                                 (+-90° rotation) */
+}M4AIR_InternalContext;
+
+/********************************* MACROS *******************************/
+#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer)\
+     if ((pointer) == M4OSA_NULL) return ((M4OSA_ERR)(retval));
+
+
+/********************** M4AIR PUBLIC API IMPLEMENTATION ********************/
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat)
+ * @brief    This function initializes an instance of the AIR.
+ * @param    pContext:      (IN/OUT) Address of the context to create
+ * @param    inputFormat:   (IN) input format type.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only). Invalid formatType
+ * @return    M4ERR_ALLOC: No more memory is available
+ ******************************************************************************
+ */
+M4OSA_ERR M4AIR_create(M4OSA_Context* pContext,M4AIR_InputFormatType inputFormat)
+{
+    M4OSA_ERR err = M4NO_ERROR ;
+    M4AIR_InternalContext* pC = M4OSA_NULL ;
+
+    /* Check that the address on the context is not NULL */
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+    *pContext = M4OSA_NULL ;
+
+    /* Internal Context creation */
+    pC = (M4AIR_InternalContext*)M4OSA_malloc(sizeof(M4AIR_InternalContext),
+         M4AIR,(M4OSA_Char *)"AIR internal context") ;
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_ALLOC, pC) ;
+
+
+    /* Check if the input format is supported */
+    switch(inputFormat)
+    {
+#ifdef M4AIR_YUV420_FORMAT_SUPPORTED
+        case M4AIR_kYUV420P:
+        break ;
+#endif
+#ifdef M4AIR_YUV420A_FORMAT_SUPPORTED
+        case M4AIR_kYUV420AP:
+        break ;
+#endif
+        default:
+            err = M4ERR_AIR_FORMAT_NOT_SUPPORTED;
+            goto M4AIR_create_cleanup ;
+    }
+
+    /**< Save input format and update state */
+    pC->m_inputFormat = inputFormat;
+    pC->m_state = M4AIR_kCreated;
+
+    /* Return the context to the caller */
+    *pContext = pC ;
+
+    return M4NO_ERROR ;
+
+M4AIR_create_cleanup:
+    /* Error management : we destroy the context if needed */
+    if(M4OSA_NULL != pC)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC) ;
+    }
+
+    *pContext = M4OSA_NULL ;
+
+    return err ;
+}
+
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
+ * @brief    This function destroys an instance of the AIR component
+ * @param    pContext:    (IN) Context identifying the instance to destroy
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ * @return    M4ERR_STATE: Internal state is incompatible with this function call.
+ ******************************************************************************
+ */
+M4OSA_ERR M4AIR_cleanUp(M4OSA_Context pContext)
+{
+    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
+
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+    /**< Check state */
+    if((M4AIR_kCreated != pC->m_state)&&(M4AIR_kConfigured != pC->m_state))
+    {
+        return M4ERR_STATE;
+    }
+    M4OSA_free((M4OSA_MemAddr32)pC) ;
+
+    return M4NO_ERROR ;
+
+}
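M4MCS_intApplyVPP() earlier in this patch drives the AIR in exactly this order: create once, configure whenever the geometry changes, fetch the resized frame, and clean up at the end. A condensed sketch of that lifecycle for a single YUV420P frame (hypothetical function name; the image planes are assumed to be set up by the caller):

#include "M4VIFI_FiltersAPI.h"
#include "M4AIR_API.h"

/* Condensed AIR lifecycle as used by M4MCS_intApplyVPP(): create the
 * instance, configure the input/output geometry, fetch the resized area
 * of interest, then clean up. */
static M4OSA_ERR resizeOnce(M4VIFI_ImagePlane *pIn, M4VIFI_ImagePlane *pOut)
{
    M4OSA_Context airCtxt = M4OSA_NULL;
    M4AIR_Params  params;
    M4OSA_ERR     err;

    err = M4AIR_create(&airCtxt, M4AIR_kYUV420P);
    if (M4NO_ERROR != err)
    {
        return err;
    }

    params.m_inputCoord.m_x      = 0;
    params.m_inputCoord.m_y      = 0;
    params.m_inputSize.m_width   = pIn->u_width;
    params.m_inputSize.m_height  = pIn->u_height;
    params.m_outputSize.m_width  = pOut->u_width;
    params.m_outputSize.m_height = pOut->u_height;
    params.m_bOutputStripe       = M4OSA_FALSE;
    params.m_outputOrientation   = M4COMMON_kOrientationTopLeft;

    err = M4AIR_configure(airCtxt, &params);
    if (M4NO_ERROR == err)
    {
        err = M4AIR_get(airCtxt, pIn, pOut);
    }

    M4AIR_cleanUp(airCtxt);
    return err;
}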
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
+ * @brief   This function will configure the AIR.
+ * @note    It will set the input and output coordinates and sizes,
+ *          and indicates if we will proceed in stripe or not.
+ *          In case an M4AIR_get in stripe mode was ongoing, it will cancel this previous
+ *          processing and reset the get process.
+ * @param    pContext:                (IN) Context identifying the instance
+ * @param    pParams->m_bOutputStripe:(IN) Stripe mode.
+ * @param    pParams->m_inputCoord:    (IN) X,Y coordinates of the first valid pixel in input.
+ * @param    pParams->m_inputSize:    (IN) input ROI size.
+ * @param    pParams->m_outputSize:    (IN) output size.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_ALLOC: No more memory space to add a new effect.
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ * @return    M4ERR_AIR_FORMAT_NOT_SUPPORTED: the requested input format is not supported.
+ ******************************************************************************
+ */
+M4OSA_ERR M4AIR_configure(M4OSA_Context pContext, M4AIR_Params* pParams)
+{
+    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
+    M4OSA_UInt32    i,u32_width_in, u32_width_out, u32_height_in, u32_height_out;
+    M4OSA_UInt32    nb_planes;
+
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+    if(M4AIR_kYUV420AP == pC->m_inputFormat)
+    {
+        nb_planes = 4;
+    }
+    else
+    {
+        nb_planes = 3;
+    }
+
+    /**< Check state */
+    if((M4AIR_kCreated != pC->m_state)&&(M4AIR_kConfigured != pC->m_state))
+    {
+        return M4ERR_STATE;
+    }
+
+    /** Save parameters */
+    pC->m_params = *pParams;
+
+    /* Check that the input and output width and height are even */
+    if( ((pC->m_params.m_inputSize.m_height)&0x1)    ||
+        ((pC->m_params.m_outputSize.m_height)&0x1))
+    {
+        return M4ERR_AIR_ILLEGAL_FRAME_SIZE;
+    }
+
+    if( ((pC->m_params.m_inputSize.m_width)&0x1)    ||
+        ((pC->m_params.m_outputSize.m_width)&0x1))
+    {
+        return M4ERR_AIR_ILLEGAL_FRAME_SIZE;
+    }
+    if(((pC->m_params.m_inputSize.m_width) == (pC->m_params.m_outputSize.m_width))
+        &&((pC->m_params.m_inputSize.m_height) == (pC->m_params.m_outputSize.m_height)))
+    {
+        /**< No resize in this case, we will just copy input in output */
+        pC->m_bOnlyCopy = M4OSA_TRUE;
+    }
+    else
+    {
+        pC->m_bOnlyCopy = M4OSA_FALSE;
+
+        /**< Initialize internal variables used for resize filter */
+        for(i=0;i<nb_planes;i++)
+        {
+
+            u32_width_in = ((i==0)||(i==3))?pC->m_params.m_inputSize.m_width:\
+                (pC->m_params.m_inputSize.m_width+1)>>1;
+            u32_height_in = ((i==0)||(i==3))?pC->m_params.m_inputSize.m_height:\
+                (pC->m_params.m_inputSize.m_height+1)>>1;
+            u32_width_out = ((i==0)||(i==3))?pC->m_params.m_outputSize.m_width:\
+                (pC->m_params.m_outputSize.m_width+1)>>1;
+            u32_height_out = ((i==0)||(i==3))?pC->m_params.m_outputSize.m_height:\
+                (pC->m_params.m_outputSize.m_height+1)>>1;
+
+                /* Compute horizontal ratio between src and destination width.*/
+                if (u32_width_out >= u32_width_in)
+                {
+                    pC->u32_x_inc[i]   = ((u32_width_in-1) * 0x10000) / (u32_width_out-1);
+                }
+                else
+                {
+                    pC->u32_x_inc[i]   = (u32_width_in * 0x10000) / (u32_width_out);
+                }
+
+                /* Compute vertical ratio between src and destination height.*/
+                if (u32_height_out >= u32_height_in)
+                {
+                    pC->u32_y_inc[i]   = ((u32_height_in - 1) * 0x10000) / (u32_height_out-1);
+                }
+                else
+                {
+                    pC->u32_y_inc[i] = (u32_height_in * 0x10000) / (u32_height_out);
+                }
+
+                /*
+                Calculate initial accumulator value : u32_y_accum_start.
+                u32_y_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+                */
+                if (pC->u32_y_inc[i] >= 0x10000)
+                {
+                    /*
+                        Keep the fractional part, assuming that the integer part is coded
+                        on the 16 high bits and the fractional part on the 16 low bits
+                    */
+                    pC->u32_y_accum_start[i] = pC->u32_y_inc[i] & 0xffff;
+
+                    if (!pC->u32_y_accum_start[i])
+                    {
+                        pC->u32_y_accum_start[i] = 0x10000;
+                    }
+
+                    pC->u32_y_accum_start[i] >>= 1;
+                }
+                else
+                {
+                    pC->u32_y_accum_start[i] = 0;
+                }
+                /**< Take into account that the Y coordinate can be odd;
+                    in this case we have to apply a 0.5 offset
+                    for the U and V planes as they are sub-sampled by 2 vs Y */
+                if((pC->m_params.m_inputCoord.m_y&0x1)&&((i==1)||(i==2)))
+                {
+                    pC->u32_y_accum_start[i] += 0x8000;
+                }
+
+                /*
+                    Calculate initial accumulator value : u32_x_accum_start.
+                    u32_x_accum_start is coded on 15 bits, and represents a value between
+                    0 and 0.5
+                */
+
+                if (pC->u32_x_inc[i] >= 0x10000)
+                {
+                    pC->u32_x_accum_start[i] = pC->u32_x_inc[i] & 0xffff;
+
+                    if (!pC->u32_x_accum_start[i])
+                    {
+                        pC->u32_x_accum_start[i] = 0x10000;
+                    }
+
+                    pC->u32_x_accum_start[i] >>= 1;
+                }
+                else
+                {
+                    pC->u32_x_accum_start[i] = 0;
+                }
+                /**< Take into account that the X coordinate can be odd;
+                    in this case we have to apply a 0.5 offset
+                    for the U and V planes as they are sub-sampled by 2 vs Y */
+                if((pC->m_params.m_inputCoord.m_x&0x1)&&((i==1)||(i==2)))
+                {
+                    pC->u32_x_accum_start[i] += 0x8000;
+                }
+        }
+    }
+
+    /**< Reset variable used for stripe mode */
+    pC->m_procRows = 0;
+
+    /**< Initialize var for X/Y processing order according to orientation */
+    pC->m_bFlipX = M4OSA_FALSE;
+    pC->m_bFlipY = M4OSA_FALSE;
+    pC->m_bRevertXY = M4OSA_FALSE;
+    switch(pParams->m_outputOrientation)
+    {
+        case M4COMMON_kOrientationTopLeft:
+            break;
+        case M4COMMON_kOrientationTopRight:
+            pC->m_bFlipX = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationBottomRight:
+            pC->m_bFlipX = M4OSA_TRUE;
+            pC->m_bFlipY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationBottomLeft:
+            pC->m_bFlipY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationLeftTop:
+            pC->m_bRevertXY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationRightTop:
+            pC->m_bRevertXY = M4OSA_TRUE;
+            pC->m_bFlipY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationRightBottom:
+            pC->m_bRevertXY = M4OSA_TRUE;
+            pC->m_bFlipX = M4OSA_TRUE;
+            pC->m_bFlipY = M4OSA_TRUE;
+            break;
+        case M4COMMON_kOrientationLeftBottom:
+            pC->m_bRevertXY = M4OSA_TRUE;
+            pC->m_bFlipX = M4OSA_TRUE;
+            break;
+        default:
+        return M4ERR_PARAMETER;
+    }
+    /**< Update state */
+    pC->m_state = M4AIR_kConfigured;
+
+    return M4NO_ERROR ;
+}
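+
+/**
+ * Worked example of the fixed-point set-up above (illustrative only, using
+ * hypothetical 640 -> 320 and 320 -> 640 luma widths):
+ * - Down-scale 640 -> 320 (out < in):  u32_x_inc = (640*0x10000)/320 = 0x20000,
+ *   i.e. a source step of 2.0 per output pixel.
+ * - Up-scale  320 -> 640 (out >= in):  u32_x_inc = (319*0x10000)/639 ~= 0x7FCC,
+ *   i.e. a source step of about 0.499.
+ * The increments are 16.16 fixed point: the integer part of the accumulator
+ * selects the source pixel, and the top 4 bits of its fractional part
+ * ((accum>>12)&15) become the out-of-16 bilinear weights used in M4AIR_get().
+ */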
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
+ * @brief   This function will provide the requested resized area of interest according to
+ *          settings provided in M4AIR_configure.
+ * @note    In case the input format type is JPEG, the input plane(s)
+ *          in pIn are not used. In normal mode, the dimensions specified in the output
+ *          plane(s) structure must be the same as the ones specified in M4AIR_configure.
+ *          In stripe mode, only the width will be the same, the height will be taken as
+ *          the stripe height (typically 16).
+ *          In normal mode, this function is called once to get the full output picture.
+ *          In stripe mode, it is called for each stripe till the whole picture has been
+ *          retrieved, and the position of the output stripe in the output picture
+ *          is internally incremented at each step.
+ *          Any call to M4AIR_configure during the stripe process will reset it to the
+ *          beginning of the output picture.
+ * @param    pContext:    (IN) Context identifying the instance
+ * @param    pIn:            (IN) Plane structure containing input Plane(s).
+ * @param    pOut:        (IN/OUT)  Plane structure containing output Plane(s).
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_ALLOC: No more memory space available.
+ * @return    M4ERR_PARAMETER: pContext is M4OSA_NULL (debug only).
+ ******************************************************************************
+ */
+M4OSA_ERR M4AIR_get(M4OSA_Context pContext, M4VIFI_ImagePlane* pIn, M4VIFI_ImagePlane* pOut)
+{
+    M4AIR_InternalContext* pC = (M4AIR_InternalContext*)pContext ;
+    M4OSA_UInt32 i,j,k,u32_x_frac,u32_y_frac,u32_x_accum,u32_y_accum,u32_shift;
+        M4OSA_UInt8    *pu8_data_in, *pu8_data_in_org, *pu8_data_in_tmp, *pu8_data_out;
+        M4OSA_UInt8    *pu8_src_top;
+        M4OSA_UInt8    *pu8_src_bottom;
+    M4OSA_UInt32    u32_temp_value;
+    M4OSA_Int32    i32_tmp_offset;
+    M4OSA_UInt32    nb_planes;
+
+
+
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext) ;
+
+    /**< Check state */
+    if(M4AIR_kConfigured != pC->m_state)
+    {
+        return M4ERR_STATE;
+    }
+
+    if(M4AIR_kYUV420AP == pC->m_inputFormat)
+    {
+        nb_planes = 4;
+    }
+    else
+    {
+        nb_planes = 3;
+    }
+
+    /**< Loop on each Plane */
+    for(i=0;i<nb_planes;i++)
+    {
+
+         /* Set the working pointers at the beginning of the input/output data field */
+
+        u32_shift = ((i==0)||(i==3))?0:1; /**< Depend on Luma or Chroma */
+
+        if((M4OSA_FALSE == pC->m_params.m_bOutputStripe)\
+            ||((M4OSA_TRUE == pC->m_params.m_bOutputStripe)&&(0 == pC->m_procRows)))
+        {
+            /**< For input, take care about ROI */
+            pu8_data_in     = pIn[i].pac_data + pIn[i].u_topleft \
+                + (pC->m_params.m_inputCoord.m_x>>u32_shift)
+                        + (pC->m_params.m_inputCoord.m_y >> u32_shift) * pIn[i].u_stride;
+
+            /** Go to the end of the line/column in case X/Y scanning is flipped */
+            if(M4OSA_TRUE == pC->m_bFlipX)
+            {
+                pu8_data_in += ((pC->m_params.m_inputSize.m_width)>>u32_shift) -1 ;
+            }
+            if(M4OSA_TRUE == pC->m_bFlipY)
+            {
+                pu8_data_in += ((pC->m_params.m_inputSize.m_height>>u32_shift) -1)\
+                     * pIn[i].u_stride;
+            }
+
+            /**< Initialize accumulators in case we are using it (bilinear interpolation) */
+            if( M4OSA_FALSE == pC->m_bOnlyCopy)
+            {
+                pC->u32_x_accum[i] = pC->u32_x_accum_start[i];
+                pC->u32_y_accum[i] = pC->u32_y_accum_start[i];
+            }
+
+        }
+        else
+        {
+            /**< In case of stripe mode for other than first stripe, we need to recover input
+                 pointer from internal context */
+            pu8_data_in = pC->pu8_data_in[i];
+        }
+
+        /**< In every mode, output data are at the beginning of the output plane */
+        pu8_data_out    = pOut[i].pac_data + pOut[i].u_topleft;
+
+        /**< Initialize input offset applied after each pixel */
+        if(M4OSA_FALSE == pC->m_bFlipY)
+        {
+            i32_tmp_offset = pIn[i].u_stride;
+        }
+        else
+        {
+            i32_tmp_offset = -pIn[i].u_stride;
+        }
+
+        /**< In this case, no bilinear interpolation is needed as input and output dimensions
+            are the same */
+        if( M4OSA_TRUE == pC->m_bOnlyCopy)
+        {
+            /**< No +-90° rotation */
+            if(M4OSA_FALSE == pC->m_bRevertXY)
+            {
+                /**< No flip on X abscissa */
+                if(M4OSA_FALSE == pC->m_bFlipX)
+                {
+                    /**< Loop on each row */
+                    for(j=0;j<pOut[i].u_height;j++)
+                    {
+                        /**< Copy one whole line */
+                        M4OSA_memcpy((M4OSA_MemAddr8)pu8_data_out, (M4OSA_MemAddr8)pu8_data_in,
+                             pOut[i].u_width);
+
+                        /**< Update pointers */
+                        pu8_data_out += pOut[i].u_stride;
+                        if(M4OSA_FALSE == pC->m_bFlipY)
+                        {
+                            pu8_data_in += pIn[i].u_stride;
+                        }
+                        else
+                        {
+                            pu8_data_in -= pIn[i].u_stride;
+                        }
+                    }
+                }
+                else
+                {
+                    /**< Loop on each row */
+                    for(j=0;j<pOut[i].u_height;j++)
+                    {
+                        /**< Loop on each pixel of 1 row */
+                        for(k=0;k<pOut[i].u_width;k++)
+                        {
+                            *pu8_data_out++ = *pu8_data_in--;
+                        }
+
+                        /**< Update pointers */
+                        pu8_data_out += (pOut[i].u_stride - pOut[i].u_width);
+
+                        pu8_data_in += pOut[i].u_width + i32_tmp_offset;
+
+                    }
+                }
+            }
+            /**< Here we have a +-90° rotation */
+            else
+            {
+
+                /**< Loop on each row */
+                for(j=0;j<pOut[i].u_height;j++)
+                {
+                    pu8_data_in_tmp = pu8_data_in;
+
+                    /**< Loop on each pixel of 1 row */
+                    for(k=0;k<pOut[i].u_width;k++)
+                    {
+                        *pu8_data_out++ = *pu8_data_in_tmp;
+
+                        /**< Update the input pointer in order to go to the next/previous line */
+                        pu8_data_in_tmp += i32_tmp_offset;
+                    }
+
+                    /**< Update pointers */
+                    pu8_data_out += (pOut[i].u_stride - pOut[i].u_width);
+                    if(M4OSA_FALSE == pC->m_bFlipX)
+                    {
+                        pu8_data_in ++;
+                    }
+                    else
+                    {
+                        pu8_data_in --;
+                    }
+                }
+            }
+        }
+        /**< Bilinear interpolation */
+        else
+        {
+
+        if(3 != i)    /**< other than alpha plane */
+        {
+            /**No +-90° rotation */
+            if(M4OSA_FALSE == pC->m_bRevertXY)
+            {
+
+                /**< Loop on each row */
+                for(j=0;j<pOut[i].u_height;j++)
+                {
+                    /* Vertical weight factor */
+                    u32_y_frac = (pC->u32_y_accum[i]>>12)&15;
+
+                    /* Reinit horizontal weight factor */
+                    u32_x_accum = pC->u32_x_accum_start[i];
+
+
+
+                        if(M4OSA_TRUE ==  pC->m_bFlipX)
+                        {
+
+                            /**< Loop on each output pixel in a row */
+                            for(k=0;k<pOut[i].u_width;k++)
+                            {
+
+                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
+                                                                        weight factor */
+
+                                pu8_src_top = (pu8_data_in - (u32_x_accum >> 16)) -1 ;
+
+                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                                /* Weighted combination */
+                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
+                                                   pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
+                                                   (pu8_src_bottom[1]*(16-u32_x_frac) +
+                                                   pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
+
+                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                                /* Update horizontal accumulator */
+                                u32_x_accum += pC->u32_x_inc[i];
+                            }
+                        }
+
+                        else
+                        {
+                            /**< Loop on each output pixel in a row */
+                            for(k=0;k<pOut[i].u_width;k++)
+                            {
+                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
+                                                                        weight factor */
+
+                                pu8_src_top = pu8_data_in + (u32_x_accum >> 16);
+
+                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                                /* Weighted combination */
+                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
+                                                   pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
+                                                   (pu8_src_bottom[0]*(16-u32_x_frac) +
+                                                   pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
+
+                                    *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                                /* Update horizontal accumulator */
+                                u32_x_accum += pC->u32_x_inc[i];
+                            }
+
+                        }
+
+                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
+
+                    /* Update vertical accumulator */
+                    pC->u32_y_accum[i] += pC->u32_y_inc[i];
+                      if (pC->u32_y_accum[i]>>16)
+                    {
+                        pu8_data_in = pu8_data_in + (pC->u32_y_accum[i] >> 16) * i32_tmp_offset;
+                          pC->u32_y_accum[i] &= 0xffff;
+                       }
+                }
+        }
+            /** +-90° rotation */
+            else
+            {
+                pu8_data_in_org = pu8_data_in;
+
+                /**< Loop on each output row */
+                for(j=0;j<pOut[i].u_height;j++)
+                {
+                    /* horizontal weight factor */
+                    u32_x_frac = (pC->u32_x_accum[i]>>12)&15;
+
+                    /* Reinit accumulator */
+                    u32_y_accum = pC->u32_y_accum_start[i];
+
+                    if(M4OSA_TRUE ==  pC->m_bFlipX)
+                    {
+
+                        /**< Loop on each output pixel in a row */
+                        for(k=0;k<pOut[i].u_width;k++)
+                        {
+
+                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
+
+
+                            pu8_src_top = (pu8_data_in - (pC->u32_x_accum[i] >> 16)) - 1;
+
+                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                            /* Weighted combination */
+                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
+                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
+                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
+                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
+
+                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                            /* Update vertical accumulator */
+                            u32_y_accum += pC->u32_y_inc[i];
+                              if (u32_y_accum>>16)
+                            {
+                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
+                                  u32_y_accum &= 0xffff;
+                               }
+
+                        }
+                    }
+                    else
+                    {
+                        /**< Loop on each output pixel in a row */
+                        for(k=0;k<pOut[i].u_width;k++)
+                        {
+
+                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
+
+                            pu8_src_top = pu8_data_in + (pC->u32_x_accum[i] >> 16);
+
+                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                            /* Weighted combination */
+                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
+                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
+                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
+                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
+
+                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                            /* Update vertical accumulator */
+                            u32_y_accum += pC->u32_y_inc[i];
+                              if (u32_y_accum>>16)
+                            {
+                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
+                                  u32_y_accum &= 0xffff;
+                               }
+                        }
+                    }
+                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
+
+                    /* Update horizontal accumulator */
+                    pC->u32_x_accum[i] += pC->u32_x_inc[i];
+
+                    pu8_data_in = pu8_data_in_org;
+                }
+
+            }
+            }/** 3 != i */
+            else
+            {
+            /**No +-90° rotation */
+            if(M4OSA_FALSE == pC->m_bRevertXY)
+            {
+
+                /**< Loop on each row */
+                for(j=0;j<pOut[i].u_height;j++)
+                {
+                    /* Vertical weight factor */
+                    u32_y_frac = (pC->u32_y_accum[i]>>12)&15;
+
+                    /* Reinit horizontal weight factor */
+                    u32_x_accum = pC->u32_x_accum_start[i];
+
+
+
+                        if(M4OSA_TRUE ==  pC->m_bFlipX)
+                        {
+
+                            /**< Loop on each output pixel in a row */
+                            for(k=0;k<pOut[i].u_width;k++)
+                            {
+
+                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
+                                                                         weight factor */
+
+                                pu8_src_top = (pu8_data_in - (u32_x_accum >> 16)) -1 ;
+
+                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                                /* Weighted combination */
+                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
+                                                   pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
+                                                  (pu8_src_bottom[1]*(16-u32_x_frac) +
+                                                   pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
+
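+                                /* Binarize the interpolated alpha: values >= 128 become 0xFF, others 0 */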
+                                u32_temp_value= (u32_temp_value >> 7)*0xff;
+
+                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                                /* Update horizontal accumulator */
+                                u32_x_accum += pC->u32_x_inc[i];
+                            }
+                        }
+
+                        else
+                        {
+                            /**< Loop on each output pixel in a row */
+                            for(k=0;k<pOut[i].u_width;k++)
+                            {
+                                u32_x_frac = (u32_x_accum >> 12)&15; /* Fraction of Horizontal
+                                                                        weight factor */
+
+                                pu8_src_top = pu8_data_in + (u32_x_accum >> 16);
+
+                                pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                                /* Weighted combination */
+                                u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
+                                                   pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
+                                                   (pu8_src_bottom[0]*(16-u32_x_frac) +
+                                                   pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
+
+                                u32_temp_value= (u32_temp_value >> 7)*0xff;
+
+                                *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                                /* Update horizontal accumulator */
+                                u32_x_accum += pC->u32_x_inc[i];
+                            }
+
+                        }
+
+                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
+
+                    /* Update vertical accumulator */
+                    pC->u32_y_accum[i] += pC->u32_y_inc[i];
+                      if (pC->u32_y_accum[i]>>16)
+                    {
+                        pu8_data_in = pu8_data_in + (pC->u32_y_accum[i] >> 16) * i32_tmp_offset;
+                          pC->u32_y_accum[i] &= 0xffff;
+                       }
+                }
+
+            } /**< M4OSA_FALSE == pC->m_bRevertXY */
+            /** +-90° rotation */
+            else
+            {
+                pu8_data_in_org = pu8_data_in;
+
+                /**< Loop on each output row */
+                for(j=0;j<pOut[i].u_height;j++)
+                {
+                    /* horizontal weight factor */
+                    u32_x_frac = (pC->u32_x_accum[i]>>12)&15;
+
+                    /* Reinit accumulator */
+                    u32_y_accum = pC->u32_y_accum_start[i];
+
+                    if(M4OSA_TRUE ==  pC->m_bFlipX)
+                    {
+
+                        /**< Loop on each output pixel in a row */
+                        for(k=0;k<pOut[i].u_width;k++)
+                        {
+
+                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
+
+
+                            pu8_src_top = (pu8_data_in - (pC->u32_x_accum[i] >> 16)) - 1;
+
+                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                            /* Weighted combination */
+                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[1]*(16-u32_x_frac) +
+                                                 pu8_src_top[0]*u32_x_frac)*(16-u32_y_frac) +
+                                                (pu8_src_bottom[1]*(16-u32_x_frac) +
+                                                 pu8_src_bottom[0]*u32_x_frac)*u32_y_frac )>>8);
+
+                            u32_temp_value= (u32_temp_value >> 7)*0xff;
+
+                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                            /* Update vertical accumulator */
+                            u32_y_accum += pC->u32_y_inc[i];
+                              if (u32_y_accum>>16)
+                            {
+                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
+                                  u32_y_accum &= 0xffff;
+                               }
+
+                        }
+                    }
+                    else
+                    {
+                        /**< Loop on each output pixel in a row */
+                        for(k=0;k<pOut[i].u_width;k++)
+                        {
+
+                            u32_y_frac = (u32_y_accum >> 12)&15; /* Vertical weight factor */
+
+                            pu8_src_top = pu8_data_in + (pC->u32_x_accum[i] >> 16);
+
+                            pu8_src_bottom = pu8_src_top + i32_tmp_offset;
+
+                            /* Weighted combination */
+                            u32_temp_value = (M4VIFI_UInt8)(((pu8_src_top[0]*(16-u32_x_frac) +
+                                                 pu8_src_top[1]*u32_x_frac)*(16-u32_y_frac) +
+                                                (pu8_src_bottom[0]*(16-u32_x_frac) +
+                                                 pu8_src_bottom[1]*u32_x_frac)*u32_y_frac )>>8);
+
+                            u32_temp_value= (u32_temp_value >> 7)*0xff;
+
+                            *pu8_data_out++ = (M4VIFI_UInt8)u32_temp_value;
+
+                            /* Update vertical accumulator */
+                            u32_y_accum += pC->u32_y_inc[i];
+                              if (u32_y_accum>>16)
+                            {
+                                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * i32_tmp_offset;
+                                  u32_y_accum &= 0xffff;
+                               }
+                        }
+                    }
+                    pu8_data_out += pOut[i].u_stride - pOut[i].u_width;
+
+                    /* Update horizontal accumulator */
+                    pC->u32_x_accum[i] += pC->u32_x_inc[i];
+
+                    pu8_data_in = pu8_data_in_org;
+
+                }
+                } /**< M4OSA_TRUE == pC->m_bRevertXY */
+        }/** 3 == i */
+            }
+        /**< In case of stripe mode, save current input pointer */
+        if(M4OSA_TRUE == pC->m_params.m_bOutputStripe)
+        {
+            pC->pu8_data_in[i] = pu8_data_in;
+        }
+    }
+
+    /**< Update number of processed rows, reset it if we have finished
+         with the whole processing */
+    pC->m_procRows += pOut[0].u_height;
+    if(M4OSA_FALSE == pC->m_bRevertXY)
+    {
+        if(pC->m_params.m_outputSize.m_height <= pC->m_procRows)    pC->m_procRows = 0;
+    }
+    else
+    {
+        if(pC->m_params.m_outputSize.m_width <= pC->m_procRows)    pC->m_procRows = 0;
+    }
+
+    return M4NO_ERROR ;
+
+}
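+
+/**
+ * Numeric sketch of the weighted combination in the non-flipped bilinear
+ * branches above (pixel values and fractions are hypothetical): with the four
+ * neighbouring source pixels top[0]=100, top[1]=104, bottom[0]=120,
+ * bottom[1]=124 and fractions u32_x_frac=4, u32_y_frac=8 (x offset 4/16,
+ * y offset 8/16):
+ *   top    = 100*(16-4) + 104*4 = 1616
+ *   bottom = 120*(16-4) + 124*4 = 1936
+ *   out    = (1616*(16-8) + 1936*8) >> 8 = (12928 + 15488) >> 8 = 111
+ * which matches the expected bilinear value 100 + 0.25*4 + 0.5*20 = 111.
+ */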
+
+
+
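+/**
+ * Minimal stripe-mode usage sketch (assumptions: no +-90 degree rotation, a
+ * context pCtx already obtained from the AIR creation API, and params, pIn,
+ * pOut, stripeHeight, rows and err declared/filled by the caller; error
+ * handling omitted):
+ *
+ *   params.m_bOutputStripe = M4OSA_TRUE;
+ *   err = M4AIR_configure(pCtx, &params);
+ *   for (rows = 0; rows < params.m_outputSize.m_height; rows += stripeHeight)
+ *   {
+ *       // pOut describes one stripe (typically 16 lines high); M4AIR_get()
+ *       // keeps track of its position in the output picture between calls.
+ *       err = M4AIR_get(pCtx, pIn, pOut);
+ *   }
+ */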
diff --git a/libvideoeditor/vss/src/M4AMRR_CoreReader.c b/libvideoeditor/vss/src/M4AMRR_CoreReader.c
new file mode 100755
index 0000000..14f5271
--- /dev/null
+++ b/libvideoeditor/vss/src/M4AMRR_CoreReader.c
@@ -0,0 +1,910 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file        M4AMRR_CoreReader.c
+ * @brief       Implementation of AMR parser
+ * @note        This file contains the API Implementation for
+ *              AMR Parser.
+ ******************************************************************************
+*/
+#include "M4AMRR_CoreReader.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_CoreID.h"
+
+/**
+ ******************************************************************************
+ * Maximum bitrate per amr type
+ ******************************************************************************
+*/
+#define M4AMRR_NB_MAX_BIT_RATE    12200
+#define M4AMRR_WB_MAX_BIT_RATE    23850
+
+/**
+ ******************************************************************************
+ * AMR reader context ID
+ ******************************************************************************
+*/
+#define M4AMRR_CONTEXTID    0x414d5252
+
+/**
+ ******************************************************************************
+ * An AMR frame is 20ms
+ ******************************************************************************
+*/
+#define M4AMRR_FRAME_LENGTH     20
+
+/**
+ ******************************************************************************
+ * For seeking, the file is split into 40 segments for faster search
+ ******************************************************************************
+*/
+#define    M4AMRR_NUM_SEEK_ENTRIES 40
+
+#define M4AMRR_NB_SAMPLE_FREQUENCY 8000        /**< Narrow band sampling rate */
+#define M4AMRR_WB_SAMPLE_FREQUENCY 16000    /**< Wide band sampling rate */
+
+/**
+ ******************************************************************************
+ * AMR reader version numbers
+ ******************************************************************************
+*/
+/* CHANGE_VERSION_HERE */
+#define M4AMRR_VERSION_MAJOR 1
+#define M4AMRR_VERSION_MINOR 11
+#define M4AMRR_VERSION_REVISION 3
+
+/**
+ ******************************************************************************
+ * structure    M4_AMRR_Context
+ * @brief        Internal AMR reader context structure
+ ******************************************************************************
+*/
+typedef struct
+{
+    M4OSA_UInt32             m_contextId ;      /* Fixed Id. to check for valid Context*/
+    M4OSA_FileReadPointer*   m_pOsaFilePtrFct;  /* File function pointer */
+    M4SYS_StreamDescription* m_pStreamHandler;  /* Stream Description */
+    M4OSA_UInt32*            m_pSeekIndex;      /* Seek Index Table */
+    M4OSA_UInt32             m_seekInterval;    /* Stores the seek Interval stored in the Index */
+    M4OSA_UInt32             m_maxAuSize;       /* Stores the max Au Size */
+    M4OSA_MemAddr32          m_pdataAddress;    /* Pointer to store AU data */
+    M4SYS_StreamType         m_streamType;      /* Stores the stream type AMR NB or WB */
+    M4OSA_Context            m_pAMRFile;        /* Data storage */
+    M4AMRR_State             m_status;          /* AMR Reader Status */
+    M4OSA_Int32              m_structSize;      /* size of structure*/
+} M4_AMRR_Context;
+
+/**
+ ******************************************************************************
+ * Parser internal functions, not usable from outside the reader context
+ ******************************************************************************
+*/
+M4OSA_UInt32    M4AMRR_getAuSize(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType);
+M4OSA_UInt32    M4AMRR_getBitrate(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType);
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32    M4AMRR_getAuSize(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType)
+ * @brief    Internal function to the AMR Parser, returns the AU size of the Frame
+ * @note     This function takes the stream type and the frame type and returns the
+ *           frame length
+ * @param    frameType(IN)    : AMR frame type
+ * @param    streamType(IN)    : AMR stream type NB or WB
+ * @returns  The frame size based on the frame type.
+ ******************************************************************************
+ */
+M4OSA_UInt32    M4AMRR_getAuSize(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType)
+{
+    const M4OSA_UInt32    M4AMRR_NB_AUSIZE[]={13,14,16,18,20,21,27,32,6,6,6};
+    const M4OSA_UInt32    M4AMRR_WB_AUSIZE[]={18,24,33,37,41,47,51,59,61,6};
+
+    if ( streamType == M4SYS_kAMR )
+    {
+            return M4AMRR_NB_AUSIZE[frameType];
+    }
+    else /* M4SYS_kAMR_WB */
+    {
+            return M4AMRR_WB_AUSIZE[frameType];
+    }
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32    M4AMRR_getBitrate(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType)
+ * @brief    Internal function to the AMR Parser, returns the Bit rate of the Frame
+ * @note     This function takes the stream type and the frame type and returns the
+ *           bit rate for the given frame.
+ * @param    frameType(IN)    : AMR frame type
+ * @param    streamType(IN)    : AMR stream type NB or WB
+ * @returns  The frame's bit rate based on the frame type.
+ ******************************************************************************
+ */
+M4OSA_UInt32    M4AMRR_getBitrate(M4OSA_UInt32 frameType,  M4SYS_StreamType streamType)
+{
+    const M4OSA_UInt32    M4AMRR_NB_BITRATE[]=
+        {4750,5150,5900,6700,7400,7950,10200,12200,12200,12200,12200};
+    const M4OSA_UInt32    M4AMRR_WB_BITRATE[]=
+        {6600,8850,12650,14250,15850,18250,19850,23050,23850,12200};
+
+    if ( streamType == M4SYS_kAMR )
+    {
+            return M4AMRR_NB_BITRATE[frameType];
+    }
+    else /* M4SYS_kAMR_WB */
+    {
+            return M4AMRR_WB_BITRATE[frameType];
+    }
+}
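+
+/**
+ * Example of how the two tables map to the codec modes: NB frame type 7 is the
+ * 12.2 kbit/s mode, i.e. 244 speech bits per 20 ms frame, which packs into
+ * 31 octets plus 1 TOC octet = 32 = M4AMRR_NB_AUSIZE[7]; likewise WB frame
+ * type 8 is 23.85 kbit/s, 477 bits -> 60 + 1 = 61 = M4AMRR_WB_AUSIZE[8].
+ */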
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_openRead(M4OSA_Context* pContext, M4OSA_Void* pFileDescriptor,
+                        M4OSA_FileReadPointer* pFileFunction)
+/*********************************************************/
+{
+    M4_AMRR_Context*    pStreamContext;
+    M4OSA_FilePosition  filePos;
+
+    M4OSA_ERR err = M4ERR_FILE_NOT_FOUND ;
+    M4OSA_UInt32 size ;
+    M4OSA_UInt32 data ;
+    M4OSA_Char *M4_Token;
+    M4OSA_UInt32 *tokenPtr;
+
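+    /* The constants below are the little-endian 32-bit reads of the ASCII magic
+       strings "#!AMR\n" (narrow band) and "#!AMR-WB\n" (wide band) that start
+       an AMR storage file (RFC 4867). */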
+    /* Header for AMR NB */
+    M4OSA_UInt32 M4_AMR_1       = 0x4d412123;
+    M4OSA_UInt32 M4_AMR_NB_2    = 0x00000a52;
+
+    /* Header for AMR WB */
+    M4OSA_UInt32 M4_AMR_WB_2    = 0x42572d52;
+    M4OSA_UInt32 M4_AMR_WB_3    = 0x0000000a;
+
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileDescriptor),M4ERR_PARAMETER,"File Desc. M4OSA_NULL");
+
+    *pContext = M4OSA_NULL ;
+
+    M4_Token = (M4OSA_Char*)M4OSA_malloc(sizeof(M4OSA_MemAddr32)*3, M4AMR_READER,
+                 (M4OSA_Char *)("M4_Token"));
+    if(M4OSA_NULL == M4_Token)
+    {
+        M4OSA_DEBUG_IF3((M4OSA_NULL == M4_Token),M4ERR_ALLOC,"Mem Alloc failed - M4_Token");
+        return M4ERR_ALLOC ;
+    }
+
+    pStreamContext= (M4_AMRR_Context*)M4OSA_malloc(sizeof(M4_AMRR_Context), M4AMR_READER,
+                     (M4OSA_Char *)("pStreamContext"));
+    if(M4OSA_NULL == pStreamContext)
+    {
+        M4OSA_free((M4OSA_MemAddr32)M4_Token);
+        *pContext = M4OSA_NULL ;
+        return M4ERR_ALLOC ;
+    }
+
+    /* Initialize the context */
+    pStreamContext->m_contextId = M4AMRR_CONTEXTID;
+    pStreamContext->m_structSize=sizeof(M4_AMRR_Context);
+    pStreamContext->m_pOsaFilePtrFct=pFileFunction ;
+    pStreamContext->m_pStreamHandler = M4OSA_NULL ;
+    pStreamContext->m_pAMRFile = M4OSA_NULL ;
+    pStreamContext->m_status = M4AMRR_kOpening ;
+    pStreamContext->m_pSeekIndex = M4OSA_NULL ;
+    pStreamContext->m_seekInterval = 0;
+    pStreamContext->m_maxAuSize = 0 ;
+    pStreamContext->m_pdataAddress = M4OSA_NULL;
+    err=pStreamContext->m_pOsaFilePtrFct->openRead(&pStreamContext->m_pAMRFile,
+        (M4OSA_Char*)pFileDescriptor,M4OSA_kFileRead );
+    if ( err != M4NO_ERROR )
+    {
+        /* M4OSA_DEBUG_IF3((err != M4NO_ERROR),err,"File open failed"); */
+        M4OSA_free((M4OSA_MemAddr32)pStreamContext);
+        M4OSA_free((M4OSA_MemAddr32)M4_Token);
+
+        *pContext = M4OSA_NULL ;
+        return err ;
+    }
+
+    pStreamContext->m_status = M4AMRR_kOpening ;
+
+    size = 6;
+    pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+                (M4OSA_MemAddr8)M4_Token, &size);
+    if(size != 6)
+    {
+        goto cleanup;
+    }
+
+    tokenPtr = (M4OSA_UInt32*)M4_Token ;
+    /* Check for the first 4 bytes of the header common to WB and NB*/
+    if (*tokenPtr != M4_AMR_1)
+    {
+        goto cleanup;
+    }
+
+    tokenPtr++;
+    data = *tokenPtr & 0x0000FFFF ;
+    /* Check if the next part is Narrow band header */
+    if (data!= M4_AMR_NB_2)
+    {
+        /* Stream is AMR Wide Band */
+        filePos = 4;
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekBeginning, &filePos);
+        size = 5;
+        pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+             (M4OSA_MemAddr8)M4_Token, &size);
+        if(size != 5)
+            goto cleanup;
+        tokenPtr=(M4OSA_UInt32*)M4_Token;
+        /* Check for the wide band header */
+        if(*tokenPtr!= M4_AMR_WB_2)
+            goto cleanup;
+        tokenPtr++;
+        data = *tokenPtr & 0x000000FF ;
+        if(data!= M4_AMR_WB_3)
+            goto cleanup;
+        pStreamContext->m_streamType = M4SYS_kAMR_WB ;
+    }
+    else
+    {
+        /* Stream is a Narrow band stream */
+        pStreamContext->m_streamType = M4SYS_kAMR ;
+    }
+    /*  No Profile level defined */
+    pStreamContext->m_status = M4AMRR_kOpened;
+
+    M4OSA_free((M4OSA_MemAddr32)M4_Token);
+    *pContext = pStreamContext ;
+    return M4NO_ERROR;
+
+cleanup:
+
+    if(M4OSA_NULL != pStreamContext->m_pAMRFile)
+    {
+        pStreamContext->m_pOsaFilePtrFct->closeRead(pStreamContext->m_pAMRFile);
+    }
+
+    M4OSA_free((M4OSA_MemAddr32)M4_Token);
+    M4OSA_free((M4OSA_MemAddr32)pStreamContext);
+
+    *pContext = M4OSA_NULL ;
+
+    return (M4OSA_ERR)M4ERR_AMR_NOT_COMPLIANT;
+}
+
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_getNextStream(M4OSA_Context Context, M4SYS_StreamDescription* pStreamDesc )
+/*********************************************************/
+{
+    M4_AMRR_Context*    pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_Char            frameHeader, frameType ;
+    M4OSA_UInt32        size, auCount=0;
+    M4OSA_FilePosition  filePos;
+
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pStreamDesc),M4ERR_PARAMETER,"Stream Desc. M4OSA_NULL");
+    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+         "Bad Context");
+    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kOpened), M4ERR_STATE, "Invalid State");
+
+    if (M4OSA_NULL != pStreamContext->m_pStreamHandler)
+    {
+        return M4WAR_NO_MORE_STREAM ;
+    }
+
+    size = 1;
+    pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+         (M4OSA_MemAddr8)&frameHeader, &size);
+
+    /* XFFF FXXX -> F is the Frame type */
+    frameType = ( frameHeader & 0x78 ) >> 3 ;
+
+    if ( frameType == 15 )
+    {
+        return M4WAR_NO_DATA_YET ;
+    }
+
+    if (( pStreamContext->m_streamType == M4SYS_kAMR ) && ( frameType > 11 ))
+    {
+        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
+    }
+
+    if (( pStreamContext->m_streamType == M4SYS_kAMR_WB ) && ( frameType > 9 ))
+    {
+        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
+    }
+
+    /* Average bit rate is assigned the bitrate of the first frame */
+    pStreamDesc->averageBitrate = M4AMRR_getBitrate(frameType,pStreamContext->m_streamType);
+
+    filePos = -1;
+    pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile, M4OSA_kFileSeekCurrent,
+         &filePos);
+
+    /* Initialize pStreamDesc */
+    pStreamDesc->profileLevel = 0xFF ;
+    pStreamDesc->decoderSpecificInfoSize = 0 ;
+    pStreamDesc->decoderSpecificInfo = M4OSA_NULL ;
+    pStreamDesc->maxBitrate = (pStreamContext->m_streamType ==
+        M4SYS_kAMR )?M4AMRR_NB_MAX_BIT_RATE:M4AMRR_WB_MAX_BIT_RATE;
+    pStreamDesc->profileLevel = 0xFF ;
+    pStreamDesc->streamID = 1;
+    pStreamDesc->streamType = pStreamContext->m_streamType;
+
+    /* Timescale equals Sampling Frequency: NB-8000 Hz, WB-16000 Hz */
+    pStreamDesc->timeScale = (pStreamContext->m_streamType == M4SYS_kAMR )?8000:16000;
+    M4OSA_TIME_SET_UNKNOWN(pStreamDesc->duration);
+
+    pStreamContext->m_pStreamHandler =
+         (M4SYS_StreamDescription*)M4OSA_malloc(sizeof(M4SYS_StreamDescription),
+             M4AMR_READER, (M4OSA_Char *)("pStreamContext->m_pStreamHandler"));
+    if(M4OSA_NULL == pStreamContext->m_pStreamHandler)
+    {
+        return M4ERR_ALLOC;
+    }
+
+    /* Copy the Stream Desc. into the Context */
+    pStreamContext->m_pStreamHandler->averageBitrate = pStreamDesc->averageBitrate;
+    pStreamContext->m_pStreamHandler->decoderSpecificInfo = M4OSA_NULL ;
+    pStreamContext->m_pStreamHandler->decoderSpecificInfoSize = 0 ;
+    M4OSA_TIME_SET_UNKNOWN(pStreamContext->m_pStreamHandler->duration);
+    pStreamContext->m_pStreamHandler->profileLevel = 0xFF ;
+    pStreamContext->m_pStreamHandler->streamID = 1;
+    pStreamContext->m_pStreamHandler->streamType = pStreamDesc->streamType ;
+    pStreamContext->m_pStreamHandler->timeScale = pStreamDesc->timeScale ;
+
+    /* Count the number of Access Units in the file to get the */
+    /* duration of the stream = 20 ms * number of access units */
+    while(1)
+    {
+        size = 1;
+        pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+             (M4OSA_MemAddr8)&frameHeader, &size);
+        if ( size == 0)
+            break ;
+        frameType = (frameHeader & 0x78) >> 3 ;
+        /* Get the frame size and skip so many bytes */
+        if(frameType != 15){
+            /* GLA 20050628 when frametype is >10 we read over a table */
+            if(frameType > 10)
+                continue ;
+
+            size = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
+            if(size > pStreamContext->m_maxAuSize )
+            {
+                pStreamContext->m_maxAuSize = size ;
+            }
+            filePos = size-1;
+            pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+                 M4OSA_kFileSeekCurrent, &filePos);
+            auCount++;
+        }
+    }
+
+    /* Each frame is 20 ms */
+    pStreamContext->m_pStreamHandler->duration = auCount * M4AMRR_FRAME_LENGTH ;
+    pStreamDesc->duration = pStreamContext->m_pStreamHandler->duration ;
+
+    /* Put the file pointer back at the first Access unit */
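+    /* (the "#!AMR\n" header is 6 bytes long, the "#!AMR-WB\n" header 9 bytes) */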
+    if( pStreamContext->m_streamType == M4SYS_kAMR )
+    {
+        filePos = 6;
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekBeginning, &filePos);
+    }
+    if ( pStreamContext->m_streamType == M4SYS_kAMR_WB )
+    {
+        filePos = 9;
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekBeginning, &filePos);
+    }
+    return M4NO_ERROR ;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_startReading(M4OSA_Context Context, M4SYS_StreamID* pStreamIDs )
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_Int32 size = 0 ;
+
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pStreamIDs),M4ERR_PARAMETER,"Stream Ids. M4OSA_NULL");
+    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+         "Bad Context");
+    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kOpened), M4ERR_STATE, "Invalid State");
+
+    while( pStreamIDs[size] != 0 )
+    {
+        if( pStreamIDs[size++] != 1 )
+        {
+            return M4ERR_BAD_STREAM_ID ;
+        }
+    }
+
+    /* Allocate memory for data Address for use in NextAU() */
+    if(M4OSA_NULL == pStreamContext->m_pdataAddress)
+    {
+        size = pStreamContext->m_maxAuSize ;
+        /* dataAddress is owned by Parser, application should not delete or free it */
+        pStreamContext->m_pdataAddress =(M4OSA_MemAddr32)M4OSA_malloc(size + (4 - size % 4),
+            M4AMR_READER, (M4OSA_Char *)("pStreamContext->m_pdataAddress"));
+        if(M4OSA_NULL == pStreamContext->m_pdataAddress)
+        {
+                M4OSA_DEBUG_IF3((M4OSA_NULL == pStreamContext->m_pdataAddress),M4ERR_ALLOC,
+                    "Mem Alloc failed - dataAddress");
+                return M4ERR_ALLOC;
+        }
+    }
+
+    /* Set the state of context to Reading */
+    pStreamContext->m_status = M4AMRR_kReading ;
+
+    return M4NO_ERROR ;
+}
+
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_nextAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu)
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_Char        frameHeader ;
+    M4OSA_Char        frameType ;
+    M4OSA_Int32        auSize;
+    M4OSA_UInt32    size ;
+    M4OSA_FilePosition  filePos;
+
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pAu),M4ERR_PARAMETER,"Access Unit . M4OSA_NULL");
+    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+         "Bad Context");
+    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kReading), M4ERR_STATE, "Invalid State");
+
+    if ( StreamID != 1 )
+    {
+            return M4ERR_BAD_STREAM_ID;
+    }
+
+    /* Read the frame header byte */
+    size = pStreamContext->m_maxAuSize;
+    pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+         (M4OSA_MemAddr8)pStreamContext->m_pdataAddress, &size);
+    if(size != pStreamContext->m_maxAuSize)
+    {
+        return M4WAR_NO_MORE_AU;
+    }
+
+    frameHeader = ((M4OSA_MemAddr8)pStreamContext->m_pdataAddress)[0];
+
+    frameType = ( frameHeader & 0x78 ) >> 3 ;
+
+    if (( pStreamContext->m_streamType == M4SYS_kAMR ) &&
+        ( frameType > 11 ) && ( frameType != 15 ))
+    {
+        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
+    }
+
+    if (( pStreamContext->m_streamType == M4SYS_kAMR_WB ) &&
+        ( frameType > 9 ) && ( frameType != 15 ))
+    {
+        return (M4OSA_ERR)M4ERR_AMR_INVALID_FRAME_TYPE;
+    }
+
+    /* Get the frame size */
+    if(frameType == 15)
+    {
+        auSize = 1;
+    }
+    else
+    {
+        auSize = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
+    }
+
+    size -= auSize ;
+    if(size != 0)
+    {
+        filePos = -((M4OSA_FilePosition)size);
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekCurrent, &filePos);
+    }
+
+    pAu->size = auSize ;
+
+    /* even when frameType == 15 (no data frame), the AMR core decoder outputs a full PCM buffer */
+    /*if(frameType == 15 )
+    {
+        pAu->CTS += 0;
+    }*/
+    /*else*/
+    {
+        pAu->CTS += M4AMRR_FRAME_LENGTH ;
+    }
+
+
+    pAu->DTS = pAu->CTS ;
+    pAu->attribute = M4SYS_kFragAttrOk;
+
+    pAu->stream = pStreamContext->m_pStreamHandler;
+    pAu->dataAddress = pStreamContext->m_pdataAddress ;
+
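+    /* Bit 7 of the TOC byte is a padding bit that must be 0 in a valid frame
+       header (RFC 4867); if it is set, the data is treated as end of stream. */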
+    if(frameHeader & 0x80)
+    {
+        return M4WAR_NO_MORE_AU;
+    }
+
+    /* Change the state to implement NextAu->freeAu->NextAu FSM */
+    pStreamContext->m_status = M4AMRR_kReading_nextAU ;
+
+    return M4NO_ERROR ;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_freeAU(M4OSA_Context Context, M4SYS_StreamID StreamID, M4SYS_AccessUnit* pAu)
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pAu),M4ERR_PARAMETER,"Access Unit . M4OSA_NULL");
+    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+         "Bad Context");
+    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kReading_nextAU), M4ERR_STATE,
+         "Invalid State");
+
+    if (( StreamID != 1 ) && ( StreamID != 0))
+    {
+            return M4ERR_BAD_STREAM_ID;
+    }
+
+    /* Change the state to Reading so as to allow access to next AU */
+    pStreamContext->m_status = M4AMRR_kReading ;
+
+    return M4NO_ERROR ;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_seek(M4OSA_Context Context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
+                         M4SYS_SeekAccessMode seekMode, M4OSA_Time* pObtainCTS)
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_UInt32 count, prevAU, nextAU ;
+    M4OSA_UInt32 size ;
+    M4OSA_UInt32 auSize ;
+    M4OSA_UInt32 position, partSeekTime;
+    M4OSA_UInt32 auCount = 0, skipAuCount = 0 ;
+    M4OSA_Char    frameHeader ;
+    M4OSA_Char    frameType ;
+    M4OSA_FilePosition  filePos;
+    M4OSA_Double time_double;
+
+    /*Make explicit time cast, but take care that timescale is not used !!!*/
+    M4OSA_TIME_TO_MS(time_double, time, 1000);
+
+    M4OSA_INT64_FROM_INT32(*pObtainCTS, 0);
+
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+         "Bad Context");
+    M4OSA_DEBUG_IF1(( pStreamContext->m_status != M4AMRR_kReading) && \
+        ( pStreamContext->m_status != M4AMRR_kOpened), M4ERR_STATE, "Invalid State");
+    M4OSA_DEBUG_IF1((time_double < 0),M4ERR_PARAMETER,"negative time");
+
+    /* Coming to seek for the first time, need to build the seekIndex Table */
+    if(M4OSA_NULL == pStreamContext->m_pSeekIndex)
+    {
+        M4OSA_Double duration_double;
+
+        count = 0 ;
+        pStreamContext->m_pSeekIndex =
+             (M4OSA_UInt32*)M4OSA_malloc(M4AMRR_NUM_SEEK_ENTRIES * sizeof(M4OSA_UInt32),
+                 M4AMR_READER, (M4OSA_Char *)("pStreamContext->m_pSeekIndex"));
+
+        if(M4OSA_NULL == pStreamContext->m_pSeekIndex)
+        {
+            M4OSA_DEBUG_IF3((M4OSA_NULL == pStreamContext->m_pSeekIndex),M4ERR_ALLOC,
+                "Mem Alloc Failed - SeekIndex");
+            return M4ERR_ALLOC ;
+        }
+
+        /* point to the first AU */
+        if( pStreamContext->m_streamType == M4SYS_kAMR )
+        {
+            filePos = 6;
+        }
+        else /*if ( pStreamContext->m_streamType == M4SYS_kAMR_WB )*/
+        {
+            filePos = 9;
+        }
+
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekBeginning, &filePos);
+
+        /* Set the position to the beginning of the first AU */
+        position = (pStreamContext->m_streamType != M4SYS_kAMR)?9:6;
+
+        /*Make explicit time cast, but take care that timescale is not used !!!*/
+        M4OSA_TIME_TO_MS(duration_double, pStreamContext->m_pStreamHandler->duration, 1000);
+
+        /* Calculate the seek interval duration based on the total duration */
+        /* Interval = (duration / ENTRIES) in multiples of AU frame length */
+        pStreamContext->m_seekInterval =
+             (M4OSA_UInt32)(duration_double / M4AMRR_NUM_SEEK_ENTRIES) ;
+        pStreamContext->m_seekInterval /= M4AMRR_FRAME_LENGTH ;
+        pStreamContext->m_seekInterval *= M4AMRR_FRAME_LENGTH ;
+        skipAuCount = pStreamContext->m_seekInterval / M4AMRR_FRAME_LENGTH ;
+
+        pStreamContext->m_pSeekIndex[count++]=position;
+        while(count < M4AMRR_NUM_SEEK_ENTRIES )
+        {
+            size = 1;
+            pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+                 (M4OSA_MemAddr8)&frameHeader, &size);
+            if ( size == 0)
+            {
+                break ;
+            }
+            frameType = (frameHeader & 0x78) >> 3 ;
+            if(frameType != 15)
+            {
+                /**< bugfix Ronan Cousyn 05/04/2006: In the core reader AMR, the
+                 * function M4AMRR_seek doesn't check the frameType */
+                if (( pStreamContext->m_streamType == M4SYS_kAMR ) && ( frameType > 10 ))
+                {
+                    return M4ERR_AMR_INVALID_FRAME_TYPE;
+                }
+                if (( pStreamContext->m_streamType == M4SYS_kAMR_WB ) && ( frameType > 9 ))
+                {
+                    return M4ERR_AMR_INVALID_FRAME_TYPE;
+                }
+                auSize = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
+                position += auSize ;
+                filePos = auSize-1;
+                pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+                     M4OSA_kFileSeekCurrent, &filePos);
+                auCount++;
+            }
+            else
+            {
+                position ++;
+            }
+            /* Skip the number of AU's as per interval and store in the Index table */
+            if ( (skipAuCount != 0) && !(auCount % skipAuCount))
+            {
+                pStreamContext->m_pSeekIndex[count++] = position;
+            }
+        }
+    }/* End of Building the seek table */
+
+    /* Use the seek table to seek the required time in the stream */
+
+    /* If we are seeking to the beginning of the file, point to the first AU */
+    if ( seekMode == M4SYS_kBeginning )
+    {
+        if( pStreamContext->m_streamType == M4SYS_kAMR )
+        {
+            filePos = 6;
+        }
+        else /*if ( pStreamContext->m_streamType == M4SYS_kAMR_WB )*/
+        {
+            filePos = 9;
+        }
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekBeginning, &filePos );
+        return M4NO_ERROR ;
+    }
+
+    /* Get the nearest seek index entry */
+    if (0 != pStreamContext->m_seekInterval)
+    {
+        position = (M4OSA_UInt32)(time_double / pStreamContext->m_seekInterval);
+    }
+    else
+    {
+        /*avoid division by 0*/
+        position = 0;
+    }
+
+    /* We only have M4AMRR_NUM_SEEK_ENTRIES seek index entries. */
+    position=(position >= M4AMRR_NUM_SEEK_ENTRIES)?M4AMRR_NUM_SEEK_ENTRIES-1:position;
+
+    /* The seek index points to the nearest AU; we need to search for the
+    required time from that position */
+    partSeekTime = (M4OSA_UInt32)time_double - position * pStreamContext->m_seekInterval;
+
+    position = pStreamContext->m_pSeekIndex[position];
+
+    if(!position)
+    {
+        return M4WAR_INVALID_TIME ;
+    }
+
+    /* point the file pointer to nearest AU */
+    filePos = position;
+    pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile, M4OSA_kFileSeekBeginning,
+         &filePos );
+
+    if ( partSeekTime == 0)
+    {
+        M4OSA_TIME_SET(*pObtainCTS, time);
+        return M4NO_ERROR;
+    }
+
+    M4OSA_INT64_FROM_DOUBLE(*pObtainCTS, (time_double - (M4OSA_Double)partSeekTime)) ;
+
+    switch(seekMode)
+    {
+        /* Get the AU before the target time */
+        case M4SYS_kPreviousRAP:
+        case M4SYS_kNoRAPprevious:
+            position = partSeekTime / M4AMRR_FRAME_LENGTH ;
+            if ( !(partSeekTime % M4AMRR_FRAME_LENGTH) )
+            {
+                position -- ;
+            }
+        break;
+        /* Get the Closest AU following the target time */
+        case M4SYS_kNextRAP:
+        case M4SYS_kNoRAPnext:
+            position = (partSeekTime + M4AMRR_FRAME_LENGTH )/ M4AMRR_FRAME_LENGTH ;
+        break;
+        /*  Get the closest AU to target time */
+        case M4SYS_kClosestRAP:
+        case M4SYS_kNoRAPclosest:
+            prevAU = partSeekTime-(partSeekTime/M4AMRR_FRAME_LENGTH)*M4AMRR_FRAME_LENGTH;
+            nextAU =
+                 ((partSeekTime+M4AMRR_FRAME_LENGTH)/M4AMRR_FRAME_LENGTH)*M4AMRR_FRAME_LENGTH -\
+                     partSeekTime ;
+            if(prevAU < nextAU)
+            {
+                position = partSeekTime / M4AMRR_FRAME_LENGTH ;
+            }
+            else
+            {
+                position = (partSeekTime + M4AMRR_FRAME_LENGTH )/ M4AMRR_FRAME_LENGTH ;
+            }
+        break;
+        case M4SYS_kBeginning:
+        break;
+    }
+
+    count = 0 ;
+    /* Skip access units in the stream to cover the partial seek time
+       and reach the required target time */
+    while(count < position )
+    {
+        size = 1;
+        pStreamContext->m_pOsaFilePtrFct->readData(pStreamContext->m_pAMRFile,
+             (M4OSA_MemAddr8)&frameHeader, &size);
+        if ( size == 0)
+        {
+            /* If the target time is invalid, point to the beginning and return */
+            M4OSA_INT64_FROM_INT32(*pObtainCTS, 0);
+            filePos = pStreamContext->m_pSeekIndex[0];
+            pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+                 M4OSA_kFileSeekBeginning, &filePos);
+            return M4WAR_INVALID_TIME ;
+        }
+        *pObtainCTS += M4AMRR_FRAME_LENGTH; /*Should use M4OSA_INT64_ADD !!*/
+        count++;
+        frameType = (frameHeader & 0x78) >> 3 ;
+        if(frameType == 15)
+        {
+            auSize = 1 ;
+        }
+        else
+        {
+            auSize = M4AMRR_getAuSize(frameType, pStreamContext->m_streamType);
+        }
+
+        filePos = auSize-1;
+        pStreamContext->m_pOsaFilePtrFct->seek(pStreamContext->m_pAMRFile,
+             M4OSA_kFileSeekCurrent, &filePos);
+    }
+
+    return M4NO_ERROR;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_closeRead(M4OSA_Context Context)
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+
+    /* Close the AMR stream */
+    pStreamContext->m_pOsaFilePtrFct->closeRead(pStreamContext->m_pAMRFile);
+
+    pStreamContext->m_status=M4AMRR_kClosed ;
+
+    /* Free the AU data address if it was allocated */
+    if(M4OSA_NULL != pStreamContext->m_pdataAddress)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pStreamContext->m_pdataAddress);
+    }
+
+    /* Free the stream handler if it was allocated */
+    if(M4OSA_NULL != pStreamContext->m_pStreamHandler)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pStreamContext->m_pStreamHandler);
+    }
+
+    /* Seek table is created only when seek is used, so check if memory is allocated */
+    if(M4OSA_NULL != pStreamContext->m_pSeekIndex)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pStreamContext->m_pSeekIndex);
+    }
+
+    /* Free the context */
+    M4OSA_free((M4OSA_MemAddr32)pStreamContext);
+
+    return M4NO_ERROR ;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_getState(M4OSA_Context Context, M4AMRR_State* pState, M4SYS_StreamID streamId)
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+    M4OSA_DEBUG_IF2((M4OSA_NULL == Context),M4ERR_PARAMETER,"Context M4OSA_NULL");
+    M4OSA_DEBUG_IF2((pStreamContext->m_contextId != M4AMRR_CONTEXTID),M4ERR_BAD_CONTEXT,
+         "Bad Context");
+
+    if (( streamId != 1 ) && ( streamId != 0))
+    {
+        return M4ERR_BAD_STREAM_ID;
+    }
+
+    *pState = pStreamContext->m_status ;
+
+    return M4NO_ERROR ;
+}
+
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_getVersion    (M4_VersionInfo *pVersion)
+/*********************************************************/
+{
+    M4OSA_TRACE1_1("M4AMRR_getVersion called with pVersion: 0x%x\n", pVersion);
+    M4OSA_DEBUG_IF1(((M4OSA_UInt32) pVersion == 0),M4ERR_PARAMETER,
+         "pVersion is NULL in M4AMRR_getVersion");
+
+    pVersion->m_major = M4AMRR_VERSION_MAJOR;
+    pVersion->m_minor = M4AMRR_VERSION_MINOR;
+    pVersion->m_revision = M4AMRR_VERSION_REVISION;
+
+    return M4NO_ERROR;
+}
+
+/*********************************************************/
+M4OSA_ERR M4AMRR_getmaxAUsize(M4OSA_Context Context, M4OSA_UInt32 *pMaxAuSize)
+/*********************************************************/
+{
+    M4_AMRR_Context* pStreamContext=(M4_AMRR_Context*)Context;
+
+    /**
+     * Check input parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == Context),  M4ERR_PARAMETER,
+                "M4AMRR_getmaxAUsize: Context is M4OSA_NULL");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pMaxAuSize),M4ERR_PARAMETER,
+                "M4AMRR_getmaxAUsize: pMaxAuSize is M4OSA_NULL");
+
+    *pMaxAuSize = pStreamContext->m_maxAuSize;
+
+    return M4NO_ERROR;
+}
+
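
A note on the seek arithmetic above: the target time is resolved in two steps, a coarse jump through the M4AMRR_NUM_SEEK_ENTRIES (40) seek index slots, one per m_seekInterval milliseconds, followed by a fine scan over fixed 20 ms AMR frames. A minimal sketch of that arithmetic, assuming M4AMRR_NUM_SEEK_ENTRIES is 40 and M4AMRR_FRAME_LENGTH is 20 ms, and ignoring the per-mode +/-1 frame adjustment handled in the switch(seekMode) block:

    #include <stdint.h>

    /* Illustrative sketch only, not part of the patch. */
    static uint32_t coarse_slot(double timeMs, uint32_t seekIntervalMs)
    {
        /* Coarse step: pick the seek-index slot, clamped to the 40 entries. */
        uint32_t slot = seekIntervalMs ? (uint32_t)(timeMs / seekIntervalMs) : 0;
        return (slot >= 40) ? 40 - 1 : slot;
    }

    static uint32_t frames_to_skip(double timeMs, uint32_t seekIntervalMs)
    {
        /* Fine step: time left inside the slot, walked in 20 ms frames. */
        uint32_t slot   = coarse_slot(timeMs, seekIntervalMs);
        uint32_t partMs = (uint32_t)timeMs - slot * seekIntervalMs;
        return partMs / 20;   /* e.g. 4700 ms with a 1000 ms interval: slot 4, 35 frames */
    }

Each skipped frame is then paced through the file by reading its one-byte header and seeking past the mode-dependent payload size returned by M4AMRR_getAuSize().
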
diff --git a/libvideoeditor/vss/src/M4ChannelCoverter.c b/libvideoeditor/vss/src/M4ChannelCoverter.c
new file mode 100755
index 0000000..5d89820
--- /dev/null
+++ b/libvideoeditor/vss/src/M4ChannelCoverter.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4ChannelCoverter.c
+ * @brief   Mono/stereo channel conversion utilities for 16-bit PCM samples
+ * @note
+ ******************************************************************************
+ */
+
+void MonoTo2I_16( const short *src,
+                        short *dst,
+                        short n)
+{
+    short ii;
+    src += n-1;
+    dst += (n*2)-1;
+
+    for (ii = n; ii != 0; ii--){
+        *dst-- = *src;
+        *dst-- = *src--;
+    }
+
+    return;
+}
+
+void From2iToMono_16( const short *src,
+                            short *dst,
+                            short n)
+{
+    short ii;
+    long Temp;
+    for (ii = n; ii != 0; ii--){
+        Temp = (long)*(src++);
+        Temp += (long)*(src++);
+        *(dst++) = (short)(Temp >>1);
+    }
+
+    return;
+}
+
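
The two helpers above are small interleaving utilities: MonoTo2I_16() duplicates each 16-bit mono sample into an interleaved L/R pair, and From2iToMono_16() averages each interleaved pair back to mono. Because MonoTo2I_16() walks both buffers from the end backwards, it also appears safe to expand a buffer in place, provided it has room for 2*n samples. A small hypothetical caller, for illustration only:

    /* Prototypes as defined in M4ChannelCoverter.c above. */
    void MonoTo2I_16(const short *src, short *dst, short n);
    void From2iToMono_16(const short *src, short *dst, short n);

    void example_channel_conversion(void)
    {
        short mono[4] = { 10, 20, 30, 40 };
        short stereo[8];                      /* destination needs 2*n samples */
        short downmix[4];

        MonoTo2I_16(mono, stereo, 4);         /* stereo  = 10,10,20,20,30,30,40,40 */
        From2iToMono_16(stereo, downmix, 4);  /* downmix = 10,20,30,40 */
    }
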
diff --git a/libvideoeditor/vss/src/M4PCMR_CoreReader.c b/libvideoeditor/vss/src/M4PCMR_CoreReader.c
new file mode 100755
index 0000000..15fd9c8
--- /dev/null
+++ b/libvideoeditor/vss/src/M4PCMR_CoreReader.c
@@ -0,0 +1,717 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file    M4PCMR_CoreReader.c
+ * @brief   PCM reader implementation
+ * @note    This file implements functions of the PCM reader
+ ************************************************************************
+ */
+#include "M4OSA_CharStar.h"
+#include "M4PCMR_CoreReader.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_CharStar.h"
+/**
+ ******************************************************************************
+ * PCM reader version numbers
+ ******************************************************************************
+ */
+/* CHANGE_VERSION_HERE */
+#define M4PCMR_VERSION_MAJOR 1
+#define M4PCMR_VERSION_MINOR 0
+#define M4PCMR_VERSION_REVISION 0
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_openRead(M4OSA_Context* pContext, M4OSA_Void* pUrl,
+ *                             M4OSA_FileReadPointer* pFileFunction)
+ * @brief   This function opens a PCM file
+ * @note    This function :
+ *          - opens the PCM file
+ *          - initializes the PCM context
+ *          - verifies the PCM file format
+ *          - fills the decoder config structure
+ *          - changes the reader state to 'Opening'
+ * @param   pContext: (OUT) Pointer on the PCM Reader context
+ * @param   pUrl: (IN) Name of the PCM file
+ * @param   pFileFunction: (IN) Pointer on the file access functions
+ * @return  M4NO_ERROR                      there is no error during the opening
+ * @return  M4ERR_PARAMETER                 pContext and/or pUrl and/or pFileFunction is NULL
+ * @return  M4ERR_ALLOC                     there is no more memory available
+ * @return  M4ERR_FILE_NOT_FOUND            the file cannot be found
+ * @return  M4PCMC_ERR_PCM_NOT_COMPLIANT    the file does not seem to be compliant, no RIFF,
+ *                                             or lack of any mandatory chunk.
+ * @return  M4PCMC_ERR_PCM_NOT_SUPPORTED    the PCM format of this file is not supported by the
+ *                                           reader
+ * @return  Any M4OSA_FILE errors           see OSAL File specification for detailed errors
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_openRead(M4OSA_Context* pContext, M4OSA_Void* pUrl,
+                             M4OSA_FileReadPointer* pFileFunction)
+{
+    M4OSA_ERR       err;
+    M4PCMR_Context *context;
+    M4OSA_Char*        pTempURL;
+    M4OSA_Char        value[6];
+
+    /* Check parameters */
+    if((M4OSA_NULL == pContext)|| (M4OSA_NULL == pUrl) ||(M4OSA_NULL == pFileFunction))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Allocates the context */
+    context = M4OSA_NULL;
+    context = (M4PCMR_Context *)M4OSA_malloc(sizeof(M4PCMR_Context), M4WAV_READER,
+         (M4OSA_Char *)"M4PCMR_openRead");
+    if (M4OSA_NULL == context)
+    {
+        return M4ERR_ALLOC;
+    }
+    *pContext = (M4OSA_Context)context;
+
+    /* Initialize the context */
+    context->m_offset = 0;
+
+    context->m_state            = M4PCMR_kInit;
+    context->m_microState       = M4PCMR_kInit;
+    context->m_pFileReadFunc    = M4OSA_NULL;
+    context->m_fileContext      = M4OSA_NULL;
+    context->m_pAuBuffer        = M4OSA_NULL;
+    context->m_pDecoderSpecInfo = M4OSA_NULL;
+
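+    /* The sampling rate and channel count are recovered from the URL itself:
+       the last 12 characters of the name appear to encode them (a 5-digit
+       sample frequency followed by the channel count, e.g. a name ending in
+       "_16000_1.pcm"), and that suffix is stripped again before the file is
+       actually opened. This naming convention is inferred from the parsing
+       below, not documented elsewhere. */
+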
+    /* Set sample frequency */
+    pTempURL = (M4OSA_Char*)pUrl + (M4OSA_chrLength((M4OSA_Char*)pUrl)-11);
+    M4OSA_chrNCopy(value, pTempURL, 5);
+    M4OSA_chrGetUInt32(pTempURL, &(context->m_decoderConfig.SampleFrequency),
+         M4OSA_NULL, M4OSA_kchrDec);
+
+    /* Set number of channels */
+    pTempURL += 6;
+    M4OSA_chrNCopy(value, pTempURL, 1);
+    M4OSA_chrGetUInt16(pTempURL, &(context->m_decoderConfig.nbChannels),
+         M4OSA_NULL, M4OSA_kchrDec);
+
+    M4OSA_chrNCopy(pUrl,pUrl, (M4OSA_chrLength((M4OSA_Char*)pUrl)-12));
+    /* Open the file */
+    context->m_fileContext = M4OSA_NULL;
+    err = pFileFunction->openRead(&(context->m_fileContext), pUrl, M4OSA_kFileRead);
+    if(M4NO_ERROR != err)
+    {
+        return err;
+    }
+    context->m_decoderConfig.BitsPerSample = 16;
+    context->m_decoderConfig.AvgBytesPerSec = context->m_decoderConfig.SampleFrequency * 2 \
+        * context->m_decoderConfig.nbChannels;
+    err = pFileFunction->getOption(context->m_fileContext, M4OSA_kFileReadGetFileSize,
+         (M4OSA_DataOption*)&(context->m_decoderConfig.DataLength));
+    if(M4NO_ERROR != err)
+    {
+        return err;
+    }
+    context->m_blockSize = 2048 * context->m_decoderConfig.nbChannels;  // Raw PCM.  Hence, get a
+                                                                        // chunk of data
+
+    if(context->m_decoderConfig.SampleFrequency == 8000)
+    {
+        /* AMR case: one 20 ms block can be read directly */
+        context->m_blockSize = context->m_decoderConfig.nbChannels *\
+             (context->m_decoderConfig.SampleFrequency / 50) * \
+                (context->m_decoderConfig.BitsPerSample / 8);
+    }
+    if(context->m_decoderConfig.SampleFrequency == 16000)
+    {
+        /* AAC case, we can't read only 20 ms blocks */
+        context->m_blockSize = 2048 * context->m_decoderConfig.nbChannels;
+    }
+    context->m_dataStartOffset = 0;
+    context->m_pFileReadFunc = pFileFunction;
+
+    context->m_pAuBuffer = (M4OSA_MemAddr32)M4OSA_malloc(context->m_blockSize, M4WAV_READER,
+         (M4OSA_Char *)"Core PCM reader Access Unit");
+    if (M4OSA_NULL == context->m_pAuBuffer)
+    {
+        err = M4ERR_ALLOC;
+        goto cleanup;
+    }
+
+    /* Change state */
+    context->m_state = M4PCMR_kOpening;
+
+    return M4NO_ERROR;
+
+cleanup:
+
+    /* Close the file */
+    if(context->m_pFileReadFunc != M4OSA_NULL)
+        context->m_pFileReadFunc->closeRead(context->m_fileContext);
+
+    /* Free internal context */
+    M4OSA_free((M4OSA_MemAddr32)context);
+    *pContext = M4OSA_NULL;
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_getNextStream(M4OSA_Context context, M4SYS_StreamDescription* pStreamDesc)
+ * @brief   This function gets the (unique) stream of a PCM file
+ * @note    This function :
+ *          - Allocates and fills the decoder specific info structure
+ *          - Fills the pStreamDesc structure allocated by the caller
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   pStreamDesc: (IN) Stream Description context
+ * @return  M4NO_ERROR          there is no error
+ * @return  M4ERR_PARAMETER     at least one parameter is NULL
+ * @return  M4ERR_ALLOC         there is no more memory available
+ * @return  M4ERR_STATE         this function cannot be called now
+ * @return  Any M4OSA_FILE      errors see OSAL File specification for detailed errors
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_getNextStream(M4OSA_Context context, M4SYS_StreamDescription* pStreamDesc)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+
+    /* Check parameters */
+    if((M4OSA_NULL == context)|| (M4OSA_NULL == pStreamDesc))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    if (c->m_state == M4PCMR_kOpening_streamRetrieved)
+    {
+        return M4WAR_NO_MORE_STREAM;
+    }
+    /* Check Reader's m_state */
+    if(c->m_state != M4PCMR_kOpening)
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Only one stream is contained in PCM file */
+    pStreamDesc->streamID = 1;
+    /* Not used */
+    pStreamDesc->profileLevel = 0;
+    pStreamDesc->decoderSpecificInfoSize = sizeof(M4PCMC_DecoderSpecificInfo);
+
+    /* Allocates decoder specific info structure */
+    pStreamDesc->decoderSpecificInfo = M4OSA_NULL;
+    pStreamDesc->decoderSpecificInfo =
+        (M4OSA_MemAddr32)M4OSA_malloc( sizeof(M4PCMC_DecoderSpecificInfo), M4WAV_READER,
+             (M4OSA_Char *)"M4PCMR_getNextStream");
+    if(pStreamDesc->decoderSpecificInfo == M4OSA_NULL)
+    {
+        return M4ERR_ALLOC;
+    }
+    /* Fill the decoderSpecificInfo structure with the decoder config structure
+         filled in the 'openRead' function */
+    M4OSA_memcpy((M4OSA_MemAddr8)pStreamDesc->decoderSpecificInfo,
+         (M4OSA_MemAddr8)&c->m_decoderConfig, sizeof(M4PCMC_DecoderSpecificInfo));
+
+    /* Fill other fields of pStreamDesc structure */
+    pStreamDesc->timeScale = 1000;
+    pStreamDesc->duration = (M4OSA_Time)(((M4OSA_Double)(c->m_decoderConfig.DataLength)\
+         / (M4OSA_Double)(c->m_decoderConfig.AvgBytesPerSec))*pStreamDesc->timeScale);
+    pStreamDesc->averageBitrate = c->m_decoderConfig.AvgBytesPerSec * 8;/* in bits, multiply by 8*/
+    pStreamDesc->maxBitrate = pStreamDesc->averageBitrate; /* PCM stream has constant bitrate */
+
+    /* Determines Stream type */
+    switch(c->m_decoderConfig.BitsPerSample)
+    {
+        case 8:
+            switch(c->m_decoderConfig.nbChannels)
+            {
+                case 1:
+                    pStreamDesc->streamType = M4SYS_kPCM_8bitsU;
+                    break;
+//                case 2:
+//                    pStreamDesc->streamType = M4SYS_kPCM_8bitsS; /* ??? 8bits stereo not
+                                                                  //   defined ? */
+//                    break;
+                default:
+                    pStreamDesc->streamType = M4SYS_kAudioUnknown;
+            }
+            break;
+
+        case 16:
+            switch(c->m_decoderConfig.nbChannels)
+            {
+                case 1:
+                    pStreamDesc->streamType = M4SYS_kPCM_16bitsU;
+                    break;
+                case 2:
+                    pStreamDesc->streamType = M4SYS_kPCM_16bitsS;
+                    break;
+                default:
+                    pStreamDesc->streamType = M4SYS_kAudioUnknown;
+            }
+            break;
+
+        default:
+            pStreamDesc->streamType = M4SYS_kAudioUnknown;
+    }
+
+    c->m_pDecoderSpecInfo = pStreamDesc->decoderSpecificInfo;
+
+    c->m_state = M4PCMR_kOpening_streamRetrieved;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_startReading(M4OSA_Context context, M4SYS_StreamID* pStreamIDs)
+ * @brief   This function starts reading the unique stream of a PCM file
+ * @note    This function :
+ *          - Verifies that the current reader's state allows starting to read a stream
+ *          - Checks that the provided StreamId is correct (always true, only one stream...)
+ *            In the player application, a StreamId table is initialized as follows:
+ *              M4SYS_StreamID pStreamID[2]={1,0};
+ *          - Changes the reader state to 'Reading'
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   streamID: (IN) Stream selection
+ * @return  M4NO_ERROR          there is no error
+ * @return  M4ERR_PARAMETER     at least one parameter is NULL
+ * @return  M4ERR_STATE         this function cannot be called now
+ * @return  M4ERR_BAD_STREAM_ID at least one of the streamID does not exist
+ *          (should never happen if table pStreamID is correctly initialized as above)
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_startReading(M4OSA_Context context, M4SYS_StreamID* pStreamIDs)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+
+    /* Check parameters */
+    if((M4OSA_NULL == context) || (M4OSA_NULL == pStreamIDs))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check Reader's state */
+    if(c->m_state != M4PCMR_kOpening_streamRetrieved)
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Check pStreamID and, if it is valid, change the reader's state */
+    if(pStreamIDs[0] == 1 || pStreamIDs[0] == 0)
+    /* First and only stream contained in the PCM file */
+    {
+        c->m_state = M4PCMR_kReading;
+        c->m_microState = M4PCMR_kReading;
+    }
+    else
+    {
+        return M4ERR_BAD_STREAM_ID;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_nextAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
+ * @brief   This function reads the next AU contained in the PCM file
+ * @note    This function :
+ *          - Verifies that the current reader's state allows reading an AU
+ *          - Allocates memory to store the read AU
+ *          - Reads data from the file and stores it in the previously allocated memory
+ *          - Fills the AU structure fields (CTS...)
+ *          - Changes the reader state to 'Reading' (not useful...)
+ *          - Changes the micro state from 'Reading' to M4PCMR_kReading_nextAU
+ *            (AU is read and can be deleted)
+ *          - Checks if the last AU has been read or if we're about to read it
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   streamID: (IN) Stream selection
+ * @param   pAU: (IN/OUT) Access Unit structure
+ * @return  M4NO_ERROR          there is no error
+ * @return  M4ERR_PARAMETER     at least one parameter is NULL
+ * @return  M4ERR_ALLOC         there is no more memory available
+ * @return  M4ERR_STATE         this function cannot be called now
+ * @return  M4WAR_NO_DATA_YET   there is not enough data in the file to provide a new access unit.
+ * @return  M4WAR_END_OF_STREAM there is no more access unit in the stream,
+ *                              or the sample number is bigger than the maximum one.
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_nextAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 size_read;
+
+    /* Check parameters */
+    if((M4OSA_NULL == context) || (M4OSA_NULL == pAU))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check Reader's state */
+    if(c->m_state != M4PCMR_kReading && c->m_microState != M4PCMR_kReading)
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Set the AU dataAddress to the preallocated block buffer */
+    pAU->dataAddress = c->m_pAuBuffer;
+    size_read        = c->m_blockSize;
+
+    if((c->m_offset + size_read) >= c->m_decoderConfig.DataLength)
+    {
+        size_read = c->m_decoderConfig.DataLength - c->m_offset;
+    }
+
+    /* Read data from the file into the AU buffer */
+    err = c->m_pFileReadFunc->readData(c->m_fileContext, (M4OSA_MemAddr8)pAU->dataAddress,
+         (M4OSA_UInt32 *)&size_read);
+    if(M4NO_ERROR != err)
+    {
+        return err;
+    }
+
+    /* Calculates the new m_offset, used to determine whether we're at end of reading or not */
+    c->m_offset = c->m_offset + size_read;
+
+    /* Fill the other fields of the AU structure */
+    pAU->CTS =
+         (M4OSA_Time)(((M4OSA_Double)c->m_offset/(M4OSA_Double)c->m_decoderConfig.AvgBytesPerSec)\
+            *1000);
+    pAU->DTS = pAU->CTS;
+
+    pAU->attribute  = 0;
+    pAU->frag       = M4OSA_NULL;
+    pAU->nbFrag     = 0;
+    pAU->stream     = M4OSA_NULL;
+    pAU->size       = size_read;
+
+    /* Change states */
+    c->m_state = M4PCMR_kReading; /* Not changed ... */
+    c->m_microState = M4PCMR_kReading_nextAU; /* AU is read and can be deleted */
+
+    /* Check whether there is another AU to read: if the number of bytes read
+       equals the total number of bytes to decode, there is no more AU to decode */
+    if(c->m_offset >= c->m_decoderConfig.DataLength)
+    {
+        return M4WAR_NO_MORE_AU;
+    }
+
+    return M4NO_ERROR;
+}
+
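+/* Note on the CTS computed in M4PCMR_nextAU(): it is derived purely from the
+   byte offset. Worked example with assumed values: for 16-bit mono PCM at
+   16 kHz, AvgBytesPerSec = 16000 * 2 = 32000, so after 64000 bytes have been
+   read the AU CTS is (64000 / 32000) * 1000 = 2000 ms. */
+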
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_freeAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
+ * @brief   This function frees the AU provided in parameter
+ * @note    This function :
+ *          - Verifies that the current reader's state allows freeing an AU
+ *          - Resets the dataAddress field of the AU structure
+ *          - Changes the reader state to 'Reading' (not useful...)
+ *          - Changes the micro state back to M4PCMR_kReading (another AU can be read)
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   streamID: (IN) Stream selection
+ * @param   pAU: (IN) Access Unit structure
+ * @return  M4NO_ERROR  there is no error
+ * @return  M4ERR_PARAMETER at least one parameter is NULL
+ * @return  M4ERR_STATE this function cannot be called now
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_freeAU(M4OSA_Context context, M4SYS_StreamID streamID, M4SYS_AccessUnit* pAU)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+
+    /* Check parameters */
+    if((M4OSA_NULL == context ) || (M4OSA_NULL == pAU))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check Reader's state */
+    if(c->m_state != M4PCMR_kReading && c->m_microState != M4PCMR_kReading_nextAU)
+    {
+        return M4ERR_STATE;
+    }
+
+    pAU->dataAddress = M4OSA_NULL;
+
+    /* Change states */
+    c->m_state = M4PCMR_kReading; /* Not changed ... */
+    c->m_microState = M4PCMR_kReading; /* AU is deleted, another AU can be read */
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_seek(M4OSA_Context context, M4SYS_StreamID* pStreamID,
+ *                       M4OSA_Time time, M4SYS_SeekAccessMode seekAccessMode,
+ *                       M4OSA_Time* pObtainCTS)
+ * @brief   This function seeks into the PCM file at the provided time
+ * @note    This function :
+ *          - Verifies that the current reader's state allows seeking
+ *          - Determines, from the provided time, the offset to seek to in the file
+ *          - If the offset is valid, seeks in the file
+ *          - Updates the new m_offset in the PCM reader context
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   pStreamID: (IN) Stream selection (not used, only 1 stream)
+ * @param   time: (IN) Targeted time
+ * @param   seekAccessMode: (IN) Selects the seek access mode
+ * @param   pObtainCTS: (OUT) Returned time (not used)
+ * @return  M4NO_ERROR              there is no error
+ * @return  M4ERR_PARAMETER         at least one parameter is NULL
+ * @return  M4ERR_ALLOC             there is no more memory available
+ * @return  M4ERR_STATE             this function cannot be called now
+ * @return  M4WAR_INVALID_TIME      Specified time is not reachable
+ * @return  M4ERR_NOT_IMPLEMENTED   This seek mode is not implemented yet
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_seek(M4OSA_Context context, M4SYS_StreamID* pStreamID, M4OSA_Time time,
+                      M4SYS_SeekAccessMode seekAccessMode, M4OSA_Time* pObtainCTS)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 offset;
+    M4OSA_UInt32 alignment;
+    M4OSA_UInt32 size_read;
+
+    /* Check parameters */
+    if((M4OSA_NULL == context) || (M4OSA_NULL == pStreamID))
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check Reader's state */
+    if(c->m_state != M4PCMR_kOpening_streamRetrieved && c->m_state != M4PCMR_kReading)
+    {
+        return M4ERR_STATE;
+    }
+
+    switch(seekAccessMode)
+    {
+        case M4SYS_kBeginning:
+            /* Determine m_offset from time*/
+            offset =
+                (M4OSA_UInt32)(time * ((M4OSA_Double)(c->m_decoderConfig.AvgBytesPerSec) / 1000));
+            /** check the alignment on sample boundary */
+            alignment = c->m_decoderConfig.nbChannels*c->m_decoderConfig.BitsPerSample/8;
+            if (offset%alignment != 0)
+            {
+                offset -= offset%alignment;
+            }
+            /*add the header offset*/
+            offset += c->m_dataStartOffset;
+            /* If m_offset is over file size -> Invalid time */
+            if (offset > (c->m_dataStartOffset + c->m_decoderConfig.DataLength))
+            {
+                return M4WAR_INVALID_TIME;
+            }
+            else
+            {
+                /* Seek file */
+                size_read = offset;
+                err = c->m_pFileReadFunc->seek(c->m_fileContext, M4OSA_kFileSeekBeginning,
+                    (M4OSA_FilePosition *) &size_read);
+                if(M4NO_ERROR != err)
+                {
+                    return err;
+                }
+                /* Update m_offset in M4PCMR_context */
+                c->m_offset = offset - c->m_dataStartOffset;
+            }
+            break;
+
+        default:
+            return M4ERR_NOT_IMPLEMENTED;
+    }
+
+    return M4NO_ERROR;
+}
+
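+/* Worked example of the seek arithmetic in M4PCMR_seek(), with assumed values:
+   for 16-bit stereo PCM at 44.1 kHz, AvgBytesPerSec = 176400, so seeking to
+   5 ms gives a raw offset of 882 bytes; the sample alignment is
+   2 channels * 16 bits / 8 = 4 bytes, so the offset is rounded down to 880
+   before m_dataStartOffset is added. */
+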
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_closeRead(M4OSA_Context context)
+ * @brief   This function closes PCM file, and frees context
+ * @note    This function :
+ *          - Verifies that the current reader's state allows closing the PCM file
+ *          - Closes the file
+ *          - Frees the structures
+ * @param   context: (IN/OUT) PCM Reader context
+ * @return  M4NO_ERROR              there is no error
+ * @return  M4ERR_PARAMETER         at least one parameter is NULL
+ * @return  M4ERR_STATE             this function cannot be called now
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_closeRead(M4OSA_Context context)
+{
+    M4PCMR_Context *c = (M4PCMR_Context *)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check parameters */
+    if(M4OSA_NULL == context)
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    if(c->m_pDecoderSpecInfo != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)c->m_pDecoderSpecInfo);
+    }
+
+    /* Check Reader's state */
+    if(c->m_state != M4PCMR_kReading)
+    {
+        return M4ERR_STATE;
+    }
+    else if(c->m_microState == M4PCMR_kReading_nextAU)
+    {
+        return M4ERR_STATE;
+    }
+
+    if (M4OSA_NULL != c->m_pAuBuffer)
+    {
+        M4OSA_free((M4OSA_MemAddr32)c->m_pAuBuffer);
+    }
+
+    /* Close the file */
+    if (M4OSA_NULL != c->m_pFileReadFunc)
+    {
+        err = c->m_pFileReadFunc->closeRead(c->m_fileContext);
+    }
+
+    /* Free internal context */
+    if (M4OSA_NULL != c)
+    {
+        M4OSA_free((M4OSA_MemAddr32)c);
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_getOption(M4OSA_Context context, M4PCMR_OptionID optionID,
+ *                                M4OSA_DataOption* pValue)
+ * @brief   This function gets an option of the PCM reader
+ * @note    This function :
+ *          - Verifies that the current reader's state allows getting an option
+ *          - Returns the corresponding option value
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   optionID: (IN) ID of the option to get
+ * @param   pValue: (OUT) Variable where the option value is returned
+ * @return  M4NO_ERROR              there is no error.
+ * @return  M4ERR_PARAMETER         at least one parameter is NULL.
+ * @return  M4ERR_BAD_OPTION_ID     the optionID is not a valid one.
+ * @return  M4ERR_STATE             this option is not available now.
+ * @return  M4ERR_NOT_IMPLEMENTED   this option is not implemented
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_getOption(M4OSA_Context context, M4PCMR_OptionID optionID,
+                             M4OSA_DataOption* pValue)
+{
+    M4PCMR_Context *c =(M4PCMR_Context *)context;
+
+    /* Check parameters */
+    if(M4OSA_NULL == context)
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check reader's state */
+    if((c->m_state != M4PCMR_kOpening) && (c->m_state != M4PCMR_kOpening_streamRetrieved)\
+         && (c->m_state != M4PCMR_kReading))
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Depending on the optionID, the value to return differs */
+    switch(optionID)
+    {
+        case M4PCMR_kPCMblockSize:
+            *pValue = &c->m_blockSize;
+            break;
+
+        default:
+            return M4ERR_BAD_OPTION_ID;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR M4PCMR_setOption(M4OSA_Context context, M4PCMR_OptionID optionID,
+ *                                 M4OSA_DataOption Value)
+ * @brief   This function sets an option of the PCM reader
+ * @note    This function :
+ *          - Verifies that the current reader's state allows setting an option
+ *          - Sets the corresponding option value
+ * @param   context: (IN/OUT) PCM Reader context
+ * @param   optionID: (IN) ID of the option to set
+ * @param   Value: (IN) Variable where the option value is stored
+ * @return  M4NO_ERROR              there is no error.
+ * @return  M4ERR_PARAMETER         at least one parameter is NULL.
+ * @return  M4ERR_BAD_OPTION_ID     the optionID is not a valid one.
+ * @return  M4ERR_STATE             this option is not available now.
+ * @return  M4ERR_NOT_IMPLEMENTED   this option is not implemented
+ ************************************************************************
+ */
+M4OSA_ERR M4PCMR_setOption(M4OSA_Context context, M4PCMR_OptionID optionID, M4OSA_DataOption Value)
+{
+    M4PCMR_Context *c =(M4PCMR_Context *)context;
+
+    /* Check parameters */
+    if(context == M4OSA_NULL)
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check reader's state */
+    if((c->m_state != M4PCMR_kOpening) && (c->m_state != M4PCMR_kOpening_streamRetrieved)\
+         && (c->m_state != M4PCMR_kReading))
+    {
+        return M4ERR_STATE;
+    }
+
+    /* Depending on the optionID, the value to set differs */
+    switch(optionID)
+    {
+        case M4PCMR_kPCMblockSize:
+            c->m_blockSize = (M4OSA_UInt32)Value;
+            break;
+
+        default:
+            return M4ERR_BAD_OPTION_ID;
+    }
+
+    return M4NO_ERROR;
+}
+
+/*********************************************************/
+M4OSA_ERR M4PCMR_getVersion (M4_VersionInfo *pVersion)
+/*********************************************************/
+{
+    M4OSA_TRACE1_1("M4PCMR_getVersion called with pVersion: 0x%x", pVersion);
+    M4OSA_DEBUG_IF1(((M4OSA_UInt32) pVersion == 0),M4ERR_PARAMETER,
+         "pVersion is NULL in M4PCMR_getVersion");
+
+    pVersion->m_major = M4PCMR_VERSION_MAJOR;
+    pVersion->m_minor = M4PCMR_VERSION_MINOR;
+    pVersion->m_revision = M4PCMR_VERSION_REVISION;
+
+    return M4NO_ERROR;
+}
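
Taken together, the state machine above (kInit, kOpening, kOpening_streamRetrieved, kReading, plus the kReading/kReading_nextAU micro state) implies the following call sequence for a client. This is a minimal sketch, assuming the same headers as M4PCMR_CoreReader.c; the driver function is hypothetical and error handling is abbreviated:

    /* Hypothetical driver, for illustration only. */
    static M4OSA_ERR readWholePcm(M4OSA_Void *pUrl, M4OSA_FileReadPointer *pFileFcts)
    {
        M4OSA_Context           ctx;
        M4SYS_StreamDescription desc;
        M4SYS_StreamID          ids[2] = { 1, 0 };
        M4SYS_AccessUnit        au;
        M4OSA_ERR               err;

        err = M4PCMR_openRead(&ctx, pUrl, pFileFcts);        /* -> kOpening */
        if (M4NO_ERROR != err) return err;

        err = M4PCMR_getNextStream(ctx, &desc);              /* unique stream, ID 1 */
        if (M4NO_ERROR != err) return err;

        err = M4PCMR_startReading(ctx, ids);                 /* -> kReading */
        if (M4NO_ERROR != err) return err;

        do
        {
            err = M4PCMR_nextAU(ctx, desc.streamID, &au);    /* one block of raw PCM */
            if ((M4NO_ERROR == err) || (M4WAR_NO_MORE_AU == err))
            {
                /* ... consume au.dataAddress / au.size here ... */
                M4PCMR_freeAU(ctx, desc.streamID, &au);
            }
        } while (M4NO_ERROR == err);                         /* loop ends on M4WAR_NO_MORE_AU */

        return M4PCMR_closeRead(ctx);                        /* also frees the decoder info */
    }

M4PCMR_seek() can be slotted in either before the first M4PCMR_nextAU() call or between AUs, since it accepts both the kOpening_streamRetrieved and kReading states.
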
diff --git a/libvideoeditor/vss/src/M4PTO3GPP_API.c b/libvideoeditor/vss/src/M4PTO3GPP_API.c
new file mode 100755
index 0000000..5581cbd
--- /dev/null
+++ b/libvideoeditor/vss/src/M4PTO3GPP_API.c
@@ -0,0 +1,2060 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4PTO3GPP_API.c
+ * @brief   Picture to 3gpp Service implementation.
+ * @note
+ ******************************************************************************
+*/
+
+/*16 bytes signature to be written in the generated 3gp files */
+#define M4PTO3GPP_SIGNATURE     "NXP-SW : PTO3GPP"
+
+/****************/
+/*** Includes ***/
+/****************/
+
+/**
+ *  Our header */
+#include "M4PTO3GPP_InternalTypes.h"
+#include "M4PTO3GPP_API.h"
+
+/**
+ *  Our errors */
+#include "M4PTO3GPP_ErrorCodes.h"
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+#include "VideoEditorVideoEncoder.h"
+#endif
+
+
+/**
+ *  OSAL headers */
+#include "M4OSA_Memory.h"       /* OSAL memory management */
+#include "M4OSA_Debug.h"        /* OSAL debug management */
+
+
+/************************/
+/*** Various Magicals ***/
+/************************/
+
+#define M4PTO3GPP_WRITER_AUDIO_STREAM_ID                1
+#define M4PTO3GPP_WRITER_VIDEO_STREAM_ID                2
+#define M4PTO3GPP_QUANTIZER_STEP                        4       /**< Quantizer step */
+#define M4PTO3GPP_WRITER_AUDIO_PROFILE_LEVEL            0xFF    /**< No specific profile and
+                                                                     level */
+#define M4PTO3GPP_WRITER_AUDIO_AMR_TIME_SCALE           8000    /**< AMR */
+#define M4PTO3GPP_BITRATE_REGULATION_CTS_PERIOD_IN_MS   500     /**< MAGICAL */
+#define M4PTO3GPP_MARGE_OF_FILE_SIZE                    25000   /**< MAGICAL */
+/**
+ ******************************************************************************
+ * define   AMR 12.2 kbps silence frame
+ ******************************************************************************
+*/
+#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE     32
+#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_DURATION 20
+const M4OSA_UInt8 M4PTO3GPP_AMR_AU_SILENCE_122_FRAME[M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE]=
+{ 0x3C, 0x91, 0x17, 0x16, 0xBE, 0x66, 0x78, 0x00, 0x00, 0x01, 0xE7, 0xAF,
+  0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE     13
+#define M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_DURATION 20
+const M4OSA_UInt8 M4PTO3GPP_AMR_AU_SILENCE_048_FRAME[M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE] =
+{ 0x04, 0xFF, 0x18, 0xC7, 0xF0, 0x0D, 0x04, 0x33, 0xFF, 0xE0, 0x00, 0x00, 0x00 };
+
+/***************************/
+/*** "Private" functions ***/
+/***************************/
+static M4OSA_ERR M4PTO3GPP_Ready4Processing(M4PTO3GPP_InternalContext* pC);
+
+/****************************/
+/*** "External" functions ***/
+/****************************/
+extern M4OSA_ERR M4WRITER_3GP_getInterfaces(M4WRITER_OutputFileType* pType,
+                                            M4WRITER_GlobalInterface** SrcGlobalInterface,
+                                            M4WRITER_DataInterface** SrcDataInterface);
+extern M4OSA_ERR M4READER_AMR_getInterfaces(M4READER_MediaType *pMediaType,
+                                            M4READER_GlobalInterface **pRdrGlobalInterface,
+                                            M4READER_DataInterface **pRdrDataInterface);
+extern M4OSA_ERR M4READER_3GP_getInterfaces(M4READER_MediaType *pMediaType,
+                                            M4READER_GlobalInterface **pRdrGlobalInterface,
+                                            M4READER_DataInterface **pRdrDataInterface);
+
+/****************************/
+/*** "Static" functions ***/
+/****************************/
+static M4OSA_ERR M4PTO3GPP_writeAmrSilence122Frame(
+                                    M4WRITER_DataInterface* pWriterDataIntInterface,
+                                    M4WRITER_Context* pWriterContext,
+                                    M4SYS_AccessUnit* pWriterAudioAU,
+                                    M4OSA_Time mtIncCts);
+static M4OSA_ERR M4PTO3GPP_writeAmrSilence048Frame(
+                                   M4WRITER_DataInterface* pWriterDataIntInterface,
+                                   M4WRITER_Context* pWriterContext,
+                                   M4SYS_AccessUnit* pWriterAudioAU,
+                                   M4OSA_Time mtIncCts);
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo);
+ * @brief   Get the M4PTO3GPP version.
+ * @note    Can be called anytime. Do not need any context.
+ * @param   pVersionInfo        (OUT) Pointer to a version info structure
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+*/
+
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_GetVersion(M4_VersionInfo* pVersionInfo)
+/*********************************************************/
+{
+    M4OSA_TRACE3_1("M4PTO3GPP_GetVersion called with pVersionInfo=0x%x", pVersionInfo);
+
+    /**
+     *  Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pVersionInfo),M4ERR_PARAMETER,
+            "M4PTO3GPP_GetVersion: pVersionInfo is M4OSA_NULL");
+
+    pVersionInfo->m_major       = M4PTO3GPP_VERSION_MAJOR;
+    pVersionInfo->m_minor       = M4PTO3GPP_VERSION_MINOR;
+    pVersionInfo->m_revision    = M4PTO3GPP_VERSION_REVISION;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Init(M4PTO3GPP_Context* pContext);
+ * @brief   Initializes the M4PTO3GPP (allocates an execution context).
+ * @note
+ * @param   pContext            (OUT) Pointer on the M4PTO3GPP context to allocate
+ * @param   pFileReadPtrFct     (IN) Pointer to OSAL file reader functions
+ * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (If Debug Level >= 2)
+ * @return  M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+*/
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_Init(   M4PTO3GPP_Context* pContext,
+                            M4OSA_FileReadPointer* pFileReadPtrFct,
+                            M4OSA_FileWriterPointer* pFileWritePtrFct)
+/*********************************************************/
+{
+    M4PTO3GPP_InternalContext *pC;
+    M4OSA_UInt32 i;
+
+    M4OSA_TRACE3_1("M4PTO3GPP_Init called with pContext=0x%x", pContext);
+
+    /**
+     *  Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+            "M4PTO3GPP_Init: pContext is M4OSA_NULL");
+
+    /**
+     *  Allocate the M4PTO3GPP context and return it to the user */
+    pC = (M4PTO3GPP_InternalContext*)M4OSA_malloc(sizeof(M4PTO3GPP_InternalContext), M4PTO3GPP,
+        (M4OSA_Char *)"M4PTO3GPP_InternalContext");
+    *pContext = pC;
+    if (M4OSA_NULL == pC)
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Step(): unable to allocate M4PTO3GPP_InternalContext,\
+                       returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /**
+     *  Init the context. All pointers must be initialized to M4OSA_NULL because CleanUp()
+        can be called just after Init(). */
+    pC->m_State = M4PTO3GPP_kState_CREATED;
+    pC->m_VideoState = M4PTO3GPP_kStreamState_NOSTREAM;
+    pC->m_AudioState = M4PTO3GPP_kStreamState_NOSTREAM;
+
+    /**
+     *  Reader stuff */
+    pC->m_pReaderAudioAU        = M4OSA_NULL;
+    pC->m_pReaderAudioStream    = M4OSA_NULL;
+
+    /**
+     *  Writer stuff */
+    pC->m_pEncoderHeader        = M4OSA_NULL;
+    pC->m_pWriterVideoStream    = M4OSA_NULL;
+    pC->m_pWriterAudioStream    = M4OSA_NULL;
+    pC->m_pWriterVideoStreamInfo= M4OSA_NULL;
+    pC->m_pWriterAudioStreamInfo= M4OSA_NULL;
+
+    /**
+     *  Contexts of the used modules  */
+    pC->m_pAudioReaderContext    = M4OSA_NULL;
+    pC->m_p3gpWriterContext  = M4OSA_NULL;
+    pC->m_pMp4EncoderContext = M4OSA_NULL;
+    pC->m_eEncoderState = M4PTO3GPP_kNoEncoder;
+
+    /**
+     *  Interfaces of the used modules */
+    pC->m_pReaderGlobInt    = M4OSA_NULL;
+    pC->m_pReaderDataInt    = M4OSA_NULL;
+    pC->m_pWriterGlobInt    = M4OSA_NULL;
+    pC->m_pWriterDataInt    = M4OSA_NULL;
+    pC->m_pEncoderInt       = M4OSA_NULL;
+    pC->m_pEncoderExternalAPI = M4OSA_NULL;
+    pC->m_pEncoderUserData = M4OSA_NULL;
+
+    /**
+     * Fill the OSAL file function set */
+    pC->pOsalFileRead = pFileReadPtrFct;
+    pC->pOsalFileWrite = pFileWritePtrFct;
+
+    /**
+     *  Video rate control stuff */
+    pC->m_mtCts             = 0.0F;
+    pC->m_mtNextCts         = 0.0F;
+    pC->m_mtAudioCts        = 0.0F;
+    pC->m_AudioOffSet       = 0.0F;
+    pC->m_dLastVideoRegulCts= 0.0F;
+    pC->m_PrevAudioCts      = 0.0F;
+    pC->m_DeltaAudioCts     = 0.0F;
+
+    pC->m_MaxFileSize       = 0;
+    pC->m_CurrentFileSize   = 0;
+
+    pC->m_IsLastPicture         = M4OSA_FALSE;
+    pC->m_bAudioPaddingSilence  = M4OSA_FALSE;
+    pC->m_bLastInternalCallBack = M4OSA_FALSE;
+    pC->m_NbCurrentFrame        = 0;
+
+    pC->pSavedPlane = M4OSA_NULL;
+    pC->uiSavedDuration = 0;
+
+    for (i=0; i<M4VE_kEncoderType_NB; i++)
+    {
+        pC->registeredExternalEncs[i].pEncoderInterface = M4OSA_NULL;
+        pC->registeredExternalEncs[i].pUserData = M4OSA_NULL;
+        pC->registeredExternalEncs[i].registered = M4OSA_FALSE;
+    }
+
+    M4OSA_TRACE3_0("M4PTO3GPP_Init(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams);
+ * @brief   Set the M4PTO3GPP input and output files.
+ * @note    It opens the input file, but the output file may not be created yet.
+ * @param   pContext            (IN) M4PTO3GPP context
+ * @param   pParams             (IN) Pointer to the parameters for the PTO3GPP.
+ * @note    The pointed structure can be de-allocated after this function returns because
+ *          it is internally copied by the PTO3GPP
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return  M4ERR_STATE:        M4PTO3GPP is not in an appropriate state for this function to be
+                                 called
+ * @return  M4ERR_ALLOC:        There is no more available memory
+ * @return  ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263 The output video frame
+ *                              size parameter is incompatible with H263 encoding
+ * @return  ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT       The output video format
+                                                            parameter is undefined
+ * @return  ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE      The output video bit-rate parameter
+                                                            is undefined
+ * @return  ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE   The output video frame size parameter
+                                                            is undefined
+ * @return  ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE          The output file size parameter
+                                                            is undefined
+ * @return  ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING             The output audio padding parameter
+                                                            is undefined
+ * @return  ERR_PTO3GPP_UNHANDLED_AUDIO_TRACK_INPUT_FILE    The input audio file contains
+                                                            a track format not handled by PTO3GPP
+ ******************************************************************************
+*/
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_Open(M4PTO3GPP_Context pContext, M4PTO3GPP_Params* pParams)
+/*********************************************************/
+{
+    M4PTO3GPP_InternalContext   *pC = (M4PTO3GPP_InternalContext*)(pContext);
+    M4OSA_ERR                   err = M4NO_ERROR;
+
+    M4READER_MediaFamily    mediaFamily;
+    M4_StreamHandler*       pStreamHandler;
+    M4READER_MediaType      readerMediaType;
+
+    M4OSA_TRACE2_2("M4PTO3GPP_Open called with pContext=0x%x, pParams=0x%x", pContext, pParams);
+
+    /**
+     *  Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER, \
+                    "M4PTO3GPP_Open: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams),  M4ERR_PARAMETER, \
+                    "M4PTO3GPP_Open: pParams is M4OSA_NULL");
+
+    /**
+     *  Check parameters correctness */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pPictureCallbackFct),
+               M4ERR_PARAMETER, "M4PTO3GPP_Open: pC->m_Params.pPictureCallbackFct is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pPictureCallbackCtxt),
+                M4ERR_PARAMETER,
+                 "M4PTO3GPP_Open: pC->m_Params.pPictureCallbackCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pOutput3gppFile),
+                M4ERR_PARAMETER, "M4PTO3GPP_Open: pC->m_Params.pOutput3gppFile is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pParams->pTemporaryFile),
+                M4ERR_PARAMETER, "M4PTO3GPP_Open: pC->m_Params.pTemporaryFile is M4OSA_NULL");
+
+    /**
+     * Video Format */
+    if( (M4VIDEOEDITING_kH263 != pParams->OutputVideoFormat) &&
+        (M4VIDEOEDITING_kMPEG4 != pParams->OutputVideoFormat) &&
+        (M4VIDEOEDITING_kMPEG4_EMP != pParams->OutputVideoFormat) &&
+        (M4VIDEOEDITING_kH264 != pParams->OutputVideoFormat))
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output video format");
+        return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+    }
+
+     /**
+     * Video Bitrate */
+    if(!((M4VIDEOEDITING_k16_KBPS       == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k24_KBPS       == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k32_KBPS       == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k48_KBPS       == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k64_KBPS       == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k96_KBPS       == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k128_KBPS      == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k192_KBPS      == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k256_KBPS      == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k288_KBPS      == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k384_KBPS      == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k512_KBPS      == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k800_KBPS      == pParams->OutputVideoBitrate) ||
+         /*+ New Encoder bitrates */
+         (M4VIDEOEDITING_k2_MBPS        == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k5_MBPS        == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_k8_MBPS        == pParams->OutputVideoBitrate) ||
+         (M4VIDEOEDITING_kVARIABLE_KBPS == pParams->OutputVideoBitrate)))
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output video bitrate");
+        return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE;
+    }
+
+    /**
+     * Video frame size */
+    if (!((M4VIDEOEDITING_kSQCIF == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kQQVGA == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kQCIF == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kQVGA == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kCIF  == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kVGA  == pParams->OutputVideoFrameSize) ||
+
+          (M4VIDEOEDITING_kNTSC == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kWVGA == pParams->OutputVideoFrameSize) ||
+
+          (M4VIDEOEDITING_k640_360 == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_k854_480 == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kHD1280  == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kHD1080  == pParams->OutputVideoFrameSize) ||
+          (M4VIDEOEDITING_kHD960   == pParams->OutputVideoFrameSize)))
+
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output video frame size");
+        return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
+    }
+
+    /**
+     * Maximum size of the output 3GPP file */
+    if (!((M4PTO3GPP_k50_KB     == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k75_KB     == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k100_KB    == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k150_KB    == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k200_KB    == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k300_KB    == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k400_KB    == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_k500_KB    == pParams->OutputFileMaxSize) ||
+          (M4PTO3GPP_kUNLIMITED == pParams->OutputFileMaxSize)))
+
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined output 3GPP file size");
+        return ERR_PTO3GPP_UNDEFINED_OUTPUT_FILE_SIZE;
+    }
+
+    /* Audio padding */
+    if (M4OSA_NULL != pParams->pInputAudioTrackFile)
+    {
+        if ((!( (M4PTO3GPP_kAudioPaddingMode_None   == pParams->AudioPaddingMode) ||
+                (M4PTO3GPP_kAudioPaddingMode_Silence== pParams->AudioPaddingMode) ||
+                (M4PTO3GPP_kAudioPaddingMode_Loop   == pParams->AudioPaddingMode))))
+        {
+            M4OSA_TRACE1_0("M4PTO3GPP_Open: Undefined audio padding");
+            return ERR_PTO3GPP_UNDEFINED_AUDIO_PADDING;
+        }
+    }
+
+    /**< Size check for H263 (only valid sizes are CIF, QCIF and SQCIF) */
+    if ((M4VIDEOEDITING_kH263 == pParams->OutputVideoFormat) &&
+        (M4VIDEOEDITING_kSQCIF != pParams->OutputVideoFrameSize) &&
+        (M4VIDEOEDITING_kQCIF != pParams->OutputVideoFrameSize) &&
+        (M4VIDEOEDITING_kCIF != pParams->OutputVideoFrameSize))
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Open():\
+             returning ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263");
+        return ERR_PTO3GPP_INVALID_VIDEO_FRAME_SIZE_FOR_H263;
+    }
+
+    /**
+     *  Check state automaton */
+    if (M4PTO3GPP_kState_CREATED != pC->m_State)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Open(): Wrong State (%d), returning M4ERR_STATE", pC->m_State);
+        return M4ERR_STATE;
+    }
+
+    /**
+     * Copy the M4PTO3GPP_Params structure */
+    M4OSA_memcpy((M4OSA_MemAddr8)(&pC->m_Params),
+                (M4OSA_MemAddr8)pParams, sizeof(M4PTO3GPP_Params));
+    M4OSA_TRACE1_1("M4PTO3GPP_Open: outputVideoBitrate = %d", pC->m_Params.OutputVideoBitrate);
+
+    /***********************************/
+    /* Open input file with the reader */
+    /***********************************/
+    if (M4OSA_NULL != pC->m_Params.pInputAudioTrackFile)
+    {
+        /**
+         * Get the reader interface according to the input audio file type */
+        switch(pC->m_Params.AudioFileFormat)
+        {
+#ifdef M4VSS_SUPPORT_READER_AMR
+        case M4VIDEOEDITING_kFileType_AMR:
+            err = M4READER_AMR_getInterfaces( &readerMediaType, &pC->m_pReaderGlobInt,
+                    &pC->m_pReaderDataInt);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4PTO3GPP_Open(): M4READER_AMR_getInterfaces returns 0x%x", err);
+                return err;
+            }
+            break;
+#endif
+
+#ifdef AAC_SUPPORTED
+        case M4VIDEOEDITING_kFileType_3GPP:
+            err = M4READER_3GP_getInterfaces( &readerMediaType, &pC->m_pReaderGlobInt,
+                    &pC->m_pReaderDataInt);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4PTO3GPP_Open(): M4READER_3GP_getInterfaces returns 0x%x", err);
+                return err;
+            }
+            break;
+#endif
+
+        default:
+            return ERR_PTO3GPP_UNHANDLED_AUDIO_TRACK_INPUT_FILE;
+        }
+
+        /**
+         *  Initializes the reader shell */
+        err = pC->m_pReaderGlobInt->m_pFctCreate(&pC->m_pAudioReaderContext);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderGlobInt->m_pFctCreate returns 0x%x", err);
+            return err;
+        }
+
+        pC->m_pReaderDataInt->m_readerContext = pC->m_pAudioReaderContext;
+        /**< Link the reader interface to the reader context */
+
+        /**
+         *  Set the reader shell file access functions */
+        err = pC->m_pReaderGlobInt->m_pFctSetOption(pC->m_pAudioReaderContext,
+            M4READER_kOptionID_SetOsaFileReaderFctsPtr,  (M4OSA_DataOption)pC->pOsalFileRead);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderGlobInt->m_pFctSetOption returns 0x%x", err);
+            return err;
+        }
+
+        /**
+         *  Open the input audio file */
+        err = pC->m_pReaderGlobInt->m_pFctOpen(pC->m_pAudioReaderContext,
+            pC->m_Params.pInputAudioTrackFile);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderGlobInt->m_pFctOpen returns 0x%x", err);
+            pC->m_pReaderGlobInt->m_pFctDestroy(pC->m_pAudioReaderContext);
+            pC->m_pAudioReaderContext = M4OSA_NULL;
+            return err;
+        }
+
+        /**
+         *  Get the audio streams from the input file */
+        err = M4NO_ERROR;
+        while (M4NO_ERROR == err)
+        {
+            err = pC->m_pReaderGlobInt->m_pFctGetNextStream(pC->m_pAudioReaderContext,
+                &mediaFamily, &pStreamHandler);
+
+            if((err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE)) ||
+                   (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)))
+            {
+                err = M4NO_ERROR;
+                continue;
+            }
+
+            if (M4NO_ERROR == err) /**< One stream found */
+            {
+                /**< Found an audio stream */
+                if ((M4READER_kMediaFamilyAudio == mediaFamily)
+                    && (M4OSA_NULL == pC->m_pReaderAudioStream))
+                {
+                    pC->m_pReaderAudioStream = (M4_AudioStreamHandler*)pStreamHandler;
+                    /**< Keep pointer to the audio stream */
+                    M4OSA_TRACE3_0("M4PTO3GPP_Open(): Found an audio stream in input");
+                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+
+                    /**
+                     *  Allocate audio AU used for read operations */
+                    pC->m_pReaderAudioAU = (M4_AccessUnit*)M4OSA_malloc(sizeof(M4_AccessUnit),
+                        M4PTO3GPP,(M4OSA_Char *)"pReaderAudioAU");
+                    if (M4OSA_NULL == pC->m_pReaderAudioAU)
+                    {
+                        M4OSA_TRACE1_0("M4PTO3GPP_Open(): unable to allocate pReaderAudioAU, \
+                                       returning M4ERR_ALLOC");
+                        return M4ERR_ALLOC;
+                    }
+
+                    /**
+                     *  Initializes an access Unit */
+                    err = pC->m_pReaderGlobInt->m_pFctFillAuStruct(pC->m_pAudioReaderContext,
+                            pStreamHandler, pC->m_pReaderAudioAU);
+                    if (M4NO_ERROR != err)
+                    {
+                        M4OSA_TRACE1_1("M4PTO3GPP_Open():\
+                         pReaderGlobInt->m_pFctFillAuStruct(audio)returns 0x%x", err);
+                        return err;
+                    }
+                }
+                else
+                {
+                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+                }
+            }
+            else if (M4WAR_NO_MORE_STREAM != err) /**< Unexpected error code */
+            {
+                M4OSA_TRACE1_1("M4PTO3GPP_Open():\
+                     pReaderGlobInt->m_pFctGetNextStream returns 0x%x",
+                    err);
+                return err;
+            }
+        } /* while*/
+    } /*if (M4OSA_NULL != pC->m_Params.pInputAudioTrackFile)*/
+
+    pC->m_VideoState = M4PTO3GPP_kStreamState_STARTED;
+
+    /**
+     * Init the audio stream */
+    if (M4OSA_NULL != pC->m_pReaderAudioStream)
+    {
+        pC->m_AudioState = M4PTO3GPP_kStreamState_STARTED;
+        err = pC->m_pReaderGlobInt->m_pFctReset(pC->m_pAudioReaderContext,
+            (M4_StreamHandler*)pC->m_pReaderAudioStream);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Open(): pReaderDataInt->m_pFctReset(audio returns 0x%x",
+                 err);
+            return err;
+        }
+    }
+
+    /**
+     *  Update state automaton */
+    pC->m_State = M4PTO3GPP_kState_OPENED;
+
+    /**
+     * Get the max File size */
+    switch(pC->m_Params.OutputFileMaxSize)
+    {
+    case M4PTO3GPP_k50_KB:  pC->m_MaxFileSize = 50000;  break;
+    case M4PTO3GPP_k75_KB:  pC->m_MaxFileSize = 75000;  break;
+    case M4PTO3GPP_k100_KB: pC->m_MaxFileSize = 100000; break;
+    case M4PTO3GPP_k150_KB: pC->m_MaxFileSize = 150000; break;
+    case M4PTO3GPP_k200_KB: pC->m_MaxFileSize = 200000; break;
+    case M4PTO3GPP_k300_KB: pC->m_MaxFileSize = 300000; break;
+    case M4PTO3GPP_k400_KB: pC->m_MaxFileSize = 400000; break;
+    case M4PTO3GPP_k500_KB: pC->m_MaxFileSize = 500000; break;
+    case M4PTO3GPP_kUNLIMITED:
+    default:                                            break;
+    }
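+    /**< M4PTO3GPP_kUNLIMITED leaves m_MaxFileSize unset; M4PTO3GPP_Step() only enforces the
+     limit when m_MaxFileSize is non-zero */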
+
+    M4OSA_TRACE3_0("M4PTO3GPP_Open(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext);
+ * @brief   Perform one step of transcoding.
+ * @note
+ * @param   pContext            (IN) M4PTO3GPP context
+ * @return  M4NO_ERROR          No error
+ * @return  M4ERR_PARAMETER     pContext is M4OSA_NULL
+ * @return  M4ERR_STATE:    M4PTO3GPP is not in an appropriate state for this function
+ *                           to be called
+ * @return  M4PTO3GPP_WAR_END_OF_PROCESSING Encoding completed
+ ******************************************************************************
+*/
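+/*
+ * Illustrative caller sketch (not part of the API; error handling trimmed, and the exact
+ * M4PTO3GPP_Open() parameters are the ones declared in the public header):
+ *
+ *     // ... after a successful M4PTO3GPP_Open() on pContext ...
+ *     do {
+ *         err = M4PTO3GPP_Step(pContext);
+ *     } while (M4NO_ERROR == err);
+ *     if (M4PTO3GPP_WAR_END_OF_PROCESSING == err) {
+ *         err = M4PTO3GPP_Close(pContext);   // the output 3GPP file is playable after Close
+ *     }
+ *     M4PTO3GPP_CleanUp(pContext);           // frees everything; pContext is invalid afterwards
+ */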
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_Step(M4PTO3GPP_Context pContext)
+/*********************************************************/
+{
+    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 l_uiAudioStepCount = 0;
+    M4OSA_Int32  JumpToTime = 0;
+    M4OSA_Time  mtIncCts;
+
+    /**
+     *  Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pContext), M4ERR_PARAMETER,
+                "M4PTO3GPP_Step: pContext is M4OSA_NULL");
+
+    /**
+     *  Check state automaton */
+    if ( !((M4PTO3GPP_kState_OPENED == pC->m_State) || (M4PTO3GPP_kState_READY == pC->m_State)) )
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Step(): Wrong State (%d), returning M4ERR_STATE", pC->m_State);
+        return M4ERR_STATE;
+    }
+
+    /******************************************************************/
+    /**
+     *  In case this is the first step, we prepare the decoder, the encoder and the writer */
+    if (M4PTO3GPP_kState_OPENED == pC->m_State)
+    {
+        M4OSA_TRACE2_0("M4PTO3GPP_Step(): This is the first step, \
+                       calling M4PTO3GPP_Ready4Processing");
+
+        /**
+         *  Prepare the reader, the decoder, the encoder, the writer... */
+        err = M4PTO3GPP_Ready4Processing(pC);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Step(): M4PTO3GPP_Ready4Processing() returns 0x%x", err);
+            return err;
+        }
+
+        /**
+         *  Update state automaton */
+        pC->m_State = M4PTO3GPP_kState_READY;
+
+        M4OSA_TRACE3_0("M4PTO3GPP_Step(): returning M4NO_ERROR (a)");
+        return M4NO_ERROR; /**< we only do that in the first step; \
+                           the first REAL step will be the next one */
+    }
+
+
+    /*
+     * Check if we reached the targeted file size.
+     * We do that before the encoding, because the core encoder has to know if this is
+     * the last frame to encode */
+    err = pC->m_pWriterGlobInt->pFctGetOption(pC->m_p3gpWriterContext,
+        M4WRITER_kFileSizeAudioEstimated, (M4OSA_DataOption) &pC->m_CurrentFileSize);
+    if ((0 != pC->m_MaxFileSize) &&
+        /**< Add a margin to the file size so that we never exceed the max file size */
+       ((pC->m_CurrentFileSize + M4PTO3GPP_MARGE_OF_FILE_SIZE) >= pC->m_MaxFileSize))
+    {
+        pC->m_IsLastPicture = M4OSA_TRUE;
+    }
+
+    /******************************************************************
+    *  At that point we are in M4PTO3GPP_kState_READY state
+    *  We perform one step of video encoding
+    ******************************************************************/
+
+    /************* VIDEO ENCODING ***************/
+    if (M4PTO3GPP_kStreamState_STARTED == pC->m_VideoState) /**<If the video encoding is going on*/
+    {   /**
+         * Call the encoder  */
+        pC->m_NbCurrentFrame++;
+
+        /* Check if it is the last frame to encode */
+        if((pC->m_Params.NbVideoFrames > 0) \
+            && (pC->m_NbCurrentFrame >= pC->m_Params.NbVideoFrames))
+        {
+            pC->m_IsLastPicture = M4OSA_TRUE;
+        }
+
+        M4OSA_TRACE2_2("M4PTO3GPP_Step(): Calling pEncoderInt->pFctEncode with videoCts = %.2f\
+                       nb = %lu", pC->m_mtCts, pC->m_NbCurrentFrame);
+
+        err = pC->m_pEncoderInt->pFctEncode(pC->m_pMp4EncoderContext, M4OSA_NULL,
+            /**< The input plane is null because the input Picture will be obtained by the\
+            VPP filter from the context */
+                                        pC->m_mtCts,
+                                        (pC->m_IsLastPicture ?
+                                        M4ENCODER_kLastFrame : M4ENCODER_kNormalFrame) );
+        /**< Passing M4ENCODER_kLastFrame signals that this is the last frame to be encoded,\
+        M4ENCODER_kNormalFrame otherwise */
+
+        M4OSA_TRACE3_2("M4PTO3GPP_Step(): pEncoderInt->pFctEncode returns 0x%x, vidFormat =0x%x",
+            err, pC->m_Params.OutputVideoFormat);
+        if((M4NO_ERROR == err) && (M4VIDEOEDITING_kH264 == pC->m_Params.OutputVideoFormat))
+        {
+            /* Check if this is the last frame */
+            if(M4OSA_TRUE == pC->m_IsLastPicture)
+            {
+                M4OSA_TRACE3_0("M4PTO3GPP_Step(): Last picture");
+                pC->m_VideoState = M4PTO3GPP_kStreamState_FINISHED;
+            }
+
+        }
+
+        if (M4WAR_NO_MORE_AU == err) /**< The video encoding is finished */
+        {
+            M4OSA_TRACE3_0("M4PTO3GPP_Step(): pEncoderInt->pFctEncode returns M4WAR_NO_MORE_AU");
+            pC->m_VideoState = M4PTO3GPP_kStreamState_FINISHED;
+        }
+        else if (M4NO_ERROR != err)     /**< Unexpected error code */
+        {
+            if( (((M4OSA_UInt32)M4WAR_WRITER_STOP_REQ) == err) ||
+                    (((M4OSA_UInt32)M4ERR_ALLOC) == err) )
+            {
+                M4OSA_TRACE1_0("M4PTO3GPP_Step: returning ERR_PTO3GPP_ENCODER_ACCES_UNIT_ERROR");
+                return ERR_PTO3GPP_ENCODER_ACCES_UNIT_ERROR;
+            }
+            else
+            {
+                M4OSA_TRACE1_1("M4PTO3GPP_Step(): pEncoderInt->pFctEncode(last) (a) returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+    } /**< End of video encoding */
+
+
+    /****** AUDIO TRANSCODING (read + null encoding + write) ******/
+    if (M4PTO3GPP_kStreamState_STARTED == pC->m_AudioState)
+    {
+        while ( (M4PTO3GPP_kStreamState_STARTED == pC->m_AudioState) &&
+                (pC->m_mtAudioCts < pC->m_mtNextCts))
+        {
+            l_uiAudioStepCount++;
+            if (M4OSA_FALSE == pC->m_bAudioPaddingSilence)
+            {
+                /**< Read the next audio AU in the input Audio file */
+                err = pC->m_pReaderDataInt->m_pFctGetNextAu(pC->m_pAudioReaderContext,
+                    (M4_StreamHandler*)pC->m_pReaderAudioStream, pC->m_pReaderAudioAU);
+                pC->m_mtAudioCts = pC->m_pReaderAudioAU->m_CTS + pC->m_AudioOffSet;
+
+                if (M4WAR_NO_MORE_AU == err)    /* The audio transcoding is finished */
+                {
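+                    /**< End of audio input: the behaviour depends on the configured padding
+                     *   mode (see the switch below): None stops the audio track, Silence keeps
+                     *   writing AMR silence frames (AMR-NB input only), Loop jumps back to the
+                     *   beginning of the audio file. */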
+                    M4OSA_TRACE2_0("M4PTO3GPP_Step():\
+                                  pReaderDataInt->m_pFctGetNextAu(audio) returns \
+                                    M4WAR_NO_MORE_AU");
+                    switch(pC->m_Params.AudioPaddingMode)
+                    {
+                        case M4PTO3GPP_kAudioPaddingMode_None:
+
+                            pC->m_AudioState = M4PTO3GPP_kStreamState_FINISHED;
+                            break;
+
+                        case M4PTO3GPP_kAudioPaddingMode_Silence:
+
+                            if (M4DA_StreamTypeAudioAmrNarrowBand
+                                != pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+                                /**< Do nothing if the input audio file format is not AMR */
+                            {
+                                pC->m_AudioState = M4PTO3GPP_kStreamState_FINISHED;
+                            }
+                            else
+                            {
+                                pC->m_bAudioPaddingSilence = M4OSA_TRUE;
+                            }
+                            break;
+
+                        case M4PTO3GPP_kAudioPaddingMode_Loop:
+
+                            /**< Jump to the beginning of the audio file */
+                            err = pC->m_pReaderGlobInt->m_pFctJump(pC->m_pAudioReaderContext,
+                                (M4_StreamHandler*)pC->m_pReaderAudioStream, &JumpToTime);
+
+                            if (M4NO_ERROR != err)
+                            {
+                                M4OSA_TRACE1_1("M4PTO3GPP_Step(): \
+                                              pReaderDataInt->m_pFctReset(audio returns 0x%x",
+                                               err);
+                                return err;
+                            }
+
+                            if (M4DA_StreamTypeAudioAmrNarrowBand
+                                == pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+                            {
+                                pC->m_mtAudioCts += 20; /*< SEMC bug fixed at Lund */
+                                pC->m_AudioOffSet = pC->m_mtAudioCts;
+
+                                /**
+                                 * 'BZZZ' bug fix:
+                                 * add a silence frame */
+                                mtIncCts = (M4OSA_Time)((pC->m_mtAudioCts) *
+                                    (pC->m_pWriterAudioStream->timeScale / 1000.0));
+                                err = M4PTO3GPP_writeAmrSilence122Frame(pC->m_pWriterDataInt,
+                                    pC->m_p3gpWriterContext, &pC->m_WriterAudioAU, mtIncCts);
+
+                                if (M4NO_ERROR != err)
+                                {
+                                    M4OSA_TRACE1_1("M4PTO3GPP_Step(): \
+                                                   M4PTO3GPP_AddAmrSilenceSid returns 0x%x", err);
+                                    return err;
+                                }/**< Add => no audio cts increment...*/
+                            }
+                            else
+                            {
+                                pC->m_AudioOffSet = pC->m_mtAudioCts + pC->m_DeltaAudioCts;
+                            }
+                            break;
+                    } /* end of: switch */
+                }
+                else if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4PTO3GPP_Step(): pReaderDataInt->m_pFctGetNextAu(Audio)\
+                                   returns 0x%x", err);
+                    return err;
+                }
+                else
+                {
+                    /**
+                     * Save the delta Cts (AAC only) */
+                    pC->m_DeltaAudioCts = pC->m_pReaderAudioAU->m_CTS - pC->m_PrevAudioCts;
+                    pC->m_PrevAudioCts  = pC->m_pReaderAudioAU->m_CTS;
+
+                    /**
+                     *  Prepare the writer AU */
+                    err = pC->m_pWriterDataInt->pStartAU(pC->m_p3gpWriterContext, 1,
+                        &pC->m_WriterAudioAU);
+                    if (M4NO_ERROR != err)
+                    {
+                        M4OSA_TRACE1_1("M4PTO3GPP_Step(): pWriterDataInt->pStartAU(Audio)\
+                                       returns 0x%x", err);
+                        return err;
+                    }
+
+                    /**
+                     *  Copy audio data from reader AU to writer AU */
+                    M4OSA_TRACE2_1("M4PTO3GPP_Step(): Copying audio AU: size=%d",
+                        pC->m_pReaderAudioAU->m_size);
+                    M4OSA_memcpy((M4OSA_MemAddr8)pC->m_WriterAudioAU.dataAddress,
+                        (M4OSA_MemAddr8)pC->m_pReaderAudioAU->m_dataAddress,
+                        pC->m_pReaderAudioAU->m_size);
+                    pC->m_WriterAudioAU.size = pC->m_pReaderAudioAU->m_size;
+
+                    /**
+                     *  Convert CTS unit from milliseconds to timescale */
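+                    /**< i.e. CTS_ticks = CTS_ms * timeScale / 1000. For instance, with a writer
+                     timescale of 8000 Hz (a typical AMR-NB value, quoted here only as an
+                     illustration), a 20 ms frame advances the CTS by 160 ticks. */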
+                    if (M4DA_StreamTypeAudioAmrNarrowBand
+                        != pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+                    {
+                        pC->m_WriterAudioAU.CTS  = (M4OSA_Time)
+                            ((pC->m_AudioOffSet + pC->m_pReaderAudioAU->m_CTS)
+                            * pC->m_pWriterAudioStream->timeScale / 1000.0);
+                    }
+                    else
+                    {
+                        pC->m_WriterAudioAU.CTS = (M4OSA_Time)(pC->m_mtAudioCts *
+                            (pC->m_pWriterAudioStream->timeScale / 1000.0));
+                    }
+                    pC->m_WriterAudioAU.nbFrag = 0;
+                    M4OSA_TRACE2_1("M4PTO3GPP_Step(): audio AU: CTS=%d ms", pC->m_mtAudioCts
+                        /*pC->m_pReaderAudioAU->m_CTS*/);
+
+                    /**
+                     *  Write it to the output file */
+                    err = pC->m_pWriterDataInt->pProcessAU(pC->m_p3gpWriterContext, 1,
+                        &pC->m_WriterAudioAU);
+
+                    if (M4NO_ERROR != err)
+                    {
+                        M4OSA_TRACE1_1("M4PTO3GPP_Step(): pWriterDataInt->pProcessAU(Audio)\
+                                       returns 0x%x", err);
+                        return err;
+                    }
+                }
+            }
+            else /**< M4OSA_TRUE == pC->m_bAudioPaddingSilence */
+            {
+                if (M4DA_StreamTypeAudioAmrNarrowBand ==
+                    pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+                {
+                    /**
+                     * Fill in audio au with silence */
+                    pC->m_mtAudioCts += 20;
+
+                    /**
+                     * Pad with silence */
+                    mtIncCts = (M4OSA_Time)(pC->m_mtAudioCts
+                        * (pC->m_pWriterAudioStream->timeScale / 1000.0));
+                    err = M4PTO3GPP_writeAmrSilence048Frame(pC->m_pWriterDataInt,
+                        pC->m_p3gpWriterContext, &pC->m_WriterAudioAU, mtIncCts);
+
+                    if (M4NO_ERROR != err)
+                    {
+                        M4OSA_TRACE1_1("M4PTO3GPP_Step(): M4PTO3GPP_AddAmrSilenceSid returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+                else /**< Do nothing if the input audio file format is not AMR */
+                {
+                    pC->m_AudioState = M4PTO3GPP_kStreamState_FINISHED;
+                }
+
+            }
+        } /**< while */
+    } /**< End of audio encoding */
+
+    pC->m_mtCts = pC->m_mtNextCts;
+
+    /**
+     *  The transcoding is finished when no stream is being encoded anymore */
+    if (M4PTO3GPP_kStreamState_FINISHED == pC->m_VideoState)
+    {
+        pC->m_State = M4PTO3GPP_kState_FINISHED;
+        M4OSA_TRACE2_0("M4PTO3GPP_Step(): transcoding finished, returning M4WAR_NO_MORE_AU");
+        return M4PTO3GPP_WAR_END_OF_PROCESSING;
+    }
+
+    M4OSA_TRACE3_0("M4PTO3GPP_Step(): returning M4NO_ERROR (b)");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext);
+ * @brief   Finish the M4PTO3GPP transcoding.
+ * @note    The output 3GPP file is ready to be played after this call
+ * @param   pContext            (IN) M4PTO3GPP context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
+ * @return  M4ERR_STATE:    M4PTO3GPP is not in an appropriate state for this function to be called
+ ******************************************************************************
+*/
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_Close(M4PTO3GPP_Context pContext)
+/*********************************************************/
+{
+    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+    M4OSA_ERR    osaErr = M4NO_ERROR;
+    M4OSA_UInt32 lastCTS;
+    M4ENCODER_Header* encHeader;
+    M4SYS_StreamIDmemAddr streamHeader;
+
+    M4OSA_TRACE3_1("M4PTO3GPP_Close called with pContext=0x%x", pContext);
+
+    /**
+     *  Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pContext), M4ERR_PARAMETER, "M4PTO3GPP_Close:\
+                                                             pContext is M4OSA_NULL");
+
+    /* Check state automaton */
+    if ((pC->m_State != M4PTO3GPP_kState_OPENED) &&
+        (pC->m_State != M4PTO3GPP_kState_READY) &&
+        (pC->m_State != M4PTO3GPP_kState_FINISHED))
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Close(): Wrong State (%d), returning M4ERR_STATE", pC->m_State);
+        return M4ERR_STATE;
+    }
+
+    /*************************************/
+    /******** Finish the encoding ********/
+    /*************************************/
+    if (M4PTO3GPP_kState_READY == pC->m_State)
+    {
+        pC->m_State = M4PTO3GPP_kState_FINISHED;
+    }
+
+    if (M4PTO3GPP_kEncoderRunning == pC->m_eEncoderState)
+    {
+        if (pC->m_pEncoderInt->pFctStop != M4OSA_NULL)
+        {
+            osaErr = pC->m_pEncoderInt->pFctStop(pC->m_pMp4EncoderContext);
+            if (M4NO_ERROR != osaErr)
+            {
+                M4OSA_TRACE1_1("M4PTO3GPP_close: m_pEncoderInt->pFctStop returns 0x%x", osaErr);
+                /* Well... how the heck do you handle a failed cleanup? */
+            }
+        }
+
+        pC->m_eEncoderState = M4PTO3GPP_kEncoderStopped;
+    }
+
+    /* Has the encoder actually been opened? Don't close it if that's not the case. */
+    if (M4PTO3GPP_kEncoderStopped == pC->m_eEncoderState)
+    {
+        osaErr = pC->m_pEncoderInt->pFctClose(pC->m_pMp4EncoderContext);
+        if (M4NO_ERROR != osaErr)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_close: m_pEncoderInt->pFctClose returns 0x%x", osaErr);
+            /* Well... how the heck do you handle a failed cleanup? */
+        }
+
+        pC->m_eEncoderState = M4PTO3GPP_kEncoderClosed;
+    }
+
+    /*******************************/
+    /******** Close 3GP out ********/
+    /*******************************/
+
+    if (M4OSA_NULL != pC->m_p3gpWriterContext)  /* happens in state _SET */
+    {
+        /* HW encoder: fetch the DSI from the shell video encoder, and feed it to the writer before
+        closing it. */
+        if ( (M4VIDEOEDITING_kMPEG4_EMP == pC->m_Params.OutputVideoFormat)
+            || (M4VIDEOEDITING_kMPEG4 == pC->m_Params.OutputVideoFormat)
+            || (M4VIDEOEDITING_kH264 == pC->m_Params.OutputVideoFormat))
+        {
+            osaErr = pC->m_pEncoderInt->pFctGetOption(pC->m_pMp4EncoderContext,
+                M4ENCODER_kOptionID_EncoderHeader,
+                                                            (M4OSA_DataOption)&encHeader);
+            if ( (M4NO_ERROR != osaErr) || (M4OSA_NULL == encHeader->pBuf) )
+            {
+                M4OSA_TRACE1_1("M4PTO3GPP_close: failed to get the encoder header (err 0x%x)",
+                    osaErr);
+                /**< no return here, we still have stuff to deallocate after close, even if \
+                it fails. */
+            }
+            else
+            {
+                /* set this header in the writer */
+                streamHeader.streamID = M4PTO3GPP_WRITER_VIDEO_STREAM_ID;
+                streamHeader.size = encHeader->Size;
+                streamHeader.addr = (M4OSA_MemAddr32)encHeader->pBuf;
+                osaErr = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+                    M4WRITER_kDSI, &streamHeader);
+                if (M4NO_ERROR != osaErr)
+                {
+                    M4OSA_TRACE1_1("M4PTO3GPP_close: failed to set the DSI in the writer \
+                                (err 0x%x)   ", osaErr);
+                }
+            }
+        }
+
+        /* Update last Video CTS */
+        lastCTS = (M4OSA_UInt32)pC->m_mtCts;
+
+        osaErr = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxFileDuration, &lastCTS);
+        if (M4NO_ERROR != osaErr)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Close: SetOption(M4WRITER_kMaxFileDuration) returns 0x%x",
+                osaErr);
+        }
+
+        /* Write and close the 3GP output file */
+        osaErr = pC->m_pWriterGlobInt->pFctCloseWrite(pC->m_p3gpWriterContext);
+        if (M4NO_ERROR != osaErr)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Close: pWriterGlobInt->pFctCloseWrite returns 0x%x", osaErr);
+            /**< don't return yet, we have to close other things */
+        }
+        pC->m_p3gpWriterContext = M4OSA_NULL;
+    }
+
+    /**
+     * State transition */
+    pC->m_State = M4PTO3GPP_kState_CLOSED;
+
+    M4OSA_TRACE3_1("M4PTO3GPP_Close(): returning 0x%x", osaErr);
+    return osaErr;
+}
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext);
+ * @brief   Free all resources used by the M4PTO3GPP.
+ * @note    The context is no more valid after this call
+ * @param   pContext            (IN) M4PTO3GPP context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    pContext is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+*/
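+/* Note: if the context is still in the OPENED, READY or FINISHED state, CleanUp first calls
+   M4PTO3GPP_Close() itself (see below), so a caller may invoke CleanUp directly to abort. */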
+/*********************************************************/
+M4OSA_ERR M4PTO3GPP_CleanUp(M4PTO3GPP_Context pContext)
+/*********************************************************/
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+
+    M4OSA_TRACE3_1("M4PTO3GPP_CleanUp called with pContext=0x%x", pContext);
+
+    /**
+     *  Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pContext),M4ERR_PARAMETER, "M4PTO3GPP_CleanUp: pContext \
+                                                            is M4OSA_NULL");
+
+    /**
+     *  First call Close, if needed, to clean the video encoder */
+
+    if ((M4PTO3GPP_kState_OPENED == pC->m_State) || (M4PTO3GPP_kState_READY == pC->m_State)
+        || (M4PTO3GPP_kState_FINISHED == pC->m_State))
+    {
+        err = M4PTO3GPP_Close(pContext);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: M4PTO3GPP_Close returns 0x%x", err);
+            /**< don't return, we have to free other components */
+        }
+    }
+
+    /**
+     *  Free Audio reader stuff, if needed */
+
+    if (M4OSA_NULL != pC->m_pAudioReaderContext) /**< may be M4OSA_NULL if M4PTO3GPP_Open was not\
+                                                 called */
+    {
+
+        err = pC->m_pReaderGlobInt->m_pFctClose(pC->m_pAudioReaderContext);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: pReaderGlobInt->m_pFctClose returns 0x%x", err);
+            /**< don't return, we have to free other components */
+        }
+        err = pC->m_pReaderGlobInt->m_pFctDestroy(pC->m_pAudioReaderContext);
+        pC->m_pAudioReaderContext = M4OSA_NULL;
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: pReaderGlobInt->m_pFctDestroy returns 0x%x", err);
+            /**< don't return, we have to free other components */
+        }
+    }
+
+    if (M4OSA_NULL != pC->m_pReaderAudioAU)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pReaderAudioAU);
+        pC->m_pReaderAudioAU = M4OSA_NULL;
+    }
+
+    /**
+     *  Free video encoder stuff, if needed */
+    if (M4OSA_NULL != pC->m_pMp4EncoderContext)
+    {
+        err = pC->m_pEncoderInt->pFctCleanup(pC->m_pMp4EncoderContext);
+        pC->m_pMp4EncoderContext = M4OSA_NULL;
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_CleanUp: pEncoderInt->pFctDestroy returns 0x%x", err);
+            /**< don't return, we have to free other components */
+        }
+    }
+
+    if (M4OSA_NULL != pC->m_pWriterVideoStream)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pWriterVideoStream);
+        pC->m_pWriterVideoStream = M4OSA_NULL;
+    }
+    if (M4OSA_NULL != pC->m_pWriterAudioStream)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pWriterAudioStream);
+        pC->m_pWriterAudioStream = M4OSA_NULL;
+    }
+    if (M4OSA_NULL != pC->m_pWriterVideoStreamInfo)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pWriterVideoStreamInfo);
+        pC->m_pWriterVideoStreamInfo = M4OSA_NULL;
+    }
+    if (M4OSA_NULL != pC->m_pWriterAudioStreamInfo)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pWriterAudioStreamInfo);
+        pC->m_pWriterAudioStreamInfo = M4OSA_NULL;
+    }
+
+
+    /**
+     *  Free the shells interfaces */
+    if (M4OSA_NULL != pC->m_pReaderGlobInt)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pReaderGlobInt);
+        pC->m_pReaderGlobInt = M4OSA_NULL;
+    }
+    if (M4OSA_NULL != pC->m_pReaderDataInt)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pReaderDataInt);
+        pC->m_pReaderDataInt = M4OSA_NULL;
+    }
+
+    if(M4OSA_NULL != pC->m_pEncoderInt)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pEncoderInt);
+        pC->m_pEncoderInt = M4OSA_NULL;
+    }
+    if(M4OSA_NULL != pC->m_pWriterGlobInt)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pWriterGlobInt);
+        pC->m_pWriterGlobInt = M4OSA_NULL;
+    }
+    if(M4OSA_NULL != pC->m_pWriterDataInt)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pWriterDataInt);
+        pC->m_pWriterDataInt = M4OSA_NULL;
+    }
+    /**< Do not free pC->pOsaMemoryPtrFct, because it is owned by the \
+    application */
+
+    /**
+     *  Free the context itself */
+    M4OSA_free((M4OSA_MemAddr32)pC);
+    pC = M4OSA_NULL;
+
+    M4OSA_TRACE3_0("M4PTO3GPP_CleanUp(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/********************* INTERNAL FUNCTIONS *********************/
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_Ready4Processing(M4PTO3GPP_InternalContext* pC);
+ * @brief   Prepare all resources and interfaces for the transcoding.
+ * @note    It is called by the first M4OSA_Step() call
+ * @param   pC          (IN) M4PTO3GPP private context
+ * @return  M4NO_ERROR: No error
+ * @return  Any error returned by an underlying module
+ ******************************************************************************
+*/
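+/* Overview of the preparation performed below:
+ *   1. pick the video encoder interface matching OutputVideoFormat,
+ *   2. fill the encoder parameters (frame size, bitrate, format, frame rate),
+ *   3. open the 3GPP writer and build the video (and optional audio) stream descriptions,
+ *   4. init/open/start the video encoder,
+ *   5. add the streams to the writer and start writing. */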
+/******************************************************/
+M4OSA_ERR M4PTO3GPP_Ready4Processing(M4PTO3GPP_InternalContext* pC)
+/******************************************************/
+{
+    M4OSA_ERR               err = M4NO_ERROR;
+    M4WRITER_OutputFileType outputFileType;
+    M4OSA_UInt32            uiVersion;
+    M4ENCODER_Format        encFormat;
+    M4ENCODER_AdvancedParams   EncParams;    /**< Encoder advanced parameters */
+    M4SYS_StreamIDValue     optionValue;
+    M4OSA_Bool              bActivateEmp = M4OSA_FALSE;
+
+    M4OSA_TRACE3_1("M4PTO3GPP_Ready4Processing called with pC=0x%x", pC);
+
+    /******************************/
+    /******************************/
+
+    /********************************************/
+    /********                            ********/
+    /******** Video Encoder Params init  ********/
+    /********                            ********/
+    /********************************************/
+
+    /**
+     *  Get the correct encoder interface */
+    switch(pC->m_Params.OutputVideoFormat)
+    {
+        case M4VIDEOEDITING_kMPEG4_EMP: bActivateEmp = M4OSA_TRUE; /* no break */
+        case M4VIDEOEDITING_kMPEG4:
+            if (pC->registeredExternalEncs[M4VE_kMpeg4VideoEnc].registered)
+            {
+#ifdef M4VSS_ENABLE_EXTERNAL_ENCODERS
+                pC->m_pEncoderExternalAPI = pC->registeredExternalEncs[M4VE_kMpeg4VideoEnc]
+                .pEncoderInterface;
+                pC->m_pEncoderUserData = pC->registeredExternalEncs[M4VE_kMpeg4VideoEnc].pUserData;
+
+                err = M4EGE_MPEG4_getInterfaces(&encFormat, &pC->m_pEncoderInt,
+                    M4ENCODER_OPEN_ADVANCED);
+#else
+                M4OSA_TRACE1_0("No external MPEG4 encoder available!\
+                               Did you forget to register one?");
+                err = M4ERR_STATE;
+#endif
+            }
+            else
+            {
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+                err = VideoEditorVideoEncoder_getInterface_MPEG4(&encFormat, &pC->m_pEncoderInt,
+                    M4ENCODER_OPEN_ADVANCED);
+#else /* software MPEG4 encoder not available! */
+                M4OSA_TRACE1_0("No MPEG4 encoder available! Did you forget to register one?");
+                err = M4ERR_STATE;
+#endif /* software MPEG4 encoder available? */
+            }
+            break;
+        case M4VIDEOEDITING_kH263:
+            if (pC->registeredExternalEncs[M4VE_kH263VideoEnc].registered)
+            {
+#ifdef M4VSS_ENABLE_EXTERNAL_ENCODERS
+                pC->m_pEncoderExternalAPI = pC->registeredExternalEncs[M4VE_kH263VideoEnc]
+                .pEncoderInterface;
+                pC->m_pEncoderUserData = pC->registeredExternalEncs[M4VE_kH263VideoEnc].pUserData;
+
+                err = M4EGE_H263_getInterfaces(&encFormat, &pC->m_pEncoderInt,
+                    M4ENCODER_OPEN_ADVANCED);
+#else
+                M4OSA_TRACE1_0("No external H263 encoder available! Did you forget to register\
+                               one?");
+                err = M4ERR_STATE;
+#endif
+            }
+            else
+            {
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+                err = VideoEditorVideoEncoder_getInterface_H263(&encFormat, &pC->m_pEncoderInt,
+                    M4ENCODER_OPEN_ADVANCED);
+#else /* software H263 encoder not available! */
+                M4OSA_TRACE1_0("No H263 encoder available! Did you forget to register one?");
+                err = M4ERR_STATE;
+#endif /* software H263 encoder available? */
+            }
+            break;
+        case M4VIDEOEDITING_kH264:
+            if (pC->registeredExternalEncs[M4VE_kH264VideoEnc].registered)
+            {
+                M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing: No external H264 encoder available! \
+                               Did you forget to register one?");
+                err = M4ERR_STATE;
+            }
+            else
+            {
+#ifdef M4VSS_SUPPORT_ENCODER_AVC
+                err = VideoEditorVideoEncoder_getInterface_H264(&encFormat, &pC->m_pEncoderInt,
+                    M4ENCODER_OPEN_ADVANCED);
+#else /* software H264 encoder not available! */
+                M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing: No H264 encoder available!\
+                               Did you forget to register one?");
+                err = M4ERR_STATE;
+#endif /* software H264 encoder available? */
+            }
+            break;
+        default:
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning \
+                           ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+                           pC->m_Params.OutputVideoFormat);
+            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+    }
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("switch(pC->m_Params.OutputVideoFormat): getInterfaces returns 0x%x", err);
+        return err;
+    }
+
+    /**
+     *  Fill encoder parameters according to M4PTO3GPP settings */
+
+    /**
+     * Video frame size */
+    switch(pC->m_Params.OutputVideoFrameSize)
+    {
+        case M4VIDEOEDITING_kSQCIF :
+            EncParams.FrameHeight = M4ENCODER_SQCIF_Height;
+            EncParams.FrameWidth  = M4ENCODER_SQCIF_Width;
+            break;
+        case M4VIDEOEDITING_kQQVGA :
+            EncParams.FrameHeight = M4ENCODER_QQVGA_Height;
+            EncParams.FrameWidth  = M4ENCODER_QQVGA_Width;
+            break;
+        case M4VIDEOEDITING_kQCIF :
+            EncParams.FrameHeight = M4ENCODER_QCIF_Height;
+            EncParams.FrameWidth  = M4ENCODER_QCIF_Width;
+            break;
+        case M4VIDEOEDITING_kQVGA :
+            EncParams.FrameHeight = M4ENCODER_QVGA_Height;
+            EncParams.FrameWidth  = M4ENCODER_QVGA_Width;
+            break;
+        case M4VIDEOEDITING_kCIF :
+            EncParams.FrameHeight = M4ENCODER_CIF_Height;
+            EncParams.FrameWidth  = M4ENCODER_CIF_Width;
+            break;
+        case M4VIDEOEDITING_kVGA :
+            EncParams.FrameHeight = M4ENCODER_VGA_Height;
+            EncParams.FrameWidth  = M4ENCODER_VGA_Width;
+            break;
+/* +PR LV5807 */
+        case M4VIDEOEDITING_kWVGA :
+            EncParams.FrameHeight = M4ENCODER_WVGA_Height;
+            EncParams.FrameWidth  = M4ENCODER_WVGA_Width;
+            break;
+        case M4VIDEOEDITING_kNTSC:
+            EncParams.FrameHeight = M4ENCODER_NTSC_Height;
+            EncParams.FrameWidth  = M4ENCODER_NTSC_Width;
+            break;
+/* -PR LV5807 */
+/* +CR Google */
+        case M4VIDEOEDITING_k640_360:
+            EncParams.FrameHeight = M4ENCODER_640_360_Height;
+            EncParams.FrameWidth  = M4ENCODER_640_360_Width;
+            break;
+
+        case M4VIDEOEDITING_k854_480:
+            EncParams.FrameHeight = M4ENCODER_854_480_Height;
+            EncParams.FrameWidth  = M4ENCODER_854_480_Width;
+            break;
+
+        case M4VIDEOEDITING_kHD1280:
+            EncParams.FrameHeight = M4ENCODER_HD1280_Height;
+            EncParams.FrameWidth  = M4ENCODER_HD1280_Width;
+            break;
+
+        case M4VIDEOEDITING_kHD1080:
+            EncParams.FrameHeight = M4ENCODER_HD1080_Height;
+            EncParams.FrameWidth  = M4ENCODER_HD1080_Width;
+            break;
+
+        case M4VIDEOEDITING_kHD960:
+            EncParams.FrameHeight = M4ENCODER_HD960_Height;
+            EncParams.FrameWidth  = M4ENCODER_HD960_Width;
+            break;
+/* -CR Google */
+        default :
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning \
+                           ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE",
+                           pC->m_Params.OutputVideoFrameSize);
+            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FRAME_SIZE;
+    }
+
+    EncParams.InputFormat = M4ENCODER_kIYUV420;
+
+    /**
+     * Video bitrate */
+    switch(pC->m_Params.OutputVideoBitrate)
+    {
+        case M4VIDEOEDITING_k16_KBPS:
+        case M4VIDEOEDITING_k24_KBPS:
+        case M4VIDEOEDITING_k32_KBPS:
+        case M4VIDEOEDITING_k48_KBPS:
+        case M4VIDEOEDITING_k64_KBPS:
+        case M4VIDEOEDITING_k96_KBPS:
+        case M4VIDEOEDITING_k128_KBPS:
+        case M4VIDEOEDITING_k192_KBPS:
+        case M4VIDEOEDITING_k256_KBPS:
+        case M4VIDEOEDITING_k288_KBPS:
+        case M4VIDEOEDITING_k384_KBPS:
+        case M4VIDEOEDITING_k512_KBPS:
+        case M4VIDEOEDITING_k800_KBPS:
+/*+ New Encoder bitrates */
+        case M4VIDEOEDITING_k2_MBPS:
+        case M4VIDEOEDITING_k5_MBPS:
+        case M4VIDEOEDITING_k8_MBPS:
+/*- New Encoder bitrates */
+            EncParams.Bitrate = pC->m_Params.OutputVideoBitrate;
+            break;
+
+        case M4VIDEOEDITING_kVARIABLE_KBPS:
+/*+ New Encoder bitrates */
+            EncParams.Bitrate = M4VIDEOEDITING_k8_MBPS;
+/*- New Encoder bitrates */
+            break;
+
+        default :
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning\
+                           ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE",
+                           pC->m_Params.OutputVideoBitrate);
+            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE;
+    }
+
+    /**
+     * Video format */
+    switch(pC->m_Params.OutputVideoFormat)
+    {
+        case M4VIDEOEDITING_kMPEG4_EMP :
+        case M4VIDEOEDITING_kMPEG4 :
+            EncParams.Format    = M4ENCODER_kMPEG4;
+            break;
+        case M4VIDEOEDITING_kH263 :
+            EncParams.Format    = M4ENCODER_kH263;
+            break;
+        case M4VIDEOEDITING_kH264:
+            EncParams.Format    = M4ENCODER_kH264;
+            break;
+        default :
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning\
+                           ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+                           pC->m_Params.OutputVideoFormat);
+            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+    }
+
+    /**
+     * Video frame rate (set it to max = 30 fps) */
+    EncParams.uiTimeScale = 30;
+    EncParams.uiRateFactor = 1;
+
+    EncParams.FrameRate = M4ENCODER_k30_FPS;
+
+
+    /******************************/
+    /******** 3GP out init ********/
+    /******************************/
+
+    /* Get the 3GPP writer interface */
+    err = M4WRITER_3GP_getInterfaces(&outputFileType, &pC->m_pWriterGlobInt, &pC->m_pWriterDataInt);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4WRITER_3GP_getInterfaces: M4WRITER_3GP_getInterfaces returns 0x%x", err);
+        return err;
+    }
+
+    /* Init the 3GPP writer */
+    err = pC->m_pWriterGlobInt->pFctOpen(&pC->m_p3gpWriterContext, pC->m_Params.pOutput3gppFile,
+        pC->pOsalFileWrite, pC->m_Params.pTemporaryFile, pC->pOsalFileRead);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctOpen returns 0x%x", err);
+        return err;
+    }
+
+    /**
+     *  Link to the writer context in the writer interface */
+    pC->m_pWriterDataInt->pWriterContext = pC->m_p3gpWriterContext;
+
+    /**
+     *  Set the product description string in the written file */
+    err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext, M4WRITER_kEmbeddedString,
+        (M4OSA_DataOption)M4PTO3GPP_SIGNATURE);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: \
+                       pWriterGlobInt->pFctSetOption(M4WRITER_kEmbeddedString) returns 0x%x", err);
+        return err;
+    }
+
+    /**
+     *  Set the product version in the written file */
+    uiVersion = M4VIDEOEDITING_VERSION_MAJOR*100 + M4VIDEOEDITING_VERSION_MINOR*10
+        + M4VIDEOEDITING_VERSION_REVISION;
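+    /**< e.g. a hypothetical version 3.1.2 would be encoded as 3*100 + 1*10 + 2 = 312 */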
+    err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext, M4WRITER_kEmbeddedVersion,
+        (M4OSA_DataOption)&uiVersion);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: \
+                       pWriterGlobInt->pFctSetOption(M4WRITER_kEmbeddedVersion) returns 0x%x", err);
+        return err;
+    }
+
+    /**
+     * In the EMP case, we have to explicitly give an EMP ftyp to the writer */
+    if(M4OSA_TRUE == bActivateEmp)
+    {
+        M4VIDEOEDITING_FtypBox ftyp;
+
+        ftyp.major_brand          = M4VIDEOEDITING_BRAND_3GP4;
+        ftyp.minor_version        = M4VIDEOEDITING_BRAND_0000;
+        ftyp.nbCompatibleBrands   = 2;
+        ftyp.compatible_brands[0] = M4VIDEOEDITING_BRAND_3GP4;
+        ftyp.compatible_brands[1] = M4VIDEOEDITING_BRAND_EMP;
+
+        err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+            (M4OSA_UInt32)M4WRITER_kSetFtypBox, (M4OSA_DataOption) &ftyp);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing:\
+                         m_pWriterGlobInt->pFctSetOption(M4WRITER_kSetFtypBox) returns 0x%x!", err);
+            return err;
+        }
+    }
+
+    /**
+     *  Allocate and fill the video stream structures for the writer */
+    pC->m_pWriterVideoStream =
+        (M4SYS_StreamDescription*)M4OSA_malloc(sizeof(M4SYS_StreamDescription), M4PTO3GPP,
+        (M4OSA_Char *)"pWriterVideoStream");
+    if (M4OSA_NULL == pC->m_pWriterVideoStream)
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate pWriterVideoStream, \
+                       returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+    pC->m_pWriterVideoStreamInfo =
+        (M4WRITER_StreamVideoInfos*)M4OSA_malloc(sizeof(M4WRITER_StreamVideoInfos), M4PTO3GPP,
+        (M4OSA_Char *)"pWriterVideoStreamInfo");
+    if (M4OSA_NULL == pC->m_pWriterVideoStreamInfo)
+    {
+        M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate pWriterVideoStreamInfo,\
+                       returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /**
+     * Fill Video properties structure for the AddStream method */
+    pC->m_pWriterVideoStreamInfo->height        = EncParams.FrameHeight;
+    pC->m_pWriterVideoStreamInfo->width         = EncParams.FrameWidth;
+    pC->m_pWriterVideoStreamInfo->fps           = 0;        /**< Not used by the core writer */
+    pC->m_pWriterVideoStreamInfo->Header.pBuf   = M4OSA_NULL;
+    /** No header, will be set by setOption */
+    pC->m_pWriterVideoStreamInfo->Header.Size   = 0;
+
+    /**
+     *  Fill Video stream description structure for the AddStream method */
+    pC->m_pWriterVideoStream->streamID = M4PTO3GPP_WRITER_VIDEO_STREAM_ID;
+
+    /**
+     * Video format */
+    switch(pC->m_Params.OutputVideoFormat)
+    {
+        case M4VIDEOEDITING_kMPEG4_EMP:
+        case M4VIDEOEDITING_kMPEG4:
+            pC->m_pWriterVideoStream->streamType = M4SYS_kMPEG_4;   break;
+        case M4VIDEOEDITING_kH263:
+            pC->m_pWriterVideoStream->streamType = M4SYS_kH263;     break;
+        case M4VIDEOEDITING_kH264:
+            pC->m_pWriterVideoStream->streamType = M4SYS_kH264;     break;
+        default :
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning \
+                           ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT",
+                           pC->m_Params.OutputVideoFormat);
+            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_FORMAT;
+    }
+
+    /**
+     * Video bitrate */
+    switch(pC->m_Params.OutputVideoBitrate)
+    {
+        case M4VIDEOEDITING_k16_KBPS:
+        case M4VIDEOEDITING_k24_KBPS:
+        case M4VIDEOEDITING_k32_KBPS:
+        case M4VIDEOEDITING_k48_KBPS:
+        case M4VIDEOEDITING_k64_KBPS:
+        case M4VIDEOEDITING_k96_KBPS:
+        case M4VIDEOEDITING_k128_KBPS:
+        case M4VIDEOEDITING_k192_KBPS:
+        case M4VIDEOEDITING_k256_KBPS:
+        case M4VIDEOEDITING_k288_KBPS:
+        case M4VIDEOEDITING_k384_KBPS:
+        case M4VIDEOEDITING_k512_KBPS:
+        case M4VIDEOEDITING_k800_KBPS:
+/*+ New Encoder bitrates */
+        case M4VIDEOEDITING_k2_MBPS:
+        case M4VIDEOEDITING_k5_MBPS:
+        case M4VIDEOEDITING_k8_MBPS:
+/*- New Encoder bitrates */
+            pC->m_pWriterVideoStream->averageBitrate = pC->m_Params.OutputVideoBitrate;
+            break;
+
+        case M4VIDEOEDITING_kVARIABLE_KBPS :
+            pC->m_pWriterVideoStream->averageBitrate = 0;
+            break;
+
+        default :
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unknown format 0x%x returning\
+                           ERR_M4PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE",
+                           pC->m_Params.OutputVideoBitrate);
+            return ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE;
+    }
+
+    pC->m_pWriterVideoStream->duration                  = 0;        /**< Duration is not known */
+    pC->m_pWriterVideoStream->timeScale                 = 0;    /**< Not used by the core writer */
+    pC->m_pWriterVideoStream->maxBitrate                = pC->m_pWriterVideoStream->averageBitrate;
+    pC->m_pWriterVideoStream->profileLevel              = 0;    /**< Not used by the core writer */
+    pC->m_pWriterVideoStream->decoderSpecificInfo       = (M4OSA_MemAddr32)
+                                                            (pC->m_pWriterVideoStreamInfo);
+    pC->m_pWriterVideoStream->decoderSpecificInfoSize   = sizeof(M4WRITER_StreamVideoInfos);
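+    /**< Like the audio case further below, the shell writer expects the
+     M4WRITER_StreamVideoInfos structure to be passed through the decoderSpecificInfo pointer */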
+
+    /**
+     * Update AU properties for video stream */
+    pC->m_WriterVideoAU.CTS         = pC->m_WriterVideoAU.DTS = 0;  /** Reset time */
+    pC->m_WriterVideoAU.size        = 0;
+    pC->m_WriterVideoAU.frag        = M4OSA_NULL;
+    pC->m_WriterVideoAU.nbFrag      = 0;                            /** No fragment */
+    pC->m_WriterVideoAU.stream      = pC->m_pWriterVideoStream;
+    pC->m_WriterVideoAU.attribute   = AU_RAP;
+    pC->m_WriterVideoAU.dataAddress = M4OSA_NULL;
+
+    /**
+     *  If there is an audio input, allocate and fill the audio stream structures for the writer */
+    if(M4OSA_NULL != pC->m_pReaderAudioStream)
+    {
+        pC->m_pWriterAudioStream =
+            (M4SYS_StreamDescription*)M4OSA_malloc(sizeof(M4SYS_StreamDescription), M4PTO3GPP,
+            (M4OSA_Char *)"pWriterAudioStream");
+        if (M4OSA_NULL == pC->m_pWriterAudioStream)
+        {
+            M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate pWriterAudioStream, \
+                           returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->m_pWriterAudioStreamInfo =
+            (M4WRITER_StreamAudioInfos*)M4OSA_malloc(sizeof(M4WRITER_StreamAudioInfos), M4PTO3GPP,
+            (M4OSA_Char *)"pWriterAudioStreamInfo");
+        if (M4OSA_NULL == pC->m_pWriterAudioStreamInfo)
+        {
+            M4OSA_TRACE1_0("M4PTO3GPP_Ready4Processing(): unable to allocate \
+                           pWriterAudioStreamInfo, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+        pC->m_pWriterAudioStreamInfo->nbSamplesPerSec = 0; /**< unused by our shell writer */
+        pC->m_pWriterAudioStreamInfo->nbBitsPerSample = 0; /**< unused by our shell writer */
+        pC->m_pWriterAudioStreamInfo->nbChannels = 1;      /**< unused by our shell writer */
+
+        if( (M4OSA_NULL != pC->m_pReaderAudioStream) && /* audio could have been discarded */
+            (M4OSA_NULL != pC->m_pReaderAudioStream->m_basicProperties.m_pDecoderSpecificInfo) )
+        {
+            /* If we copy the stream from the input, we copy its DSI */
+            pC->m_pWriterAudioStreamInfo->Header.Size =
+                pC->m_pReaderAudioStream->m_basicProperties.m_decoderSpecificInfoSize;
+            pC->m_pWriterAudioStreamInfo->Header.pBuf =
+                (M4OSA_MemAddr8)pC->m_pReaderAudioStream->m_basicProperties.m_pDecoderSpecificInfo;
+        }
+        else
+        {
+            /* Writer will put a default DSI */
+            pC->m_pWriterAudioStreamInfo->Header.Size = 0;
+            pC->m_pWriterAudioStreamInfo->Header.pBuf = M4OSA_NULL;
+        }
+
+        /**
+         * Add the audio stream */
+        switch (pC->m_pReaderAudioStream->m_basicProperties.m_streamType)
+        {
+            case M4DA_StreamTypeAudioAmrNarrowBand:
+                pC->m_pWriterAudioStream->streamType = M4SYS_kAMR;
+                break;
+            case M4DA_StreamTypeAudioAac:
+                pC->m_pWriterAudioStream->streamType = M4SYS_kAAC;
+                break;
+            case M4DA_StreamTypeAudioEvrc:
+                pC->m_pWriterAudioStream->streamType = M4SYS_kEVRC;
+                break;
+            default:
+                M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: unhandled audio format (0x%x),\
+                               returning ERR_PTO3GPP_UNDEFINED_OUTPUT_VIDEO_BITRATE",
+                               pC->m_pReaderAudioStream->m_basicProperties.m_streamType);
+                return ERR_PTO3GPP_UNDEFINED_OUTPUT_AUDIO_FORMAT;
+        }
+
+        /*
+         * Fill Audio stream description structure for the AddStream method */
+        pC->m_pWriterAudioStream->streamID                  = M4PTO3GPP_WRITER_AUDIO_STREAM_ID;
+        pC->m_pWriterAudioStream->duration                  = 0;/**< Duration is not known yet */
+        pC->m_pWriterAudioStream->timeScale                 = M4PTO3GPP_WRITER_AUDIO_AMR_TIME_SCALE;
+        pC->m_pWriterAudioStream->profileLevel              = M4PTO3GPP_WRITER_AUDIO_PROFILE_LEVEL;
+        pC->m_pWriterAudioStream->averageBitrate            =
+                                pC->m_pReaderAudioStream->m_basicProperties.m_averageBitRate;
+        pC->m_pWriterAudioStream->maxBitrate                =
+                                pC->m_pWriterAudioStream->averageBitrate;
+
+        /**
+         * Our writer shell interface is a little tricky: we put M4WRITER_StreamAudioInfos \
+            in the DSI pointer... */
+        pC->m_pWriterAudioStream->decoderSpecificInfo =
+                    (M4OSA_MemAddr32)pC->m_pWriterAudioStreamInfo;
+
+        /**
+         * Update AU properties for audio stream */
+        pC->m_WriterAudioAU.CTS         = pC->m_WriterAudioAU.DTS = 0;  /** Reset time */
+        pC->m_WriterAudioAU.size        = 0;
+        pC->m_WriterAudioAU.frag        = M4OSA_NULL;
+        pC->m_WriterAudioAU.nbFrag      = 0;                            /** No fragment */
+        pC->m_WriterAudioAU.stream      = pC->m_pWriterAudioStream;
+        pC->m_WriterAudioAU.attribute   = AU_RAP;
+        pC->m_WriterAudioAU.dataAddress = M4OSA_NULL;
+    }
+
+    /************************************/
+    /******** Video Encoder Init ********/
+    /************************************/
+
+    /**
+     * PTO uses its own bitrate regulation, not the "true" core regulation */
+    EncParams.bInternalRegulation = M4OSA_TRUE;
+    EncParams.uiStartingQuantizerValue = M4PTO3GPP_QUANTIZER_STEP;
+
+    /**
+     * Other encoder settings */
+    if(M4OSA_TRUE == bActivateEmp)
+    {
+        EncParams.uiHorizontalSearchRange  = 15;            /* set value */
+        EncParams.uiVerticalSearchRange    = 15;            /* set value */
+        EncParams.bErrorResilience         = M4OSA_FALSE;   /* no error resilience */
+        EncParams.uiIVopPeriod             = 15;            /* one I frame every 15 frames */
+        EncParams.uiMotionEstimationTools  = 1;             /* M4V_MOTION_EST_TOOLS_NO_4MV */
+        EncParams.bAcPrediction            = M4OSA_FALSE;   /* no AC prediction */
+        EncParams.bDataPartitioning        = M4OSA_FALSE;   /* no data partitioning */
+    }
+    else
+    {
+        EncParams.uiHorizontalSearchRange  = 0;             /* use default */
+        EncParams.uiVerticalSearchRange    = 0;             /* use default */
+        EncParams.bErrorResilience         = M4OSA_FALSE;   /* no error resilience */
+        EncParams.uiIVopPeriod             = 15;            /* one I frame every 15 frames */
+        EncParams.uiMotionEstimationTools  = 0;             /* M4V_MOTION_EST_TOOLS_ALL */
+        EncParams.bAcPrediction            = M4OSA_TRUE;    /* use AC prediction */
+        EncParams.bDataPartitioning        = M4OSA_FALSE;   /* no data partitioning */
+    }
+
+    /**
+     * Create video encoder */
+    err = pC->m_pEncoderInt->pFctInit(&pC->m_pMp4EncoderContext, pC->m_pWriterDataInt,
+                                    M4PTO3GPP_applyVPP, pC, pC->m_pEncoderExternalAPI,
+                                    pC->m_pEncoderUserData);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: EncoderInt->pFctInit returns 0x%x", err);
+        return err;
+    }
+
+    pC->m_eEncoderState = M4PTO3GPP_kEncoderClosed;
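+    /**< Encoder state tracking: kEncoderClosed after pFctInit, kEncoderStopped after pFctOpen,
+     kEncoderRunning after pFctStart; M4PTO3GPP_Close() walks the states back down again */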
+
+    err = pC->m_pEncoderInt->pFctOpen(pC->m_pMp4EncoderContext, &pC->m_WriterVideoAU, &EncParams);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: EncoderInt->pFctOpen returns 0x%x", err);
+        return err;
+    }
+
+    pC->m_eEncoderState = M4PTO3GPP_kEncoderStopped;
+
+    if (M4OSA_NULL != pC->m_pEncoderInt->pFctStart)
+    {
+        err = pC->m_pEncoderInt->pFctStart(pC->m_pMp4EncoderContext);
+
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: EncoderInt->pFctStart returns 0x%x", err);
+            return err;
+        }
+    }
+
+    pC->m_eEncoderState = M4PTO3GPP_kEncoderRunning;
+
+    /**
+     * No more setOption on "M4ENCODER_kVideoFragmentSize" here.
+     * It is now automatically and "smartly" set in the encoder shell. */
+
+    /**************************************/
+    /******** 3GP out add streams  ********/
+    /**************************************/
+
+    err = pC->m_pWriterGlobInt->pFctAddStream(pC->m_p3gpWriterContext, pC->m_pWriterVideoStream);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctAddStream(video) returns\
+                       0x%x", err);
+        return err;
+    }
+
+    /**
+     * Set video max au size */
+    optionValue.streamID    = M4PTO3GPP_WRITER_VIDEO_STREAM_ID;
+    optionValue.value = (M4OSA_UInt32)(1.5F * (M4OSA_Float)(pC->m_pWriterVideoStreamInfo->width
+                                                * pC->m_pWriterVideoStreamInfo->height)
+                                                * M4PTO3GPP_VIDEO_MIN_COMPRESSION_RATIO);
+    M4OSA_TRACE3_1("M4PTO3GPP_Ready4Processing,M4WRITER_kMaxAUSize: %u",optionValue.value);
+    err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+                                (M4OSA_UInt32)M4WRITER_kMaxAUSize,(M4OSA_DataOption) &optionValue);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(video,\
+                       M4WRITER_kMaxAUSize) returns 0x%x", err);
+        return err;
+    }
+
+    /**
+     * Set video max chunk size */
+    optionValue.value = (M4OSA_UInt32)((M4OSA_Float)optionValue.value
+                        * M4PTO3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO);
+    M4OSA_TRACE3_1("M4PTO3GPP_Ready4Processing,M4WRITER_kMaxChunckSize: %u",optionValue.value);
+    err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+                        (M4OSA_UInt32)M4WRITER_kMaxChunckSize,(M4OSA_DataOption) &optionValue);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(video,\
+                       M4WRITER_kMaxChunckSize) returns 0x%x", err);
+        return err;
+    }
+
+    if (M4OSA_NULL != pC->m_pReaderAudioStream)
+    {
+        err = pC->m_pWriterGlobInt->pFctAddStream(pC->m_p3gpWriterContext, pC->m_pWriterAudioStream);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctAddStream(audio) \
+                           returns 0x%x", err);
+            return err;
+        }
+
+        /**
+         * Set audio max au size */
+        optionValue.value       = M4PTO3GPP_AUDIO_MAX_AU_SIZE;
+        optionValue.streamID    = M4PTO3GPP_WRITER_AUDIO_STREAM_ID;
+        err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxAUSize,(M4OSA_DataOption) &optionValue);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(audio,\
+                           M4WRITER_kMaxAUSize) returns 0x%x", err);
+            return err;
+        }
+
+        /**
+         * Set audio max chunk size */
+        optionValue.value = M4PTO3GPP_AUDIO_MAX_CHUNK_SIZE; /**< Magical */
+        err = pC->m_pWriterGlobInt->pFctSetOption(pC->m_p3gpWriterContext,
+                        (M4OSA_UInt32)M4WRITER_kMaxChunckSize,(M4OSA_DataOption) &optionValue);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctSetOption(audio,\
+                           M4WRITER_kMaxChunckSize) returns 0x%x", err);
+            return err;
+        }
+    }
+
+    /*
+     * Close stream registration so the writer is ready to write data */
+    err = pC->m_pWriterGlobInt->pFctStartWriting(pC->m_p3gpWriterContext);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Ready4Processing: pWriterGlobInt->pFctStartWriting returns 0x%x",
+                        err);
+        return err;
+    }
+
+
+    M4OSA_TRACE3_0("M4PTO3GPP_Ready4Processing: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ M4OSA_ERR M4PTO3GPP_writeAmrSilence122Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
+                            M4WRITER_Context* pWriterContext,
+                                      M4SYS_AccessUnit* pWriterAudioAU, M4OSA_Time mtIncCts)
+ * @brief   Write an AMR 12.2kbps silence FRAME into the writer
+ * @note    Mainly used to fix the 'bzz' bug...
+ * @param   pWriterDataIntInterface (IN)    writer data interfaces
+ *          pWriterContext          (IN/OUT)writer context
+ *          pWriterAudioAU          (OUT)   writer audio access unit
+ *          mtIncCts                (IN)    writer CTS
+ * @return  M4NO_ERROR: No error
+ ******************************************************************************
+*/
+static M4OSA_ERR M4PTO3GPP_writeAmrSilence122Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
+                                                   M4WRITER_Context* pWriterContext,
+                                                    M4SYS_AccessUnit* pWriterAudioAU,
+                                                    M4OSA_Time mtIncCts)
+{
+    M4OSA_ERR err;
+
+    err = pWriterDataIntInterface->pStartAU(pWriterContext, M4PTO3GPP_WRITER_AUDIO_STREAM_ID,
+                                        pWriterAudioAU);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence122Frame: pWriterDataInt->pStartAU(audio) returns \
+                                                    0x%x!", err);
+        return err;
+    }
+
+    M4OSA_memcpy((M4OSA_MemAddr8)pWriterAudioAU->dataAddress,
+     (M4OSA_MemAddr8)M4PTO3GPP_AMR_AU_SILENCE_122_FRAME, M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE);
+    pWriterAudioAU->size    = M4PTO3GPP_AMR_AU_SILENCE_FRAME_122_SIZE;
+    pWriterAudioAU->CTS     = mtIncCts;
+    pWriterAudioAU->nbFrag  = 0;
+
+    err = pWriterDataIntInterface->pProcessAU(pWriterContext, M4PTO3GPP_WRITER_AUDIO_STREAM_ID,
+                                                pWriterAudioAU);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence122Frame: pWriterDataInt->pProcessAU(silence) \
+                       returns 0x%x!", err);
+        return err;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ M4OSA_ERR M4PTO3GPP_writeAmrSilence048Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
+                                        M4WRITER_Context* pWriterContext,
+                                      M4SYS_AccessUnit* pWriterAudioAU, M4OSA_Time mtIncCts)
+ * @brief   Write an AMR silence FRAME (048 mode) into the writer
+ * @note    Mainly used to fix the 'bzz' bug...
+ * @param   pWriterDataIntInterface (IN)    writer data interfaces
+ *          pWriterContext          (IN/OUT)writer context
+ *          pWriterAudioAU          (OUT)   writer audio access unit
+ *          mtIncCts                (IN)    writer CTS
+ * @return  M4NO_ERROR: No error
+ ******************************************************************************
+*/
+static M4OSA_ERR M4PTO3GPP_writeAmrSilence048Frame(M4WRITER_DataInterface* pWriterDataIntInterface,
+                                                   M4WRITER_Context* pWriterContext,
+                                                M4SYS_AccessUnit* pWriterAudioAU,
+                                                M4OSA_Time mtIncCts)
+{
+    M4OSA_ERR err;
+
+    err = pWriterDataIntInterface->pStartAU(pWriterContext, M4PTO3GPP_WRITER_AUDIO_STREAM_ID,
+                                                        pWriterAudioAU);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence048Frame: pWriterDataInt->pStartAU(audio)\
+                       returns 0x%x!", err);
+        return err;
+    }
+
+    M4OSA_memcpy((M4OSA_MemAddr8)pWriterAudioAU->dataAddress,
+                (M4OSA_MemAddr8)M4PTO3GPP_AMR_AU_SILENCE_048_FRAME,
+                M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE);
+    pWriterAudioAU->size    = M4PTO3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+    pWriterAudioAU->CTS     = mtIncCts;
+    pWriterAudioAU->nbFrag  = 0;
+
+    err = pWriterDataIntInterface->pProcessAU(pWriterContext,
+                    M4PTO3GPP_WRITER_AUDIO_STREAM_ID, pWriterAudioAU);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_writeAmrSilence048Frame: \
+                       pWriterDataInt->pProcessAU(silence) returns 0x%x!", err);
+        return err;
+    }
+
+    return M4NO_ERROR;
+}
+
+
+M4OSA_ERR M4PTO3GPP_RegisterExternalVideoEncoder(M4PTO3GPP_Context pContext,
+                                     M4VE_EncoderType encoderType,
+                                     M4VE_Interface*    pEncoderInterface,
+                                     M4OSA_Void* pUserData)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+
+    switch (encoderType)
+    {
+        case M4VE_kMpeg4VideoEnc:
+        case M4VE_kH263VideoEnc:
+            /* OK */
+        break;
+
+        case M4VE_kH264VideoEnc:
+            M4OSA_TRACE1_0("M4PTO3GPP_RegisterExternalVideoEncoder: \
+                           H264 encoder type not implemented yet");
+            return M4ERR_NOT_IMPLEMENTED;
+        break;
+
+        default:
+            M4OSA_TRACE1_1("M4PTO3GPP_RegisterExternalVideoEncoder:\
+                           unknown encoderType %d", encoderType);
+            return M4ERR_PARAMETER;
+        break;
+    }
+
+    pC->registeredExternalEncs[encoderType].pEncoderInterface = pEncoderInterface;
+    pC->registeredExternalEncs[encoderType].pUserData = pUserData;
+    pC->registeredExternalEncs[encoderType].registered = M4OSA_TRUE;
+
+    /* Notice it overwrites any HW encoder that may already have been registered for this type;
+    this is normal. */
+
+    return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/src/M4PTO3GPP_VideoPreProcessing.c b/libvideoeditor/vss/src/M4PTO3GPP_VideoPreProcessing.c
new file mode 100755
index 0000000..bcbfaf0
--- /dev/null
+++ b/libvideoeditor/vss/src/M4PTO3GPP_VideoPreProcessing.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4PTO3GPP_VideoPreProcessing.c
+ * @brief   Picture-to-3GPP service video preprocessing management.
+ ******************************************************************************
+ */
+
+/**
+ *    OSAL Debug utilities */
+#include "M4OSA_Debug.h"
+
+/**
+ *    OSAL Memory management */
+#include "M4OSA_Memory.h"
+
+/**
+ *    Definition of the M4PTO3GPP internal context */
+#include "M4PTO3GPP_InternalTypes.h"
+
+/**
+ *    Definition of the M4PTO3GPP errors */
+#include "M4PTO3GPP_ErrorCodes.h"
+
+/* If the time increment is too low we get an infinite alloc loop in M4ViEncCaptureFrame() */
+/* Time increment should match 30 fps maximum */
+#define M4PTO3GPP_MIN_TIME_INCREMENT 33.3333334
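+/* 1000 ms / 30 frames = ~33.33 ms per frame */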
+
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+ *                                 M4VIFI_ImagePlane* pPlaneOut)
+ * @brief    Call an external callback to get the picture to encode
+ * @note    It is called by the video encoder
+ * @param    pContext    (IN) VPP context, which actually is the M4PTO3GPP internal context
+ *                            in our case
+ * @param    pPlaneIn    (IN) Contains the image
+ * @param    pPlaneOut    (IN/OUT) Pointer to an array of 3 planes that will contain the
+ *                        output YUV420 image read with the m_pPictureCallbackFct
+ * @return    M4NO_ERROR:    No error
+ * @return    Any error returned by an underlying module
+ ******************************************************************************
+ */
+/******************************************************/
+M4OSA_ERR M4PTO3GPP_applyVPP(M4VPP_Context pContext, M4VIFI_ImagePlane* pPlaneIn,
+                             M4VIFI_ImagePlane* pPlaneOut)
+/******************************************************/
+{
+    M4OSA_ERR    err;
+    M4OSA_Double mtDuration;
+    M4OSA_UInt32 i;
+
+    /*** NOTE ***/
+    /* It's OK to get pPlaneIn == M4OSA_NULL here                        */
+    /* since it has been given NULL in the pFctEncode() call.            */
+    /* It's because we use the M4PTO3GPP internal context to            */
+    /* transmit the encoder input data.                                    */
+    /* The input data is the image read from the m_pPictureCallbackFct    */
+
+    /**
+     *    The VPP context is actually the M4PTO3GPP context! */
+    M4PTO3GPP_InternalContext *pC = (M4PTO3GPP_InternalContext*)(pContext);
+
+    /**
+    *  Get the picture to encode */
+    if (M4OSA_FALSE == pC->m_bLastInternalCallBack)
+    {
+        err = pC->m_Params.pPictureCallbackFct(pC->m_Params.pPictureCallbackCtxt, pPlaneOut,
+             &mtDuration);
+
+        /* In case of error when getting YUV to encode (ex: error when decoding a JPEG) */
+        if((M4NO_ERROR != err) && (((M4OSA_UInt32)M4PTO3GPP_WAR_LAST_PICTURE) != err))
+        {
+            return err;
+        }
+
+        /**
+         * If the end of encoding is requested by the size-limitation system,
+         * we must end the encoding the same way as when it is requested by the
+         * picture callback (i.e. the integrator).
+         * Thus we simulate the LastPicture return code: */
+        if (M4OSA_TRUE == pC->m_IsLastPicture)
+        {
+            err = M4PTO3GPP_WAR_LAST_PICTURE;
+        }
+
+        if(((M4OSA_UInt32)M4PTO3GPP_WAR_LAST_PICTURE) == err)
+        {
+            pC->m_bLastInternalCallBack = M4OSA_TRUE; /* Toggle flag for the final call of the CB*/
+            pC->m_IsLastPicture         = M4OSA_TRUE; /* To stop the encoder */
+            pC->pSavedPlane             = pPlaneOut;  /* Save the last YUV plane ptr */
+            pC->uiSavedDuration         = (M4OSA_UInt32)mtDuration; /* Save the last duration */
+        }
+    }
+    else
+    {
+        /**< Not strictly necessary here, because the last frame duration is set to the
+                last-but-one value by the light writer */
+        /**< Only needed for pC->m_mtNextCts below... */
+        mtDuration = pC->uiSavedDuration;
+
+
+        /** Copy the last YUV plane into the current one
+         * (the last picture is split over two calls due to the extra callback call) */
+        for (i=0; i<3; i++)
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8)pPlaneOut[i].pac_data,
+                 (M4OSA_MemAddr8)pC->pSavedPlane[i].pac_data,
+                     pPlaneOut[i].u_stride * pPlaneOut[i].u_height);
+        }
+    }
+
+    /* TimeIncrement should be 30 fps maximum */
+    if(mtDuration < M4PTO3GPP_MIN_TIME_INCREMENT)
+    {
+        mtDuration = M4PTO3GPP_MIN_TIME_INCREMENT;
+    }
+
+    pC->m_mtNextCts += mtDuration;
+
+    M4OSA_TRACE3_0("M4PTO3GPP_applyVPP: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/src/M4READER_Amr.c b/libvideoeditor/vss/src/M4READER_Amr.c
new file mode 100755
index 0000000..32bf9cf
--- /dev/null
+++ b/libvideoeditor/vss/src/M4READER_Amr.c
@@ -0,0 +1,799 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ************************************************************************
+ * @file   M4READER_Amr.c
+ * @brief  Generic encapsulation of the core AMR reader
+ * @note   This file implements the generic M4READER interface
+ *         on top of the AMR reader
+ ************************************************************************
+*/
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_CoreID.h"
+
+#include "M4_Utils.h"
+
+#include "M4AMRR_CoreReader.h"
+#include "M4READER_Amr.h"
+
+/**
+ ************************************************************************
+ * structure    M4READER_AMR_Context
+ * @brief       This structure defines the internal context of an AMR reader instance
+ * @note        The context is allocated and de-allocated by the reader
+ ************************************************************************
+*/
+typedef struct _M4READER_AMR_Context
+{
+    M4OSA_Context           m_pCoreContext;     /**< core amr reader context */
+    M4_AudioStreamHandler*  m_pAudioStream;     /**< pointer on the audio stream
+                                                 description returned by the core */
+    M4SYS_AccessUnit        m_audioAu;          /**< audio access unit to be filled by the core */
+    M4OSA_Time              m_maxDuration;      /**< duration of the audio stream */
+    M4OSA_FileReadPointer*    m_pOsaFileReaderFcts;    /**< OSAL file read functions */
+
+} M4READER_AMR_Context;
+
+
+/**
+ ************************************************************************
+ * @brief    create an instance of the reader
+ * @note     allocates the context
+ * @param    pContext:        (OUT)    pointer on a reader context
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_ALLOC                a memory allocation has failed
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_create(M4OSA_Context *pContext)
+{
+    M4READER_AMR_Context* pReaderContext;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((pContext == 0), M4ERR_PARAMETER,
+         "M4READER_AMR_create: invalid context pointer");
+
+    pReaderContext = (M4READER_AMR_Context*)M4OSA_malloc(sizeof(M4READER_AMR_Context),
+         M4READER_AMR, (M4OSA_Char *)"M4READER_AMR_Context");
+    if (pReaderContext == M4OSA_NULL)
+    {
+        return M4ERR_ALLOC;
+    }
+
+    pReaderContext->m_pAudioStream  = M4OSA_NULL;
+    pReaderContext->m_audioAu.dataAddress = M4OSA_NULL;
+    M4OSA_INT64_FROM_INT32(pReaderContext->m_maxDuration, 0);
+    pReaderContext->m_pCoreContext = M4OSA_NULL;
+    pReaderContext->m_pOsaFileReaderFcts = M4OSA_NULL;
+
+    *pContext = pReaderContext;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief    destroy the instance of the reader
+ * @note     after this call the context is invalid
+ *
+ * @param    context:        (IN)    Context of the reader
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_destroy(M4OSA_Context context)
+{
+    M4READER_AMR_Context*   pC=(M4READER_AMR_Context*)context;
+
+    /* Check function parameters*/
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+         "M4READER_AMR_destroy: invalid context pointer");
+
+    /**
+     *    Check input parameter */
+    if (M4OSA_NULL == pC)
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_destroy(): M4READER_AMR_destroy: context is M4OSA_NULL,\
+             returning M4ERR_PARAMETER");
+        return M4ERR_PARAMETER;
+    }
+
+    M4OSA_free((M4OSA_MemAddr32)pC);
+
+    return M4NO_ERROR;
+}
+
+
+/**
+ ************************************************************************
+ * @brief    open the reader and initialize its created instance
+ * @note     this function opens the AMR file
+ * @param    context:            (IN)    Context of the reader
+ * @param    pFileDescriptor:    (IN)    Pointer to proprietary data identifying the media to open
+ * @return    M4NO_ERROR                     there is no error
+ * @return    M4ERR_PARAMETER                the context is NULL
+ * @return    M4ERR_BAD_CONTEXT            provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_open(M4OSA_Context context, M4OSA_Void* pFileDescriptor)
+{
+    M4READER_AMR_Context*    pC = (M4READER_AMR_Context*)context;
+    M4OSA_ERR                err;
+
+    /* Check function parameters*/
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),              M4ERR_PARAMETER,
+         "M4READER_AMR_open: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor), M4ERR_PARAMETER,
+         "M4READER_AMR_open: invalid pointer pFileDescriptor");
+
+    err = M4AMRR_openRead( &pC->m_pCoreContext, pFileDescriptor, pC->m_pOsaFileReaderFcts);
+
+    return err;
+}
+
+
+
+/**
+ ************************************************************************
+ * @brief    close the reader
+ * @note
+ * @param    context:        (IN)    Context of the reader
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            the context is NULL
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR   M4READER_AMR_close(M4OSA_Context context)
+{
+    M4READER_AMR_Context*    pC = (M4READER_AMR_Context*)context;
+    M4OSA_ERR                err;
+    M4AMRR_State State;
+
+    /* Check function parameters*/
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+         "M4READER_AMR_close: invalid context pointer");
+
+    /**
+     *    Check input parameter */
+    if (M4OSA_NULL == pC)
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_close(): M4READER_AMR_close: context is M4OSA_NULL,\
+             returning M4ERR_PARAMETER");
+        return M4ERR_PARAMETER;
+    }
+
+    if (M4OSA_NULL != pC->m_pAudioStream)
+    {
+        err = M4AMRR_getState(pC->m_pCoreContext, &State,
+                ((M4_StreamHandler*)pC->m_pAudioStream)->m_streamId);
+        if(M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_0("M4READER_AMR_close: error when calling M4AMRR_getState\n");
+            return err;
+        }
+
+        if (M4AMRR_kReading_nextAU == State)
+        {
+            err = M4AMRR_freeAU(pC->m_pCoreContext,
+                ((M4_StreamHandler*)pC->m_pAudioStream)->m_streamId,  &pC->m_audioAu);
+            if (err != M4NO_ERROR)
+            {
+                M4OSA_TRACE1_0("M4READER_AMR_close: error when freeing access unit\n");
+                return err;
+            }
+        }
+
+        /* Delete the DSI if needed */
+        if(M4OSA_NULL != pC->m_pAudioStream->m_basicProperties.m_pDecoderSpecificInfo)
+        {
+            M4OSA_free((M4OSA_MemAddr32)\
+                pC->m_pAudioStream->m_basicProperties.m_pDecoderSpecificInfo);
+
+            pC->m_pAudioStream->m_basicProperties.m_decoderSpecificInfoSize = 0;
+            pC->m_pAudioStream->m_basicProperties.m_pDecoderSpecificInfo = M4OSA_NULL;
+        }
+
+        /* Finally destroy the stream handler */
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pAudioStream);
+        pC->m_pAudioStream = M4OSA_NULL;
+    }
+
+    if (M4OSA_NULL != pC->m_pCoreContext)
+    {
+        err = M4AMRR_closeRead(pC->m_pCoreContext);
+        pC->m_pCoreContext = M4OSA_NULL;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief    Get the next stream found in the media
+ * @note    current version needs to translate M4SYS_Stream to M4_StreamHandler
+ *
+ * @param    context:        (IN)   Context of the reader
+ * @param    pMediaFamily:   (OUT)  pointer to a user allocated M4READER_MediaFamily
+ *                                  that will be filled with the media family of the found stream
+ * @param    pStreamHandler: (OUT)  pointer to a stream handler that will be
+ *                                  allocated and filled with the found stream description
+ *
+ * @return    M4NO_ERROR            there is no error
+ * @return    M4WAR_NO_MORE_STREAM  no more available stream in the media (all streams found)
+ * @return    M4ERR_PARAMETER       at least one parameter is not properly set (in DEBUG mode only)
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_getNextStream(M4OSA_Context context, M4READER_MediaFamily *pMediaFamily,
+                                     M4_StreamHandler **pStreamHandlerParam)
+{
+    M4READER_AMR_Context*   pC=(M4READER_AMR_Context*)context;
+    M4OSA_ERR               err;
+    M4SYS_StreamID          streamIdArray[2];
+    M4SYS_StreamDescription streamDesc;
+    M4_AudioStreamHandler*  pAudioStreamHandler;
+    M4_StreamHandler*       pStreamHandler;
+
+    M4OSA_DEBUG_IF1((pC == 0),                  M4ERR_PARAMETER,
+                "M4READER_AMR_getNextStream: invalid context");
+    M4OSA_DEBUG_IF1((pMediaFamily == 0),        M4ERR_PARAMETER,
+                "M4READER_AMR_getNextStream: invalid pointer to MediaFamily");
+    M4OSA_DEBUG_IF1((pStreamHandlerParam == 0), M4ERR_PARAMETER,
+                "M4READER_AMR_getNextStream: invalid pointer to StreamHandler");
+
+    err = M4AMRR_getNextStream( pC->m_pCoreContext, &streamDesc);
+    if (err == M4WAR_NO_MORE_STREAM)
+    {
+        streamIdArray[0] = 0;
+        streamIdArray[1] = 0;
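+        /* All streams have been enumerated: switch the core AMR reader to reading mode
+           before propagating the "no more stream" warning to the caller. */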
+        err = M4AMRR_startReading(pC->m_pCoreContext, streamIdArray);
+        if ((M4OSA_UInt32)M4ERR_ALLOC == err)
+        {
+            M4OSA_TRACE2_0("M4READER_AMR_getNextStream: M4AMRR_startReading returns M4ERR_ALLOC!");
+            return err;
+        }
+        return M4WAR_NO_MORE_STREAM;
+    }
+    else if (err != M4NO_ERROR)
+    {
+        return err;
+    }
+
+    *pMediaFamily = M4READER_kMediaFamilyAudio;
+
+    pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_malloc(sizeof(M4_AudioStreamHandler),
+                        M4READER_AMR, (M4OSA_Char *)"M4_AudioStreamHandler");
+    if (pAudioStreamHandler == M4OSA_NULL)
+    {
+        return M4ERR_ALLOC;
+    }
+    pStreamHandler =(M4_StreamHandler*)(pAudioStreamHandler);
+    *pStreamHandlerParam = pStreamHandler;
+    pC->m_pAudioStream = pAudioStreamHandler;
+
+    pAudioStreamHandler->m_structSize = sizeof(M4_AudioStreamHandler);
+
+    /*
+     * Audio stream handler fields are initialised with 0 value.
+     * They will be properly set by the AMR decoder
+     */
+    pAudioStreamHandler->m_samplingFrequency = 0;
+    pAudioStreamHandler->m_byteFrameLength   = 0;
+    pAudioStreamHandler->m_byteSampleSize    = 0;
+    pAudioStreamHandler->m_nbChannels        = 0;
+
+    pStreamHandler->m_pDecoderSpecificInfo    = (M4OSA_UInt8*)(streamDesc.decoderSpecificInfo);
+    pStreamHandler->m_decoderSpecificInfoSize = streamDesc.decoderSpecificInfoSize;
+    pStreamHandler->m_streamId                = streamDesc.streamID;
+ // M4OSA_INT64_FROM_DOUBLE(pStreamHandler->m_duration,
+ // (M4OSA_Double)(((M4OSA_Float)streamDesc.duration*1000/(M4OSA_Float)(streamDesc.timeScale))));
+    pStreamHandler->m_duration                = streamDesc.duration;
+    pStreamHandler->m_pUserData               = (void*)streamDesc.timeScale; /*trick to change*/
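+    /* The stream time scale is smuggled through m_pUserData; it is recovered in
+       M4READER_AMR_getNextAu() as the timeScale. */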
+
+    if (M4OSA_TIME_COMPARE(streamDesc.duration, pC->m_maxDuration) > 0)
+    {
+        M4OSA_TIME_SET(pC->m_maxDuration, streamDesc.duration);
+    }
+    pStreamHandler->m_averageBitRate          = streamDesc.averageBitrate;
+
+    M4AMRR_getmaxAUsize(pC->m_pCoreContext, &pStreamHandler->m_maxAUSize);
+
+    switch (streamDesc.streamType)
+    {
+    case M4SYS_kAMR:
+        pStreamHandler->m_streamType = M4DA_StreamTypeAudioAmrNarrowBand;
+        break;
+    case M4SYS_kAMR_WB:
+        pStreamHandler->m_streamType = M4DA_StreamTypeAudioAmrWideBand;
+        break;
+    default:
+        break;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief    fill the access unit structure with initialization values
+ * @note
+ * @param    context:        (IN)     Context of the reader
+ * @param    pStreamHandler: (IN)     pointer to the stream handler to
+ *                                    which the access unit will be associated
+ * @param    pAccessUnit:    (IN/OUT) pointer to the access unit (allocated by the caller)
+ *                                      to initialize
+ *
+ * @return    M4NO_ERROR              there is no error
+ * @return    M4ERR_PARAMETER         at least one parameter is not properly set
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_fillAuStruct(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                                     M4_AccessUnit *pAccessUnit)
+{
+    M4READER_AMR_Context*   pC = (M4READER_AMR_Context*)context;
+    M4SYS_AccessUnit*       pAu;
+
+    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
+         "M4READER_AMR_fillAuStruct: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_AMR_fillAuStruct: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
+         "M4READER_AMR_fillAuStruct: invalid pointer to M4_AccessUnit");
+
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_fillAuStruct: passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    pAu->dataAddress = M4OSA_NULL;
+    pAu->size        = 0;
+    /* JC: bug fix 1197 (set CTS to -20 so that the first AU CTS is 0) */
+    pAu->CTS         = -20;
+    pAu->DTS         = -20;
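+    /* (an AMR frame lasts 20 ms, hence the -20 ms pre-set above) */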
+    pAu->attribute   = 0;
+    pAu->nbFrag      = 0;
+
+    pAccessUnit->m_size         = 0;
+    /* JC: bug fix 1197 (set CTS to -20 so that the first AU CTS is 0) */
+    pAccessUnit->m_CTS          = -20;
+    pAccessUnit->m_DTS          = -20;
+    pAccessUnit->m_attribute    = 0;
+    pAccessUnit->m_dataAddress  = M4OSA_NULL;/*pBuffer;*/
+    pAccessUnit->m_maxsize      = pStreamHandler->m_maxAUSize;
+    pAccessUnit->m_streamID     = pStreamHandler->m_streamId;
+    pAccessUnit->m_structSize   = sizeof(M4_AccessUnit);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief    get an option value from the reader
+ * @note    this function follows the set/get option mechanism described in OSAL 3.0
+ *          it allows the caller to retrieve a property value:
+ *          - the duration of the longest stream of the media
+ *          - the version number of the reader (not implemented yet)
+ *
+ * @param    context:        (IN)    Context of the reader
+ * @param    optionId:        (IN)    indicates the option to get
+ * @param    pValue:            (OUT)    pointer to structure or value (allocated by user)
+ *                                       where option is stored
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4ERR_BAD_OPTION_ID        when the option ID is not a valid one
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_getOption(M4OSA_Context context, M4OSA_OptionID optionId,
+                                 M4OSA_DataOption pValue)
+
+{
+    M4READER_AMR_Context* pC = (M4READER_AMR_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),     M4ERR_PARAMETER, "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER, "invalid value pointer");
+
+    switch(optionId)
+    {
+    case M4READER_kOptionID_Duration :
+        {
+            M4OSA_TIME_SET(*(M4OSA_Time*)pValue, pC->m_maxDuration);
+        }
+        break;
+
+    case M4READER_kOptionID_Bitrate:
+        {
+            M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
+            if (M4OSA_NULL != pC->m_pAudioStream)
+            {
+                *pBitrate = pC->m_pAudioStream->m_basicProperties.m_averageBitRate;
+            }
+            else
+            {
+                *pBitrate = 0;
+                err = M4ERR_PARAMETER;
+            }
+
+        }
+        break;
+    case M4READER_kOptionID_Version:
+        {
+            err = M4AMRR_getVersion((M4_VersionInfo*)pValue);
+        }
+        break;
+
+    default :
+        {
+            err = M4ERR_PARAMETER;
+        }
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   Set an option value of the reader
+ * @note    this function follows the set/get option mechanism described in OSAL 3.0
+ *          it allows the caller to set a property value:
+ *          - the OSAL file read functions
+ *
+ * @param   context:    (IN)        Context of the reader
+ * @param   optionId:   (IN)        Identifier indicating the option to set
+ * @param   pValue:     (IN)        Pointer to structure or value (allocated by user)
+ *                                  where option is stored
+ *
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_BAD_OPTION_ID     The option ID is not a valid one
+ * @return  M4ERR_STATE             State automaton is not applied
+ * @return  M4ERR_PARAMETER         The option parameter is invalid
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_setOption(M4OSA_Context context, M4OSA_OptionID optionId,
+                                 M4OSA_DataOption pValue)
+{
+    M4READER_AMR_Context* pC = (M4READER_AMR_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),     M4ERR_PARAMETER, "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER, "invalid value pointer");
+
+    switch(optionId)
+    {
+    case M4READER_kOptionID_SetOsaFileReaderFctsPtr :
+        {
+            pC->m_pOsaFileReaderFcts = (M4OSA_FileReadPointer*)pValue;
+        }
+        break;
+    default :
+        {
+            err = M4ERR_PARAMETER;
+        }
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief    reset the stream, i.e. seek it to the beginning and make it ready to be read
+ * @note    this function is to be deprecated in future versions
+ *
+ * @param    context:        (IN)    Context of the reader
+ * @param    pStreamHandler    (IN)    The stream handler of the stream to reset
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4ERR_ALLOC                there is no more memory available
+ * @return    M4ERR_BAD_STREAM_ID        the streamID does not exist
+ * @return    M4ERR_STATE    this function cannot be called now
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ * @return    M4WAR_INVALID_TIME        beginning of the stream can not be reached
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_reset(M4OSA_Context context, M4_StreamHandler *pStreamHandler)
+{
+    M4READER_AMR_Context*   pC = (M4READER_AMR_Context*)context;
+    M4SYS_StreamID          streamIdArray[2];
+    M4OSA_ERR               err;
+    M4SYS_AccessUnit*       pAu;
+    M4OSA_Time                time64;
+    M4AMRR_State            State;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_AMR_reset: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_AMR_reset: invalid pointer to M4_StreamHandler");
+
+    M4OSA_INT64_FROM_INT32(time64, 0);
+
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_reset: passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    err = M4AMRR_getState(pC->m_pCoreContext, &State, pStreamHandler->m_streamId);
+    if (M4AMRR_kReading_nextAU == State)
+    {
+        err = M4AMRR_freeAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_AMR_reset: error when freeing access unit\n");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    streamIdArray[0] = pStreamHandler->m_streamId;
+    streamIdArray[1] = 0;
+
+    err = M4NO_ERROR;
+
+    /* for reset during playback */
+    /* (set CTS to -20 so that the first AU CTS is 0) */
+    pAu->CTS = -20;
+    pAu->DTS = -20;
+
+    err = M4AMRR_seek(pC->m_pCoreContext, streamIdArray, time64, M4SYS_kBeginning, &time64);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_reset: error when calling M4AMRR_seek()\n");
+        return err;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief    jump into the stream at the specified time
+ * @note
+ * @param    context:        (IN)     Context of the reader
+ * @param    pStreamHandler    (IN)     the stream handler of the stream in which to jump
+ * @param    pTime            (IN/OUT) IN:  the time to jump to (in ms)
+ *                                     OUT: the time to which the stream really jumped
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ * @return    M4ERR_PARAMETER            at least one parameter is not properly set
+ * @return    M4ERR_ALLOC                there is no more memory available
+ * @return    M4WAR_INVALID_TIME        the time can not be reached
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_jump(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                             M4OSA_Int32* pTime)
+{
+    M4READER_AMR_Context*   pC = (M4READER_AMR_Context*)context;
+    M4SYS_StreamID          streamIdArray[2];
+    M4OSA_ERR               err;
+    M4SYS_AccessUnit*       pAu;
+    M4OSA_Time                time64;
+    M4OSA_Double            timeDouble; /*used for type conversion only*/
+    M4AMRR_State            State;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_AMR_jump: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_AMR_jump: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER, "M4READER_AMR_jump: invalid time pointer");
+
+    M4OSA_INT64_FROM_INT32(time64, *pTime);
+
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_jump: passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    err = M4AMRR_getState(pC->m_pCoreContext, &State, pStreamHandler->m_streamId);
+    if (M4AMRR_kReading_nextAU == State)
+    {
+        err = M4AMRR_freeAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_AMR_jump: error when freeing access unit\n");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    streamIdArray[0] = pStreamHandler->m_streamId;
+    streamIdArray[1] = 0;
+
+    pAu->CTS = time64;
+    pAu->DTS = time64;
+    err = M4AMRR_seek(pC->m_pCoreContext, streamIdArray, time64, M4SYS_kNoRAPprevious, &time64);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_jump: error when calling M4AMRR_seek()\n");
+        return err;
+    }
+
+    M4OSA_INT64_TO_DOUBLE(timeDouble, time64);
+    *pTime = (M4OSA_Int32)timeDouble;
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   Gets an access unit (AU) from the stream handler source.
+ * @note    An AU is the smallest possible amount of data to be decoded by a decoder (audio/video).
+ *          In the current version, we need to translate M4SYS_AccessUnit to M4_AccessUnit
+ *
+ * @param    context:        (IN)        Context of the reader
+ * @param    pStreamHandler  (IN)        The stream handler of the stream to read from
+ * @param    pAccessUnit     (IN/OUT)    Pointer to an access unit to fill with read data (the au
+                                         structure is allocated by the user, and must be
+                                         initialized by calling M4READER_fillAuStruct_fct after
+                                         creation)
+ * @return    M4NO_ERROR              there is no error
+ * @return    M4ERR_BAD_CONTEXT       provided context is not a valid one
+ * @return    M4ERR_PARAMETER         at least one parameter is not properly set
+ * @return    M4ERR_ALLOC             memory allocation failed
+ * @return    M4ERR_BAD_STREAM_ID     at least one of the stream Id. does not exist.
+ * @return    M4WAR_NO_MORE_AU        there are no more access unit in the stream (end of stream)
+ ************************************************************************
+*/
+M4OSA_ERR M4READER_AMR_getNextAu(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                                M4_AccessUnit *pAccessUnit)
+{
+    M4READER_AMR_Context*   pC = (M4READER_AMR_Context*)context;
+    M4OSA_ERR               err = M4NO_ERROR;
+    M4SYS_AccessUnit*       pAu;
+    M4_MediaTime            timeScale;
+    M4AMRR_State            State;
+
+    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
+         "M4READER_AMR_getNextAu: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_AMR_getNextAu: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
+         "M4READER_AMR_getNextAu: invalid pointer to M4_AccessUnit");
+
+    /* Keep track of the buffers allocated in the AU so they can be freed at destroy(),
+       but be aware that this scheme is fragile and would need to be reworked if more than
+       one video and one audio AU were needed */
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_AMR_getNextAu: passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    err = M4AMRR_getState(pC->m_pCoreContext, &State, pStreamHandler->m_streamId);
+    if (M4AMRR_kReading_nextAU == State)
+    {
+        err = M4AMRR_freeAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_AVI_getNextAu: error when freeing access unit\n");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    pAu->nbFrag = 0;
+    err = M4AMRR_nextAU(pC->m_pCoreContext, pStreamHandler->m_streamId, pAu);
+
+    if (err == M4NO_ERROR)
+    {
+        timeScale = (M4OSA_Float)(M4OSA_Int32)(pStreamHandler->m_pUserData)/1000;
+        pAccessUnit->m_dataAddress = (M4OSA_MemAddr8)pAu->dataAddress;
+        pAccessUnit->m_size = pAu->size;
+        pAccessUnit->m_CTS  = (M4_MediaTime)pAu->CTS/*/timeScale*/;
+        pAccessUnit->m_DTS  = (M4_MediaTime)pAu->DTS/*/timeScale*/;
+        pAccessUnit->m_attribute = pAu->attribute;
+    }
+    else
+    {
+        pAccessUnit->m_size=0;
+    }
+
+    return err;
+}
+
+/**
+*************************************************************************
+* @brief Retrieves the generic interfaces implemented by the reader
+*
+* @param pMediaType          : Pointer on a M4READER_MediaType (allocated by the caller)
+*                              that will be filled with the media type supported by this reader
+* @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface implemented
+*                              by this reader. The interface is a structure allocated by the function and must
+*                              be freed by the caller.
+* @param pRdrDataInterface   : Address of a pointer that will be set to the data interface implemented
+*                              by this reader. The interface is a structure allocated by the function and must
+*                              be freed by the caller.
+*
+* @returns : M4NO_ERROR       if OK
+*            M4ERR_ALLOC      if an allocation failed
+*            M4ERR_PARAMETER  at least one parameter is not properly set (in DEBUG only)
+*************************************************************************
+*/
+M4OSA_ERR   M4READER_AMR_getInterfaces(M4READER_MediaType *pMediaType,
+                                         M4READER_GlobalInterface **pRdrGlobalInterface,
+                                         M4READER_DataInterface **pRdrDataInterface)
+{
+    M4OSA_DEBUG_IF1((pMediaType == 0),          M4ERR_PARAMETER,
+         "M4READER_AMR_getInterfaces: invalid pointer to MediaType");
+    M4OSA_DEBUG_IF1((pRdrGlobalInterface == 0), M4ERR_PARAMETER,
+         "M4READER_AMR_getInterfaces: invalid pointer to M4READER_GlobalInterface");
+    M4OSA_DEBUG_IF1((pRdrDataInterface == 0),   M4ERR_PARAMETER,
+         "M4READER_AMR_getInterfaces: invalid pointer to M4READER_DataInterface");
+
+    *pRdrGlobalInterface =
+         (M4READER_GlobalInterface*)M4OSA_malloc( sizeof(M4READER_GlobalInterface),
+             M4READER_AMR, (M4OSA_Char *)"M4READER_GlobalInterface" );
+    if (M4OSA_NULL == *pRdrGlobalInterface)
+    {
+        *pRdrDataInterface = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+    *pRdrDataInterface = (M4READER_DataInterface*)M4OSA_malloc( sizeof(M4READER_DataInterface),
+         M4READER_AMR, (M4OSA_Char *)"M4READER_DataInterface");
+    if (M4OSA_NULL == *pRdrDataInterface)
+    {
+        M4OSA_free((M4OSA_MemAddr32)*pRdrGlobalInterface);
+        *pRdrGlobalInterface = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+
+    *pMediaType = M4READER_kMediaTypeAMR;
+
+    (*pRdrGlobalInterface)->m_pFctCreate           = M4READER_AMR_create;
+    (*pRdrGlobalInterface)->m_pFctDestroy          = M4READER_AMR_destroy;
+    (*pRdrGlobalInterface)->m_pFctOpen             = M4READER_AMR_open;
+    (*pRdrGlobalInterface)->m_pFctClose            = M4READER_AMR_close;
+    (*pRdrGlobalInterface)->m_pFctGetOption        = M4READER_AMR_getOption;
+    (*pRdrGlobalInterface)->m_pFctSetOption        = M4READER_AMR_setOption;
+    (*pRdrGlobalInterface)->m_pFctGetNextStream    = M4READER_AMR_getNextStream;
+    (*pRdrGlobalInterface)->m_pFctFillAuStruct     = M4READER_AMR_fillAuStruct;
+    (*pRdrGlobalInterface)->m_pFctStart            = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctStop             = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctJump             = M4READER_AMR_jump;
+    (*pRdrGlobalInterface)->m_pFctReset            = M4READER_AMR_reset;
+    (*pRdrGlobalInterface)->m_pFctGetPrevRapTime   = M4OSA_NULL; /*all AUs are RAP*/
+
+    (*pRdrDataInterface)->m_pFctGetNextAu          = M4READER_AMR_getNextAu;
+
+    (*pRdrDataInterface)->m_readerContext = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
diff --git a/libvideoeditor/vss/src/M4READER_Pcm.c b/libvideoeditor/vss/src/M4READER_Pcm.c
new file mode 100755
index 0000000..5604983
--- /dev/null
+++ b/libvideoeditor/vss/src/M4READER_Pcm.c
@@ -0,0 +1,721 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file   M4READER_Pcm.c
+ * @brief  Generic encapsulation of the core PCM reader
+ * @note   This file implements the generic M4READER interface
+ *         on top of the PCM reader
+ ************************************************************************
+*/
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_CoreID.h"
+#include "M4TOOL_VersionInfo.h"
+#include "M4PCMR_CoreReader.h"
+#include "M4READER_Pcm.h"
+/**
+ ************************************************************************
+ * structure    M4READER_PCM_Context
+ * @brief       This structure defines the internal context of a PCM reader instance
+ * @note        The context is allocated and de-allocated by the reader
+ ************************************************************************
+ */
+typedef struct _M4READER_PCM_Context
+{
+    M4OSA_Context           m_coreContext;        /**< core PCM reader context */
+    M4_StreamHandler*       m_pAudioStream;       /**< pointer on the audio stream description
+                                                        returned by the core */
+    M4SYS_AccessUnit        m_audioAu;            /**< audio access unit to be filled by the core */
+    M4OSA_FileReadPointer*  m_pOsaFileReaderFcts; /**< OSAL file read functions */
+
+} M4READER_PCM_Context;
+
+
+/**
+ ************************************************************************
+ * @brief   Creates a PCM reader instance
+ * @note    allocates the context
+ * @param   pContext:            (OUT)  Pointer to a PCM reader context
+ * @return  M4NO_ERROR:                 there is no error
+ * @return  M4ERR_ALLOC:                a memory allocation has failed
+ * @return  M4ERR_PARAMETER:            at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_create(M4OSA_Context* pContext)
+{
+    M4READER_PCM_Context*   pReaderContext;
+
+    M4OSA_DEBUG_IF1((pContext == 0),       M4ERR_PARAMETER,
+         "M4READER_PCM_create: invalid context pointer");
+
+    pReaderContext = (M4READER_PCM_Context*)M4OSA_malloc(sizeof(M4READER_PCM_Context),
+         M4READER_WAV, (M4OSA_Char *)"M4READER_PCM_Context");
+    if (pReaderContext == M4OSA_NULL)
+    {
+        return M4ERR_ALLOC;
+    }
+
+    pReaderContext->m_coreContext         = M4OSA_NULL;
+    pReaderContext->m_pAudioStream        = M4OSA_NULL;
+    pReaderContext->m_audioAu.dataAddress = M4OSA_NULL;
+    pReaderContext->m_pOsaFileReaderFcts  = M4OSA_NULL;
+
+    *pContext = pReaderContext;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Destroy the instance of the reader
+ * @note    the context is un-allocated
+ * @param   context:         (IN) context of the reader
+ * @return  M4NO_ERROR:           there is no error
+ * @return  M4ERR_PARAMETER:      at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_destroy(M4OSA_Context context)
+{
+    M4READER_PCM_Context* pC = (M4READER_PCM_Context*)context;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+         "M4READER_PCM_destroy: invalid context pointer");
+
+    M4OSA_free((M4OSA_MemAddr32)pC);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Initializes the reader instance
+ * @param   context:           (IN)    context of the reader
+ * @param   pFileDescriptor:   (IN)    Pointer to proprietary data identifying the media to open
+ * @return  M4NO_ERROR:                there is no error
+ * @return  M4ERR_PARAMETER:           at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_open(M4OSA_Context context, M4OSA_Void* pFileDescriptor)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4OSA_ERR               err;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+         "M4READER_PCM_open: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor),   M4ERR_PARAMETER,
+         "M4READER_PCM_open: invalid pointer pFileDescriptor");
+
+    err = M4PCMR_openRead(&(pC->m_coreContext), (M4OSA_Char*)pFileDescriptor,
+         pC->m_pOsaFileReaderFcts);
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief     close the reader
+ * @note
+ * @param     context:        (IN)    Context of the reader
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            the context is NULL
+ * @return    M4ERR_BAD_CONTEXT        provided context is not a valid one
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_close(M4OSA_Context context)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4OSA_ERR               err;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+         "M4READER_PCM_close: invalid context pointer");
+
+    /* Free audio AU and audio stream */
+    if (M4OSA_NULL != pC->m_pAudioStream)
+    {
+        if (M4OSA_NULL != pC->m_audioAu.dataAddress)
+        {
+            err = M4PCMR_freeAU(pC->m_coreContext, pC->m_pAudioStream->m_streamId,
+                 &pC->m_audioAu);
+            if (err != M4NO_ERROR)
+            {
+                M4OSA_TRACE1_0("M4READER_PCM_close: Error when freeing audio access unit");
+                return err;
+            }
+        }
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pAudioStream);
+        pC->m_pAudioStream = M4OSA_NULL;
+    }
+
+
+    if (M4OSA_NULL != pC->m_coreContext)
+    {
+        /* Close the PCM file */
+       err = M4PCMR_closeRead(pC->m_coreContext);
+       pC->m_coreContext = M4OSA_NULL;
+    }
+
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   Set an option value of the reader
+ * @note    this function follows the set/get option mechanism described in OSAL 3.0
+ *          it allows the caller to set a property value:
+ * @param    context:        (IN)    Context of the reader
+ * @param    optionId:       (IN)    indicates the option to set
+ * @param    pValue:         (IN)    pointer to structure or value (allocated by user)
+ *                                    where option is stored
+ *
+ * @return    M4NO_ERROR             there is no error
+ * @return    M4ERR_BAD_CONTEXT      provided context is not a valid one
+ * @return    M4ERR_PARAMETER        at least one parameter is not properly set
+ * @return    M4ERR_BAD_OPTION_ID    when the option ID is not a valid one
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_setOption(M4OSA_Context context, M4OSA_OptionID optionId, void* pValue)
+{
+    M4READER_PCM_Context* pC = (M4READER_PCM_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),     M4ERR_PARAMETER,
+         "M4READER_PCM_setOption: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
+         "M4READER_PCM_setOption: invalid value pointer");
+
+    switch(optionId)
+    {
+    case M4READER_kOptionID_SetOsaFileReaderFctsPtr :
+        {
+            pC->m_pOsaFileReaderFcts = (M4OSA_FileReadPointer*)pValue;
+        }
+        break;
+    default :
+        {
+            err = M4ERR_PARAMETER;
+        }
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   Retrieves an option value from the reader, given an option ID.
+ * @note    this function follows the set/get option mechanism described in OSAL 3.0
+ *          it allows the caller to retrieve a property value:
+ *
+ * @param   context:  (IN) context of the reader
+ * @param   optionId: (IN) option identifier whose value is to be retrieved.
+ * @param   pValue:  (OUT) option value retrieved.
+ *
+ * @return  M4NO_ERROR:          there is no error
+ * @return  M4ERR_PARAMETER:     at least one parameter is not properly set (in DEBUG only)
+ * @return  M4ERR_BAD_OPTION_ID: the requested option identifier is unknown
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_getOption(M4OSA_Context context, M4OSA_OptionID optionId, void* pValue)
+{
+    M4READER_PCM_Context*   pContext = (M4READER_PCM_Context*)context;
+    M4OSA_ERR               err      = M4NO_ERROR;
+
+    /* no check of context at this level because some options do not need it */
+    M4OSA_DEBUG_IF1((pValue == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_getOption: invalid pointer on value");
+
+    switch (optionId)
+    {
+    case M4READER_kOptionID_Duration:
+        *((M4OSA_UInt64*)pValue) = pContext->m_pAudioStream->m_duration;
+        break;
+
+    case M4READER_kOptionID_Version:
+        err = M4PCMR_getVersion((M4_VersionInfo*)pValue);
+        break;
+
+    case M4READER_kOptionID_Copyright:
+        return M4ERR_NOT_IMPLEMENTED;
+        break;
+
+    case M4READER_kOptionID_Bitrate:
+        {
+            M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
+            if (M4OSA_NULL != pContext->m_pAudioStream)
+            {
+                *pBitrate = pContext->m_pAudioStream->m_averageBitRate;
+            }
+            else
+            {
+                *pBitrate = 0;
+                err = M4ERR_PARAMETER;
+            }
+        }
+        break;
+
+    default:
+        err = M4ERR_BAD_OPTION_ID;
+        M4OSA_TRACE1_0("M4READER_PCM_getOption: unsupported optionId");
+        break;
+    }
+
+    return err;
+}
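+
+/* A minimal usage sketch, assuming the same readerContext and err as above; it
+ * reads back the stream duration (an M4OSA_UInt64, in ms) and the average
+ * bitrate (an M4OSA_UInt32) once the audio stream has been found with getNextStream.
+ *
+ *   M4OSA_UInt64 durationMs = 0;
+ *   M4OSA_UInt32 bitrate    = 0;
+ *
+ *   err = M4READER_PCM_getOption(readerContext, M4READER_kOptionID_Duration, &durationMs);
+ *   if (M4NO_ERROR == err)
+ *   {
+ *       err = M4READER_PCM_getOption(readerContext, M4READER_kOptionID_Bitrate, &bitrate);
+ *   }
+ */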
+
+/**
+ ************************************************************************
+ * @brief   Get the next stream found in the media
+ * @note
+ *
+ * @param   context:        (IN)  context of the reader
+ * @param   pMediaFamily:   (OUT) pointer to a user allocated M4READER_MediaFamily that will
+ *                                be filled
+ * @param   pStreamHandler: (OUT) pointer to a stream handler that will be allocated and filled
+ *                                with the found stream description
+ *
+ * @return  M4NO_ERROR:       there is no error.
+ * @return  M4ERR_PARAMETER:  at least one parameter is not properly set (in DEBUG only)
+ * @return  M4WAR_NO_MORE_STREAM    no more streams available in the media (all streams found)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_getNextStream(M4OSA_Context context, M4READER_MediaFamily *pMediaFamily,
+                                     M4_StreamHandler **pStreamHandler)
+{
+    M4READER_PCM_Context*   pC=(M4READER_PCM_Context*)context;
+    M4OSA_ERR               err;
+/*    M4_StreamHandler*       pStreamHandler = M4OSA_NULL;*/
+    M4SYS_StreamDescription streamDesc;
+    M4_AudioStreamHandler*  pAudioStreamHandler;
+    M4OSA_Double            fDuration;
+    M4SYS_StreamID          streamIdArray[2];
+    M4PCMC_DecoderSpecificInfo* pDsi;
+
+    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
+         "M4READER_PCM_getNextStream: invalid context");
+    M4OSA_DEBUG_IF1((pMediaFamily == 0),   M4ERR_PARAMETER,
+         "M4READER_PCM_getNextStream: invalid pointer to MediaFamily");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_getNextStream: invalid pointer to StreamHandler");
+
+    err = M4PCMR_getNextStream( pC->m_coreContext, &streamDesc);
+    if (err == M4WAR_NO_MORE_STREAM)
+    {
+        streamIdArray[0] = 0;
+        streamIdArray[1] = 0;
+        err = M4PCMR_startReading(pC->m_coreContext, streamIdArray); /* should be moved to the open function */
+
+        return M4WAR_NO_MORE_STREAM;
+    }
+    else if (M4NO_ERROR != err)
+    {
+        return err; /* propagate the core reader error */
+    }
+
+    switch (streamDesc.streamType)
+    {
+        case M4SYS_kAudioUnknown:
+        case M4SYS_kPCM_16bitsS:
+        case M4SYS_kPCM_16bitsU:
+        case M4SYS_kPCM_8bitsU:
+            *pMediaFamily = M4READER_kMediaFamilyAudio;
+            M4OSA_TRACE2_0("M4READER_PCM_getNextStream: found audio stream");
+            break;
+        default:
+            *pMediaFamily = M4READER_kMediaFamilyUnknown;
+            M4OSA_TRACE2_0("M4READER_PCM_getNextStream: found UNKNOWN stream");
+            return M4NO_ERROR;
+    }
+
+    pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_malloc(sizeof(M4_AudioStreamHandler),
+         M4READER_WAV, (M4OSA_Char *)"M4_AudioStreamHandler");
+    if (pAudioStreamHandler == M4OSA_NULL)
+    {
+        return M4ERR_ALLOC;
+    }
+    pAudioStreamHandler->m_structSize = sizeof(M4_AudioStreamHandler);
+    pC->m_pAudioStream = (M4_StreamHandler*)(pAudioStreamHandler);
+
+    pDsi = (M4PCMC_DecoderSpecificInfo*)(streamDesc.decoderSpecificInfo);
+    M4OSA_DEBUG_IF1((pDsi == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_getNextStream: invalid decoder specific info in stream");
+
+    pAudioStreamHandler->m_samplingFrequency = pDsi->SampleFrequency;
+    pAudioStreamHandler->m_byteSampleSize    = (M4OSA_UInt32)(pDsi->BitsPerSample/8);
+    /* m_byteFrameLength is badly named: it is expressed in number of samples, not in bytes */
+    if(pAudioStreamHandler->m_samplingFrequency == 8000)
+    {
+        /* AMR case */
+        pAudioStreamHandler->m_byteFrameLength   =
+             (((streamDesc.averageBitrate/8)/50)/pDsi->nbChannels)\
+                /pAudioStreamHandler->m_byteSampleSize;/*/50 to get around 20 ms of audio*/
+    }
+    else
+    {
+        /* AAC Case */
+        pAudioStreamHandler->m_byteFrameLength =
+             (M4OSA_UInt32)(((streamDesc.averageBitrate/8)/15.625)/pDsi->nbChannels)\
+                /pAudioStreamHandler->m_byteSampleSize;
+    }
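+    /* Worked example of the computation above (illustration only, assuming 16-bit
+       mono PCM at 8000 Hz, i.e. an average bitrate of 128000 bit/s):
+       (128000/8)/50 = 320 bytes per ~20 ms chunk; divided by 1 channel and by a
+       2-byte sample size, this gives a frame length of 160 samples. */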
+
+    pAudioStreamHandler->m_nbChannels        = pDsi->nbChannels;
+
+    M4OSA_TIME_TO_MS( fDuration, streamDesc.duration, streamDesc.timeScale);
+    pC->m_pAudioStream->m_duration                = (M4OSA_Int64)fDuration;
+    pC->m_pAudioStream->m_pDecoderSpecificInfo    = (M4OSA_UInt8*)(streamDesc.decoderSpecificInfo);
+    pC->m_pAudioStream->m_decoderSpecificInfoSize = streamDesc.decoderSpecificInfoSize;
+    pC->m_pAudioStream->m_streamId                = streamDesc.streamID;
+    pC->m_pAudioStream->m_pUserData               =
+        (void*)streamDesc.timeScale; /* trick: store the time scale here; to be changed */
+    pC->m_pAudioStream->m_averageBitRate          = streamDesc.averageBitrate;
+    pC->m_pAudioStream->m_maxAUSize               =
+         pAudioStreamHandler->m_byteFrameLength*pAudioStreamHandler->m_byteSampleSize\
+            *pAudioStreamHandler->m_nbChannels;
+    pC->m_pAudioStream->m_streamType              = M4DA_StreamTypeAudioPcm;
+
+    *pStreamHandler = pC->m_pAudioStream;
+    return err;
+}
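+
+/* A minimal usage sketch, assuming readerContext, err and a caller-owned
+ * pAudioStream pointer; it shows the typical stream discovery loop, which ends
+ * when getNextStream returns M4WAR_NO_MORE_STREAM (the point at which the core
+ * reading is started, see above).
+ *
+ *   M4READER_MediaFamily family;
+ *   M4_StreamHandler*    pStream = M4OSA_NULL;
+ *
+ *   while (M4NO_ERROR == (err = M4READER_PCM_getNextStream(readerContext, &family, &pStream)))
+ *   {
+ *       if (M4READER_kMediaFamilyAudio == family)
+ *       {
+ *           pAudioStream = pStream;    // keep the audio stream handler
+ *       }
+ *   }
+ *   if (M4WAR_NO_MORE_STREAM == err)
+ *   {
+ *       err = M4NO_ERROR;              // all streams have been enumerated
+ *   }
+ */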
+
+/**
+ ************************************************************************
+ * @brief   Fill the access unit structure with initialization values
+ * @note
+ *
+ * @param   context:        (IN) context of the reader
+ * @param   pStreamHandler: (IN) pointer to the stream handler to which the access unit will
+ *                                 be associated
+ * @param   pAccessUnit:    (IN) pointer to the access unit (allocated by the caller) to initialize
+ * @return  M4NO_ERROR:       there is no error.
+ * @return  M4ERR_PARAMETER:  at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_fillAuStruct(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                                     M4_AccessUnit *pAccessUnit)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4SYS_AccessUnit*       pAu;
+
+    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
+         "M4READER_PCM_fillAuStruct: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_fillAuStruct: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
+         "M4READER_PCM_fillAuStruct: invalid pointer to M4_AccessUnit");
+
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_PCM_fillAuStruct: passed StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+
+    pAu->dataAddress = M4OSA_NULL;
+    pAu->size        = 0;
+    pAu->CTS         = 0;
+    pAu->DTS         = 0;
+    pAu->attribute   = 0;
+    pAu->nbFrag      = 0;
+
+    pAccessUnit->m_size         = 0;
+    pAccessUnit->m_CTS          = 0;
+    pAccessUnit->m_DTS          = 0;
+    pAccessUnit->m_attribute    = 0;
+    pAccessUnit->m_dataAddress  = M4OSA_NULL;/*pBuffer;*/
+    pAccessUnit->m_maxsize      = pStreamHandler->m_maxAUSize;
+    pAccessUnit->m_streamID     = pStreamHandler->m_streamId;
+    pAccessUnit->m_structSize   = sizeof(M4_AccessUnit);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Reset the stream: seek it back to the beginning and make it ready to be read again
+ * @note
+ * @param   context:        (IN) context of the reader
+ * @param   pStreamHandler: (IN) The stream handler of the stream to reset
+ * @return  M4NO_ERROR: there is no error.
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_reset(M4OSA_Context context, M4_StreamHandler *pStreamHandler)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4SYS_StreamID          streamIdArray[2];
+    M4OSA_ERR               err;
+    M4SYS_AccessUnit*       pAu;
+    M4OSA_Time                time64 = 0;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_PCM_reset: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_reset: invalid pointer to M4_StreamHandler");
+
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_PCM_reset: passed StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+
+    if (pAu->dataAddress != M4OSA_NULL)
+    {
+        err = M4PCMR_freeAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_PCM_reset: error when freeing access unit");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    streamIdArray[0] = pStreamHandler->m_streamId;
+    streamIdArray[1] = 0;
+
+    pAu->CTS = 0;
+    pAu->DTS = 0;
+
+    /* This call is needed only when replay during playback */
+    err = M4PCMR_seek(pC->m_coreContext, streamIdArray, time64, M4SYS_kBeginning, &time64);
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   Get the next access unit of the specified stream
+ * @note
+ * @param   context:        (IN)        Context of the reader
+ * @param   pStreamHandler  (IN)        The stream handler of the stream from which to read
+ * @param   pAccessUnit     (IN/OUT)    Pointer to an access unit to fill with read data
+ *                                      (the AU structure is allocated by the user, and must be
+ *                                      initialized by calling M4READER_fillAuStruct_fct after
+ *                                      creation)
+ * @return  M4NO_ERROR                  there is no error
+ * @return  M4ERR_BAD_CONTEXT           provided context is not a valid one
+ * @return  M4ERR_PARAMETER             at least one parameter is not properly set
+ * @return  M4ERR_ALLOC                 memory allocation failed
+ * @return  M4ERR_BAD_STREAM_ID         at least one of the stream IDs does not exist
+ * @return  M4WAR_NO_DATA_YET           there is not enough data in the stream for a new access unit
+ * @return  M4WAR_NO_MORE_AU            there are no more access units in the stream (end of stream)
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_getNextAu(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+                                 M4_AccessUnit *pAccessUnit)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4OSA_ERR               err = M4NO_ERROR;
+    M4SYS_AccessUnit*       pAu;
+
+    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
+         "M4READER_PCM_getNextAu: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_getNextAu: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
+         "M4READER_PCM_getNextAu: invalid pointer to M4_AccessUnit");
+
+    /* keep trace of the allocated buffers in AU to be able to free them at destroy()
+       but be aware that system is risky and would need upgrade if more than
+       one video and one audio AU is needed */
+    if (pStreamHandler == (M4_StreamHandler*)pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_PCM_getNextAu: passed StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+
+    if (pAu->dataAddress != M4OSA_NULL)
+    {
+        err = M4PCMR_freeAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_PCM_getNextAu: error when freeing access unit");
+            return err;
+        }
+    }
+
+    pAu->nbFrag = 0;
+    err = M4PCMR_nextAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
+
+    if (err == M4NO_ERROR)
+    {
+        pAccessUnit->m_dataAddress = (M4OSA_MemAddr8)pAu->dataAddress;
+        pAccessUnit->m_size = pAu->size;
+        pAccessUnit->m_CTS  = (M4OSA_Double)pAu->CTS;
+        pAccessUnit->m_DTS  = (M4OSA_Double)pAu->DTS;
+        pAccessUnit->m_attribute = pAu->attribute;
+    }
+    else
+    {
+        pAccessUnit->m_size = 0;
+    }
+
+    return err;
+}
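+
+/* A minimal usage sketch, assuming readerContext, err and the pAudioStream
+ * handler returned by getNextStream; the access unit is initialized once with
+ * fillAuStruct, then getNextAu is called until it returns M4WAR_NO_MORE_AU.
+ *
+ *   M4_AccessUnit au;
+ *
+ *   err = M4READER_PCM_fillAuStruct(readerContext, pAudioStream, &au);
+ *   while (M4NO_ERROR == err)
+ *   {
+ *       err = M4READER_PCM_getNextAu(readerContext, pAudioStream, &au);
+ *       if (M4NO_ERROR == err)
+ *       {
+ *           // au.m_dataAddress and au.m_size hold one PCM chunk, au.m_CTS its time
+ *       }
+ *   }
+ *   if (M4WAR_NO_MORE_AU == err)
+ *   {
+ *       err = M4NO_ERROR;              // normal end of stream
+ *   }
+ */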
+
+
+/**
+ ************************************************************************
+ * @brief   jump into the stream at the specified time
+ * @note
+ * @param   context:        (IN)     Context of the reader
+ * @param   pStreamHandler  (IN)     the stream handler of the stream in which to jump
+ * @param   pTime           (IN/OUT) IN:  the time to jump to (in ms)
+ *                                   OUT: the time to which the stream actually jumped
+ *                                        (this reader does not modify the requested time)
+ * @return  M4NO_ERROR              there is no error
+ * @return  M4ERR_BAD_CONTEXT       provided context is not a valid one
+ * @return  M4ERR_PARAMETER         at least one parameter is not properly set
+ * @return  M4ERR_ALLOC             there is no more memory available
+ * @return  M4ERR_BAD_STREAM_ID     the streamID does not exist
+ ************************************************************************
+ */
+M4OSA_ERR M4READER_PCM_jump(M4OSA_Context context, M4_StreamHandler *pStreamHandler,
+     M4OSA_Int32* pTime)
+{
+    M4READER_PCM_Context*   pC = (M4READER_PCM_Context*)context;
+    M4SYS_StreamID          streamIdArray[2];
+    M4OSA_ERR               err;
+    M4SYS_AccessUnit*       pAu;
+    M4OSA_Time                time64;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER, "M4READER_PCM_jump: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_jump: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER, "M4READER_PCM_jump: invalid time pointer");
+
+    time64 = (M4OSA_Time)*pTime;
+
+    if (pStreamHandler == pC->m_pAudioStream)
+    {
+        pAu = &pC->m_audioAu;
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4READER_PCM_jump: passed StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+
+    if (pAu->dataAddress != M4OSA_NULL)
+    {
+        err = M4PCMR_freeAU(pC->m_coreContext, pStreamHandler->m_streamId, pAu);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_0("M4READER_PCM_jump: Error when freeing access unit");
+            return err;
+        }
+        pAu->dataAddress = M4OSA_NULL;
+    }
+
+    streamIdArray[0] = pStreamHandler->m_streamId;
+    streamIdArray[1] = 0;
+
+    pAu->CTS = time64;
+    pAu->DTS = time64;
+
+    err = M4PCMR_seek(pC->m_coreContext, streamIdArray, time64, M4SYS_kBeginning, &time64);
+
+    *pTime = (M4OSA_Int32)time64;
+
+    return err;
+}
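+
+/* A minimal usage sketch, assuming readerContext, err and pAudioStream as above;
+ * the requested time is expressed in milliseconds and is overwritten with the
+ * time actually reached by the core reader.
+ *
+ *   M4OSA_Int32 timeMs = 2000;   // request a jump to 2 seconds
+ *
+ *   err = M4READER_PCM_jump(readerContext, pAudioStream, &timeMs);
+ *   // on success, timeMs holds the position actually reached
+ */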
+
+/**
+ *************************************************************************
+ * @brief Retrieves the generic interfaces implemented by the reader
+ *
+ * @param pMediaType          : Pointer to a M4READER_MediaType (allocated by the caller)
+ *                              that will be filled with the media type supported by this reader
+ * @param pRdrGlobalInterface : Address of a pointer that will be set to the global interface
+ *                              implemented by this reader. The interface is a structure allocated
+ *                              by the function and must be freed by the caller.
+ * @param pRdrDataInterface   : Address of a pointer that will be set to the data interface
+ *                              implemented by this reader. The interface is a structure allocated
+ *                              by the function and must be freed by the caller.
+ *
+ * @returns : M4NO_ERROR       if OK
+ *            M4ERR_ALLOC      if an allocation failed
+ *            M4ERR_PARAMETER  at least one parameter is not properly set (in DEBUG only)
+ *************************************************************************
+ */
+M4OSA_ERR   M4READER_PCM_getInterfaces(M4READER_MediaType *pMediaType,
+                                       M4READER_GlobalInterface **pRdrGlobalInterface,
+                                       M4READER_DataInterface **pRdrDataInterface)
+/************************************************************************/
+{
+    M4OSA_DEBUG_IF1((pMediaType == 0),          M4ERR_PARAMETER,
+         "M4READER_PCM_getInterfaces: invalid pointer to MediaType passed");
+    M4OSA_DEBUG_IF1((pRdrGlobalInterface == 0), M4ERR_PARAMETER,
+         "M4READER_PCM_getInterfaces: invalid pointer to M4READER_GlobalInterface");
+    M4OSA_DEBUG_IF1((pRdrDataInterface == 0),   M4ERR_PARAMETER,
+         "M4READER_PCM_getInterfaces: invalid pointer to M4READER_DataInterface");
+
+    *pRdrGlobalInterface =
+         (M4READER_GlobalInterface*)M4OSA_malloc( sizeof(M4READER_GlobalInterface), M4READER_WAV,
+             (M4OSA_Char *)"M4READER_PCM GlobalInterface");
+    if (M4OSA_NULL == *pRdrGlobalInterface)
+    {
+        return M4ERR_ALLOC;
+    }
+    *pRdrDataInterface =
+         (M4READER_DataInterface*)M4OSA_malloc( sizeof(M4READER_DataInterface), M4READER_WAV,
+            (M4OSA_Char *) "M4READER_PCM DataInterface");
+    if (M4OSA_NULL == *pRdrDataInterface)
+    {
+        M4OSA_free((M4OSA_MemAddr32)*pRdrGlobalInterface);
+        return M4ERR_ALLOC;
+    }
+
+    *pMediaType = M4READER_kMediaTypePCM;
+
+    (*pRdrGlobalInterface)->m_pFctCreate           = M4READER_PCM_create;
+    (*pRdrGlobalInterface)->m_pFctDestroy          = M4READER_PCM_destroy;
+    (*pRdrGlobalInterface)->m_pFctOpen             = M4READER_PCM_open;
+    (*pRdrGlobalInterface)->m_pFctClose            = M4READER_PCM_close;
+    (*pRdrGlobalInterface)->m_pFctStart            = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctStop             = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctGetOption        = M4READER_PCM_getOption;
+    (*pRdrGlobalInterface)->m_pFctSetOption        = M4READER_PCM_setOption;
+    (*pRdrGlobalInterface)->m_pFctGetNextStream    = M4READER_PCM_getNextStream;
+    (*pRdrGlobalInterface)->m_pFctFillAuStruct     = M4READER_PCM_fillAuStruct;
+    (*pRdrGlobalInterface)->m_pFctJump             = M4READER_PCM_jump;
+    (*pRdrGlobalInterface)->m_pFctReset            = M4READER_PCM_reset;
+    (*pRdrGlobalInterface)->m_pFctGetPrevRapTime   = M4OSA_NULL; /*all AUs are RAP*/
+
+    (*pRdrDataInterface)->m_pFctGetNextAu          = M4READER_PCM_getNextAu;
+
+    (*pRdrDataInterface)->m_readerContext = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
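+
+/* A minimal usage sketch, assuming an err variable of type M4OSA_ERR; both
+ * interface structures are allocated by getInterfaces and must be freed by the
+ * caller once the reader is no longer needed. The function pointers they expose
+ * (m_pFctCreate, m_pFctOpen, m_pFctGetNextAu, ...) then drive the reader.
+ *
+ *   M4READER_MediaType        mediaType;
+ *   M4READER_GlobalInterface* pGlobal = M4OSA_NULL;
+ *   M4READER_DataInterface*   pData   = M4OSA_NULL;
+ *
+ *   err = M4READER_PCM_getInterfaces(&mediaType, &pGlobal, &pData);
+ *   if (M4NO_ERROR == err)
+ *   {
+ *       // mediaType is M4READER_kMediaTypePCM; use pGlobal / pData, then:
+ *       M4OSA_free((M4OSA_MemAddr32)pGlobal);
+ *       M4OSA_free((M4OSA_MemAddr32)pData);
+ *   }
+ */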
+
+
diff --git a/libvideoeditor/vss/src/M4VD_EXTERNAL_BitstreamParser.c b/libvideoeditor/vss/src/M4VD_EXTERNAL_BitstreamParser.c
new file mode 100755
index 0000000..bc75488
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VD_EXTERNAL_BitstreamParser.c
@@ -0,0 +1,584 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+
+#include "M4VD_EXTERNAL_Interface.h"
+#include "M4VD_EXTERNAL_Internal.h"
+#include "M4VD_Tools.h"
+
+/**
+ ************************************************************************
+ * @file   M4VD_EXTERNAL_BitstreamParser.c
+ * @brief
+ * @note   This file implements the external bitstream parser
+ ************************************************************************
+ */
+
+M4OSA_UInt32 M4VD_EXTERNAL_GetBitsFromMemory(M4VS_Bitstream_ctxt* parsingCtxt,
+     M4OSA_UInt32 nb_bits)
+{
+#if 0
+    M4OSA_UInt32    code;
+    M4OSA_UInt32    i;
+
+    code = 0;
+    for (i = 0; i < nb_bits; i++)
+    {
+        if (parsingCtxt->stream_index == 8)
+        {
+            M4OSA_memcpy( (M4OSA_MemAddr8)&(parsingCtxt->stream_byte), parsingCtxt->in,
+                 sizeof(unsigned char));
+            parsingCtxt->in++;
+            //fread(&stream_byte, sizeof(unsigned char),1,in);
+            parsingCtxt->stream_index = 0;
+        }
+        code = (code << 1);
+        code |= ((parsingCtxt->stream_byte & 0x80) >> 7);
+
+        parsingCtxt->stream_byte = (parsingCtxt->stream_byte << 1);
+        parsingCtxt->stream_index++;
+    }
+
+    return code;
+#endif
+    return (M4VD_Tools_GetBitsFromMemory(parsingCtxt, nb_bits));
+}
+
+M4OSA_ERR M4VD_EXTERNAL_WriteBitsToMemory(M4OSA_UInt32 bitsToWrite,
+                                                 M4OSA_MemAddr32 dest_bits,
+                                                 M4OSA_UInt8 offset, M4OSA_UInt8 nb_bits)
+{
+#if 0
+    M4OSA_UInt8 i,j;
+    M4OSA_UInt32 temp_dest = 0, mask = 0, temp = 1;
+    M4OSA_UInt32 input = bitsToWrite;
+
+    input = (input << (32 - nb_bits - offset));
+
+    /* Put destination buffer to 0 */
+    for(j=0;j<3;j++)
+    {
+        for(i=0;i<8;i++)
+        {
+            if((j*8)+i >= offset && (j*8)+i < nb_bits + offset)
+            {
+                mask |= (temp << ((7*(j+1))-i+j));
+            }
+        }
+    }
+    mask = ~mask;
+    *dest_bits &= mask;
+
+    /* Parse input bits, and fill output buffer */
+    for(j=0;j<3;j++)
+    {
+        for(i=0;i<8;i++)
+        {
+            if((j*8)+i >= offset && (j*8)+i < nb_bits + offset)
+            {
+                temp = ((input & (0x80000000 >> offset)) >> (31-offset));
+                //*dest_bits |= (temp << (31 - i));
+                *dest_bits |= (temp << ((7*(j+1))-i+j));
+                input = (input << 1);
+            }
+        }
+    }
+
+    return M4NO_ERROR;
+#endif
+    return (M4VD_Tools_WriteBitsToMemory(bitsToWrite, dest_bits,
+                                         offset, nb_bits));
+}
+
+M4OSA_ERR M4DECODER_EXTERNAL_ParseVideoDSI(M4OSA_UInt8* pVol, M4OSA_Int32 aVolSize,
+                                             M4DECODER_MPEG4_DecoderConfigInfo* pDci,
+                                             M4DECODER_VideoSize* pVideoSize)
+{
+    M4VS_Bitstream_ctxt parsingCtxt;
+    M4OSA_UInt32 code, j;
+    M4OSA_MemAddr8 start;
+    M4OSA_UInt8 i;
+    M4OSA_UInt32 time_incr_length;
+    M4OSA_UInt8 vol_verid=0, b_hierarchy_type;
+
+    /* Parsing variables */
+    M4OSA_UInt8 video_object_layer_shape = 0;
+    M4OSA_UInt8 sprite_enable = 0;
+    M4OSA_UInt8 reduced_resolution_vop_enable = 0;
+    M4OSA_UInt8 scalability = 0;
+    M4OSA_UInt8 enhancement_type = 0;
+    M4OSA_UInt8 complexity_estimation_disable = 0;
+    M4OSA_UInt8 interlaced = 0;
+    M4OSA_UInt8 sprite_warping_points = 0;
+    M4OSA_UInt8 sprite_brightness_change = 0;
+    M4OSA_UInt8 quant_precision = 0;
+
+    /* Fill the structure with default parameters */
+    pVideoSize->m_uiWidth              = 0;
+    pVideoSize->m_uiHeight             = 0;
+
+    pDci->uiTimeScale          = 0;
+    pDci->uiProfile            = 0;
+    pDci->uiUseOfResynchMarker = 0;
+    pDci->bDataPartition       = M4OSA_FALSE;
+    pDci->bUseOfRVLC           = M4OSA_FALSE;
+
+    /* Reset the bitstream context */
+    parsingCtxt.stream_byte = 0;
+    parsingCtxt.stream_index = 8;
+    parsingCtxt.in = (M4OSA_Int8 *)pVol;
+
+    start = (M4OSA_Int8 *)pVol;
+
+    /* Start parsing */
+    while (parsingCtxt.in - start < aVolSize)
+    {
+        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+        if (code == 0)
+        {
+            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+            if (code == 0)
+            {
+                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                if (code == 1)
+                {
+                    /* start code found */
+                    code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+
+                    /* ----- 0x20..0x2F : video_object_layer_start_code ----- */
+
+                    if ((code > 0x1F) && (code < 0x30))
+                    {
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* random accessible vol */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 8);/* video object type indication */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* is object layer identifier */
+                        if (code == 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     4); /* video object layer verid */
+                            vol_verid = (M4OSA_UInt8)code;
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     3); /* video object layer priority */
+                        }
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 4);/* aspect ratio */
+                        if (code == 15)
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     16); /* par_width and par_height (8+8) */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* vol control parameters */
+                        if (code == 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     3);/* chroma format + low delay (3+1) */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* vbv parameters */
+                            if (code == 1)
+                            {
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         32);/* first and latter half bitrate + 2 marker bits
+                                            (15 + 1 + 15 + 1) */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         31);/* first and latter half vbv buffer size + first
+                                          half vbv occupancy + marker bits (15+1+3+11+1)*/
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         16);/* first half vbv occupancy + marker bits (15+1)*/
+                            }
+                        }
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 2); /* video object layer shape */
+                        /* Need to save it for vop parsing */
+                        video_object_layer_shape = (M4OSA_UInt8)code;
+
+                        /* only the rectangular shape is supported */
+                        if (code != 0) return M4NO_ERROR;
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1); /* Marker bit */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 16); /* VOP time increment resolution */
+                        pDci->uiTimeScale = code;
+
+                        /* Computes time increment length */
+                        j    = code - 1;
+                        for (i = 0; (i < 32) && (j != 0); j >>= 1)
+                        {
+                            i++;
+                        }
+                        time_incr_length = (i == 0) ? 1 : i;
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* Marker bit */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* Fixed VOP rate */
+                        if (code == 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     time_incr_length);/* Fixed VOP time increment */
+                        }
+
+                        if(video_object_layer_shape != 1) /* 1 = Binary */
+                        {
+                            if(video_object_layer_shape == 0) /* 0 = rectangular */
+                            {
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         13);/* Width */
+                                pVideoSize->m_uiWidth = code;
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         13);/* Height */
+                                pVideoSize->m_uiHeight = code;
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                            }
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* interlaced */
+                        interlaced = (M4OSA_UInt8)code;
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                 1);/* OBMC disable */
+
+                        if(vol_verid == 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* sprite enable */
+                            sprite_enable = (M4OSA_UInt8)code;
+                        }
+                        else
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     2);/* sprite enable */
+                            sprite_enable = (M4OSA_UInt8)code;
+                        }
+                        if ((sprite_enable == 1) || (sprite_enable == 2))
+                        /* Sprite static = 1 and Sprite GMC = 2 */
+                        {
+                            if (sprite_enable != 2)
+                            {
+
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         13);/* sprite width */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         13);/* sprite height */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         13);/* sprite l coordinate */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         13);/* sprite top coordinate */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* Marker bit */
+                            }
+
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     6);/* sprite warping points */
+                            sprite_warping_points = (M4OSA_UInt8)code;
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     2);/* sprite warping accuracy */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* sprite brightness change */
+                            sprite_brightness_change = (M4OSA_UInt8)code;
+                            if (sprite_enable != 2)
+                            {
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                             1);/* low latency sprite enable */
+                            }
+                        }
+                        if ((vol_verid != 1) && (video_object_layer_shape != 0))
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* sadct disable */
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1); /* not 8 bits */
+                        if (code)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     4);/* quant precision */
+                            quant_precision = (M4OSA_UInt8)code;
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         4);/* bits per pixel */
+                        }
+
+                        /* greyscale not supported */
+                        if(video_object_layer_shape == 3)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     3); /* nogray quant update + composition method +
+                                            linear composition */
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* quant type */
+                        if (code)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* load intra quant mat */
+                            if (code)
+                            {
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                                i = 1; /* first intra quant matrix value already read */
+                                while (i < 64)
+                                {
+                                    code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                                    if (code == 0)
+                                        break;
+                                    i++;
+                                }
+                            }
+
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                         1);/* load non intra quant mat */
+                            if (code)
+                            {
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                                i = 1; /* first non intra quant matrix value already read */
+                                while (i < 64)
+                                {
+                                    code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                                    if (code == 0)
+                                        break;
+                                    i++;
+                                }
+                            }
+                        }
+
+                        if (vol_verid != 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* quarter sample */
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* complexity estimation disable */
+                        complexity_estimation_disable = (M4OSA_UInt8)code;
+                        if (!code)
+                        {
+                            //return M4ERR_NOT_IMPLEMENTED;
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* resync marker disable */
+                        pDci->uiUseOfResynchMarker = (code) ? 0 : 1;
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt,
+                                     1);/* data partitionned */
+                        pDci->bDataPartition = (code) ? M4OSA_TRUE : M4OSA_FALSE;
+                        if (code)
+                        {
+                            /* reversible VLC */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            pDci->bUseOfRVLC = (code) ? M4OSA_TRUE : M4OSA_FALSE;
+                        }
+
+                        if (vol_verid != 1)
+                        {
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);/* newpred */
+                            if (code)
+                            {
+                                //return M4ERR_PARAMETER;
+                            }
+                            /* reduced resolution vop enable */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            reduced_resolution_vop_enable = (M4OSA_UInt8)code;
+                        }
+
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);/* scalability */
+                        scalability = (M4OSA_UInt8)code;
+                        if (code)
+                        {
+                            /* hierarchy type */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            b_hierarchy_type = (M4OSA_UInt8)code;
+                            /* ref layer id */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 4);
+                            /* ref sampling direct */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            /* hor sampling factor N */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            /* hor sampling factor M */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            /* vert sampling factor N */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            /* vert sampling factor M */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            /* enhancement type */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                            enhancement_type = (M4OSA_UInt8)code;
+                            if ((!b_hierarchy_type) && (video_object_layer_shape == 1))
+                            {
+                                /* use ref shape */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                                /* use ref texture */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                                /* shape hor sampling factor N */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                                /* shape hor sampling factor M */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                                /* shape vert sampling factor N */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                                /* shape vert sampling factor M */
+                                code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 5);
+                            }
+                        }
+                        break;
+                    }
+
+                    /* ----- 0xB0 : visual_object_sequence_start_code ----- */
+
+                    else if(code == 0xB0)
+                    {
+                        /* profile_and_level_indication */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 8);
+                        pDci->uiProfile = (M4OSA_UInt8)code;
+                    }
+
+                    /* ----- 0xB5 : visual_object_start_code ----- */
+
+                    else if(code == 0xB5)
+                    {
+                        /* is object layer identifier */
+                        code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 1);
+                        if (code == 1)
+                        {
+                             /* visual object verid */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 4);
+                            vol_verid = (M4OSA_UInt8)code;
+                             /* visual object layer priority */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 3);
+                        }
+                        else
+                        {
+                             /* Realign on byte */
+                            code = M4VD_EXTERNAL_GetBitsFromMemory(&parsingCtxt, 7);
+                            vol_verid = 1;
+                        }
+                    }
+
+                    /* ----- end ----- */
+                }
+                else
+                {
+                    if ((code >> 2) == 0x20)
+                    {
+                        /* H.263 short header start code: not an MPEG-4 VOL, stop parsing */
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
+    return M4NO_ERROR;
+}
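+
+/* A minimal usage sketch, assuming err, a pDsiBuffer pointer and a dsiSize value
+ * taken from the video stream handler (m_pDecoderSpecificInfo and
+ * m_decoderSpecificInfoSize); the parser fills the video size and the decoder
+ * configuration information from the VOL header.
+ *
+ *   M4DECODER_MPEG4_DecoderConfigInfo dci;
+ *   M4DECODER_VideoSize               size;
+ *
+ *   err = M4DECODER_EXTERNAL_ParseVideoDSI(pDsiBuffer, dsiSize, &dci, &size);
+ *   if (M4NO_ERROR == err)
+ *   {
+ *       // size.m_uiWidth x size.m_uiHeight, dci.uiTimeScale, dci.uiProfile, ...
+ *   }
+ */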
+
+M4OSA_ERR M4DECODER_EXTERNAL_ParseAVCDSI(M4OSA_UInt8* pDSI, M4OSA_Int32 DSISize,
+                                         M4DECODER_AVCProfileLevel *profile)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_Bool NALSPS_and_Profile0Found = M4OSA_FALSE;
+    M4OSA_UInt16 index;
+    M4OSA_Bool    constraintSet3;
+
+    /* check for baseline profile */
+    for(index = 0; index < (DSISize-1); index++)
+    {
+        if(((pDSI[index] & 0x1f) == 0x07) && (pDSI[index+1] == 0x42))
+        {
+            NALSPS_and_Profile0Found = M4OSA_TRUE;
+            break;
+        }
+    }
+    if(M4OSA_FALSE == NALSPS_and_Profile0Found)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_ParseAVCDSI: no baseline SPS found, index = %d", index);
+        *profile = M4DECODER_AVC_kProfile_and_Level_Out_Of_Range;
+    }
+    else
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_ParseAVCDSI: index = %d", index);
+        constraintSet3 = (pDSI[index+2] & 0x10);
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_ParseAVCDSI: level = %d", pDSI[index+3]);
+        switch(pDSI[index+3])
+        {
+        case 10:
+            *profile = M4DECODER_AVC_kProfile_0_Level_1;
+            break;
+        case 11:
+            if(constraintSet3)
+                *profile = M4DECODER_AVC_kProfile_0_Level_1b;
+            else
+                *profile = M4DECODER_AVC_kProfile_0_Level_1_1;
+            break;
+        case 12:
+            *profile = M4DECODER_AVC_kProfile_0_Level_1_2;
+            break;
+        case 13:
+            *profile = M4DECODER_AVC_kProfile_0_Level_1_3;
+            break;
+        case 20:
+            *profile = M4DECODER_AVC_kProfile_0_Level_2;
+            break;
+        case 21:
+            *profile = M4DECODER_AVC_kProfile_0_Level_2_1;
+            break;
+        case 22:
+            *profile = M4DECODER_AVC_kProfile_0_Level_2_2;
+            break;
+        case 30:
+            *profile = M4DECODER_AVC_kProfile_0_Level_3;
+            break;
+        case 31:
+            *profile = M4DECODER_AVC_kProfile_0_Level_3_1;
+            break;
+        case 32:
+            *profile = M4DECODER_AVC_kProfile_0_Level_3_2;
+            break;
+        case 40:
+            *profile = M4DECODER_AVC_kProfile_0_Level_4;
+            break;
+        case 41:
+            *profile = M4DECODER_AVC_kProfile_0_Level_4_1;
+            break;
+        case 42:
+            *profile = M4DECODER_AVC_kProfile_0_Level_4_2;
+            break;
+        case 50:
+            *profile = M4DECODER_AVC_kProfile_0_Level_5;
+            break;
+        case 51:
+            *profile = M4DECODER_AVC_kProfile_0_Level_5_1;
+            break;
+        default:
+            *profile = M4DECODER_AVC_kProfile_and_Level_Out_Of_Range;
+        }
+    }
+    return err;
+}
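+
+/* A minimal usage sketch, assuming err, a pAvcDsi pointer and an avcDsiSize value
+ * describing the AVC decoder specific info (the buffer containing the SPS); only
+ * the baseline profile (profile_idc 0x42) is recognized by the parser above.
+ *
+ *   M4DECODER_AVCProfileLevel profileLevel;
+ *
+ *   err = M4DECODER_EXTERNAL_ParseAVCDSI(pAvcDsi, avcDsiSize, &profileLevel);
+ *   if ((M4NO_ERROR == err) &&
+ *       (M4DECODER_AVC_kProfile_and_Level_Out_Of_Range != profileLevel))
+ *   {
+ *       // profileLevel holds one of the M4DECODER_AVC_kProfile_0_Level_* values
+ *   }
+ */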
+
diff --git a/libvideoeditor/vss/src/M4VD_EXTERNAL_Interface.c b/libvideoeditor/vss/src/M4VD_EXTERNAL_Interface.c
new file mode 100755
index 0000000..009f495
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VD_EXTERNAL_Interface.c
@@ -0,0 +1,1155 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ ******************************************************************************
+ * @file    M4VD_EXTERNAL_Interface.c
+ * @brief
+ * @note
+ ******************************************************************************
+ */
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+#include "M4OSA_Semaphore.h"
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+
+#include "M4VD_EXTERNAL_Interface.h"
+#include "M4VD_EXTERNAL_Internal.h"
+
+/* Warning: the decode thread has finished decoding all the frames */
+#define M4WAR_DECODE_FINISHED                                M4OSA_ERR_CREATE(M4_WAR,\
+                                                                 M4DECODER_EXTERNAL, 0x0001)
+/* Warning: the render thread has finished rendering the frame */
+#define M4WAR_RENDER_FINISHED                                M4OSA_ERR_CREATE(M4_WAR,\
+                                                                 M4DECODER_EXTERNAL, 0x0002)
+
+#define M4ERR_CHECK(x) if (M4NO_ERROR != (x)) return (x);
+#define M4ERR_EXIT(x) do { err = (x); goto exit_with_error; } while(0)
+
+
+/* ----- shell API ----- */
+
+static M4OSA_ERR M4DECODER_EXTERNAL_create(M4OSA_Context *pVS_Context,
+                                             M4_StreamHandler *pStreamHandler,
+                                             M4READER_DataInterface *pReaderDataInterface,
+                                             M4_AccessUnit* pAccessUnit, M4OSA_Void* pUserData);
+static M4OSA_ERR M4DECODER_EXTERNAL_destroy(M4OSA_Context pVS_Context);
+static M4OSA_ERR M4DECODER_EXTERNAL_getOption(M4OSA_Context pVS_Context, M4OSA_OptionID optionId,
+                                                M4OSA_DataOption* pValue);
+static M4OSA_ERR M4DECODER_EXTERNAL_setOption(M4OSA_Context pVS_Context, M4OSA_OptionID optionId,
+                                                 M4OSA_DataOption pValue);
+static M4OSA_ERR M4DECODER_EXTERNAL_decode(M4OSA_Context pVS_Context, M4_MediaTime* pTime,
+                                             M4OSA_Bool bJump);
+static M4OSA_ERR M4DECODER_EXTERNAL_render(M4OSA_Context pVS_Context, M4_MediaTime* pTime,
+                                             M4VIFI_ImagePlane* pOutputPlane,
+                                             M4OSA_Bool bForceRender);
+
+/* ----- Signaling functions ----- */
+
+static M4OSA_ERR M4DECODER_EXTERNAL_signalDecoderOver(M4OSA_Context pVS_Context,
+                                                        M4_MediaTime aTime, M4OSA_ERR aUserError);
+static M4OSA_ERR M4DECODER_EXTERNAL_signalRenderOver(M4OSA_Context pVS_Context,
+                                                     M4_MediaTime aTime, M4OSA_ERR aUserError);
+
+/* ----- static internal functions ----- */
+
+static M4OSA_ERR M4DECODER_EXTERNAL_Init(void** pVS_Context, M4VD_Interface* p_HWInterface,
+                                         M4_StreamHandler *pStreamHandler);
+static M4OSA_ERR M4DECODER_EXTERNAL_StreamDescriptionInit(M4VD_StreamInfo** ppStreamInfo,
+                                                             M4_StreamHandler *pStreamHandler);
+static M4OSA_ERR M4DECODER_EXTERNAL_SetUpReadInput(void* pVS_Context,
+                                                     M4READER_DataInterface* pReader,
+                                                     M4_AccessUnit* pAccessUnit);
+static M4OSA_ERR M4DECODER_EXTERNAL_GetNextAu(M4VS_VideoDecoder_Context* pStreamContext,
+                                                 M4VD_VideoBuffer *nextBuffer,
+                                                 M4_MediaTime* nextFrameTime);
+static M4OSA_ERR M4DECODER_EXTERNAL_SynchronousDecode(M4OSA_Context pVS_Context);
+static M4OSA_ERR M4DECODER_EXTERNAL_AsynchronousDecode(M4OSA_Context pVS_Context);
+static M4OSA_ERR M4DECODER_EXTERNAL_AsynchronousRender(M4OSA_Context pVS_Context);
+
+
+/* ___________________________________________________________________ */
+/*|                                                                   |*/
+/*|                                                       |*/
+/*|___________________________________________________________________|*/
+
+/**
+ ************************************************************************
+ * @brief   Retrieves the interface implemented by the decoder
+ * @note
+ *
+ * @param   pDecoderInterface: (OUT) address of a pointer that will be set to the interface
+ *                                   implemented by this decoder. The interface is a structure
+ *                                   allocated by the function and must be unallocated by the
+ *                                   caller.
+ *
+ * @returns : M4NO_ERROR  if OK
+ *            M4ERR_ALLOC if allocation failed
+ ************************************************************************
+ */
+M4OSA_ERR M4DECODER_EXTERNAL_getInterface(M4DECODER_VideoInterface **pDecoderInterface)
+{
+    /* Allocates memory for the decoder shell pointer to function */
+    *pDecoderInterface =
+         (M4DECODER_VideoInterface*)M4OSA_malloc( sizeof(M4DECODER_VideoInterface),
+             M4DECODER_EXTERNAL, (M4OSA_Char *)"M4DECODER_VideoInterface" );
+    if (M4OSA_NULL == *pDecoderInterface)
+    {
+        M4OSA_TRACE1_0("M4DECODER_EXTERNAL_getInterface:\
+             unable to allocate M4DECODER_VideoInterface, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    (*pDecoderInterface)->m_pFctCreate    = M4DECODER_EXTERNAL_create;
+    (*pDecoderInterface)->m_pFctDestroy   = M4DECODER_EXTERNAL_destroy;
+    (*pDecoderInterface)->m_pFctGetOption = M4DECODER_EXTERNAL_getOption;
+    (*pDecoderInterface)->m_pFctSetOption = M4DECODER_EXTERNAL_setOption;
+    (*pDecoderInterface)->m_pFctDecode    = M4DECODER_EXTERNAL_decode;
+    (*pDecoderInterface)->m_pFctRender    = M4DECODER_EXTERNAL_render;
+
+    return M4NO_ERROR;
+}
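+
+/* A minimal usage sketch, assuming an err variable of type M4OSA_ERR; the
+ * interface structure is allocated by getInterface and must be freed by the
+ * caller, and its function pointers map to the static shell functions below.
+ *
+ *   M4DECODER_VideoInterface* pDecoderItf = M4OSA_NULL;
+ *
+ *   err = M4DECODER_EXTERNAL_getInterface(&pDecoderItf);
+ *   if (M4NO_ERROR == err)
+ *   {
+ *       // pDecoderItf->m_pFctCreate / m_pFctDecode / m_pFctRender drive the decoder
+ *       M4OSA_free((M4OSA_MemAddr32)pDecoderItf);
+ *   }
+ */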
+
+
+/* ___________________________________________________________________ */
+/*|                                                                   |*/
+/*|                           shell API                            |*/
+/*|___________________________________________________________________|*/
+
+/**
+ ************************************************************************
+ * @brief   Creates the external video decoder
+ * @note    This function creates internal video decoder context and
+ *          initializes it.
+ *
+ * @param   pVS_Context     (OUT)   Context of the video hw shell
+ * @param   pStreamHandler  (IN)    Pointer to a video stream description
+ * @param   pReaderDataInterface: (IN)  Pointer to the M4READER_DataInterface
+ *                                  structure that must be used by the
+ *                                  decoder to read data from the stream
+ * @param   pAccessUnit     (IN)    Pointer to an access unit (allocated
+ *                                  by the caller) where the decoded data
+ *                                  are stored
+ * @param   pUserData       (IN)    M4DECODER_EXTERNAL_UserDataType pointer carrying both the
+ *                                  external decoder interface and its user data
+ *
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_ALLOC             a memory allocation has failed
+ * @return  M4ERR_PARAMETER         at least one parameter is not properly set (in DEBUG only)
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_create(M4OSA_Context *pVS_Context,
+                                             M4_StreamHandler *pStreamHandler,
+                                             M4READER_DataInterface *pReaderDataInterface,
+                                             M4_AccessUnit* pAccessUnit, M4OSA_Void* pUserData)
+{
+    M4VD_VideoType videoDecoderKind;
+    M4VD_StreamInfo* pStreamInfo;
+    M4VD_OutputFormat outputFormat;
+
+    M4VS_VideoDecoder_Context* pStreamContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_create");
+
+    /* Video Shell Creation */
+    err = M4DECODER_EXTERNAL_Init(pVS_Context,
+         ((M4DECODER_EXTERNAL_UserDataType)pUserData)->externalFuncs, pStreamHandler);
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_create :\
+             M4VD_EXTERNAL_Init RETURNS THE ERROR CODE = 0x%x", err);
+        return err;
+    }
+
+    err = M4DECODER_EXTERNAL_SetUpReadInput(*pVS_Context, pReaderDataInterface, pAccessUnit);
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_create :\
+             M4VD_EXTERNAL_SetUpReadInput RETURNS THE ERROR CODE = 0x%x", err);
+        return err;
+    }
+
+    pStreamContext = (M4VS_VideoDecoder_Context*)(*pVS_Context);
+
+    /* Stream Description init */
+    err = M4DECODER_EXTERNAL_StreamDescriptionInit(&pStreamInfo, pStreamHandler);
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_create :\
+             M4VD_EXTERNAL_StreamDescriptionInit RETURNS THE ERROR CODE = 0x%x", err);
+        return err;
+    }
+
+    pStreamContext->m_pStreamInfo = pStreamInfo;
+
+    /* HW context creation */
+    err = pStreamContext->m_VD_Interface->m_pFctInitVideoDecoder(&(pStreamContext->m_VD_Context),
+         &(pStreamContext->m_VD_SignalingInterface));
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_create : m_pFctInitVideoDecoder() error 0x%x", err);
+        return err;
+    }
+
+    /* HW decoder creation */
+    switch(pStreamHandler->m_streamType)
+    {
+        case M4DA_StreamTypeVideoH263 :
+            videoDecoderKind = M4VD_kH263VideoDec;
+            break;
+
+        default :
+        case M4DA_StreamTypeVideoMpeg4 :
+            videoDecoderKind = M4VD_kMpeg4VideoDec;
+            break;
+    }
+
+    err = pStreamContext->m_VD_Interface->m_pFctOpenDecoder(pStreamContext->m_VD_Context,
+         videoDecoderKind, pStreamContext->m_pStreamInfo, &outputFormat,
+             ((M4DECODER_EXTERNAL_UserDataType)pUserData)->externalUserData);
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_create : m_pFctOpenDecoder() error 0x%x", err);
+        return err;
+    }
+
+    /* Parse the VOL header */
+    err = M4DECODER_EXTERNAL_ParseVideoDSI((M4OSA_UInt8 *)pStreamContext->m_pStreamInfo->\
+                                           decoderConfiguration.pBuffer,
+                                           pStreamContext->m_pStreamInfo->\
+                                           decoderConfiguration.aSize,
+                                           &pStreamContext->m_Dci, &pStreamContext->m_VideoSize);
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_create :\
+             M4DECODER_EXTERNAL_ParseVideoDSI() error 0x%x", err);
+        return err;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   destroy the instance of the decoder
+ * @note    after this call the context is invalid
+ *
+ * @param   pVS_Context:   (IN) Context of the decoder
+ *
+ * @return  M4NO_ERROR          There is no error
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_destroy(M4OSA_Context pVS_Context)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_destroy");
+
+    if(M4OSA_NULL != pStreamContext)
+    {
+        /* Call external API destroy function */
+        pStreamContext->m_VD_Interface->m_pFctClose(pStreamContext->m_VD_Context);
+
+        /* Destroy context */
+        pStreamContext->m_VD_Interface->m_pFctCleanUp(pStreamContext->m_VD_Context);
+
+        if(M4OSA_NULL != pStreamContext->m_pStreamInfo)
+        {
+            M4OSA_free((M4OSA_MemAddr32)pStreamContext->m_pStreamInfo);
+            pStreamContext->m_pStreamInfo = M4OSA_NULL;
+        }
+
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+        if (M4OSA_NULL != pStreamContext->m_SemSync)
+        {
+            M4OSA_semaphoreClose(pStreamContext->m_SemSync);
+        }
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+
+        M4OSA_free((M4OSA_MemAddr32)pStreamContext);
+        pStreamContext = M4OSA_NULL;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Get an option value from the decoder
+ * @note    It allows the caller to retrieve a property value:
+ *          - the size (width x height) of the image
+ *          - the DSI properties
+ *
+ * @param   pVS_Context: (IN)       Context of the decoder
+ * @param   optionId:    (IN)       indicates the option to get
+ * @param   pValue:      (IN/OUT)   pointer to structure or value (allocated by user) where option
+ *                                    is stored
+ * @return  M4NO_ERROR              there is no error
+ * @return  M4ERR_PARAMETER         The context is invalid (in DEBUG only)
+ * @return  M4ERR_BAD_OPTION_ID     when the option ID is not a valid one
+ * @return  M4ERR_STATE             State automaton is not applied
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_getOption(M4OSA_Context pVS_Context, M4OSA_OptionID optionId,
+                                             M4OSA_DataOption *pValue)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_getOption");
+
+    switch (optionId)
+    {
+        case M4DECODER_kOptionID_VideoSize:
+            *((M4DECODER_VideoSize*)pValue) = pStreamContext->m_VideoSize;
+            err = M4NO_ERROR;
+            break;
+
+        case M4DECODER_MPEG4_kOptionID_DecoderConfigInfo:
+            *((M4DECODER_MPEG4_DecoderConfigInfo*)pValue) = pStreamContext->m_Dci;
+            err = M4NO_ERROR;
+            break;
+
+        default:
+            err = pStreamContext->m_VD_Interface->m_pFctGetOption(pStreamContext->m_VD_Context,
+                     optionId, pValue);
+            break;
+    }
+
+    return err;
+}
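+
+/* Illustrative use of the option getter (hedged sketch, not called in this file;
+ * callers normally reach it through the decoder shell's function-pointer table,
+ * and `ctx` and `err` are hypothetical names):
+ *
+ *     M4DECODER_VideoSize size;
+ *     err = M4DECODER_EXTERNAL_getOption(ctx, M4DECODER_kOptionID_VideoSize,
+ *                                        (M4OSA_DataOption *)&size);
+ *     // on success, size holds the dimensions parsed from the decoder
+ *     // configuration (VOL header)
+ */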
+
+/**
+ ************************************************************************
+ * @brief   Set an option value of the decoder
+ * @note    It allows the caller to set a property value:
+ *          - Nothing implemented at this time
+ *
+ * @param   pVS_Context: (IN)       Context of the external video decoder shell
+ * @param   optionId:    (IN)       Identifier indicating the option to set
+ * @param   pValue:      (IN)       Pointer to structure or value (allocated by user) where
+ *                                    option is stored
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_BAD_OPTION_ID     The option ID is not a valid one
+ * @return  M4ERR_STATE             State automaton is not applied
+ * @return  M4ERR_PARAMETER         The option parameter is invalid
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_setOption(M4OSA_Context pVS_Context, M4OSA_OptionID optionId,
+                                              M4OSA_DataOption pValue)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+    M4OSA_ERR err;
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_setOption");
+
+    switch (optionId)
+    {
+        case M4DECODER_kOptionID_OutputFilter:
+        {
+            M4DECODER_OutputFilter* pOutputFilter = (M4DECODER_OutputFilter*) pValue;
+            err =
+                pStreamContext->m_VD_Interface->m_pFctSetOutputFilter(pStreamContext->m_VD_Context,
+                            (M4VIFI_PlanConverterFunctionType*)pOutputFilter->m_pFilterFunction,
+                            pOutputFilter->m_pFilterUserData);
+        }
+        break;
+
+        case M4DECODER_kOptionID_DeblockingFilter:
+            err = M4NO_ERROR;
+        break;
+
+        default:
+            err = pStreamContext->m_VD_Interface->m_pFctSetOption(pStreamContext->m_VD_Context,
+                 optionId, pValue);
+        break;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   Decode video Access Units up to a target time
+ * @note    Parses and decodes the video until it can output a decoded image whose
+ *          composition time is equal to or greater than the passed target time.
+ *          The data are read from the reader data interface passed to M4DECODER_EXTERNAL_create.
+ *          In threaded mode, waits until the previous decoding is over,
+ *          and fills the decoding parameters used by the decoding thread.
+ *
+ * @param   pVS_Context:(IN)        Context of the external video decoder shell
+ * @param   pTime:      (IN/OUT)    IN: Time to decode up to (in milliseconds)
+ *                                  OUT: Time of the last decoded frame (in ms)
+ * @param   bJump:      (IN)        0 if no jump occurred just before this call
+ *                                  1 if a jump has just been made
+ *
+ * @return  M4NO_ERROR              there is no error
+ * @return  M4ERR_PARAMETER         at least one parameter is not properly set
+ * @return  M4WAR_NO_MORE_AU        there is no more access unit to decode (end of stream)
+ * @return  M4WAR_VIDEORENDERER_NO_NEW_FRAME    No frame to render
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_decode(M4OSA_Context pVS_Context, M4_MediaTime* pTime,
+                                             M4OSA_Bool bJump)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE2_2("M4DECODER_EXTERNAL_decode : up to %lf  bjump = 0x%x", *pTime, bJump);
+
+    pStreamContext->m_DecodeUpToCts = *pTime;
+    pStreamContext->m_bJump = bJump;
+    if (bJump)
+    {
+        pStreamContext->m_CurrentDecodeCts = -1.0;
+        pStreamContext->m_CurrentRenderCts = -1.0;
+    }
+
+    if(pStreamContext->m_DecodeUpToCts < pStreamContext->m_nextAUCts &&
+        pStreamContext->m_CurrentRenderCts > pStreamContext->m_DecodeUpToCts)
+    {
+        /* No new predecode needs to be launched: the previously decoded frame
+             will be reused, and a warning is returned so the service knows it. */
+        /* In that case, the service MUST NOT call the render function, and must
+             keep the previous frame if necessary (i.e. force render case) */
+        M4OSA_TRACE2_0("No decode is needed, same frame reused");
+        return M4WAR_VIDEORENDERER_NO_NEW_FRAME;
+    }
+
+    /* If render has not been called for frame n, the decoding of frame n+1 has
+         not been launched -> do not wait for its completion ... */
+    if(pStreamContext->m_bIsWaitNextDecode == M4OSA_TRUE)
+    {
+        /* wait for decode n+1 to complete */
+        //M4semvalue--;
+        //printf("Semaphore wait: %d\n", M4semvalue);
+        pStreamContext->m_bIsWaitNextDecode = M4OSA_FALSE;
+        M4OSA_semaphoreWait(pStreamContext->m_SemSync, M4OSA_WAIT_FOREVER);
+    }
+    if(pStreamContext->m_CurrentDecodeCts >= *pTime)
+    {
+        /* The predecoded frame already reaches the target time, so return it.
+             Otherwise the requested frame lies after the "predecoded" frame and
+             a new decode has to be launched below */
+        *pTime = pStreamContext->m_CurrentDecodeCts;
+        return M4NO_ERROR;
+    }
+
+    pStreamContext->m_NbDecodedFrames = 0;
+    pStreamContext->m_uiDecodeError = M4NO_ERROR;
+    pStreamContext->m_bDataDecodePending = M4OSA_TRUE;
+    pStreamContext->m_uiDecodeError = M4NO_ERROR;
+
+    /* Launch DecodeUpTo process in synchronous mode */
+    while(pStreamContext->m_uiDecodeError == M4NO_ERROR)
+    {
+        M4DECODER_EXTERNAL_SynchronousDecode(pVS_Context);
+        /* return code is ignored, it is used only in M4OSA_Thread api */
+    }
+
+    *pTime = pStreamContext->m_CurrentDecodeCts;
+
+    if ( (M4WAR_DECODE_FINISHED == pStreamContext->m_uiDecodeError)
+        || (M4WAR_VIDEORENDERER_NO_NEW_FRAME == pStreamContext->m_uiDecodeError) )
+    {
+        pStreamContext->m_uiDecodeError = M4NO_ERROR;
+    }
+
+    return pStreamContext->m_uiDecodeError;
+}
+
+/**
+ ************************************************************************
+ * @brief   Renders the video at the specified time.
+ * @note    In threaded mode, this function unlocks the decoding thread,
+ *          which also calls the external rendering function.
+ *          Otherwise, it just calls the external rendering function and waits
+ *          for its completion.
+ *
+ * @param   pVS_Context: (IN)       Context of the video decoder shell
+ * @param   pTime:       (IN/OUT)   IN: Time to render to (in milliseconds)
+ *                                  OUT: Time of the effectively rendered frame (in ms)
+ * @param   pOutputPlane:(OUT)      Output plane filled with decoded data (converted)
+ *                                  If NULL, the rendering is made by the external
+ *                                  component.
+ * @param   bForceRender:(IN)       1 if the image must be rendered even if it has already been
+ *                                  0 if not (in which case the function can return
+ *                                    M4WAR_VIDEORENDERER_NO_NEW_FRAME)
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_PARAMETER         At least one parameter is not properly set
+ * @return  M4ERR_STATE             State automaton is not applied
+ * @return  M4ERR_ALLOC             There is no more available memory
+ * @return  M4WAR_VIDEORENDERER_NO_NEW_FRAME    If the frame to render has already been rendered
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_render(M4OSA_Context pVS_Context, M4_MediaTime* pTime,
+                                           M4VIFI_ImagePlane* pOutputPlane,
+                                           M4OSA_Bool bForceRender)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_TRACE2_2("M4DECODER_EXTERNAL_render : pTime = %lf, forceRender: %d ", *pTime,
+         bForceRender);
+
+    pStreamContext->m_TargetRenderCts = *pTime;
+    pStreamContext->m_pOutputPlane = pOutputPlane;
+    pStreamContext->m_bForceRender = bForceRender;
+    pStreamContext->m_uiRenderError = M4NO_ERROR;
+    pStreamContext->m_bDataRenderPending = M4OSA_TRUE;
+
+    /* Launch Render process in synchronous mode */
+    while(pStreamContext->m_uiRenderError == M4NO_ERROR)
+    {
+        M4DECODER_EXTERNAL_AsynchronousRender(pVS_Context);
+        /* return code is ignored, it is used only in M4OSA_Thread */
+    }
+
+
+    *pTime = pStreamContext->m_CurrentRenderCts;
+
+
+    if (M4WAR_RENDER_FINISHED == pStreamContext->m_uiRenderError)
+    {
+        pStreamContext->m_uiRenderError = M4NO_ERROR;
+    }
+
+    return pStreamContext->m_uiRenderError;
+}
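+
+/* Illustrative decode/render pairing (hedged sketch only; real callers go through
+ * the decoder shell's function-pointer table and may differ; `ctx`, `pPlane`,
+ * `targetCts` and `err` are hypothetical names):
+ *
+ *     M4_MediaTime t = targetCts;                      // time to reach, in ms
+ *     err = M4DECODER_EXTERNAL_decode(ctx, &t, M4OSA_FALSE);
+ *     if (M4NO_ERROR == err) {
+ *         // t now holds the CTS of the decoded frame: render it
+ *         err = M4DECODER_EXTERNAL_render(ctx, &t, pPlane, M4OSA_FALSE);
+ *     } else if (M4WAR_VIDEORENDERER_NO_NEW_FRAME == err) {
+ *         // keep the previously rendered frame, do not call render
+ *     }
+ */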
+
+
+/* ___________________________________________________________________ */
+/*|                                                                   |*/
+/*|                        Signaling functions                        |*/
+/*|___________________________________________________________________|*/
+
+/**
+ ************************************************************************
+ * @brief   Called by the HW video decoder to signal that a decoding is
+ *          over
+ * @note    The function gets another AU from the internal AU buffer and
+ *          launches its decoding.
+ *          If no more AUs are available, the M4DECODER_EXTERNAL_decode
+ *          (or M4DECODER_EXTERNAL_render if threaded) function is unlocked
+ *
+ * @param   pVS_Context:    (IN)    context of the video hw shell
+ * @param   aTime:          (IN)    time of the decoded frame
+ * @param   aUserError      (IN)    error code returned to the VPS
+ *
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_HW_DECODER_xxx    A fatal error occurred
+ * @return  M4ERR_PARAMETER         At least one parameter is NULL
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_signalDecoderOver(M4OSA_Context pVS_Context,
+                                                      M4_MediaTime aTime, M4OSA_ERR aUserError)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_TRACE2_1("M4DECODER_EXTERNAL_signalDecoderOver : aTime = %lf", aTime);
+
+    pStreamContext->m_NbDecodedFrames++;
+    pStreamContext->m_uiDecodeError = aUserError;
+    pStreamContext->m_CurrentDecodeCts = aTime;
+
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+    /* give control back to stepDecode */
+    //M4semvalue++;
+    //printf("Semaphore post: %d\n", M4semvalue);
+    M4OSA_semaphorePost(pStreamContext->m_SemSync);
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Called by the HW video renderer to signal that a rendering is
+ *          over
+ * @note    The function just posts a semaphore to unblock the
+ *          M4DECODER_EXTERNAL_render function
+ *
+ * @param   pVS_Context:    (IN)    context of the video hw shell
+ * @param   aTime:          (IN)    time of the decoded frame
+ * @param   aUserError      (IN)    error code returned to the VPS
+ *
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_HW_DECODER_xxx    A fatal error occurred
+ * @return  M4ERR_PARAMETER         At least one parameter is NULL
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_signalRenderOver(M4OSA_Context pVS_Context,
+                                                     M4_MediaTime aTime, M4OSA_ERR aUserError)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_TRACE3_1("M4DECODER_EXTERNAL_signalRenderOver : aTime = %lf", aTime);
+
+    pStreamContext->m_uiRenderError = aUserError;
+    pStreamContext->m_CurrentRenderCts = aTime;
+
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+    /* give control back to stepRender */
+    //M4semvalue++;
+    //printf("Semaphore post: %d\n", M4semvalue);
+    M4OSA_semaphorePost(pStreamContext->m_SemSync);
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+
+    return M4NO_ERROR;
+}
+
+
+/* ___________________________________________________________________ */
+/*|                                                                   |*/
+/*|                            Internals                              |*/
+/*|___________________________________________________________________|*/
+
+/**
+ ************************************************************************
+ * @brief    Initializes the video decoder shell/handler
+ * @note     allocates an execution context
+ *
+ * @param    pVS_Context:    (OUT)   Output context allocated
+ * @param    p_HWInterface:  (IN)    Pointer to the set of external HW codec functions
+ * @param    pStreamHandler: (IN)    Pointer to a video stream description
+ *
+ * @return   M4NO_ERROR     There is no error
+ * @return   M4ERR_ALLOC    There is no more available memory
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_Init(M4OSA_Context* pVS_Context,
+                                         M4VD_Interface* p_HWInterface,
+                                         M4_StreamHandler *pStreamHandler)
+{
+    M4VS_VideoDecoder_Context* pStreamContext;
+
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_Init");
+
+    /* Allocate the internal context */
+    *pVS_Context = M4OSA_NULL;
+
+    pStreamContext = (M4VS_VideoDecoder_Context*)M4OSA_malloc(sizeof(M4VS_VideoDecoder_Context),
+         M4DECODER_EXTERNAL,(M4OSA_Char *) "M4VS_VideoDecoder_Context");
+    if (M4OSA_NULL == pStreamContext)
+    {
+        M4OSA_TRACE1_0("M4DECODER_EXTERNAL_Init : error, cannot allocate context !");
+        return M4ERR_ALLOC;
+    }
+
+    /* Reset internal context structure */
+    *pVS_Context = pStreamContext;
+
+    /* --- READER --- */
+    pStreamContext->m_pReader = M4OSA_NULL;
+    pStreamContext->m_pNextAccessUnitToDecode = M4OSA_NULL;
+    pStreamContext->m_bJump = M4OSA_FALSE;
+    pStreamContext->m_nextAUCts = -1;
+
+    /* --- DECODER --- */
+    pStreamContext->m_DecodeUpToCts = -1;
+    pStreamContext->m_CurrentDecodeCts = -1;
+    pStreamContext->m_NbDecodedFrames = 0;
+    pStreamContext->m_uiDecodeError = M4NO_ERROR;
+    pStreamContext->m_bDataDecodePending = M4OSA_FALSE;
+    pStreamContext->m_PreviousDecodeCts = 0;
+    pStreamContext->m_bIsWaitNextDecode = M4OSA_FALSE;
+
+    /* --- RENDER --- */
+    pStreamContext->m_TargetRenderCts = -1;
+    pStreamContext->m_CurrentRenderCts = -1;
+    pStreamContext->m_uiRenderError = M4NO_ERROR;
+    pStreamContext->m_bForceRender = M4OSA_TRUE;
+    pStreamContext->m_bDataRenderPending = M4OSA_FALSE;
+
+    /* --- STREAM PARAMS --- */
+    pStreamContext->m_pVideoStreamhandler = (M4_VideoStreamHandler*)pStreamHandler;
+    pStreamContext->m_pStreamInfo = M4OSA_NULL;
+    pStreamContext->m_pOutputPlane = M4OSA_NULL;
+
+    /* --- VD API --- */
+    pStreamContext->m_VD_Interface = p_HWInterface;
+    pStreamContext->m_VD_Context = M4OSA_NULL;
+
+    pStreamContext->m_VD_SignalingInterface.m_pSignalTarget = pStreamContext;
+    pStreamContext->m_VD_SignalingInterface.m_pFctSignalDecoderOver =
+         M4DECODER_EXTERNAL_signalDecoderOver;
+    pStreamContext->m_VD_SignalingInterface.m_pFctSignalRenderOver =
+         M4DECODER_EXTERNAL_signalRenderOver;
+
+    /* --- THREAD STUFF --- */
+
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+    pStreamContext->m_SemSync = M4OSA_NULL;
+    //M4semvalue=0;
+    err = M4OSA_semaphoreOpen(&(pStreamContext->m_SemSync), 0);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_Init: can't open sync semaphore (err 0x%08X)", err);
+        return err;
+    }
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief   Fills the stream info structure
+ * @note    This function is called at decoder creation time; it
+ *          allocates and fills the video info structure
+ *
+ * @param   ppStreamInfo    (OUT)   Video info structure
+ * @param   pStreamHandler  (IN)    Pointer to a video stream description
+ *
+ * @return  M4ERR_ALLOC     Memory allocation error
+ * @return  M4NO_ERROR      There is no error
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_StreamDescriptionInit(M4VD_StreamInfo** ppStreamInfo,
+                                                          M4_StreamHandler *pStreamHandler)
+{
+    M4_VideoStreamHandler* pVideoStreamHandler  = M4OSA_NULL;
+
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_StreamDescriptionInit");
+
+    pVideoStreamHandler = (M4_VideoStreamHandler*)pStreamHandler;
+
+    /* M4VD_StreamInfo allocation */
+    *ppStreamInfo = (M4VD_StreamInfo*)M4OSA_malloc(sizeof(M4VD_StreamInfo),
+         M4DECODER_EXTERNAL, (M4OSA_Char *)"M4VD_StreamInfo");
+    if(M4OSA_NULL == *ppStreamInfo)
+    {
+        return M4ERR_ALLOC;
+    }
+
+    /* init values */
+    (*ppStreamInfo)->anImageSize.aWidth  = pVideoStreamHandler->m_videoWidth;
+    (*ppStreamInfo)->anImageSize.aHeight = pVideoStreamHandler->m_videoHeight;
+
+    (*ppStreamInfo)->decoderConfiguration.pBuffer =
+         (M4OSA_MemAddr8)pStreamHandler->m_pDecoderSpecificInfo;
+    (*ppStreamInfo)->decoderConfiguration.aSize   = pStreamHandler->m_decoderSpecificInfoSize;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Initializes current AU parameters
+ * @note    It is called at decoder creation time to initialize the
+ *          decoder's current AU.
+ *
+ * @param   pVS_Context (IN)    Context of the video decoder shell
+ * @param   pReader     (IN)    Reader interface
+ * @param   pAccessUnit (IN)    Access Unit structure used by the decoder
+ *
+ * @return  M4NO_ERROR          There is no error
+ * @return  M4ERR_PARAMETER     At least one parameter is M4OSA_NULL (debug only)
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_SetUpReadInput(M4OSA_Context pVS_Context,
+                                                    M4READER_DataInterface* pReader,
+                                                    M4_AccessUnit* pAccessUnit)
+{
+    M4VS_VideoDecoder_Context* pStreamContext=(M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_TRACE2_0("M4DECODER_EXTERNAL_SetUpReadInput");
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pStreamContext), M4ERR_PARAMETER,
+         "M4DECODER_EXTERNAL_SetUpReadInput: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pReader),        M4ERR_PARAMETER,
+         "M4DECODER_EXTERNAL_SetUpReadInput: invalid pReader pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pAccessUnit),    M4ERR_PARAMETER,
+         "M4DECODER_EXTERNAL_SetUpReadInput: invalid pAccessUnit pointer");
+
+    pStreamContext->m_pReader = pReader;
+    pStreamContext->m_pNextAccessUnitToDecode = pAccessUnit;
+
+    pAccessUnit->m_streamID = 0;
+    pAccessUnit->m_size = 0;
+    pAccessUnit->m_CTS = 0;
+    pAccessUnit->m_DTS = 0;
+    pAccessUnit->m_attribute = 0;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * @brief   Gets the next AU from internal AU buffer
+ * @note    This function is needed to provide a decodeUpTo interface
+ *          to the VPS.
+ *          The AUs are read from the file by the M4DECODER_EXTERNAL_decode
+ *          function and stored in a buffer. This function is called
+ *          internally to retrieve those stored AUs.
+ *
+ * @param   pStreamContext: (IN)        context of the video hw shell
+ * @param   nextBuffer:     (OUT)       buffer filled with the AU data address and size
+ * @param   nextFrameTime:  (IN/OUT)    time of the AU
+ *
+ * @return  M4NO_ERROR          There is no error
+ * @return  M4WAR_NO_MORE_AU    No more AU in internal buffer
+ * @return  M4ERR_PARAMETER     One invalid parameter
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_GetNextAu(M4VS_VideoDecoder_Context* pStreamContext,
+                                                 M4VD_VideoBuffer *nextBuffer,
+                                                 M4_MediaTime* nextFrameTime)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4_AccessUnit* pAccessUnit;
+
+    M4OSA_TRACE3_0("M4DECODER_EXTERNAL_GetNextAu");
+
+    /* Check context is valid */
+    if(M4OSA_NULL == pStreamContext)
+    {
+        M4OSA_TRACE1_0("M4DECODER_EXTERNAL_GetNextAu : error pStreamContext is NULL");
+        return M4ERR_PARAMETER;
+    }
+
+    /* Read the AU */
+    pAccessUnit = pStreamContext->m_pNextAccessUnitToDecode;
+
+    err = pStreamContext->m_pReader->m_pFctGetNextAu(pStreamContext->m_pReader->m_readerContext,
+         (M4_StreamHandler*)pStreamContext->m_pVideoStreamhandler, pAccessUnit);
+
+    if((err == M4WAR_NO_DATA_YET) || (err == M4WAR_NO_MORE_AU))
+    {
+        M4OSA_TRACE2_1("M4DECODER_EXTERNAL_GetNextAu : no data available 0x%x", err);
+    }
+    else if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4DECODER_EXTERNAL_GetNextAu : filesystem error 0x%x", err);
+
+        *nextFrameTime         = 0;
+        nextBuffer->pBuffer    = M4OSA_NULL;
+        nextBuffer->bufferSize = 0;
+
+        return err;
+    }
+
+    /* Fill buffer */
+    *nextFrameTime         = pAccessUnit->m_CTS;
+    nextBuffer->pBuffer    = (M4OSA_MemAddr32)pAccessUnit->m_dataAddress;
+    nextBuffer->bufferSize = pAccessUnit->m_size;
+
+    M4OSA_TRACE3_1("M4DECODER_EXTERNAL_GetNextAu: AU obtained, time is %f", *nextFrameTime);
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief    Decodes the next AU and waits for its decoding to complete
+ * @note     Called in a loop by M4DECODER_EXTERNAL_decode to perform the
+ *           decode-up-to process in synchronous mode
+ *
+ * @param    pVS_Context:    (IN)    Context of the video hw shell
+ *
+ * @return    M4NO_ERROR        There is no error
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_SynchronousDecode(M4OSA_Context pVS_Context)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4VD_VideoBuffer nextBuffer;
+
+
+    /* ----- decode process ----- */
+
+    if(M4OSA_TRUE == pStreamContext->m_bDataDecodePending)
+    {
+        /* Targeted time is reached */
+        if( pStreamContext->m_CurrentDecodeCts >= pStreamContext->m_DecodeUpToCts )
+        {
+            M4OSA_TRACE2_0("M4DECODER_EXTERNAL_SynchronousDecode :\
+                 skip decode because synchronisation");
+
+            if(pStreamContext->m_NbDecodedFrames > 0)
+            {
+                pStreamContext->m_uiDecodeError = M4WAR_DECODE_FINISHED;
+            }
+            else
+            {
+                pStreamContext->m_uiDecodeError = M4WAR_VIDEORENDERER_NO_NEW_FRAME;
+            }
+
+            M4ERR_EXIT(M4NO_ERROR);
+        }
+
+        pStreamContext->m_PreviousDecodeCts = pStreamContext->m_CurrentDecodeCts;
+
+        /* Get the next AU */
+        pStreamContext->m_uiDecodeError = M4DECODER_EXTERNAL_GetNextAu(pStreamContext,
+             &nextBuffer, &pStreamContext->m_CurrentDecodeCts);
+
+        if( M4NO_ERROR != pStreamContext->m_uiDecodeError )
+        {
+            if ( M4WAR_NO_MORE_AU != pStreamContext->m_uiDecodeError)
+            {
+                M4OSA_TRACE1_1("M4DECODER_EXTERNAL_SynchronousDecode :\
+                     M4DECODER_EXTERNAL_GetNextAu error 0x%x", pStreamContext->m_uiDecodeError);
+            }
+            M4ERR_EXIT(pStreamContext->m_uiDecodeError);
+        }
+
+        /* Decode the AU */
+        if(nextBuffer.bufferSize > 0)
+        {
+            pStreamContext->m_uiDecodeError =
+                 pStreamContext->m_VD_Interface->m_pFctStepDecode(pStreamContext->m_VD_Context,
+                     &nextBuffer, pStreamContext->m_CurrentDecodeCts);
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+            if ( (M4NO_ERROR == pStreamContext->m_uiDecodeError)
+                /*|| (M4WAR_IO_PENDING == pStreamContext->m_uiDecodeError)*/ )
+            {
+                /* wait for decode to complete */
+                //M4semvalue--;
+                //printf("Semaphore wait 2: %d\n", M4semvalue);
+                M4OSA_semaphoreWait(pStreamContext->m_SemSync, M4OSA_WAIT_FOREVER);
+                /* by now the actual m_uiDecodeError has been set by signalDecode */
+            }
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+            if(M4NO_ERROR != pStreamContext->m_uiDecodeError)
+            {
+                M4OSA_TRACE1_1("M4DECODER_EXTERNAL_SynchronousDecode : HW decoder error 0x%x",
+                     pStreamContext->m_uiDecodeError);
+                M4ERR_EXIT(M4NO_ERROR);
+            }
+        }
+        else
+        {
+            M4ERR_EXIT(M4NO_ERROR);
+        }
+    }
+
+    return M4NO_ERROR;
+
+
+/* ----- Release resources if an error occurred */
+exit_with_error:
+
+    /* Abort decoding */
+    pStreamContext->m_bDataDecodePending = M4OSA_FALSE;
+
+    if((M4NO_ERROR == pStreamContext->m_uiDecodeError) && (M4NO_ERROR != err))
+    {
+        pStreamContext->m_uiDecodeError = err;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief    Launches the decoding of the next AU without waiting for its
+ *           completion
+ * @note     Used to pre-decode the next frame once a frame has been rendered
+ *           (see M4DECODER_EXTERNAL_AsynchronousRender)
+ *
+ * @param    pVS_Context:    (IN)    Context of the video hw shell
+ *
+ * @return    M4NO_ERROR        There is no error
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_AsynchronousDecode(M4OSA_Context pVS_Context)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4VD_VideoBuffer nextBuffer;
+
+
+    /* ----- decode process ----- */
+
+    if(M4OSA_TRUE == pStreamContext->m_bDataDecodePending)
+    {
+        pStreamContext->m_PreviousDecodeCts = pStreamContext->m_CurrentDecodeCts;
+
+        /* Get the next AU */
+        pStreamContext->m_uiDecodeError = M4DECODER_EXTERNAL_GetNextAu(pStreamContext,
+             &nextBuffer, &pStreamContext->m_nextAUCts);
+
+        if( M4NO_ERROR != pStreamContext->m_uiDecodeError )
+        {
+            if ( M4WAR_NO_MORE_AU != pStreamContext->m_uiDecodeError)
+            {
+                M4OSA_TRACE1_1("M4DECODER_EXTERNAL_AsynchronousDecode :\
+                     M4DECODER_EXTERNAL_GetNextAu error 0x%x", pStreamContext->m_uiDecodeError);
+            }
+            //M4semvalue++;
+            //printf("Semaphore post: %d\n", M4semvalue);
+            //M4OSA_semaphorePost(pStreamContext->m_SemSync);
+            M4ERR_EXIT(pStreamContext->m_uiDecodeError);
+        }
+
+        /* Decode the AU if needed */
+        if(nextBuffer.bufferSize > 0)
+        {
+            pStreamContext->m_uiDecodeError =
+                 pStreamContext->m_VD_Interface->m_pFctStepDecode(pStreamContext->m_VD_Context,
+                    &nextBuffer, pStreamContext->m_nextAUCts\
+                        /*pStreamContext->m_CurrentDecodeCts*/);
+            if(M4NO_ERROR != pStreamContext->m_uiDecodeError)
+            {
+                M4OSA_TRACE1_1("M4DECODER_EXTERNAL_AsynchronousDecode : HW decoder error 0x%x",
+                     pStreamContext->m_uiDecodeError);
+                M4ERR_EXIT(M4NO_ERROR);
+            }
+            pStreamContext->m_bIsWaitNextDecode = M4OSA_TRUE;
+        }
+        else
+        {
+            M4ERR_EXIT(M4NO_ERROR);
+        }
+    }
+
+    return M4NO_ERROR;
+
+
+/* ----- Release resources if an error occurred */
+exit_with_error:
+
+    /* Abort decoding */
+    pStreamContext->m_bDataDecodePending = M4OSA_FALSE;
+
+    if((M4NO_ERROR == pStreamContext->m_uiDecodeError) && (M4NO_ERROR != err))
+    {
+        pStreamContext->m_uiDecodeError = err;
+    }
+
+    return err;
+}
+
+/**
+ ************************************************************************
+ * @brief    Renders the current decoded frame and launches the pre-decoding
+ *           of the next one
+ * @note     Called in a loop by M4DECODER_EXTERNAL_render
+ *
+ * @param    pVS_Context:    (IN)    Context of the video hw shell
+ *
+ * @return    M4NO_ERROR        There is no error
+ ************************************************************************
+ */
+static M4OSA_ERR M4DECODER_EXTERNAL_AsynchronousRender(M4OSA_Context pVS_Context)
+{
+    M4VS_VideoDecoder_Context* pStreamContext = (M4VS_VideoDecoder_Context*)pVS_Context;
+
+    M4OSA_ERR err = M4NO_ERROR;
+
+
+    /* ----- Render one frame ----- */
+
+    if(M4OSA_TRUE == pStreamContext->m_bDataRenderPending)
+    {
+#if 0
+        if (!pStreamContext->m_bForceRender)
+        {
+            /* Targeted time is reached */
+            if(pStreamContext->m_TargetRenderCts - pStreamContext->m_CurrentRenderCts < 1.0)
+             /* some +0.5 issues */
+            {
+                M4OSA_TRACE2_0("M4DECODER_EXTERNAL_AsynchronousRender :\
+                     skip render because synchronisation");
+                pStreamContext->m_uiRenderError = M4WAR_RENDER_FINISHED;
+
+                M4ERR_EXIT(M4NO_ERROR);
+            }
+
+            if ( (M4WAR_NO_MORE_AU == pStreamContext->m_uiDecodeError)
+                && (pStreamContext->m_CurrentDecodeCts \
+                    - pStreamContext->m_CurrentRenderCts < 1.0) )
+            {
+                pStreamContext->m_uiRenderError = M4WAR_RENDER_FINISHED;
+                M4ERR_EXIT(M4NO_ERROR);
+            }
+
+            if(pStreamContext->m_NbDecodedFrames == 0)
+            {
+                pStreamContext->m_uiRenderError = M4WAR_VIDEORENDERER_NO_NEW_FRAME;
+                M4ERR_EXIT(M4NO_ERROR);
+            }
+        }
+#endif
+        /* Render the frame */
+        pStreamContext->m_CurrentRenderCts = pStreamContext->m_CurrentDecodeCts;
+
+        pStreamContext->m_uiRenderError =
+             pStreamContext->m_VD_Interface->m_pFctStepRender(pStreamContext->m_VD_Context,
+                 pStreamContext->m_pOutputPlane, pStreamContext->m_CurrentRenderCts);
+#ifndef M4DECODER_EXTERNAL_SYNC_EXT_DECODE
+        if ( (M4NO_ERROR == pStreamContext->m_uiRenderError)
+            /* || (M4WAR_IO_PENDING == pStreamContext->m_uiRenderError) */ )
+        {
+            /* wait for render to complete */
+            //M4semvalue--;
+            //printf("Semaphore wait: %d\n", M4semvalue);
+            M4OSA_semaphoreWait(pStreamContext->m_SemSync, M4OSA_WAIT_FOREVER);
+            /* by now the actual m_uiRenderError has been set by signalRender */
+        }
+#endif /* not M4DECODER_EXTERNAL_SYNC_EXT_DECODE */
+        if(M4NO_ERROR != pStreamContext->m_uiRenderError)
+        {
+            M4OSA_TRACE1_1("M4DECODER_EXTERNAL_AsynchronousRender : HW render error 0x%x",
+                 pStreamContext->m_uiRenderError);
+            pStreamContext->m_bDataRenderPending = M4OSA_FALSE;
+
+            return M4NO_ERROR;
+        }
+
+        /* Launch in asynchronous mode the predecoding of the next frame */
+        pStreamContext->m_NbDecodedFrames = 0;
+        pStreamContext->m_uiDecodeError = M4NO_ERROR;
+        pStreamContext->m_bDataDecodePending = M4OSA_TRUE;
+        M4DECODER_EXTERNAL_AsynchronousDecode(pVS_Context);
+
+        pStreamContext->m_uiRenderError = M4WAR_RENDER_FINISHED;
+    }
+
+    return M4NO_ERROR;
+
+
+/* ----- Release resources if an error occurred */
+exit_with_error:
+
+    /* Abort the rendering */
+    pStreamContext->m_bDataRenderPending = M4OSA_FALSE;
+
+    if((M4NO_ERROR == pStreamContext->m_uiRenderError) && (M4NO_ERROR != err))
+    {
+        pStreamContext->m_uiRenderError = err;
+    }
+
+
+    return err;
+}
+
diff --git a/libvideoeditor/vss/src/M4VD_Tools.c b/libvideoeditor/vss/src/M4VD_Tools.c
new file mode 100644
index 0000000..4a737b2
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VD_Tools.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+
+#include "M4VD_Tools.h"
+
+/**
+ ************************************************************************
+ * @file   M4VD_Tools.c
+ * @brief  Helper functions for bitstream parsing
+ * @note   This file implements helper functions for the bitstream parser
+ ************************************************************************
+ */
+
+M4OSA_UInt32 M4VD_Tools_GetBitsFromMemory(M4VS_Bitstream_ctxt* parsingCtxt,
+     M4OSA_UInt32 nb_bits)
+{
+    M4OSA_UInt32    code;
+    M4OSA_UInt32    i;
+    code = 0;
+    for (i = 0; i < nb_bits; i++)
+    {
+        if (parsingCtxt->stream_index == 8)
+        {
+            //M4OSA_memcpy( (M4OSA_MemAddr8)&(parsingCtxt->stream_byte), parsingCtxt->in,
+            //     sizeof(unsigned char));
+            parsingCtxt->stream_byte = (unsigned char)(parsingCtxt->in)[0];
+            parsingCtxt->in++;
+            //fread(&stream_byte, sizeof(unsigned char),1,in);
+            parsingCtxt->stream_index = 0;
+        }
+        code = (code << 1);
+        code |= ((parsingCtxt->stream_byte & 0x80) >> 7);
+
+        parsingCtxt->stream_byte = (parsingCtxt->stream_byte << 1);
+        parsingCtxt->stream_index++;
+    }
+
+    return code;
+}
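+
+/* Illustrative usage (hedged sketch; `pDsi` and `startCode` are hypothetical):
+ * read the 32-bit start code at the beginning of a decoder specific info buffer.
+ *
+ *     M4VS_Bitstream_ctxt ctxt;
+ *     ctxt.stream_byte  = 0;
+ *     ctxt.stream_index = 8;        // forces a byte fetch on the first call
+ *     ctxt.in           = pDsi;     // MSB-first parsing starts here
+ *     startCode = M4VD_Tools_GetBitsFromMemory(&ctxt, 32);
+ */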
+
+M4OSA_ERR M4VD_Tools_WriteBitsToMemory(M4OSA_UInt32 bitsToWrite,
+                                     M4OSA_MemAddr32 dest_bits,
+                                     M4OSA_UInt8 offset, M4OSA_UInt8 nb_bits)
+{
+    M4OSA_UInt8 i,j;
+    M4OSA_UInt32 temp_dest = 0, mask = 0, temp = 1;
+    M4OSA_UInt32 input = bitsToWrite;
+    input = (input << (32 - nb_bits - offset));
+
+    /* Clear only the destination bits that will be overwritten (at most the
+         first 24 bits, MSB-first within each byte of the 32-bit word) */
+    for(j=0;j<3;j++)
+    {
+        for(i=0;i<8;i++)
+        {
+            if((j*8)+i >= offset && (j*8)+i < nb_bits + offset)
+            {
+                mask |= (temp << ((7*(j+1))-i+j));
+            }
+        }
+    }
+    mask = ~mask;
+    *dest_bits &= mask;
+
+    /* Parse input bits, and fill output buffer */
+    for(j=0;j<3;j++)
+    {
+        for(i=0;i<8;i++)
+        {
+            if((j*8)+i >= offset && (j*8)+i < nb_bits + offset)
+            {
+                temp = ((input & (0x80000000 >> offset)) >> (31-offset));
+                //*dest_bits |= (temp << (31 - i));
+                *dest_bits |= (temp << ((7*(j+1))-i+j));
+                input = (input << 1);
+            }
+        }
+    }
+
+    return M4NO_ERROR;
+}
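+
+/* Illustrative usage (hedged sketch): write the 4-bit value 0101b at bit
+ * offset 3 of a zeroed 32-bit word. Bits are written MSB-first within each of
+ * the word's first three bytes, so the word value becomes 0x0000000A here.
+ *
+ *     M4OSA_UInt32 word = 0;
+ *     M4VD_Tools_WriteBitsToMemory(0x5, (M4OSA_MemAddr32)&word, 3, 4);
+ */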
+
+
+
diff --git a/libvideoeditor/vss/src/M4VIFI_xVSS_RGB565toYUV420.c b/libvideoeditor/vss/src/M4VIFI_xVSS_RGB565toYUV420.c
new file mode 100755
index 0000000..8f00d08
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VIFI_xVSS_RGB565toYUV420.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file     M4VIFI_xVSS_RGB565toYUV420.c
+ * @brief    Contains a video library function
+ * @note     Color Conversion Filter
+ *           -# Contains the format conversion filters from RGB565 to YUV420
+ ******************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include    "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include    "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include    "M4VIFI_Clip.h"
+
+
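+/* Processing pattern for one 2x2 block (illustrative summary of the loop below):
+ *
+ *     RGB565 input              YUV420 output
+ *     p00  p10                  Y00  Y10     U = ((U00+U10+U01+U11) + 2) >> 2
+ *     p01  p11       ---->      Y01  Y11     V = ((V00+V10+V01+V11) + 2) >> 2
+ *
+ * Each pixel is converted with the Y16/U16/V16 macros (defined in
+ * M4VIFI_Defines.h, not shown here); the four chroma values of the block are
+ * then averaged with rounding to produce the single subsampled U and V sample.
+ */
+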
+/**
+ ******************************************************************************
+ * M4VIFI_UInt8 M4VIFI_xVSS_RGB565toYUV420 (void *pUserData,
+ *                                          M4VIFI_ImagePlane *pPlaneIn,
+ *                                          M4VIFI_ImagePlane *pPlaneOut)
+ * @brief   Transforms an RGB565 image into a YUV420 image.
+ * @note    Convert RGB565 to YUV420,
+ *          Loop on each row ( 2 rows by 2 rows )
+ *              Loop on each column ( 2 col by 2 col )
+ *                  Get 4 RGB samples from input data and build 4 output Y samples
+ *                  and each single U & V data
+ *              end loop on col
+ *          end loop on row
+ * @param   pUserData: (IN) User Specific Data
+ * @param   pPlaneIn: (IN) Pointer to RGB565 Plane
+ * @param   pPlaneOut: (OUT) Pointer to  YUV420 buffer Plane
+ * @return  M4VIFI_OK: there is no error
+ * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
+ * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  YUV Plane width is ODD
+ ******************************************************************************
+*/
+M4VIFI_UInt8    M4VIFI_xVSS_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+                                                      M4VIFI_ImagePlane *pPlaneOut)
+{
+    M4VIFI_UInt32   u32_width, u32_height;
+    M4VIFI_UInt32   u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V;
+    M4VIFI_UInt32   u32_stride_rgb, u32_stride_2rgb;
+    M4VIFI_UInt32   u32_col, u32_row;
+
+    M4VIFI_Int32    i32_r00, i32_r01, i32_r10, i32_r11;
+    M4VIFI_Int32    i32_g00, i32_g01, i32_g10, i32_g11;
+    M4VIFI_Int32    i32_b00, i32_b01, i32_b10, i32_b11;
+    M4VIFI_Int32    i32_y00, i32_y01, i32_y10, i32_y11;
+    M4VIFI_Int32    i32_u00, i32_u01, i32_u10, i32_u11;
+    M4VIFI_Int32    i32_v00, i32_v01, i32_v10, i32_v11;
+    M4VIFI_UInt8    *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
+    M4VIFI_UInt8    *pu8_y_data, *pu8_u_data, *pu8_v_data;
+    M4VIFI_UInt8    *pu8_rgbn_data, *pu8_rgbn;
+    M4VIFI_UInt16   u16_pix1, u16_pix2, u16_pix3, u16_pix4;
+    M4VIFI_UInt8 count_null=0;
+
+    /* Check planes height are appropriate */
+    if( (pPlaneIn->u_height != pPlaneOut[0].u_height)           ||
+        (pPlaneOut[0].u_height != (pPlaneOut[1].u_height<<1))   ||
+        (pPlaneOut[0].u_height != (pPlaneOut[2].u_height<<1)))
+    {
+        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+    }
+
+    /* Check planes width are appropriate */
+    if( (pPlaneIn->u_width != pPlaneOut[0].u_width)         ||
+        (pPlaneOut[0].u_width != (pPlaneOut[1].u_width<<1)) ||
+        (pPlaneOut[0].u_width != (pPlaneOut[2].u_width<<1)))
+    {
+        return M4VIFI_ILLEGAL_FRAME_WIDTH;
+    }
+
+    /* Set the pointer to the beginning of the output data buffers */
+    pu8_y_data = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
+    pu8_u_data = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
+    pu8_v_data = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
+
+    /* Set the pointer to the beginning of the input data buffers */
+    pu8_rgbn_data   = pPlaneIn->pac_data + pPlaneIn->u_topleft;
+
+    /* Get the size of the output image */
+    u32_width = pPlaneOut[0].u_width;
+    u32_height = pPlaneOut[0].u_height;
+
+    /* Set the size of the memory jumps corresponding to row jump in each output plane */
+    u32_stride_Y = pPlaneOut[0].u_stride;
+    u32_stride2_Y = u32_stride_Y << 1;
+    u32_stride_U = pPlaneOut[1].u_stride;
+    u32_stride_V = pPlaneOut[2].u_stride;
+
+    /* Set the size of the memory jumps corresponding to row jump in input plane */
+    u32_stride_rgb = pPlaneIn->u_stride;
+    u32_stride_2rgb = u32_stride_rgb << 1;
+
+
+    /* Loop on each row of the output image, input coordinates are estimated from output ones */
+    /* Two YUV rows are computed at each pass */
+    for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
+    {
+        /* Current Y plane row pointers */
+        pu8_yn = pu8_y_data;
+        /* Next Y plane row pointers */
+        pu8_ys = pu8_yn + u32_stride_Y;
+        /* Current U plane row pointer */
+        pu8_u = pu8_u_data;
+        /* Current V plane row pointer */
+        pu8_v = pu8_v_data;
+
+        pu8_rgbn = pu8_rgbn_data;
+
+        /* Loop on each column of the output image */
+        for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
+        {
+            /* Get four RGB 565 samples from input data */
+            u16_pix1 = *( (M4VIFI_UInt16 *) pu8_rgbn);
+            u16_pix2 = *( (M4VIFI_UInt16 *) (pu8_rgbn + CST_RGB_16_SIZE));
+            u16_pix3 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb));
+            u16_pix4 = *( (M4VIFI_UInt16 *) (pu8_rgbn + u32_stride_rgb + CST_RGB_16_SIZE));
+
+            /* Unpack RGB565 to 8bit R, G, B */
+#if 0
+            /* (x,y) */
+            GET_RGB565(i32_r00,i32_g00,i32_b00,u16_pix1);
+            /* (x+1,y) */
+            GET_RGB565(i32_r10,i32_g10,i32_b10,u16_pix2);
+            /* (x,y+1) */
+            GET_RGB565(i32_r01,i32_g01,i32_b01,u16_pix3);
+            /* (x+1,y+1) */
+            GET_RGB565(i32_r11,i32_g11,i32_b11,u16_pix4);
+#else
+            /* (x,y) */
+            GET_RGB565(i32_b00,i32_g00,i32_r00,u16_pix1);
+            /* (x+1,y) */
+            GET_RGB565(i32_b10,i32_g10,i32_r10,u16_pix2);
+            /* (x,y+1) */
+            GET_RGB565(i32_b01,i32_g01,i32_r01,u16_pix3);
+            /* (x+1,y+1) */
+            GET_RGB565(i32_b11,i32_g11,i32_r11,u16_pix4);
+#endif
+#if 1 /* Solution to avoid green effects due to transparency */
+            /* If RGB is transparent color (0, 63, 0), we transform it to white (31,63,31) */
+            if(i32_b00 == 0 && i32_g00 == 63 && i32_r00 == 0)
+            {
+                i32_b00 = 31;
+                i32_r00 = 31;
+            }
+            if(i32_b10 == 0 && i32_g10 == 63 && i32_r10 == 0)
+            {
+                i32_b10 = 31;
+                i32_r10 = 31;
+            }
+            if(i32_b01 == 0 && i32_g01 == 63 && i32_r01 == 0)
+            {
+                i32_b01 = 31;
+                i32_r01 = 31;
+            }
+            if(i32_b11 == 0 && i32_g11 == 63 && i32_r11 == 0)
+            {
+                i32_b11 = 31;
+                i32_r11 = 31;
+            }
+#endif
+            /* Convert RGB value to YUV */
+            i32_u00 = U16(i32_r00, i32_g00, i32_b00);
+            i32_v00 = V16(i32_r00, i32_g00, i32_b00);
+            /* luminance value */
+            i32_y00 = Y16(i32_r00, i32_g00, i32_b00);
+
+            i32_u10 = U16(i32_r10, i32_g10, i32_b10);
+            i32_v10 = V16(i32_r10, i32_g10, i32_b10);
+            /* luminance value */
+            i32_y10 = Y16(i32_r10, i32_g10, i32_b10);
+
+            i32_u01 = U16(i32_r01, i32_g01, i32_b01);
+            i32_v01 = V16(i32_r01, i32_g01, i32_b01);
+            /* luminance value */
+            i32_y01 = Y16(i32_r01, i32_g01, i32_b01);
+
+            i32_u11 = U16(i32_r11, i32_g11, i32_b11);
+            i32_v11 = V16(i32_r11, i32_g11, i32_b11);
+            /* luminance value */
+            i32_y11 = Y16(i32_r11, i32_g11, i32_b11);
+
+            /* Store luminance data */
+            pu8_yn[0] = (M4VIFI_UInt8)i32_y00;
+            pu8_yn[1] = (M4VIFI_UInt8)i32_y10;
+            pu8_ys[0] = (M4VIFI_UInt8)i32_y01;
+            pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
+#if 0 /* Temporary solution to avoid green effects due to transparency -> To be removed */
+            count_null = 4;
+            /* Store chroma data */
+            if(i32_b00 == 0 && i32_g00 == 63 && i32_r00 == 0)
+            {
+                i32_u00 = 0;
+                i32_v00 = 0;
+                count_null --;
+            }
+            if(i32_b10 == 0 && i32_g10 == 63 && i32_r10 == 0)
+            {
+                i32_u10 = 0;
+                i32_v10 = 0;
+                count_null --;
+            }
+            if(i32_b01 == 0 && i32_g01 == 63 && i32_r01 == 0)
+            {
+                i32_u01 = 0;
+                i32_v01 = 0;
+                count_null --;
+            }
+            if(i32_b11 == 0 && i32_g11 == 63 && i32_r11 == 0)
+            {
+                i32_u11 = 0;
+                i32_v11 = 0;
+                count_null --;
+            }
+
+            if(count_null == 0)
+            {
+#endif
+            *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
+            *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
+#if 0 /* Temporary solution to avoid green effects due to transparency -> To be removed */
+            }
+            else
+            {
+                *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) / count_null);
+                *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) / count_null);
+            }
+#endif
+            /* Prepare for next column */
+            pu8_rgbn += (CST_RGB_16_SIZE<<1);
+            /* Update current Y plane line pointer*/
+            pu8_yn += 2;
+            /* Update next Y plane line pointer*/
+            pu8_ys += 2;
+            /* Update U plane line pointer*/
+            pu8_u ++;
+            /* Update V plane line pointer*/
+            pu8_v ++;
+        } /* End of horizontal scanning */
+
+        /* Prepare pointers for the next row */
+        pu8_y_data += u32_stride2_Y;
+        pu8_u_data += u32_stride_U;
+        pu8_v_data += u32_stride_V;
+        pu8_rgbn_data += u32_stride_2rgb;
+
+
+    } /* End of vertical scanning */
+
+    return M4VIFI_OK;
+}
+/* End of file M4VIFI_xVSS_RGB565toYUV420.c */
+
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_AudioMixing.c b/libvideoeditor/vss/src/M4VSS3GPP_AudioMixing.c
new file mode 100755
index 0000000..6f6ba3c
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_AudioMixing.c
@@ -0,0 +1,4210 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_AudioMixing.c
+ * @brief    Video Studio Service 3GPP audio mixing implementation.
+ * @note
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our headers */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+/* Put the definition of silence frames here */
+#define M4VSS3GPP_SILENCE_FRAMES
+#include "M4VSS3GPP_InternalConfig.h"
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /**< OSAL memory management */
+#include "M4OSA_Debug.h"  /**< OSAL debug management */
+
+
+#include "gLVAudioResampler.h"
+/**
+ ******************************************************************************
+ * @brief    Static functions
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intAudioMixingOpen( M4VSS3GPP_InternalAudioMixingContext *pC,
+                             M4VSS3GPP_AudioMixingSettings *pSettings );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepVideo(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioMix(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioReplace(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyOrig(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyAdded(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingConvert(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingDoMixing(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingWriteSilence(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingTransition(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCreateVideoEncoder(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_ERR M4VSS3GPP_intAudioMixingDestroyVideoEncoder(
+    M4VSS3GPP_InternalAudioMixingContext *pC );
+static M4OSA_Bool M4VSS3GPP_isThresholdBreached( M4OSA_Int32 *averageValue,
+                                                M4OSA_Int32 storeCount,
+                                                M4OSA_Int32 thresholdValue );
+/**
+ *    Internal warning */
+#define M4VSS3GPP_WAR_END_OF_ADDED_AUDIO    M4OSA_ERR_CREATE( M4_WAR, M4VSS3GPP, 0x0030)
+
+/* A define used with SSRC 1.04 and above to avoid taking
+blocks smaller than the minimal block size */
+#define M4VSS_SSRC_MINBLOCKSIZE        600
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingInit(M4VSS3GPP_AudioMixingContext* pContext,
+ *                                     M4VSS3GPP_AudioMixingSettings* pSettings)
+ * @brief    Initializes the VSS audio mixing operation (allocates an execution context).
+ * @note
+ * @param    pContext        (OUT) Pointer to the VSS audio mixing context to allocate
+ * @param    pSettings        (IN) Pointer to valid audio mixing settings
+ * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
+ * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return    M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4VSS3GPP_audioMixingInit( M4VSS3GPP_AudioMixingContext *pContext,
+                                    M4VSS3GPP_AudioMixingSettings *pSettings,
+                                    M4OSA_FileReadPointer *pFileReadPtrFct,
+                                    M4OSA_FileWriterPointer *pFileWritePtrFct )
+{
+    M4VSS3GPP_InternalAudioMixingContext *pC;
+    M4OSA_ERR err;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_audioMixingInit called with pContext=0x%x, pSettings=0x%x",
+        pContext, pSettings);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingInit: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingInit: pSettings is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingInit: pFileReadPtrFct is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWritePtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingInit: pFileWritePtrFct is M4OSA_NULL");
+
+    if( pSettings->uiBeginLoop > pSettings->uiEndLoop )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_audioMixingInit: Begin loop time is higher than end loop time!");
+        return M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP;
+    }
+
+    /**
+    * Allocate the VSS audio mixing context and return it to the user */
+    pC = (M4VSS3GPP_InternalAudioMixingContext
+        *)M4OSA_malloc(sizeof(M4VSS3GPP_InternalAudioMixingContext),
+        M4VSS3GPP,(M4OSA_Char *)"M4VSS3GPP_InternalAudioMixingContext");
+    *pContext = pC;
+
+    if( M4OSA_NULL == pC )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_audioMixingInit(): unable to allocate \
+            M4VSS3GPP_InternalAudioMixingContext, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /* Initialization of context Variables */
+    M4OSA_memset((M4OSA_MemAddr8)pC ,
+                 sizeof(M4VSS3GPP_InternalAudioMixingContext),0);
+    /**
+    * Copy this setting in context */
+    pC->iAddCts = pSettings->uiAddCts;
+    pC->bRemoveOriginal = pSettings->bRemoveOriginal;
+    pC->b_DuckingNeedeed = pSettings->b_DuckingNeedeed;
+    pC->InDucking_threshold = pSettings->InDucking_threshold;
+    pC->fBTVolLevel = pSettings->fBTVolLevel;
+    pC->fPTVolLevel = pSettings->fPTVolLevel;
+    pC->InDucking_lowVolume = pSettings->InDucking_lowVolume;
+    pC->bDoDucking = M4OSA_FALSE;
+    pC->bLoop = pSettings->bLoop;
+    pC->bNoLooping = M4OSA_FALSE;
+    pC->bjumpflag = M4OSA_TRUE;
+    /**
+    * Init some context variables */
+
+    pC->pInputClipCtxt = M4OSA_NULL;
+    pC->pAddedClipCtxt = M4OSA_NULL;
+    pC->fOrigFactor = 1.0F;
+    pC->fAddedFactor = 0.0F;
+    pC->bSupportSilence = M4OSA_FALSE;
+    pC->bHasAudio = M4OSA_FALSE;
+    pC->bAudioMixingIsNeeded = M4OSA_FALSE;
+
+    /* Init pC->ewc members */
+    /* Decorrelate the input and output encoding timestamps to handle encoder prefetch */
+    pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
+    pC->ewc.bVideoDataPartitioning = M4OSA_FALSE;
+    pC->ewc.pVideoOutputDsi = M4OSA_NULL;
+    pC->ewc.bActivateEmp = M4OSA_FALSE;
+    pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+    pC->ewc.uiNbChannels = 1;
+    pC->ewc.pAudioOutputDsi = M4OSA_NULL;
+    pC->ewc.pAudioEncCtxt = M4OSA_NULL;
+    pC->ewc.pAudioEncDSI.pInfo = M4OSA_NULL;
+    pC->ewc.pSilenceFrameData = M4OSA_NULL;
+    pC->ewc.pEncContext = M4OSA_NULL;
+    pC->ewc.pDummyAuBuffer = M4OSA_NULL;
+    pC->ewc.p3gpWriterContext = M4OSA_NULL;
+    /**
+    * Set the OSAL filesystem function set */
+    pC->pOsaFileReadPtr = pFileReadPtrFct;
+    pC->pOsaFileWritPtr = pFileWritePtrFct;
+
+    /**
+    * Ssrc stuff */
+    pC->b_SSRCneeded = M4OSA_FALSE;
+    pC->pSsrcBufferIn = M4OSA_NULL;
+    pC->pSsrcBufferOut = M4OSA_NULL;
+    pC->pTempBuffer = M4OSA_NULL;
+    pC->pPosInTempBuffer = M4OSA_NULL;
+    pC->pPosInSsrcBufferIn = M4OSA_NULL;
+    pC->pPosInSsrcBufferOut = M4OSA_NULL;
+    pC->SsrcScratch = M4OSA_NULL;
+    pC->uiBeginLoop = pSettings->uiBeginLoop;
+    pC->uiEndLoop = pSettings->uiEndLoop;
+
+    /*
+    * Reset pointers for media and codecs interfaces */
+    err = M4VSS3GPP_clearInterfaceTables(&pC->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /*  Call the media and codecs subscription module */
+    err = M4VSS3GPP_subscribeMediaAndCodec(&pC->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Open input clip, added clip and output clip and proceed with the settings */
+    err = M4VSS3GPP_intAudioMixingOpen(pC, pSettings);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Update main state automaton */
+    if( M4OSA_NULL != pC->pInputClipCtxt->pVideoStream )
+        pC->State = M4VSS3GPP_kAudioMixingState_VIDEO;
+    else
+        pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT;
+
+    pC->ewc.iOutputDuration = (M4OSA_Int32)pC->pInputClipCtxt->pSettings->
+        ClipProperties.uiClipDuration;
+    /*gInputParams.lvBTChannelCount*/
+    pC->pLVAudioResampler = (M4OSA_Int32)LVAudioResamplerCreate(16,
+        pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels,
+        /* gInputParams.lvOutSampleRate*/pSettings->outputASF, 1);
+
+    LVAudiosetSampleRate(pC->pLVAudioResampler,
+        /*gInputParams.lvInSampleRate*/
+        pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency);
+
+    LVAudiosetVolume(pC->pLVAudioResampler,
+                    (M4OSA_Int16)(0x1000 ),
+                    (M4OSA_Int16)(0x1000 ));
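+    /* Note: 0x1000 for both channels is presumably unity gain in the resampler's
+       fixed-point (4.12) volume format, i.e. no attenuation is applied at this stage. */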
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_audioMixingInit(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingStep(M4VSS3GPP_AudioMixingContext pContext,
+ *                                     M4OSA_UInt8 *pProgress)
+ * @brief    Perform one step of audio mixing.
+ * @note
+ * @param     pContext          (IN) VSS audio mixing context
+ * @param     pProgress         (OUT) Progress percentage (0 to 100) of the audio mixing operation
+ * @return    M4NO_ERROR:       No error
+ * @return    M4ERR_PARAMETER:  pContext is M4OSA_NULL (debug only)
+ * @return    M4ERR_STATE:      VSS is not in an appropriate state for this function to be called
+ * @return    M4VSS3GPP_WAR_END_OF_AUDIO_MIXING: Audio mixing is over, user should now call
+ *                                               M4VSS3GPP_audioMixingCleanUp()
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_audioMixingStep( M4VSS3GPP_AudioMixingContext pContext,
+                                    M4OSA_UInt8 *pProgress )
+{
+    M4OSA_ERR err;
+    M4VSS3GPP_InternalAudioMixingContext *pC =
+        (M4VSS3GPP_InternalAudioMixingContext *)pContext;
+
+    M4OSA_TRACE3_1("M4VSS3GPP_audioMixingStep called with pContext=0x%x",
+        pContext);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingStep: pContext is M4OSA_NULL");
+
+    /**
+    * State automaton */
+    switch( pC->State )
+    {
+        case M4VSS3GPP_kAudioMixingState_VIDEO:
+            err = M4VSS3GPP_intAudioMixingStepVideo(pC);
+
+            /**
+            * Compute the progress percentage
+            * Note: audio and video CTS are not initialized before
+            * the call of M4VSS3GPP_intAudioMixingStepVideo */
+
+            /* P4ME00003276: First 0-50% segment is dedicated to state :
+               M4VSS3GPP_kAudioMixingState_VIDEO */
+            *pProgress = (M4OSA_UInt8)(50 * (pC->ewc.WriterVideoAU.CTS)
+                / pC->pInputClipCtxt->pVideoStream->
+                m_basicProperties.m_duration);
+
+            /**
+            * There may be no audio track (Remove audio track feature).
+            * In that case we double the current percentage */
+            if( M4SYS_kAudioUnknown == pC->ewc.WriterAudioStream.streamType )
+            {
+                ( *pProgress) <<= 1; /**< x2 */
+            }
+            else if( *pProgress >= 50 )
+            {
+                *pProgress =
+                    49; /**< Video processing is not greater than 50% */
+            }
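+            /* Example: with an audio track present and the writer video CTS at half of the
+               input clip duration, *pProgress evaluates to 25; with no audio track the
+               shift above doubles it to 50. */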
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                if( pC->bHasAudio )
+                {
+                    /**
+                    * Video is over, state transition to audio and return OK */
+                    if( pC->iAddCts > 0 )
+                        pC->State =
+                        M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT;
+                    else
+                        pC->State =
+                        M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT;
+                }
+                else
+                {
+                    /**
+                    * No audio, state transition to FINISHED */
+                    pC->State = M4VSS3GPP_kAudioMixingState_FINISHED;
+                }
+
+                return M4NO_ERROR;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_audioMixingStep: M4VSS3GPP_intAudioMixingStepVideo returns 0x%x!",
+                    err);
+                return err;
+            }
+            else
+            {
+                return M4NO_ERROR;
+            }
+            break;
+
+        case M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT:
+        case M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT:
+        case M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT:
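+            /* The test below detects that the added track has been read past uiEndLoop
+               (its CTS converted to milliseconds via scale_audio). If looping is disabled
+               the step only flags bNoLooping; otherwise it jumps back to uiBeginLoop and
+               sets iAoffset, presumably so that the written CTS continues from the current
+               output time. */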
+            if( pC->pAddedClipCtxt->iAudioFrameCts
+                != -pC->pAddedClipCtxt->iSilenceFrameDuration
+                && (pC->pAddedClipCtxt->iAudioFrameCts - 0.5)
+                / pC->pAddedClipCtxt->scale_audio > pC->uiEndLoop
+                && pC->uiEndLoop > 0 )
+            {
+                if( pC->bLoop == M4OSA_FALSE )
+                {
+                    pC->bNoLooping = M4OSA_TRUE;
+                }
+                else
+                {
+                    M4OSA_Int32 jumpCTS = (M4OSA_Int32)(pC->uiBeginLoop);
+
+                    err = pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+                        pC->pAddedClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pC->pAddedClipCtxt->
+                        pAudioStream, &jumpCTS);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_audioMixingStep: error when jumping in added audio clip: 0x%x",
+                            err);
+                        return err;
+                    }
+                    /**
+                    * Use offset to give a correct CTS ... */
+                    pC->pAddedClipCtxt->iAoffset =
+                        (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+                }
+            }
+
+            if( M4OSA_FALSE == pC->bRemoveOriginal )
+            {
+                err = M4VSS3GPP_intAudioMixingStepAudioMix(pC);
+            }
+            else
+            {
+                err = M4VSS3GPP_intAudioMixingStepAudioReplace(pC);
+            }
+
+            /**
+            * Compute the progress percentage
+            * Note: audio and video CTS are not initialized before
+            * the call of M4VSS3GPP_intAudioMixingStepAudio */
+            if( 0 != pC->ewc.iOutputDuration )
+            {
+                /* P4ME00003276: Second 50-100% segment is dedicated to states :
+                M4VSS3GPP_kAudioMixingState_AUDIO... */
+                /* For Audio the progress computation is based on dAto and offset,
+                   it is more accurate */
+                *pProgress = (M4OSA_UInt8)(50
+                    + (50 * pC->ewc.dATo - pC->pInputClipCtxt->iVoffset)
+                    / (pC->ewc.iOutputDuration)); /**< 50 for 100/2 **/
+
+                if( *pProgress >= 100 )
+                {
+                    *pProgress =
+                        99; /**< It's not really finished, I prefer to return less than 100% */
+                }
+            }
+            else
+            {
+                *pProgress = 99;
+            }
+
+            if( M4WAR_NO_MORE_AU == err )
+            {
+                /**
+                * Audio is over, state transition to FINISHED */
+                pC->State = M4VSS3GPP_kAudioMixingState_FINISHED;
+                return M4NO_ERROR;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_audioMixingStep: M4VSS3GPP_intAudioMixingStepAudio returns 0x%x!",
+                    err);
+                return err;
+            }
+            else
+            {
+                return M4NO_ERROR;
+            }
+            break;
+
+        case M4VSS3GPP_kAudioMixingState_FINISHED:
+
+            /**
+            * Progress percentage: finalize finished -> 100% */
+            *pProgress = 100;
+
+            /**
+            * Audio mixing is finished, return correct warning */
+            return M4VSS3GPP_WAR_END_OF_AUDIO_MIXING;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingStep: State error (0x%x)! Returning M4ERR_STATE",
+                pC->State);
+            return M4ERR_STATE;
+    }
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_audioMixingCleanUp(M4VSS3GPP_AudioMixingContext pContext)
+ * @brief    Free all resources used by the VSS audio mixing operation.
+ * @note    The context is no more valid after this call
+ * @param    pContext            (IN) VSS audio mixing context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_audioMixingCleanUp( M4VSS3GPP_AudioMixingContext pContext )
+{
+    M4VSS3GPP_InternalAudioMixingContext *pC =
+        (M4VSS3GPP_InternalAudioMixingContext *)pContext;
+    M4OSA_ERR err;
+    M4OSA_UInt32 lastCTS;
+
+    M4OSA_TRACE3_1("M4VSS3GPP_audioMixingCleanUp called with pContext=0x%x",
+        pContext);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_audioMixingCleanUp: pContext is M4OSA_NULL");
+
+    /**
+    * Run-time check, kept in addition to the debug-only macro above */
+    if( M4OSA_NULL == pContext )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_audioMixingCleanUp(): pContext is\
+             M4OSA_NULL, returning M4ERR_PARAMETER");
+        return M4ERR_PARAMETER;
+    }
+
+    /**
+    * Close Input 3GPP file */
+    if( M4OSA_NULL != pC->pInputClipCtxt )
+    {
+        M4VSS3GPP_intClipCleanUp(pC->pInputClipCtxt);
+        pC->pInputClipCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Close Added 3GPP file */
+    if( M4OSA_NULL != pC->pAddedClipCtxt )
+    {
+        M4VSS3GPP_intClipCleanUp(pC->pAddedClipCtxt);
+        pC->pAddedClipCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Close the 3GP writer. In normal use case it has already been closed,
+      but not in abort use case */
+    if( M4OSA_NULL != pC->ewc.p3gpWriterContext )
+    {
+        /* Update last Video CTS */
+        lastCTS = pC->ewc.iOutputDuration;
+
+        err = pC->ShellAPI.pWriterGlobalFcts->pFctSetOption(
+            pC->ewc.p3gpWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxFileDuration, &lastCTS);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingCleanUp: SetOption(M4WRITER_kMaxFileDuration) returns 0x%x",
+                err);
+        }
+
+        err = pC->ShellAPI.pWriterGlobalFcts->pFctCloseWrite(
+            pC->ewc.p3gpWriterContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingCleanUp: pWriterGlobalFcts->pFctCloseWrite returns 0x%x!",
+                err);
+            /**< don't return the error because we have other things to free! */
+        }
+        pC->ewc.p3gpWriterContext = M4OSA_NULL;
+    }
+
+    /**
+    * Free the Audio encoder context */
+    if( M4OSA_NULL != pC->ewc.pAudioEncCtxt )
+    {
+        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctClose(
+            pC->ewc.pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingCleanUp: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctCleanUp(
+            pC->ewc.pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_audioMixingCleanUp: pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pC->ewc.pAudioEncCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the ssrc stuff */
+
+    if( M4OSA_NULL != pC->SsrcScratch )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->SsrcScratch);
+        pC->SsrcScratch = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pSsrcBufferIn )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pSsrcBufferIn);
+        pC->pSsrcBufferIn = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pSsrcBufferOut
+        && (M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0) )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pSsrcBufferOut);
+        pC->pSsrcBufferOut = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->pTempBuffer )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pTempBuffer);
+        pC->pTempBuffer = M4OSA_NULL;
+    }
+
+    /**
+    * Free the shells interfaces */
+    M4VSS3GPP_unRegisterAllWriters(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllEncoders(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllReaders(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllDecoders(&pC->ShellAPI);
+
+    /**
+    * Free the context */
+    M4OSA_free((M4OSA_MemAddr32)pContext);
+    pContext = M4OSA_NULL;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_audioMixingCleanUp(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
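+
+/**
+ * Typical usage of the three public functions above (illustrative sketch only;
+ * 'settings', 'readPtr' and 'writePtr' are placeholder names for caller-provided data):
+ *
+ *     M4VSS3GPP_AudioMixingContext ctxt = M4OSA_NULL;
+ *     M4OSA_UInt8 progress = 0;
+ *     M4OSA_ERR err = M4VSS3GPP_audioMixingInit(&ctxt, &settings, &readPtr, &writePtr);
+ *     while( M4NO_ERROR == err )
+ *     {
+ *         err = M4VSS3GPP_audioMixingStep(ctxt, &progress);
+ *     }
+ *     M4VSS3GPP_audioMixingCleanUp(ctxt);
+ *
+ * The loop ends with M4VSS3GPP_WAR_END_OF_AUDIO_MIXING on success, or with an error code
+ * that should be checked before trusting the output file; once the context has been
+ * created, CleanUp is expected in both cases (it also handles the abort use case).
+ */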
+
+/******************************************************************************/
+/******************************************************************************/
+/*********                  STATIC FUNCTIONS                         **********/
+/******************************************************************************/
+/******************************************************************************/
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingOpen()
+ * @brief    Opens the input clip and the added clip and configures the mixing according
+ *           to the settings.
+ * @note
+ * @param    pC              (IN/OUT) Internal audio mixing context
+ * @param    pSettings       (IN) Pointer to valid audio mixing settings
+ * @return   M4NO_ERROR:     No error
+ * @return   M4ERR_PARAMETER: At least one parameter is M4OSA_NULL (debug only)
+ * @return   M4ERR_ALLOC:    There is no more available memory
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intAudioMixingOpen( M4VSS3GPP_InternalAudioMixingContext *pC,
+                             M4VSS3GPP_AudioMixingSettings *pSettings )
+{
+    M4OSA_ERR err;
+    M4OSA_UInt32 outputASF = 0;
+    M4ENCODER_Header *encHeader;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_intAudioMixingOpen called with pContext=0x%x, pSettings=0x%x",
+        pC, pSettings);
+
+    /**
+    * The Add Volume must be strictly greater than zero */
+    if( pSettings->uiAddVolume == 0 )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAudioMixingOpen(): AddVolume is zero,\
+            returning M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO");
+        return M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO;
+    }
+    /*
+    else if(pSettings->uiAddVolume >= 100) // If volume is set to 100, no more original audio ...
+    {
+    pC->bRemoveOriginal = M4OSA_TRUE;
+    }
+    */
+    /**
+    * Build the input clip settings */
+    pC->InputClipSettings.pFile =
+        pSettings->pOriginalClipFile; /**< Input 3GPP file descriptor */
+    pC->InputClipSettings.FileType = M4VIDEOEDITING_kFileType_3GPP;
+    pC->InputClipSettings.uiBeginCutTime =
+        0; /**< No notion of cut for the audio mixing feature */
+    pC->InputClipSettings.uiEndCutTime =
+        0; /**< No notion of cut for the audio mixing feature */
+
+    /**
+    * Open the original Audio/Video 3GPP clip */
+    err = M4VSS3GPP_intClipInit(&pC->pInputClipCtxt, pC->pOsaFileReadPtr);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipInit(orig) returns 0x%x",
+            err);
+        return err;
+    }
+
+    err = M4VSS3GPP_intClipOpen(pC->pInputClipCtxt, &pC->InputClipSettings,
+        M4OSA_FALSE, M4OSA_FALSE, M4OSA_TRUE);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipOpen(orig) returns 0x%x",
+            err);
+        return err;
+    }
+
+    if( M4OSA_NULL == pC->pInputClipCtxt->pAudioStream )
+    {
+        pC->bRemoveOriginal = M4OSA_TRUE;
+    }
+    /**
+    * If there is no video, it's an error */
+    if( M4OSA_NULL == pC->pInputClipCtxt->pVideoStream )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAudioMixingOpen(): no video stream in clip,\
+            returning M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE");
+        return M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE;
+    }
+
+    /**
+    * Compute clip properties */
+    err = M4VSS3GPP_intBuildAnalysis(pC->pInputClipCtxt,
+        &pC->pInputClipCtxt->pSettings->ClipProperties);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intBuildAnalysis(orig) returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Build the added clip settings */
+    pC->AddedClipSettings.pFile =
+        pSettings->pAddedAudioTrackFile; /**< Added file descriptor */
+    pC->AddedClipSettings.FileType = pSettings->AddedAudioFileType;
+    pC->AddedClipSettings.uiBeginCutTime =
+        0; /**< No notion of cut for the audio mixing feature */
+    pC->AddedClipSettings.uiEndCutTime =
+        0; /**< No notion of cut for the audio mixing feature */
+    pC->AddedClipSettings.ClipProperties.uiNbChannels =
+        pSettings->uiNumChannels;
+    pC->AddedClipSettings.ClipProperties.uiSamplingFrequency =
+        pSettings->uiSamplingFrequency;
+
+    if( M4OSA_NULL != pC->AddedClipSettings.pFile )
+    {
+        /**
+        * Open the added Audio clip */
+        err = M4VSS3GPP_intClipInit(&pC->pAddedClipCtxt, pC->pOsaFileReadPtr);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipInit(added) returns 0x%x",
+                err);
+            return err;
+        }
+
+        err = M4VSS3GPP_intClipOpen(pC->pAddedClipCtxt, &pC->AddedClipSettings,
+            M4OSA_FALSE, M4OSA_FALSE, M4OSA_TRUE);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intClipOpen(added) returns 0x%x",
+                err);
+            return err;
+        }
+
+        /**
+        * If there is no audio, it's an error */
+        if( M4OSA_NULL == pC->pAddedClipCtxt->pAudioStream )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen(): no audio stream in added clip,\
+                returning M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE");
+            return M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE;
+        }
+
+        /**
+        * Compute clip properties */
+        err = M4VSS3GPP_intBuildAnalysis(pC->pAddedClipCtxt,
+            &pC->pAddedClipCtxt->pSettings->ClipProperties);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intBuildAnalysis(added) returns 0x%x",
+                err);
+            return err;
+        }
+
+        switch( pSettings->outputASF )
+        {
+            case M4VIDEOEDITING_k8000_ASF:
+                outputASF = 8000;
+                break;
+
+            case M4VIDEOEDITING_k16000_ASF:
+                outputASF = 16000;
+                break;
+
+            case M4VIDEOEDITING_k22050_ASF:
+                outputASF = 22050;
+                break;
+
+            case M4VIDEOEDITING_k24000_ASF:
+                outputASF = 24000;
+                break;
+
+            case M4VIDEOEDITING_k32000_ASF:
+                outputASF = 32000;
+                break;
+
+            case M4VIDEOEDITING_k44100_ASF:
+                outputASF = 44100;
+                break;
+
+            case M4VIDEOEDITING_k48000_ASF:
+                outputASF = 48000;
+                break;
+
+            default:
+                M4OSA_TRACE1_0("M4VSS3GPP_intAudioMixingOpen: invalid output ASF parameter");
+                return M4ERR_PARAMETER;
+                break;
+        }
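+        /* outputASF now holds the requested output sampling frequency in Hz; it is compared
+           below against the added clip properties to decide whether resampling (SSRC) and/or
+           re-encoding of the BGM track is required. */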
+
+        if( pC->bRemoveOriginal == M4OSA_TRUE
+            && (pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+            == M4VIDEOEDITING_kMP3 || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.AudioStreamType == M4VIDEOEDITING_kPCM
+            || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.AudioStreamType
+            != pSettings->outputAudioFormat
+            || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiSamplingFrequency != outputASF
+            || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiNbChannels
+            != pSettings->outputNBChannels) )
+        {
+
+            if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAMR_NB )
+            {
+                pSettings->outputASF = M4VIDEOEDITING_k8000_ASF;
+                pSettings->outputNBChannels = 1;
+                pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize = 320;
+            }
+            else if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAAC )
+            {
+                pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize =
+                    2048 * pSettings->outputNBChannels;
+            }
+
+            pC->pInputClipCtxt->pSettings->ClipProperties.uiSamplingFrequency =
+                outputASF;
+
+            if( outputASF != pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiSamplingFrequency )
+            {
+                /* We need to call SSRC in order to align ASF and/or nb of channels */
+                /* Moreover, audio encoder may be needed in case of audio replacing... */
+                pC->b_SSRCneeded = M4OSA_TRUE;
+            }
+
+            if( pSettings->outputNBChannels
+                < pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels )
+            {
+                /* Stereo to Mono */
+                pC->ChannelConversion = 1;
+            }
+            else if( pSettings->outputNBChannels
+                > pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels )
+            {
+                /* Mono to Stereo */
+                pC->ChannelConversion = 2;
+            }
+
+            pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels =
+                pSettings->outputNBChannels;
+        }
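+        /* At this point b_SSRCneeded flags a sampling-rate mismatch and ChannelConversion
+           encodes the required channel change (1 = stereo to mono, 2 = mono to stereo),
+           both relative to the added (BGM) track. */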
+
+        /**
+        * Check compatibility chart */
+        err = M4VSS3GPP_intAudioMixingCompatibility(pC,
+            &pC->pInputClipCtxt->pSettings->ClipProperties,
+            &pC->pAddedClipCtxt->pSettings->ClipProperties);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingOpen():\
+                M4VSS3GPP_intAudioMixingCompatibility returns 0x%x",
+                err);
+            return err;
+        }
+
+        /**
+        * Check loop parameters */
+        if( pC->uiBeginLoop > pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiClipAudioDuration )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen():\
+                begin loop time is higher than added clip audio duration");
+            return M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP;
+        }
+
+        /**
+        * Ok, let's go with this audio track */
+        pC->bHasAudio = M4OSA_TRUE;
+    }
+    else
+    {
+        /* No added file, force remove original */
+        pC->AddedClipSettings.FileType = M4VIDEOEDITING_kFileType_Unsupported;
+        pC->bRemoveOriginal = M4OSA_TRUE;
+        pC->bHasAudio = M4OSA_FALSE;
+    }
+
+    /**
+    * Copy the video properties of the input clip to the output properties */
+    pC->ewc.uiVideoBitrate =
+        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoBitrate;
+    pC->ewc.uiVideoWidth =
+        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoWidth;
+    pC->ewc.uiVideoHeight =
+        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoHeight;
+    pC->ewc.uiVideoTimeScale =
+        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoTimeScale;
+    pC->ewc.bVideoDataPartitioning =
+        pC->pInputClipCtxt->pSettings->ClipProperties.bMPEG4dataPartition;
+
+    switch( pC->pInputClipCtxt->pSettings->ClipProperties.VideoStreamType )
+    {
+        case M4VIDEOEDITING_kH263:
+            pC->ewc.VideoStreamType = M4SYS_kH263;
+            break;
+
+        case M4VIDEOEDITING_kMPEG4_EMP:
+            pC->ewc.bActivateEmp = M4OSA_TRUE; /* no break */
+
+        case M4VIDEOEDITING_kMPEG4:
+            pC->ewc.VideoStreamType = M4SYS_kMPEG_4;
+            break;
+
+        case M4VIDEOEDITING_kH264:
+            pC->ewc.VideoStreamType = M4SYS_kH264;
+            break;
+
+        default:
+            pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
+            break;
+    }
+
+    /* Add a link to video dsi */
+    if( M4SYS_kH264 == pC->ewc.VideoStreamType )
+    {
+
+        /* For H.264 encoder case
+        * Fetch the DSI from the shell video encoder, and feed it to the writer */
+
+        M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingOpen: get DSI for H264 stream");
+
+        if( M4OSA_NULL == pC->ewc.pEncContext )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen: pC->ewc.pEncContext is NULL");
+            err = M4VSS3GPP_intAudioMixingCreateVideoEncoder(pC);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingOpen:\
+                    M4VSS3GPP_intAudioMixingCreateVideoEncoder returned error 0x%x",
+                    err);
+            }
+        }
+
+        if( M4OSA_NULL != pC->ewc.pEncContext )
+        {
+            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctGetOption(
+                pC->ewc.pEncContext, M4ENCODER_kOptionID_EncoderHeader,
+                (M4OSA_DataOption) &encHeader);
+
+            if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingOpen: failed to get the encoder header (err 0x%x)",
+                    err);
+                M4OSA_TRACE1_2(
+                    "M4VSS3GPP_intAudioMixingOpen: encHeader->pBuf=0x%x, size=0x%x",
+                    encHeader->pBuf, encHeader->Size);
+            }
+            else
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intAudioMixingOpen: send DSI for H264 stream to 3GP writer");
+
+                /**
+                * Allocate and copy the new DSI */
+                pC->ewc.pVideoOutputDsi =
+                    (M4OSA_MemAddr8)M4OSA_malloc(encHeader->Size, M4VSS3GPP,
+                    (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (H264)");
+
+                if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_intAudioMixingOpen():\
+                        unable to allocate pVideoOutputDsi (H264), returning M4ERR_ALLOC");
+                    return M4ERR_ALLOC;
+                }
+                pC->ewc.uiVideoOutputDsiSize = (M4OSA_UInt16)encHeader->Size;
+                M4OSA_memcpy(pC->ewc.pVideoOutputDsi, encHeader->pBuf,
+                    encHeader->Size);
+            }
+
+            err = M4VSS3GPP_intAudioMixingDestroyVideoEncoder(pC);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingOpen:\
+                    M4VSS3GPP_intAudioMixingDestroyVideoEncoder returned error 0x%x",
+                    err);
+            }
+        }
+        else
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen: pC->ewc.pEncContext is NULL, cannot get the DSI");
+        }
+    }
+    else
+    {
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intAudioMixingOpen: input clip video stream type = 0x%x",
+            pC->ewc.VideoStreamType);
+        pC->ewc.uiVideoOutputDsiSize =
+            (M4OSA_UInt16)pC->pInputClipCtxt->pVideoStream->
+            m_basicProperties.m_decoderSpecificInfoSize;
+        pC->ewc.pVideoOutputDsi = (M4OSA_MemAddr8)pC->pInputClipCtxt->pVideoStream->
+            m_basicProperties.m_pDecoderSpecificInfo;
+    }
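+    /* For non-H.264 video the decoder specific info read from the input clip is reused as-is
+       for the writer; only the H.264 branch above fetches the DSI from a temporarily created
+       video encoder instance. */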
+
+    /**
+    * Copy the audio properties of the added clip to the output properties */
+    if( pC->bHasAudio )
+    {
+        if( pC->bRemoveOriginal == M4OSA_TRUE )
+        {
+            pC->ewc.uiNbChannels =
+                pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels;
+            pC->ewc.uiAudioBitrate =
+                pC->pAddedClipCtxt->pSettings->ClipProperties.uiAudioBitrate;
+            pC->ewc.uiSamplingFrequency = pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiSamplingFrequency;
+            pC->ewc.uiSilencePcmSize =
+                pC->pAddedClipCtxt->pSettings->ClipProperties.uiDecodedPcmSize;
+            pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+
+            /* If the output settings are different from the added clip settings,
+            we need to re-encode the BGM */
+            if( pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+                != pSettings->outputAudioFormat
+                || pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiSamplingFrequency != outputASF
+                || pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiNbChannels
+                != pSettings->outputNBChannels
+                || pC->pAddedClipCtxt->pSettings->
+                ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3 )
+            {
+                /* Set reader DSI to NULL (unknown), we will use encoder DSI later */
+                if( pC->pAddedClipCtxt->pAudioStream->
+                    m_basicProperties.m_pDecoderSpecificInfo != M4OSA_NULL )
+                {
+
+                    /*
+                     M4OSA_free((M4OSA_MemAddr32)pC->pAddedClipCtxt->pAudioStream->\
+                       m_basicProperties.m_pDecoderSpecificInfo);
+                       */
+                    pC->pAddedClipCtxt->pAudioStream->
+                        m_basicProperties.m_decoderSpecificInfoSize = 0;
+                    pC->pAddedClipCtxt->pAudioStream->
+                        m_basicProperties.m_pDecoderSpecificInfo = M4OSA_NULL;
+                }
+
+                pC->ewc.uiNbChannels =
+                    pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+                pC->ewc.uiSamplingFrequency = pC->pInputClipCtxt->pSettings->
+                    ClipProperties.uiSamplingFrequency;
+                pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+
+                if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAMR_NB )
+                {
+                    pC->ewc.AudioStreamType = M4SYS_kAMR;
+                    pC->ewc.pSilenceFrameData =
+                        (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+                    pC->ewc.uiSilenceFrameSize =
+                        M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+                    pC->ewc.iSilenceFrameDuration =
+                        M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+                    pC->ewc.uiAudioBitrate = 12200;
+                    pC->ewc.uiSamplingFrequency = 8000;
+                    pC->ewc.uiSilencePcmSize = 320;
+                    pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+                }
+                else if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAAC )
+                {
+                    pC->ewc.AudioStreamType = M4SYS_kAAC;
+
+                    if( pSettings->outputAudioBitrate
+                        == M4VIDEOEDITING_kUndefinedBitrate )
+                    {
+                        switch( pC->ewc.uiSamplingFrequency )
+                        {
+                            case 16000:
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k24_KBPS;
+                                break;
+
+                            case 22050:
+                            case 24000:
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k32_KBPS;
+                                break;
+
+                            case 32000:
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k48_KBPS;
+                                break;
+
+                            case 44100:
+                            case 48000:
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k64_KBPS;
+                                break;
+
+                            default:
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k64_KBPS;
+                                break;
+                        }
+
+                        if( pC->ewc.uiNbChannels == 2 )
+                        {
+                            /* Output bitrate has to be doubled */
+                            pC->ewc.uiAudioBitrate += pC->ewc.uiAudioBitrate;
+                        }
+                    }
+                    else
+                    {
+                        pC->ewc.uiAudioBitrate = pSettings->outputAudioBitrate;
+                    }
+
+                    if( pC->ewc.uiNbChannels == 1 )
+                    {
+                        pC->ewc.pSilenceFrameData =
+                            (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+                        pC->ewc.uiSilenceFrameSize =
+                            M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+                    }
+                    else
+                    {
+                        pC->ewc.pSilenceFrameData =
+                            (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+                        pC->ewc.uiSilenceFrameSize =
+                            M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+                    }
+                    pC->ewc.iSilenceFrameDuration =
+                        1024; /* AAC is always 1024/Freq sample duration */
+                }
+            }
+            else
+            {
+                switch( pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.AudioStreamType )
+                {
+                    case M4VIDEOEDITING_kAMR_NB:
+                        pC->ewc.AudioStreamType = M4SYS_kAMR;
+                        pC->ewc.pSilenceFrameData =
+                            (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+                        pC->ewc.uiSilenceFrameSize =
+                            M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+                        pC->ewc.iSilenceFrameDuration =
+                            M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+                        break;
+
+                    case M4VIDEOEDITING_kAAC:
+                    case M4VIDEOEDITING_kAACplus:
+                    case M4VIDEOEDITING_keAACplus:
+                        pC->ewc.AudioStreamType = M4SYS_kAAC;
+
+                        if( pC->ewc.uiNbChannels == 1 )
+                        {
+                            pC->ewc.pSilenceFrameData =
+                                (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+                            pC->ewc.uiSilenceFrameSize =
+                                M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+                        }
+                        else
+                        {
+                            pC->ewc.pSilenceFrameData =
+                                (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+                            pC->ewc.uiSilenceFrameSize =
+                                M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+                        }
+                        pC->ewc.iSilenceFrameDuration =
+                            1024; /* AAC is always 1024/Freq sample duration */
+                        break;
+
+                    case M4VIDEOEDITING_kEVRC:
+                        pC->ewc.AudioStreamType = M4SYS_kEVRC;
+                        pC->ewc.pSilenceFrameData = M4OSA_NULL;
+                        pC->ewc.uiSilenceFrameSize = 0;
+                        pC->ewc.iSilenceFrameDuration = 160; /* EVRC frames are 20 ms at 8000 Hz
+                                            (makes it easier to factorize amr and evrc code) */
+                        break;
+
+                    case M4VIDEOEDITING_kPCM:
+                        /* Set reader DSI to NULL (unknown), we will use encoder DSI later */
+                        pC->pAddedClipCtxt->pAudioStream->
+                            m_basicProperties.m_decoderSpecificInfoSize = 0;
+                        pC->pAddedClipCtxt->pAudioStream->
+                            m_basicProperties.m_pDecoderSpecificInfo =
+                            M4OSA_NULL;
+
+                        if( pC->pAddedClipCtxt->pSettings->
+                            ClipProperties.uiSamplingFrequency == 8000 )
+                        {
+                            pC->ewc.AudioStreamType = M4SYS_kAMR;
+                            pC->ewc.pSilenceFrameData = (M4OSA_UInt8
+                                *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+                            pC->ewc.uiSilenceFrameSize =
+                                M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+                            pC->ewc.iSilenceFrameDuration =
+                                M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+                            pC->ewc.uiAudioBitrate = M4VIDEOEDITING_k12_2_KBPS;
+                        }
+                        else if( pC->pAddedClipCtxt->pSettings->
+                            ClipProperties.uiSamplingFrequency == 16000 )
+                        {
+                            if( pC->ewc.uiNbChannels == 1 )
+                            {
+                                pC->ewc.AudioStreamType = M4SYS_kAAC;
+                                pC->ewc.pSilenceFrameData = (M4OSA_UInt8
+                                    *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+                                pC->ewc.uiSilenceFrameSize =
+                                    M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+                                pC->ewc.iSilenceFrameDuration =
+                                    1024; /* AAC is always 1024/Freq sample duration */
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k32_KBPS;
+                            }
+                            else
+                            {
+                                pC->ewc.AudioStreamType = M4SYS_kAAC;
+                                pC->ewc.pSilenceFrameData = (M4OSA_UInt8
+                                    *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+                                pC->ewc.uiSilenceFrameSize =
+                                    M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+                                pC->ewc.iSilenceFrameDuration =
+                                    1024; /* AAC is always 1024/Freq sample duration */
+                                pC->ewc.uiAudioBitrate =
+                                    M4VIDEOEDITING_k64_KBPS;
+                            }
+                        }
+                        else
+                        {
+                            pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+                        }
+                        break;
+
+                    default:
+                        pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+                        break;
+                }
+            }
+
+            /* Add a link to audio dsi */
+            pC->ewc.uiAudioOutputDsiSize =
+                (M4OSA_UInt16)pC->pAddedClipCtxt->pAudioStream->
+                m_basicProperties.m_decoderSpecificInfoSize;
+            pC->ewc.pAudioOutputDsi = (M4OSA_MemAddr8)pC->pAddedClipCtxt->pAudioStream->
+                m_basicProperties.m_pDecoderSpecificInfo;
+        }
+        else
+        {
+            pC->ewc.uiNbChannels =
+                pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+            pC->ewc.uiAudioBitrate =
+                pC->pInputClipCtxt->pSettings->ClipProperties.uiAudioBitrate;
+            pC->ewc.uiSamplingFrequency = pC->pInputClipCtxt->pSettings->
+                ClipProperties.uiSamplingFrequency;
+            pC->ewc.uiSilencePcmSize =
+                pC->pInputClipCtxt->pSettings->ClipProperties.uiDecodedPcmSize;
+            pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
+
+            switch( pC->pInputClipCtxt->pSettings->
+                ClipProperties.AudioStreamType )
+            {
+                case M4VIDEOEDITING_kAMR_NB:
+                    pC->ewc.AudioStreamType = M4SYS_kAMR;
+                    pC->ewc.pSilenceFrameData =
+                        (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+                    pC->ewc.uiSilenceFrameSize =
+                        M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+                    pC->ewc.iSilenceFrameDuration =
+                        M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+                    break;
+
+                case M4VIDEOEDITING_kAAC:
+                case M4VIDEOEDITING_kAACplus:
+                case M4VIDEOEDITING_keAACplus:
+                    pC->ewc.AudioStreamType = M4SYS_kAAC;
+
+                    if( pC->ewc.uiNbChannels == 1 )
+                    {
+                        pC->ewc.pSilenceFrameData =
+                            (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+                        pC->ewc.uiSilenceFrameSize =
+                            M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+                    }
+                    else
+                    {
+                        pC->ewc.pSilenceFrameData =
+                            (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+                        pC->ewc.uiSilenceFrameSize =
+                            M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+                    }
+                    pC->ewc.iSilenceFrameDuration =
+                        1024; /* AAC is always 1024/Freq sample duration */
+                    break;
+
+                default:
+                    pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_intAudioMixingOpen: No audio track in input file.");
+                    return M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED;
+                    break;
+            }
+
+            /* Add a link to audio dsi */
+            pC->ewc.uiAudioOutputDsiSize =
+                (M4OSA_UInt16)pC->pInputClipCtxt->pAudioStream->
+                m_basicProperties.m_decoderSpecificInfoSize;
+            pC->ewc.pAudioOutputDsi = (M4OSA_MemAddr8)pC->pInputClipCtxt->pAudioStream->
+                m_basicProperties.m_pDecoderSpecificInfo;
+        }
+    }
+
+    /**
+    * Copy common 'silence frame stuff' to ClipContext */
+    pC->pInputClipCtxt->uiSilencePcmSize = pC->ewc.uiSilencePcmSize;
+    pC->pInputClipCtxt->pSilenceFrameData = pC->ewc.pSilenceFrameData;
+    pC->pInputClipCtxt->uiSilenceFrameSize = pC->ewc.uiSilenceFrameSize;
+    pC->pInputClipCtxt->iSilenceFrameDuration = pC->ewc.iSilenceFrameDuration;
+    pC->pInputClipCtxt->scale_audio = pC->ewc.scale_audio;
+
+    pC->pInputClipCtxt->iAudioFrameCts =
+        -pC->pInputClipCtxt->iSilenceFrameDuration; /* Reset time */
+
+    /**
+    * Copy common 'silence frame stuff' to ClipContext */
+    if( pC->bHasAudio )
+    {
+        pC->pAddedClipCtxt->uiSilencePcmSize = pC->ewc.uiSilencePcmSize;
+        pC->pAddedClipCtxt->pSilenceFrameData = pC->ewc.pSilenceFrameData;
+        pC->pAddedClipCtxt->uiSilenceFrameSize = pC->ewc.uiSilenceFrameSize;
+        pC->pAddedClipCtxt->iSilenceFrameDuration =
+            pC->ewc.iSilenceFrameDuration;
+        pC->pAddedClipCtxt->scale_audio = pC->ewc.scale_audio;
+
+        pC->pAddedClipCtxt->iAudioFrameCts =
+            -pC->pAddedClipCtxt->iSilenceFrameDuration; /* Reset time */
+    }
+
+    /**
+    * Check AddCts is lower than original clip duration */
+    if( ( M4OSA_NULL != pC->pInputClipCtxt->pVideoStream)
+        && (pC->iAddCts > (M4OSA_Int32)pC->pInputClipCtxt->pVideoStream->
+        m_basicProperties.m_duration) )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAudioMixingOpen(): uiAddCts is larger than video duration,\
+            returning M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION");
+        return M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION;
+    }
+
+    /**
+    * If the audio tracks are not compatible, replace the input track with silence */
+    if( M4OSA_FALSE == pC->pInputClipCtxt->pSettings->
+        ClipProperties.bAudioIsCompatibleWithMasterClip )
+    {
+        M4VSS3GPP_intClipDeleteAudioTrack(pC->pInputClipCtxt);
+    }
+
+    /**
+    * Check if audio mixing is required */
+    if( ( ( pC->bHasAudio) && (M4OSA_FALSE
+        == pC->pAddedClipCtxt->pSettings->ClipProperties.bAudioIsEditable))
+        || (M4OSA_TRUE == pC->bRemoveOriginal) ) /*||
+                                                 (pSettings->uiAddVolume >= 100)) */
+    {
+        pC->bAudioMixingIsNeeded = M4OSA_FALSE;
+    }
+    else
+    {
+        pC->bAudioMixingIsNeeded = M4OSA_TRUE;
+    }
+
+    /**
+    * Check if the output audio can support silence frames.
+    * Trick: the bAudioIsCompatibleWithMasterClip field is reused to store that information */
+    if( pC->bHasAudio )
+    {
+        pC->bSupportSilence = pC->pAddedClipCtxt->pSettings->
+            ClipProperties.bAudioIsCompatibleWithMasterClip;
+
+        if( M4OSA_FALSE == pC->bSupportSilence )
+        {
+            if( pC->iAddCts > 0 )
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intAudioMixingOpen():\
+                    iAddCts should be set to 0 with this audio track !");
+                return M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK;
+            }
+
+            if( 0 < pC->uiEndLoop )
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intAudioMixingOpen():\
+                    uiEndLoop should be set to 0 with this audio track !");
+                return M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK;
+            }
+        }
+    }
+#if 0
+    /**
+    * Compute the volume factors */
+    if( (M4OSA_TRUE
+        == pC->bRemoveOriginal) )
+    {
+        /**
+        * In the remove original case, we keep only the added audio */
+        pC->fAddedFactor = 1.0F;
+        pC->fOrigFactor = 0.0F;
+    }
+    else
+    {
+        /**
+        * Compute the factor to apply to sample to do the mixing */
+        pC->fAddedFactor = pSettings->uiAddVolume / 100.0F;
+        pC->fOrigFactor = 1.0F - pC->fAddedFactor;
+    }
+#endif
+    if( pC->b_DuckingNeedeed == M4OSA_FALSE)
+    {
+        /**
+        * Compute the factor to apply to sample to do the mixing */
+        pC->fAddedFactor = 0.50F;
+        pC->fOrigFactor = 0.50F;
+    }
+
+
+    /**
+    * Check if SSRC is needed */
+    if( M4OSA_TRUE == pC->b_SSRCneeded )
+    {
+        M4OSA_UInt32 numerator, denominator, ratio, ratioBuffer;
+
+        /**
+        * Init the SSRC module */
+        SSRC_ReturnStatus_en ReturnStatus;  /* Function return status */
+        LVM_INT16 NrSamplesMin = 0;   /* Minimal number of samples on the input or on the output */
+        LVM_INT32 ScratchSize;        /* The size of the scratch memory */
+        LVM_INT16 *pInputInScratch;   /* Pointer to input in the scratch buffer */
+        LVM_INT16 *pOutputInScratch;  /* Pointer to the output in the scratch buffer */
+        SSRC_Params_t ssrcParams;     /* Memory for init parameters */
+
+        switch( pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiSamplingFrequency )
+        {
+            case 8000:
+                ssrcParams.SSRC_Fs_In = LVM_FS_8000;
+                break;
+
+            case 11025:
+                ssrcParams.SSRC_Fs_In = LVM_FS_11025;
+                break;
+
+            case 12000:
+                ssrcParams.SSRC_Fs_In = LVM_FS_12000;
+                break;
+
+            case 16000:
+                ssrcParams.SSRC_Fs_In = LVM_FS_16000;
+                break;
+
+            case 22050:
+                ssrcParams.SSRC_Fs_In = LVM_FS_22050;
+                break;
+
+            case 24000:
+                ssrcParams.SSRC_Fs_In = LVM_FS_24000;
+                break;
+
+            case 32000:
+                ssrcParams.SSRC_Fs_In = LVM_FS_32000;
+                break;
+
+            case 44100:
+                ssrcParams.SSRC_Fs_In = LVM_FS_44100;
+                break;
+
+            case 48000:
+                ssrcParams.SSRC_Fs_In = LVM_FS_48000;
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingOpen: invalid added clip sampling frequency (%d Hz),\
+                    returning M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM",
+                    pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.uiSamplingFrequency);
+                return M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM;
+        }
+
+        if( 1 == pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels )
+        {
+            ssrcParams.SSRC_NrOfChannels = LVM_MONO;
+        }
+        else
+        {
+            ssrcParams.SSRC_NrOfChannels = LVM_STEREO;
+        }
+
+        switch( pC->ewc.uiSamplingFrequency )
+        {
+            case 8000:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_8000;
+                break;
+
+            case 16000:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_16000;
+                break;
+
+            case 22050:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_22050;
+                break;
+
+            case 24000:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_24000;
+                break;
+
+            case 32000:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_32000;
+                break;
+
+            case 44100:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_44100;
+                break;
+
+            case 48000:
+                ssrcParams.SSRC_Fs_Out = LVM_FS_48000;
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingOpen: invalid output sampling frequency (%d Hz),\
+                    returning M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED",
+                    pC->ewc.uiSamplingFrequency);
+                return M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED;
+                break;
+        }
+        ReturnStatus = 0;
+
+        switch( ssrcParams.SSRC_Fs_In )
+        {
+            case LVM_FS_8000:
+                ssrcParams.NrSamplesIn = 320;
+                break;
+
+            case LVM_FS_11025:
+                ssrcParams.NrSamplesIn = 441;
+                break;
+
+            case LVM_FS_12000:
+                ssrcParams.NrSamplesIn = 480;
+                break;
+
+            case LVM_FS_16000:
+                ssrcParams.NrSamplesIn = 640;
+                break;
+
+            case LVM_FS_22050:
+                ssrcParams.NrSamplesIn = 882;
+                break;
+
+            case LVM_FS_24000:
+                ssrcParams.NrSamplesIn = 960;
+                break;
+
+            case LVM_FS_32000:
+                ssrcParams.NrSamplesIn = 1280;
+                break;
+
+            case LVM_FS_44100:
+                ssrcParams.NrSamplesIn = 1764;
+                break;
+
+            case LVM_FS_48000:
+                ssrcParams.NrSamplesIn = 1920;
+                break;
+
+            default:
+                ReturnStatus = -1;
+                break;
+        }
+
+        switch( ssrcParams.SSRC_Fs_Out )
+        {
+            case LVM_FS_8000:
+                ssrcParams.NrSamplesOut = 320;
+                break;
+
+            case LVM_FS_11025:
+                ssrcParams.NrSamplesOut = 441;
+                break;
+
+            case LVM_FS_12000:
+                ssrcParams.NrSamplesOut = 480;
+                break;
+
+            case LVM_FS_16000:
+                ssrcParams.NrSamplesOut = 640;
+                break;
+
+            case LVM_FS_22050:
+                ssrcParams.NrSamplesOut = 882;
+                break;
+
+            case LVM_FS_24000:
+                ssrcParams.NrSamplesOut = 960;
+                break;
+
+            case LVM_FS_32000:
+                ssrcParams.NrSamplesOut = 1280;
+                break;
+
+            case LVM_FS_44100:
+                ssrcParams.NrSamplesOut = 1764;
+                break;
+
+            case LVM_FS_48000:
+                ssrcParams.NrSamplesOut = 1920;
+                break;
+
+            default:
+                ReturnStatus = -1;
+                break;
+        }
+        if( ReturnStatus != SSRC_OK )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingOpen:\
+                unsupported sampling frequency for the SSRC (error code %d)",
+                ReturnStatus);
+            return M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED;
+        }
+
+        NrSamplesMin =
+            (LVM_INT16)((ssrcParams.NrSamplesIn > ssrcParams.NrSamplesOut)
+            ? ssrcParams.NrSamplesOut : ssrcParams.NrSamplesIn);
+
+        while( NrSamplesMin < M4VSS_SSRC_MINBLOCKSIZE )
+        { /* Don't take blocks smaller than the minimal block size */
+            ssrcParams.NrSamplesIn = (LVM_INT16)(ssrcParams.NrSamplesIn << 1);
+            ssrcParams.NrSamplesOut = (LVM_INT16)(ssrcParams.NrSamplesOut << 1);
+            NrSamplesMin = (LVM_INT16)(NrSamplesMin << 1);
+        }
+        pC->iSsrcNbSamplIn =
+            (LVM_INT16)(ssrcParams.NrSamplesIn); /* multiplication by NrOfChannels is done below */
+        pC->iSsrcNbSamplOut = (LVM_INT16)(ssrcParams.NrSamplesOut);
+
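+        /* The added clip is resampled towards the original clip's format: the
+           ratio below compares their PCM throughput (sampling frequency x number
+           of channels) and is used to size the SSRC input buffer so that one
+           pass can cover a full decoded buffer of the original clip. */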
+        numerator =
+            pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency
+            * pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels;
+        denominator =
+            pC->pInputClipCtxt->pSettings->ClipProperties.uiSamplingFrequency
+            * pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+
+        if( numerator % denominator == 0 )
+        {
+            ratioBuffer = (M4OSA_UInt32)(numerator / denominator);
+        }
+        else
+        {
+            ratioBuffer = (M4OSA_UInt32)(numerator / denominator) + 1;
+        }
+
+        ratio =
+            (M4OSA_UInt32)(( pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+            * ratioBuffer) / (pC->iSsrcNbSamplIn * sizeof(short)
+            * pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiNbChannels));
+
+        if( ratio == 0 )
+        {
+            /* One SSRC input block is already larger than the requested amount of data */
+            pC->minimumBufferIn = pC->iSsrcNbSamplIn * sizeof(short)
+                * pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiNbChannels;
+        }
+        else
+        {
+            ratio++; /* We use the immediate superior integer */
+            pC->minimumBufferIn = ratio * (pC->iSsrcNbSamplIn * sizeof(short)
+                * pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiNbChannels);
+        }
+
+        /**
+        * Allocate buffer for the input of the SSRC */
+        pC->pSsrcBufferIn =
+            (M4OSA_MemAddr8)M4OSA_malloc(pC->minimumBufferIn
+            + pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize,
+            M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferIn");
+
+        if( M4OSA_NULL == pC->pSsrcBufferIn )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen():\
+                unable to allocate pSsrcBufferIn, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
+
+        /**
+        * Allocate buffer for the output of the SSRC */
+        /* The "3" value below should be optimized ... one day ... */
+        pC->pSsrcBufferOut =
+            (M4OSA_MemAddr8)M4OSA_malloc(3 * pC->iSsrcNbSamplOut * sizeof(short)
+            * pC->ewc.uiNbChannels, M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferOut");
+
+        if( M4OSA_NULL == pC->pSsrcBufferOut )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen():\
+                unable to allocate pSsrcBufferOut, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->pPosInSsrcBufferOut = pC->pSsrcBufferOut;
+
+        /**
+        * Allocate temporary buffer needed in case of channel conversion */
+        if( pC->ChannelConversion > 0 )
+        {
+            /* The "3" value below should be optimized ... one day ... */
+            pC->pTempBuffer =
+                (M4OSA_MemAddr8)M4OSA_malloc(3 * pC->iSsrcNbSamplOut
+                * sizeof(short) * pC->pAddedClipCtxt->pSettings->
+                ClipProperties.uiNbChannels, M4VSS3GPP, (M4OSA_Char *)"pTempBuffer");
+
+            if( M4OSA_NULL == pC->pTempBuffer )
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intAudioMixingOpen():\
+                    unable to allocate pTempBuffer, returning M4ERR_ALLOC");
+                return M4ERR_ALLOC;
+            }
+            pC->pPosInTempBuffer = pC->pTempBuffer;
+        }
+    }
+    else if( pC->ChannelConversion > 0 )
+    {
+        pC->minimumBufferIn =
+            pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize;
+
+        /**
+        * Allocate buffer for the input of the SSRC */
+        pC->pSsrcBufferIn =
+            (M4OSA_MemAddr8)M4OSA_malloc(pC->minimumBufferIn
+            + pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize,
+            M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferIn");
+
+        if( M4OSA_NULL == pC->pSsrcBufferIn )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen(): \
+                unable to allocate pSsrcBufferIn, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
+
+        /**
+        * Allocate buffer for the output of the SSRC / channel converter,
+        * sized to one decoded buffer of the original clip */
+        pC->pSsrcBufferOut = (M4OSA_MemAddr8)M4OSA_malloc(
+            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize,
+            M4VSS3GPP, (M4OSA_Char *)"pSsrcBufferOut");
+
+        if( M4OSA_NULL == pC->pSsrcBufferOut )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen():\
+                unable to allocate pSsrcBufferOut, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->pPosInSsrcBufferOut = pC->pSsrcBufferOut;
+    }
+    else if( (pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3)||
+         (pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kPCM))
+    {
+        M4OSA_UInt32 minbuffer = 0;
+
+        if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAAC )
+        {
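+            /* One AAC frame holds 1024 PCM16 samples per channel, i.e. 2048 bytes per channel */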
+            pC->minimumBufferIn = 2048 * pC->ewc.uiNbChannels;
+            minbuffer = pC->minimumBufferIn;
+        }
+        else if( pSettings->outputAudioFormat == M4VIDEOEDITING_kAMR_NB )
+        {
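+            /* One AMR-NB frame covers 160 PCM16 samples at 8 kHz (20 ms), i.e. 320 bytes */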
+            pC->minimumBufferIn = 320;
+
+            if( pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize > 320 )
+            {
+                minbuffer = pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize;
+            }
+            else
+            {
+                minbuffer = pC->minimumBufferIn; /* Not really possible ...*/
+            }
+        }
+        else
+        {
+            M4OSA_TRACE1_0("Unsupported output audio format for MP3/PCM audio replacement");
+            return M4ERR_PARAMETER;
+        }
+
+        /**
+        * Allocate buffer for the input of the SSRC */
+        pC->pSsrcBufferIn =
+            (M4OSA_MemAddr8)M4OSA_malloc(2 * minbuffer, M4VSS3GPP,
+            (M4OSA_Char *)"pSsrcBufferIn");
+
+        if( M4OSA_NULL == pC->pSsrcBufferIn )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingOpen(): unable to allocate pSsrcBufferIn,\
+                returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->pPosInSsrcBufferIn = (M4OSA_MemAddr8)pC->pSsrcBufferIn;
+
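+        /* No sampling-rate conversion is needed in this branch, so the SSRC
+           "output" buffer simply aliases the input buffer allocated above. */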
+        pC->pPosInSsrcBufferOut = pC->pPosInSsrcBufferIn;
+        pC->pSsrcBufferOut = pC->pSsrcBufferIn;
+    }
+
+    /**
+    * Check if audio encoder is needed to do audio mixing or audio resampling */
+    if( M4OSA_TRUE == pC->bAudioMixingIsNeeded || M4VIDEOEDITING_kPCM
+        == pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+        || M4VIDEOEDITING_kMP3
+        == pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+        || pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+        != pSettings->outputAudioFormat
+        || pC->pAddedClipCtxt->pSettings->ClipProperties.uiSamplingFrequency
+        != outputASF
+        || pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels
+        != pSettings->outputNBChannels )
+    {
+        /**
+        * Init the audio encoder */
+        err = M4VSS3GPP_intCreateAudioEncoder(&pC->ewc, &pC->ShellAPI,
+            pC->ewc.uiAudioBitrate);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intCreateAudioEncoder() returns 0x%x",
+                err);
+            return err;
+        }
+
+        /* In case of PCM, MP3 or audio replace with reencoding, use encoder DSI */
+        if( pC->ewc.uiAudioOutputDsiSize == 0 && (M4VIDEOEDITING_kPCM
+            == pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+            || M4VIDEOEDITING_kMP3 == pC->pAddedClipCtxt->pSettings->
+            ClipProperties.AudioStreamType
+            || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.AudioStreamType
+            != pSettings->outputAudioFormat
+            || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiSamplingFrequency != outputASF
+            || pC->pAddedClipCtxt->pSettings->
+            ClipProperties.uiNbChannels
+            != pSettings->outputNBChannels) )
+        {
+            pC->ewc.uiAudioOutputDsiSize =
+                (M4OSA_UInt16)pC->ewc.pAudioEncDSI.infoSize;
+            pC->ewc.pAudioOutputDsi = pC->ewc.pAudioEncDSI.pInfo;
+        }
+    }
+
+    /**
+    * Init the output 3GPP file */
+    /*11/12/2008 CR3283 add the max output file size for the MMS use case in VideoArtist*/
+    err = M4VSS3GPP_intCreate3GPPOutputFile(&pC->ewc, &pC->ShellAPI,
+        pC->pOsaFileWritPtr, pSettings->pOutputClipFile,
+        pC->pOsaFileReadPtr, pSettings->pTemporaryFile, 0);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingOpen(): M4VSS3GPP_intCreate3GPPOutputFile() returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingOpen(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingWriteSilence()
+ * @brief    Write an audio silence frame into the writer
+ * @note    Mainly used when padding with silence
+ * @param    pC            (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingWriteSilence(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+
+    err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingWriteSilence:\
+         pWriterDataFcts->pStartAU(audio) returns 0x%x!", err);
+        return err;
+    }
+
+    M4OSA_TRACE2_0("A #### silence AU");
+
+    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress,
+        (M4OSA_MemAddr8)pC->ewc.pSilenceFrameData, pC->ewc.uiSilenceFrameSize);
+
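+    /* Fill in the AU size and timestamp: the CTS is the current audio time
+       (dATo) scaled by scale_audio and rounded to the nearest unit. */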
+    pC->ewc.WriterAudioAU.size = pC->ewc.uiSilenceFrameSize;
+    pC->ewc.WriterAudioAU.CTS =
+        (M4OSA_Time)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+    M4OSA_TRACE2_2("B ---- write : cts  = %ld [ 0x%x ]",
+        (M4OSA_Int32)(pC->ewc.dATo), pC->ewc.WriterAudioAU.size);
+
+    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingWriteSilence:\
+            pWriterDataFcts->pProcessAU(silence) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingStepVideo(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Perform one step of video.
+ * @note
+ * @param    pC            (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepVideo(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_UInt16 offset;
+
+    M4OSA_TRACE2_3("  VIDEO step : dVTo = %f  state = %d  offset = %ld",
+        pC->ewc.dOutputVidCts, pC->State, pC->pInputClipCtxt->iVoffset);
+
+    /**
+    * Read the input video AU */
+    err = pC->pInputClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+        pC->pInputClipCtxt->pReaderContext,
+        (M4_StreamHandler *)pC->pInputClipCtxt->pVideoStream,
+        &pC->pInputClipCtxt->VideoAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intAudioMixingStepVideo(): m_pFctGetNextAu(video) returns 0x%x",
+            err);
+        return err;
+    }
+
+    M4OSA_TRACE2_3("C .... read  : cts  = %.0f + %ld [ 0x%x ]",
+        pC->pInputClipCtxt->VideoAU.m_CTS, pC->pInputClipCtxt->iVoffset,
+        pC->pInputClipCtxt->VideoAU.m_size);
+
+    /**
+    * Get the output AU to write into */
+    err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, &pC->ewc.WriterVideoAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingStepVideo: pWriterDataFcts->pStartAU(Video) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    offset = 0;
+    /* For an H.264 stream, do not copy the first 4 bytes: they are header indicators */
+    if( pC->pInputClipCtxt->pVideoStream->m_basicProperties.m_streamType
+        == M4DA_StreamTypeVideoMpeg4Avc )
+    {
+        M4OSA_TRACE3_0(
+            "M4VSS3GPP_intAudioMixingStepVideo(): input stream type H264");
+        offset = 4;
+    }
+    pC->pInputClipCtxt->VideoAU.m_size  -=  offset;
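+    /* The AU size now excludes the skipped prefix; the payload copy below
+       starts at m_dataAddress + offset accordingly. */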
+    /**
+    * Check that the video AU is not larger than expected */
+    if( pC->pInputClipCtxt->VideoAU.m_size > pC->ewc.uiVideoMaxAuSize )
+    {
+        M4OSA_TRACE1_2(
+            "M4VSS3GPP_intAudioMixingStepVideo: AU size greater than MaxAuSize (%d>%d)!\
+            returning M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE",
+            pC->pInputClipCtxt->VideoAU.m_size, pC->ewc.uiVideoMaxAuSize);
+        return M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE;
+    }
+
+    /**
+    * Copy the input AU payload to the output AU */
+    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterVideoAU.dataAddress,
+        (M4OSA_MemAddr8)(pC->pInputClipCtxt->VideoAU.m_dataAddress + offset),
+        (pC->pInputClipCtxt->VideoAU.m_size));
+
+    /**
+    * Copy the input AU parameters to the output AU */
+    pC->ewc.WriterVideoAU.size = pC->pInputClipCtxt->VideoAU.m_size;
+    pC->ewc.WriterVideoAU.CTS =
+        (M4OSA_UInt32)(pC->pInputClipCtxt->VideoAU.m_CTS + 0.5);
+    pC->ewc.WriterVideoAU.attribute = pC->pInputClipCtxt->VideoAU.m_attribute;
+
+    /**
+    * Write the AU */
+    M4OSA_TRACE2_2("D ---- write : cts  = %lu [ 0x%x ]",
+        pC->ewc.WriterVideoAU.CTS, pC->ewc.WriterVideoAU.size);
+
+    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, &pC->ewc.WriterVideoAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingStepVideo: pWriterDataFcts->pProcessAU(Video) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingStepVideo(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioMix(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Perform one step of audio mixing.
+ * @note
+ * @param    pC            (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioMix(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+
+    M4OSA_TRACE2_3("  AUDIO mix  : dATo = %f  state = %d  offset = %ld",
+        pC->ewc.dATo, pC->State, pC->pInputClipCtxt->iAoffset);
+
+    switch( pC->State )
+    {
+        /**********************************************************/
+        case M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT:
+            {
+                err = M4VSS3GPP_intAudioMixingCopyOrig(pC);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingStepAudioMix:\
+                        M4VSS3GPP_intAudioMixingCopyOrig(1) returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Check if we reached the AddCts */
+                if( pC->ewc.dATo >= pC->iAddCts )
+                {
+                    /**
+                    * First segment is over, state transition to second and return OK */
+                    pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT;
+
+                    /* Transition from reading state to encoding state */
+                    err = M4VSS3GPP_intAudioMixingTransition(pC);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intAudioMixingStepAudioMix(): pre-encode fails err = 0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Return with no error so the step function will be called again */
+                    pC->pAddedClipCtxt->iAoffset =
+                        (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
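+                    /* iAoffset now holds the insertion time expressed in the audio
+                       timescale, so the added clip's AUs will be timestamped
+                       relative to the mix point. */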
+
+                    M4OSA_TRACE2_0(
+                        "M4VSS3GPP_intAudioMixingStepAudioMix(): returning M4NO_ERROR (1->2)");
+
+                    return M4NO_ERROR;
+                }
+            }
+            break;
+
+            /**********************************************************/
+        case M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT:
+            {
+                if( M4OSA_TRUE == pC->bAudioMixingIsNeeded ) /**< Mix */
+                {
+                    /**
+                    * Read the added audio AU */
+                    if( pC->ChannelConversion > 0 || pC->b_SSRCneeded == M4OSA_TRUE
+                        || pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3 )
+                    {
+                        /* In case of sampling frequency and/or channel conversion,
+                           the next AU is read inside the
+                           M4VSS3GPP_intAudioMixingDoMixing function */
+                    }
+                    else
+                    {
+                        err =
+                            M4VSS3GPP_intClipReadNextAudioFrame(pC->pAddedClipCtxt);
+
+                        M4OSA_TRACE2_3("E .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                            pC->pAddedClipCtxt->iAudioFrameCts
+                            / pC->pAddedClipCtxt->scale_audio,
+                            pC->pAddedClipCtxt->iAoffset
+                            / pC->pAddedClipCtxt->scale_audio,
+                            pC->pAddedClipCtxt->uiAudioFrameSize);
+
+                        if( M4WAR_NO_MORE_AU == err )
+                        {
+                            /**
+                            * Decide what to do when audio is over */
+                            if( pC->uiEndLoop > 0 )
+                            {
+                                /**
+                                * Jump at the Begin loop time */
+                                M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+                                err = pC->pAddedClipCtxt->ShellAPI.m_pReader->
+                                    m_pFctJump(
+                                    pC->pAddedClipCtxt->pReaderContext,
+                                    (M4_StreamHandler
+                                    *)pC->pAddedClipCtxt->pAudioStream,
+                                    &time);
+
+                                if( M4NO_ERROR != err )
+                                {
+                                    M4OSA_TRACE1_1(
+                                        "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                        m_pReader->m_pFctJump(audio) returns 0x%x",
+                                        err);
+                                    return err;
+                                }
+                            }
+                            else
+                            {
+                                /* Transition from encoding state to reading state */
+                                err = M4VSS3GPP_intAudioMixingTransition(pC);
+
+                                if( M4NO_ERROR != err )
+                                {
+                                    M4OSA_TRACE1_1(
+                                        "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                        pre-encode fails err = 0x%x",
+                                        err);
+                                    return err;
+                                }
+
+                                /**
+                                * Second segment is over, state transition to third and
+                                 return OK */
+                                pC->State =
+                                    M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
+
+                                /**
+                                * Return with no error so the step function will be
+                                 called again */
+                                M4OSA_TRACE2_0(
+                                    "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                    returning M4NO_ERROR (2->3) a");
+                                return M4NO_ERROR;
+                            }
+                        }
+                        else if( M4NO_ERROR != err )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                m_pFctGetNextAu(audio) returns 0x%x",
+                                err);
+                            return err;
+                        }
+                    }
+
+                    /**
+                    * Read the original audio AU */
+                    err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pInputClipCtxt);
+
+                    M4OSA_TRACE2_3("F .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                        pC->pInputClipCtxt->iAudioFrameCts
+                        / pC->pInputClipCtxt->scale_audio,
+                        pC->pInputClipCtxt->iAoffset
+                        / pC->pInputClipCtxt->scale_audio,
+                        pC->pInputClipCtxt->uiAudioFrameSize);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE3_1(
+                            "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                            m_pFctGetNextAu(audio) returns 0x%x",
+                            err);
+                        return err;
+                    }
+
+                    if( pC->ChannelConversion == 0
+                        && pC->b_SSRCneeded == M4OSA_FALSE
+                        && pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.AudioStreamType != M4VIDEOEDITING_kMP3 )
+                    {
+                        /**
+                        * Get the output AU to write into */
+                        err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                            pC->ewc.p3gpWriterContext,
+                            M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                            &pC->ewc.WriterAudioAU);
+
+                        if( M4NO_ERROR != err )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intAudioMixingStepAudioMix:\
+                                pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+                                err);
+                            return err;
+                        }
+                    }
+
+                    /**
+                    * Perform the audio mixing */
+                    err = M4VSS3GPP_intAudioMixingDoMixing(pC);
+
+                    if( err == M4VSS3GPP_WAR_END_OF_ADDED_AUDIO )
+                    {
+                        return M4NO_ERROR;
+                    }
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intAudioMixingStepAudioMix:\
+                            M4VSS3GPP_intAudioMixingDoMixing returns 0x%x!",
+                            err);
+                        return err;
+                    }
+                }
+                else /**< No mix, just copy added audio */
+                {
+                    err = M4VSS3GPP_intAudioMixingCopyAdded(pC);
+
+                    if( M4WAR_NO_MORE_AU == err )
+                    {
+                        /**
+                        * Decide what to do when audio is over */
+                        if( pC->uiEndLoop > 0 )
+                        {
+                            /**
+                            * Jump at the Begin loop time */
+                            M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+                            err =
+                                pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+                                pC->pAddedClipCtxt->pReaderContext,
+                                (M4_StreamHandler
+                                *)pC->pAddedClipCtxt->pAudioStream,
+                                &time);
+
+                            if( M4NO_ERROR != err )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                    m_pReader->m_pFctJump(audio) returns 0x%x",
+                                    err);
+                                return err;
+                            }
+
+                            /**
+                            * 'BZZZ' bug fix:
+                            * add a silence frame */
+                            err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+                            if( M4NO_ERROR != err )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                    M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+                                    err);
+                                return err;
+                            }
+
+                            /**
+                            * Return with no error so the step function will be called again to
+                              read audio data */
+                            pC->pAddedClipCtxt->iAoffset =
+                                (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio
+                                + 0.5);
+
+                            M4OSA_TRACE2_0(
+                                "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                    returning M4NO_ERROR (loop)");
+                            return M4NO_ERROR;
+                        }
+                        else
+                        {
+                            /* Transition to begin cut */
+                            err = M4VSS3GPP_intAudioMixingTransition(pC);
+
+                            if( M4NO_ERROR != err )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                    pre-encode fails err = 0x%x",
+                                    err);
+                                return err;
+                            }
+
+                            /**
+                            * Second segment is over, state transition to third */
+                            pC->State =
+                                M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
+
+                            /**
+                            * Return with no error so the step function will be called again */
+                            M4OSA_TRACE2_0(
+                                "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                                returning M4NO_ERROR (2->3) b");
+                            return M4NO_ERROR;
+                        }
+                    }
+                    else if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                            M4VSS3GPP_intAudioMixingCopyAdded(2) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Check if we reached the end of the video */
+                if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
+                {
+                    M4OSA_TRACE3_0(
+                        "M4VSS3GPP_intAudioMixingStepAudioMix(): Video duration reached,\
+                        returning M4WAR_NO_MORE_AU");
+                    return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
+                }
+            }
+            break;
+
+            /**********************************************************/
+        case M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT:
+            {
+                err = M4VSS3GPP_intAudioMixingCopyOrig(pC);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingStepAudioMix:\
+                        M4VSS3GPP_intAudioMixingCopyOrig(3) returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Check if we reached the end of the video */
+                if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
+                {
+                    M4OSA_TRACE3_0(
+                        "M4VSS3GPP_intAudioMixingStepAudioMix():\
+                        Video duration reached, returning M4WAR_NO_MORE_AU");
+                    return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
+                }
+            }
+            break;
+       default:
+            break;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intAudioMixingStepAudioMix(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioReplace(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Perform one step of audio replacement.
+ * @note
+ * @param    pC            (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingStepAudioReplace(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+
+    M4OSA_TRACE2_3("  AUDIO repl : dATo = %f  state = %d  offset = %ld",
+        pC->ewc.dATo, pC->State, pC->pInputClipCtxt->iAoffset);
+
+    switch( pC->State )
+    {
+        /**********************************************************/
+        case M4VSS3GPP_kAudioMixingState_AUDIO_FIRST_SEGMENT:
+            {
+                /**
+                * Replace the SID (silence) payload in the writer AU */
+                err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                        M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Check if we reached the AddCts */
+                if( pC->ewc.dATo >= pC->iAddCts )
+                {
+                    /**
+                    * First segment is over, state transition to second and return OK */
+                    pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT;
+
+                    /**
+                    * Return with no error so the step function will be called again */
+                    pC->pAddedClipCtxt->iAoffset =
+                        (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+                    M4OSA_TRACE2_0("M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                         returning M4NO_ERROR (1->2)");
+                    return M4NO_ERROR;
+                }
+            }
+            break;
+
+            /**********************************************************/
+        case M4VSS3GPP_kAudioMixingState_AUDIO_SECOND_SEGMENT:
+            {
+                err = M4VSS3GPP_intAudioMixingCopyAdded(pC);
+
+                if( M4WAR_NO_MORE_AU == err )
+                {
+                    /**
+                    * Decide what to do when audio is over */
+
+                    if( pC->uiEndLoop > 0 )
+                    {
+                        /**
+                        * Jump at the Begin loop time */
+                        M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+                        err = pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+                            pC->pAddedClipCtxt->pReaderContext,
+                            (M4_StreamHandler
+                            *)pC->pAddedClipCtxt->pAudioStream, &time);
+
+                        if( M4NO_ERROR != err )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                                m_pReader->m_pFctJump(audio) returns 0x%x",
+                                err);
+                            return err;
+                        }
+
+                        /**
+                        * 'BZZZ' bug fix:
+                        * add a silence frame */
+                        err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+                        if( M4NO_ERROR != err )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                                M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+                                err);
+                            return err;
+                        }
+
+                        /**
+                        * Return with no error so the step function will be called again to
+                          read audio data */
+                        pC->pAddedClipCtxt->iAoffset =
+                            (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+                        M4OSA_TRACE2_0(
+                            "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                            returning M4NO_ERROR (loop)");
+
+                        return M4NO_ERROR;
+                    }
+                    else if( M4OSA_TRUE == pC->bSupportSilence )
+                    {
+                        /**
+                        * Second segment is over, state transition to third and return OK */
+                        pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
+
+                        /**
+                        * Return with no error so the step function will be called again */
+                        M4OSA_TRACE2_0(
+                            "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                                 returning M4NO_ERROR (2->3)");
+                        return M4NO_ERROR;
+                    }
+                    else
+                    {
+                        /**
+                        * The third segment (silence) is only done if supported.
+                        * In other case, we finish here. */
+                        pC->State = M4VSS3GPP_kAudioMixingState_FINISHED;
+
+                        /**
+                        * Return with no error so the step function will be called again */
+                        M4OSA_TRACE2_0(
+                            "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                                 returning M4NO_ERROR (2->F)");
+                        return M4NO_ERROR;
+                    }
+                }
+                else if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                        M4VSS3GPP_intAudioMixingCopyAdded(2) returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Check if we reached the end of the clip */
+                if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
+                {
+                    M4OSA_TRACE3_0(
+                        "M4VSS3GPP_intAudioMixingStepAudioReplace(): Clip duration reached,\
+                        returning M4WAR_NO_MORE_AU");
+                    return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
+                }
+            }
+            break;
+
+            /**********************************************************/
+        case M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT:
+            {
+                /**
+                * Replace the SID (silence) payload in the writer AU */
+                err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                        M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Check if we reached the end of the video */
+                if( pC->ewc.dATo >= pC->ewc.iOutputDuration )
+                {
+                    M4OSA_TRACE3_0(
+                        "M4VSS3GPP_intAudioMixingStepAudioReplace():\
+                        Video duration reached, returning M4WAR_NO_MORE_AU");
+                    return M4WAR_NO_MORE_AU; /**< Simulate end of file error */
+                }
+            }
+            break;
+        default:
+            break;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intAudioMixingStepAudioReplace(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCopyOrig(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Read one AU from the original audio file and write it to the output
+ * @note
+ * @param    pC    (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyOrig(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+
+    /**
+    * Read the input original audio AU */
+    err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pInputClipCtxt);
+
+    M4OSA_TRACE2_3("G .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+        pC->pInputClipCtxt->iAudioFrameCts / pC->pInputClipCtxt->scale_audio,
+        pC->pInputClipCtxt->iAoffset / pC->pInputClipCtxt->scale_audio,
+        pC->pInputClipCtxt->uiAudioFrameSize);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intAudioMixingCopyOrig(): m_pFctGetNextAu(audio) returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Get the output AU to write into */
+    err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingCopyOrig: pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Copy the input AU properties to the output AU */
+    pC->ewc.WriterAudioAU.size = pC->pInputClipCtxt->uiAudioFrameSize;
+    pC->ewc.WriterAudioAU.CTS =
+        pC->pInputClipCtxt->iAudioFrameCts + pC->pInputClipCtxt->iAoffset;
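+    /* The output CTS is the original frame CTS shifted by the clip's audio time offset */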
+
+    /**
+    * Copy the AU itself */
+    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress,
+        pC->pInputClipCtxt->pAudioFramePtr, pC->ewc.WriterAudioAU.size);
+
+    /**
+    * Write the mixed AU */
+    M4OSA_TRACE2_2("H ---- write : cts  = %ld [ 0x%x ]",
+        (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+        pC->ewc.WriterAudioAU.size);
+
+    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingCopyOrig: pWriterDataFcts->pProcessAU(audio) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Increment the audio CTS for the next step */
+    pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingCopyOrig(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCopyAdded(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Read one AU from the added audio file and write it to the output
+ * @note
+ * @param    pC    (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCopyAdded(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+
+    if(pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kMP3 ||
+        pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType == M4VIDEOEDITING_kPCM ||
+        pC->b_SSRCneeded == M4OSA_TRUE ||
+        pC->ChannelConversion > 0)
+    {
+        M4ENCODER_AudioBuffer pEncInBuffer; /**< Encoder input buffer for api */
+        M4ENCODER_AudioBuffer
+            pEncOutBuffer; /**< Encoder output buffer for api */
+        M4OSA_Time
+            frameTimeDelta; /**< Duration of the encoded (then written) data */
+        M4OSA_MemAddr8 tempPosBuffer;
+
+        err = M4VSS3GPP_intAudioMixingConvert(pC);
+
+        if( err == M4VSS3GPP_WAR_END_OF_ADDED_AUDIO )
+        {
+            M4OSA_TRACE2_0(
+                "M4VSS3GPP_intAudioMixingCopyAdded:\
+                M4VSS3GPP_intAudioMixingConvert end of added file");
+            return M4NO_ERROR;
+        }
+        else if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingCopyAdded:\
+                M4VSS3GPP_intAudioMixingConvert returned 0x%x", err);
+            return err;
+        }
+
+        /**
+        * Get the output AU to write into */
+        err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCopyAdded:\
+                pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /* [Mono] or [Stereo interleaved] : all is in one buffer */
+        pEncInBuffer.pTableBuffer[0] = pC->pSsrcBufferOut;
+        pEncInBuffer.pTableBufferSize[0] =
+            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+        pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+        pEncInBuffer.pTableBufferSize[1] = 0;
+
+        /* Frame duration in timescale units (PCM16 samples per channel), derived from the data size */
+        frameTimeDelta = pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+            / pC->ewc.uiNbChannels;
+
+        /**
+        * Prepare output buffer */
+        pEncOutBuffer.pTableBuffer[0] =
+            (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+        pEncOutBuffer.pTableBufferSize[0] = 0;
+
+        M4OSA_TRACE2_0("K **** blend AUs");
+#if 0
+
+        {
+            M4OSA_Char filename[13];
+            M4OSA_Context pGIFFileInDebug = M4OSA_NULL;
+            M4OSA_FilePosition pos = 0;
+
+            sprintf(filename, "toto.pcm");
+
+            err = pC->pOsaFileWritPtr->openWrite(&pGIFFileInDebug, filename,
+                M4OSA_kFileWrite | M4OSA_kFileAppend);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_2("Can't open PCM dump file %s, error: 0x%x\n",
+                    filename, err);
+                return err;
+            }
+
+            err = pC->pOsaFileWritPtr->seek(pGIFFileInDebug, M4OSA_kFileSeekEnd,
+                &pos);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_2("Can't seek PCM dump file %s, error: 0x%x\n",
+                    filename, err);
+                return err;
+            }
+
+            err = pC->pOsaFileWritPtr->writeData(pGIFFileInDebug,
+                pC->pSsrcBufferOut,
+                pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_2("Can't write PCM dump file %s, error: 0x%x\n",
+                    filename, err);
+                return err;
+            }
+
+            err = pC->pOsaFileWritPtr->closeWrite(pGIFFileInDebug);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_2("Can't close PCM dump file %s, error: 0x%x\n",
+                    filename, err);
+                return err;
+            }
+        }
+
+#endif
+        /**
+        * Encode the PCM audio */
+
+        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+            pC->ewc.pAudioEncCtxt, &pEncInBuffer, &pEncOutBuffer);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCopyAdded():\
+                pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                err);
+            return err;
+        }
+
+        /**
+        * Set AU cts and size */
+        pC->ewc.WriterAudioAU.size =
+            pEncOutBuffer.pTableBufferSize[0]; /**< Get the size of encoded data */
+        pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
+
+        /* Update decoded buffer here */
+        if( M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0 )
+        {
+            tempPosBuffer = pC->pSsrcBufferOut
+                + pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+            M4OSA_memmove(pC->pSsrcBufferOut, tempPosBuffer,
+                pC->pPosInSsrcBufferOut - tempPosBuffer);
+            pC->pPosInSsrcBufferOut -=
+                pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+        }
+        else
+        {
+            tempPosBuffer = pC->pSsrcBufferIn + pC->minimumBufferIn;
+            M4OSA_memmove(pC->pSsrcBufferIn, tempPosBuffer,
+                pC->pPosInSsrcBufferIn - tempPosBuffer);
+            pC->pPosInSsrcBufferIn -= pC->minimumBufferIn;
+        }
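+        /* The consumed PCM has been shifted out of the front of the working
+           buffer (memmove) and the write position pulled back by the same amount. */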
+
+        /**
+        * Write the mixed AU */
+        M4OSA_TRACE2_2("J ---- write : cts  = %ld [ 0x%x ]",
+            (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+            pC->ewc.WriterAudioAU.size);
+
+        err =
+            pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCopyAdded:\
+                pWriterDataFcts->pProcessAU(audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Increment the audio CTS for the next step */
+        pC->ewc.dATo += frameTimeDelta / pC->ewc.scale_audio;
+    }
+    else
+    {
+        /**
+        * Read the added audio AU */
+        err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pAddedClipCtxt);
+
+        M4OSA_TRACE2_3("I .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+            pC->pAddedClipCtxt->iAudioFrameCts
+            / pC->pAddedClipCtxt->scale_audio,
+            pC->pAddedClipCtxt->iAoffset / pC->pAddedClipCtxt->scale_audio,
+            pC->pAddedClipCtxt->uiAudioFrameSize);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intAudioMixingCopyAdded(): m_pFctGetNextAu(audio) returns 0x%x",
+                err);
+            return err;
+        }
+
+        /**
+        * Get the output AU to write into */
+        err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCopyAdded:\
+                pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Copy the input AU properties to the output AU */
+
+        /** The check below prevents overflowing the pre-allocated output AU.
+        The max AU size is derived from M4VSS3GPP_AUDIO_MAX_AU_SIZE, defined in
+        M4VSS3GPP_InternalConfig.h; if this error occurs, increase the limit set there.
+        */
+        if( pC->pAddedClipCtxt->uiAudioFrameSize > pC->ewc.WriterAudioAU.size )
+        {
+            M4OSA_TRACE1_2(
+                "ERROR: audio AU size (%d) to copy larger than allocated one (%d) => abort",
+                pC->pAddedClipCtxt->uiAudioFrameSize,
+                pC->ewc.WriterAudioAU.size);
+            M4OSA_TRACE1_0(
+                "PLEASE CONTACT SUPPORT TO EXTEND MAX AU SIZE IN THE PRODUCT LIBRARY");
+            err = M4ERR_UNSUPPORTED_MEDIA_TYPE;
+            return err;
+        }
+        pC->ewc.WriterAudioAU.size = pC->pAddedClipCtxt->uiAudioFrameSize;
+        pC->ewc.WriterAudioAU.CTS =
+            pC->pAddedClipCtxt->iAudioFrameCts + pC->pAddedClipCtxt->iAoffset;
+
+        /**
+        * Copy the AU itself */
+        M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress,
+            pC->pAddedClipCtxt->pAudioFramePtr, pC->ewc.WriterAudioAU.size);
+
+        /**
+        * Write the mixed AU */
+        M4OSA_TRACE2_2("J ---- write : cts  = %ld [ 0x%x ]",
+            (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+            pC->ewc.WriterAudioAU.size);
+
+        err =
+            pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCopyAdded:\
+                pWriterDataFcts->pProcessAU(audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Increment the audio CTS for the next step */
+        pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingCopyAdded(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR  M4VSS3GPP_intAudioMixingConvert(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Convert the added track's PCM to the output sampling frequency / number of channels
+ * @note
+ * @param    pC    (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingConvert(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+    int ssrcErr; /**< Error while ssrc processing */
+    M4OSA_UInt32 uiChannelConvertorNbSamples =
+        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize / sizeof(short)
+        / pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+    M4OSA_MemAddr8 tempPosBuffer;
+
+    M4OSA_UInt32 outFrameCount = uiChannelConvertorNbSamples;
+    /* Do we need to feed SSRC buffer In ? */
+    /**
+    * RC: This is not really optimal (memmove); a linked list would avoid the copies. */
+    while( pC->pPosInSsrcBufferIn - pC->pSsrcBufferIn < (M4OSA_Int32)pC->minimumBufferIn )
+    {
+        /* We need to get more PCM data */
+        if (pC->bNoLooping == M4OSA_TRUE)
+        {
+            err = M4WAR_NO_MORE_AU;
+        }
+        else
+        {
+            err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pAddedClipCtxt);
+        }
+        if(pC->bjumpflag)
+        {
+            /**
+            * Jump at the Begin loop time */
+            M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+            err = pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+                pC->pAddedClipCtxt->pReaderContext,
+                (M4_StreamHandler *)pC->pAddedClipCtxt->pAudioStream, &time);
+
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingConvert():\
+                    m_pReader->m_pFctJump(audio) returns 0x%x", err);
+                return err;
+            }
+            pC->bjumpflag = M4OSA_FALSE;
+        }
+        M4OSA_TRACE2_3("E .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+             pC->pAddedClipCtxt->iAudioFrameCts / pC->pAddedClipCtxt->scale_audio,
+                 pC->pAddedClipCtxt->iAoffset / pC->pAddedClipCtxt->scale_audio,
+                     pC->pAddedClipCtxt->uiAudioFrameSize);
+        if( M4WAR_NO_MORE_AU == err )
+        {
+            if(pC->bNoLooping == M4OSA_TRUE)
+            {
+                pC->uiEndLoop = 0; /* Value 0 means no looping is required */
+            }
+            /**
+            * Decide what to do when audio is over */
+            if( pC->uiEndLoop > 0 )
+            {
+                /**
+                * Jump at the Begin loop time */
+                M4OSA_Int32 time = (M4OSA_Int32)(pC->uiBeginLoop);
+
+                err = pC->pAddedClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+                    pC->pAddedClipCtxt->pReaderContext,
+                    (M4_StreamHandler *)pC->pAddedClipCtxt->
+                    pAudioStream, &time);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingConvert():\
+                        m_pReader->m_pFctJump(audio) returns 0x%x",
+                        err);
+                    return err;
+                }
+            }
+            else
+            {
+                /* Transition from encoding state to reading state */
+                err = M4VSS3GPP_intAudioMixingTransition(pC);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingConvert(): pre-encode fails err = 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Second segment is over, state transition to third and return OK */
+                pC->State = M4VSS3GPP_kAudioMixingState_AUDIO_THIRD_SEGMENT;
+
+                /**
+                * Return with no error so the step function will be called again */
+                M4OSA_TRACE2_0(
+                    "M4VSS3GPP_intAudioMixingConvert():\
+                    returning M4VSS3GPP_WAR_END_OF_ADDED_AUDIO (2->3) a");
+                return M4VSS3GPP_WAR_END_OF_ADDED_AUDIO;
+            }
+        }
+        else if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingConvert(): m_pFctGetNextAu(audio) returns 0x%x",
+                err);
+            return err;
+        }
+
+        err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pAddedClipCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingDoMixing:\
+                M4VSS3GPP_intClipDecodeCurrentAudioFrame(added) returns 0x%x",
+                err);
+            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+        }
+
+        /* Copy decoded data into SSRC buffer in */
+        M4OSA_memcpy(pC->pPosInSsrcBufferIn,
+            pC->pAddedClipCtxt->AudioDecBufferOut.m_dataAddress,
+            pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize);
+        /* Update position pointer into SSRC buffer In */
+
+        pC->pPosInSsrcBufferIn +=
+            pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize;
+    }
+
+    /* Do the resampling / channel conversion if needed (=feed buffer out) */
+    if( pC->b_SSRCneeded == M4OSA_TRUE )
+    {
+        pC->ChannelConversion = 0;
+        if( pC->ChannelConversion > 0 )
+        {
+            while( pC->pPosInTempBuffer - pC->pTempBuffer
+                < (M4OSA_Int32)(pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+                *pC->pAddedClipCtxt->pSettings->ClipProperties.uiNbChannels)
+                / pC->ChannelConversion )
+                /* We divide by ChannelConversion because in case 2 (mono to stereo) only half as much input data is needed */
+            {
+                ssrcErr = 0;
+                M4OSA_memset(pC->pPosInTempBuffer,
+                    (pC->iSsrcNbSamplOut * sizeof(short) * pC->ewc.uiNbChannels),0);
+
+                LVAudioresample_LowQuality((short*)pC->pPosInTempBuffer,
+                    (short*)pC->pSsrcBufferIn,
+                    pC->iSsrcNbSamplOut,
+                    pC->pLVAudioResampler);
+                if( 0 != ssrcErr )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingConvert: SSRC_Process returns 0x%x, returning ",
+                        ssrcErr);
+                    return ssrcErr;
+                }
+
+                pC->pPosInTempBuffer += pC->iSsrcNbSamplOut * sizeof(short)
+                    * pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.uiNbChannels;
+
+                /* Update SSRC bufferIn */
+                tempPosBuffer =
+                    pC->pSsrcBufferIn + (pC->iSsrcNbSamplIn * sizeof(short)
+                    * pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.uiNbChannels);
+                M4OSA_memmove(pC->pSsrcBufferIn, tempPosBuffer,
+                    pC->pPosInSsrcBufferIn - tempPosBuffer);
+                pC->pPosInSsrcBufferIn -= pC->iSsrcNbSamplIn * sizeof(short)
+                    * pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.uiNbChannels;
+            }
+        }
+        else
+        {
+            while( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+                < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+            {
+                ssrcErr = 0;
+                M4OSA_memset(pC->pPosInSsrcBufferOut,
+                    (pC->iSsrcNbSamplOut * sizeof(short) * pC->ewc.uiNbChannels),0);
+
+                LVAudioresample_LowQuality((short*)pC->pPosInSsrcBufferOut,
+                    (short*)pC->pSsrcBufferIn,
+                    pC->iSsrcNbSamplOut,
+                    pC->pLVAudioResampler);
+                if( 0 != ssrcErr )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingConvert: SSRC_Process returns 0x%x, returning ",
+                        ssrcErr);
+                    return ssrcErr;
+                }
+                pC->pPosInSsrcBufferOut +=
+                    pC->iSsrcNbSamplOut * sizeof(short) * pC->ewc.uiNbChannels;
+
+                /* Update SSRC bufferIn */
+                tempPosBuffer =
+                    pC->pSsrcBufferIn + (pC->iSsrcNbSamplIn * sizeof(short)
+                    * pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.uiNbChannels);
+                M4OSA_memmove(pC->pSsrcBufferIn, tempPosBuffer,
+                    pC->pPosInSsrcBufferIn - tempPosBuffer);
+                pC->pPosInSsrcBufferIn -= pC->iSsrcNbSamplIn * sizeof(short)
+                    * pC->pAddedClipCtxt->pSettings->
+                    ClipProperties.uiNbChannels;
+            }
+        }
+
+        /* Convert Stereo<->Mono */
+        switch( pC->ChannelConversion )
+        {
+            case 0: /* No channel conversion */
+                break;
+
+            case 1: /* stereo to mono */
+                if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+                    < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+                {
+                    From2iToMono_16((short *)pC->pTempBuffer,
+                        (short *)pC->pSsrcBufferOut,
+                        (short)(uiChannelConvertorNbSamples));
+                    /* Update pTempBuffer */
+                    tempPosBuffer = pC->pTempBuffer
+                        + (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.
+                        uiNbChannels); /* Buffer is in bytes */
+                    M4OSA_memmove(pC->pTempBuffer, tempPosBuffer,
+                        pC->pPosInTempBuffer - tempPosBuffer);
+                    pC->pPosInTempBuffer -=
+                        (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.uiNbChannels);
+                    pC->pPosInSsrcBufferOut +=
+                        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+                }
+                break;
+
+            case 2: /* mono to stereo */
+                if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+                    < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+                {
+                    MonoTo2I_16((short *)pC->pTempBuffer,
+                        (short *)pC->pSsrcBufferOut,
+                        (short)uiChannelConvertorNbSamples);
+                    tempPosBuffer = pC->pTempBuffer
+                        + (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.uiNbChannels);
+                    M4OSA_memmove(pC->pTempBuffer, tempPosBuffer,
+                        pC->pPosInTempBuffer - tempPosBuffer);
+                    pC->pPosInTempBuffer -=
+                        (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.uiNbChannels);
+                    pC->pPosInSsrcBufferOut +=
+                        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+                }
+                break;
+        }
+    }
+    else if( pC->ChannelConversion > 0 )
+    {
+        //M4OSA_UInt32 uiChannelConvertorNbSamples =
+        // pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize / sizeof(short) /
+        // pC->pInputClipCtxt->pSettings->ClipProperties.uiNbChannels;
+        /* Convert Stereo<->Mono */
+        switch( pC->ChannelConversion )
+        {
+            case 0: /* No channel conversion */
+                break;
+
+            case 1: /* stereo to mono */
+                if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+                    < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+                {
+                    From2iToMono_16((short *)pC->pSsrcBufferIn,
+                        (short *)pC->pSsrcBufferOut,
+                        (short)(uiChannelConvertorNbSamples));
+                    /* Update pTempBuffer */
+                    tempPosBuffer = pC->pSsrcBufferIn
+                        + (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.
+                        uiNbChannels); /* Buffer is in bytes */
+                    M4OSA_memmove(pC->pSsrcBufferIn, tempPosBuffer,
+                        pC->pPosInSsrcBufferIn - tempPosBuffer);
+                    pC->pPosInSsrcBufferIn -=
+                        (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.uiNbChannels);
+                    pC->pPosInSsrcBufferOut +=
+                        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+                }
+                break;
+
+            case 2: /* mono to stereo */
+                if( pC->pPosInSsrcBufferOut - pC->pSsrcBufferOut
+                    < (M4OSA_Int32)pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize )
+                {
+                    MonoTo2I_16((short *)pC->pSsrcBufferIn,
+                        (short *)pC->pSsrcBufferOut,
+                        (short)uiChannelConvertorNbSamples);
+                    tempPosBuffer = pC->pSsrcBufferIn
+                        + (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.uiNbChannels);
+                    M4OSA_memmove(pC->pSsrcBufferIn, tempPosBuffer,
+                        pC->pPosInSsrcBufferIn - tempPosBuffer);
+                    pC->pPosInSsrcBufferIn -=
+                        (uiChannelConvertorNbSamples * sizeof(short)
+                        * pC->pAddedClipCtxt->pSettings->
+                        ClipProperties.uiNbChannels);
+                    pC->pPosInSsrcBufferOut +=
+                        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+                }
+                break;
+        }
+    }
+    else
+    {
+        /* Neither channel conversion nor sampling-frequency conversion is needed, just buffer management */
+        pC->pPosInSsrcBufferOut = pC->pPosInSsrcBufferIn;
+    }
+
+    return M4NO_ERROR;
+}
+
+M4OSA_Int32 M4VSS3GPP_getDecibelSound( M4OSA_UInt32 value )
+{
+    int dbSound = 1;
+
+    if( value == 0 )
+        return 0;
+
+    if( value > 0x4000 && value <= 0x8000 )      // 32768
+        dbSound = 90;
+
+    else if( value > 0x2000 && value <= 0x4000 ) // 16384
+        dbSound = 84;
+
+    else if( value > 0x1000 && value <= 0x2000 ) // 8192
+        dbSound = 78;
+
+    else if( value > 0x0800 && value <= 0x1000 ) // 4096
+        dbSound = 72;
+
+    else if( value > 0x0400 && value <= 0x0800 ) // 2048
+        dbSound = 66;
+
+    else if( value > 0x0200 && value <= 0x0400 ) // 1024
+        dbSound = 60;
+
+    else if( value > 0x0100 && value <= 0x0200 ) // 512
+        dbSound = 54;
+
+    else if( value > 0x0080 && value <= 0x0100 ) // 256
+        dbSound = 48;
+
+    else if( value > 0x0040 && value <= 0x0080 ) // 128
+        dbSound = 42;
+
+    else if( value > 0x0020 && value <= 0x0040 ) // 64
+        dbSound = 36;
+
+    else if( value > 0x0010 && value <= 0x0020 ) // 32
+        dbSound = 30;
+
+    else if( value > 0x0008 && value <= 0x0010 ) //16
+        dbSound = 24;
+
+    else if( value > 0x0007 && value <= 0x0008 ) //8
+        dbSound = 24;
+
+    else if( value > 0x0003 && value <= 0x0007 ) // 4
+        dbSound = 18;
+
+    else if( value > 0x0001 && value <= 0x0003 ) //2
+        dbSound = 12;
+
+    else if( value > 0x000 && value <= 0x0001 )  // 1
+        dbSound = 6;
+
+    else
+        dbSound = 0;
+
+    return dbSound;
+}
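+
+/*
+ * Illustrative note (not part of the original sources): the table above roughly follows a
+ * 6 dB-per-octave rule, i.e. dbSound is about 6 * ceil(log2(value)). For example, a peak
+ * value of 1000 falls in the 512..1024 range and maps to 60.
+ */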
+/**
+ ******************************************************************************
+ * M4OSA_ERR  M4VSS3GPP_intAudioMixingDoMixing(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Mix the current audio AUs (decoder, mix, encode)
+ * @note
+ * @param    pC    (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingDoMixing(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_Int16 *pPCMdata1;
+    M4OSA_Int16 *pPCMdata2;
+    M4OSA_UInt32 uiPCMsize;
+
+    M4ENCODER_AudioBuffer pEncInBuffer;  /**< Encoder input buffer for api */
+    M4ENCODER_AudioBuffer pEncOutBuffer; /**< Encoder output buffer for api */
+    M4OSA_Time
+        frameTimeDelta; /**< Duration of the encoded (then written) data */
+    M4OSA_MemAddr8 tempPosBuffer;
+    /* Ducking variables */
+    M4OSA_UInt16 loopIndex = 0;
+    M4OSA_Int16 *pPCM16Sample = M4OSA_NULL;
+    M4OSA_Int32 peakDbValue = 0;
+    M4OSA_Int32 previousDbValue = 0;
+    M4OSA_UInt32 i;
+
+    /**
+    * Decode original audio track AU */
+
+    err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pInputClipCtxt);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingDoMixing:\
+            M4VSS3GPP_intClipDecodeCurrentAudioFrame(orig) returns 0x%x",
+            err);
+        return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+    }
+
+    if( M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0
+        || pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+        == M4VIDEOEDITING_kMP3 )
+    {
+        err = M4VSS3GPP_intAudioMixingConvert(pC);
+
+        if( err == M4VSS3GPP_WAR_END_OF_ADDED_AUDIO )
+        {
+            return err;
+        }
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingDoMixing: M4VSS3GPP_intAudioMixingConvert returned 0x%x",
+                err);
+            return M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE;
+        }
+
+        /**
+        * Get the output AU to write into */
+        err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+            M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingStepAudioMix:\
+                pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        pPCMdata2 = (M4OSA_Int16 *)pC->pSsrcBufferOut;
+    }
+    else
+    {
+        /**
+        * Decode added audio track AU */
+        err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pAddedClipCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingDoMixing:\
+                M4VSS3GPP_intClipDecodeCurrentAudioFrame(added) returns 0x%x",
+                err);
+            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+        }
+
+        /**
+        * Check both clips decoded the same amount of PCM samples */
+        if( pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+            != pC->pAddedClipCtxt->AudioDecBufferOut.m_bufferSize )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingDoMixing:\
+                both clips AU must have the same decoded PCM size!");
+            return M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE;
+        }
+        pPCMdata2 = (M4OSA_Int16 *)pC->pAddedClipCtxt->AudioDecBufferOut.m_dataAddress;
+    }
+
+    /**
+    * Mix the two decoded PCM audios */
+    pPCMdata1 =
+        (M4OSA_Int16 *)pC->pInputClipCtxt->AudioDecBufferOut.m_dataAddress;
+    uiPCMsize = pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+        / 2; /* buffer size (bytes) to number of samples (int16) */
+
+    if( pC->b_DuckingNeedeed )
+    {
+        loopIndex = 0;
+        peakDbValue = 0;
+        previousDbValue = peakDbValue;
+
+        pPCM16Sample = (M4OSA_Int16 *)pC->pInputClipCtxt->
+            AudioDecBufferOut.m_dataAddress;
+
+        /* Calculate the peak value */
+        while( loopIndex
+            < pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize
+            / sizeof(M4OSA_Int16) )
+        {
+            if( pPCM16Sample[loopIndex] >= 0 )
+            {
+                peakDbValue = previousDbValue > pPCM16Sample[loopIndex]
+                ? previousDbValue : pPCM16Sample[loopIndex];
+                previousDbValue = peakDbValue;
+            }
+            else
+            {
+                peakDbValue = previousDbValue > -pPCM16Sample[loopIndex]
+                ? previousDbValue : -pPCM16Sample[loopIndex];
+                previousDbValue = peakDbValue;
+            }
+            loopIndex++;
+        }
+
+        pC->audioVolumeArray[pC->audVolArrIndex] =
+            M4VSS3GPP_getDecibelSound(peakDbValue);
+
+        /* WINDOW_SIZE is 10 by default and check for threshold is done after 10 cycles */
+        if( pC->audVolArrIndex >= WINDOW_SIZE - 1 )
+        {
+            pC->bDoDucking =
+                M4VSS3GPP_isThresholdBreached((M4OSA_Int32 *)&(pC->audioVolumeArray),
+                pC->audVolArrIndex, pC->InDucking_threshold);
+
+            pC->audVolArrIndex = 0;
+        }
+        else
+        {
+            pC->audVolArrIndex++;
+        }
+
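+        /*
+        * Illustrative example (threshold value assumed, not from the original sources):
+        * with WINDOW_SIZE at its default of 10 and an InDucking_threshold of, say, 50 dB,
+        * ducking is re-evaluated every 10 frames by averaging the stored per-frame peak
+        * dB values and comparing that average against the threshold.
+        */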
+        /*
+        * The logic below controls the mixing weight of the Background Track (BT) and the
+        * Primary Track (PT) over the duration of the analysis window, fading the Background
+        * Track out and the Primary Track in when ducking is triggered.
+        *
+        * The fading factor is spread in equal steps over the defined window size,
+        * e.g. a window size of 25 for a 500 ms analysis window made of 20 ms frames.
+        */
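+        /*
+        * Illustrative walk-through (assumed value, not from the original sources): if
+        * InDucking_lowVolume were 0.1, a breached threshold would lower duckingFactor
+        * from 1.0 towards 0.1 in 0.1 steps (about ten frames), and it would climb back
+        * towards 1.0 at the same rate once the threshold is no longer breached.
+        */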
+
+        if( pC->bDoDucking )
+        {
+            if( pC->duckingFactor
+                > pC->InDucking_lowVolume ) // FADE OUT BG Track
+            {
+                // Decrement the ducking factor in steps of the low-volume value
+                // until the low-volume level is reached
+                pC->duckingFactor -= (pC->InDucking_lowVolume);
+            }
+            else
+            {
+                pC->duckingFactor = pC->InDucking_lowVolume;
+            }
+        }
+        else
+        {
+            if( pC->duckingFactor < 1.0 ) // FADE IN BG Track
+            {
+                // Increment the ducking factor in steps of the low-volume value
+                // until the original volume level is reached
+                pC->duckingFactor += (pC->InDucking_lowVolume);
+            }
+            else
+            {
+                pC->duckingFactor = 1.0;
+            }
+        }
+        /* endif - ducking_enable */
+
+        /* Mixing Logic */
+
+        while( uiPCMsize-- > 0 )
+        {
+            M4OSA_Int32 temp;
+
+           /* set vol factor for BT and PT */
+            *pPCMdata2 = (M4OSA_Int16)(*pPCMdata2 * pC->fBTVolLevel);
+
+            *pPCMdata1 = (M4OSA_Int16)(*pPCMdata1 * pC->fPTVolLevel);
+
+            /* mix the two samples */
+
+            *pPCMdata2 = (M4OSA_Int16)(( *pPCMdata2) * (pC->duckingFactor));
+            *pPCMdata1 = (M4OSA_Int16)(*pPCMdata2 / 2 + *pPCMdata1 / 2);
+
+
+            if( *pPCMdata1 < 0 )
+            {
+                temp = -( *pPCMdata1)
+                    * 2; // restore the original amplitude level
+
+                if( temp > 32767 )
+                {
+                    *pPCMdata1 = -32766; // less than max allowed value
+                }
+                else
+                {
+                    *pPCMdata1 = (M4OSA_Int16)(-temp);
+                }
+            }
+            else
+            {
+                temp = ( *pPCMdata1)
+                    * 2; // restore the original amplitude level
+
+                if( temp > 32768 )
+                {
+                    *pPCMdata1 = 32767; // less than max allowed value
+                }
+                else
+                {
+                    *pPCMdata1 = (M4OSA_Int16)temp;
+                }
+            }
+
+            pPCMdata2++;
+            pPCMdata1++;
+        }
+    }
+    else
+    {
+        while( uiPCMsize-- > 0 )
+        {
+            /* mix the two samples */
+            *pPCMdata1 = (M4OSA_Int16)(*pPCMdata1 * pC->fOrigFactor * pC->fPTVolLevel
+                + *pPCMdata2 * pC->fAddedFactor * pC->fBTVolLevel );
+
+            pPCMdata1++;
+            pPCMdata2++;
+        }
+    }
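+
+    /*
+    * Illustrative note (not from the original sources): with fOrigFactor and fAddedFactor
+    * both at 0.5 and both volume levels at 1.0, the formula above reduces to a plain
+    * average of the primary-track and background-track samples.
+    */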
+
+    /* Update pC->pSsrcBufferOut buffer */
+
+    if( M4OSA_TRUE == pC->b_SSRCneeded || pC->ChannelConversion > 0 )
+    {
+        tempPosBuffer = pC->pSsrcBufferOut
+            + pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+        M4OSA_memmove(pC->pSsrcBufferOut, tempPosBuffer,
+            pC->pPosInSsrcBufferOut - tempPosBuffer);
+        pC->pPosInSsrcBufferOut -=
+            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+    }
+    else if( pC->pAddedClipCtxt->pSettings->ClipProperties.AudioStreamType
+        == M4VIDEOEDITING_kMP3 )
+    {
+        tempPosBuffer = pC->pSsrcBufferIn
+            + pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+        M4OSA_memmove(pC->pSsrcBufferIn, tempPosBuffer,
+            pC->pPosInSsrcBufferIn - tempPosBuffer);
+        pC->pPosInSsrcBufferIn -=
+            pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+    }
+
+    /* [Mono] or [Stereo interleaved] : all is in one buffer */
+    pEncInBuffer.pTableBuffer[0] =
+        pC->pInputClipCtxt->AudioDecBufferOut.m_dataAddress;
+    pEncInBuffer.pTableBufferSize[0] =
+        pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+    pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+    pEncInBuffer.pTableBufferSize[1] = 0;
+
+    /* Time in ms from data size, because it is PCM16 samples */
+    frameTimeDelta =
+        pEncInBuffer.pTableBufferSize[0] / sizeof(short) / pC->ewc.uiNbChannels;
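+    /*
+    * Illustrative arithmetic (hedged, assuming the writer audio timescale equals the
+    * sampling rate): 2048 bytes of stereo PCM16 give 2048 / sizeof(short) / 2 = 512
+    * samples per channel, i.e. 32 ms at 16 kHz.
+    */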
+
+    /**
+    * Prepare output buffer */
+    pEncOutBuffer.pTableBuffer[0] =
+        (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+    pEncOutBuffer.pTableBufferSize[0] = 0;
+
+    M4OSA_TRACE2_0("K **** blend AUs");
+
+    /**
+    * Encode the PCM audio */
+    err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(pC->ewc.pAudioEncCtxt,
+        &pEncInBuffer, &pEncOutBuffer);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingDoMixing(): pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Set AU cts and size */
+    pC->ewc.WriterAudioAU.size =
+        pEncOutBuffer.pTableBufferSize[0]; /**< Get the size of encoded data */
+    pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
+
+    /**
+    * Write the AU */
+    M4OSA_TRACE2_2("L ---- write : cts  = %ld [ 0x%x ]",
+        (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+        pC->ewc.WriterAudioAU.size);
+
+    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_AUDIO_STREAM_ID, &pC->ewc.WriterAudioAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingDoMixing: pWriterDataFcts->pProcessAU returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Increment the audio CTS for the next step */
+    pC->ewc.dATo += frameTimeDelta / pC->ewc.scale_audio;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAudioMixingDoMixing(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR  M4VSS3GPP_intAudioMixingTransition(M4VSS3GPP_InternalAudioMixingContext *pC)
+ * @brief    Decode/encode a few AU backward to initiate the encoder for later Mix segment.
+ * @note
+ * @param    pC    (IN) VSS audio mixing internal context
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingTransition(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+
+    M4ENCODER_AudioBuffer pEncInBuffer;  /**< Encoder input buffer for api */
+    M4ENCODER_AudioBuffer pEncOutBuffer; /**< Encoder output buffer for api */
+    M4OSA_Time
+        frameTimeDelta = 0; /**< Duration of the encoded (then written) data */
+
+    M4OSA_Int32 iTargetCts, iCurrentCts;
+
+    /**
+    * 'BZZZ' bug fix:
+    * add a silence frame */
+    err = M4VSS3GPP_intAudioMixingWriteSilence(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingTransition():\
+            M4VSS3GPP_intAudioMixingWriteSilence returns 0x%x",
+            err);
+        return err;
+    }
+
+    iCurrentCts = (M4OSA_Int32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+    /* Do not do pre-encode step if there is no mixing (remove, 100 %, or not editable) */
+    if( M4OSA_FALSE == pC->bAudioMixingIsNeeded )
+    {
+        /**
+        * Advance in the original audio stream to reach the current time
+        * (We don't want iAudioCTS to be modified by the jump function,
+        * so we have to use a local variable). */
+        err = M4VSS3GPP_intClipJumpAudioAt(pC->pInputClipCtxt, &iCurrentCts);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1("M4VSS3GPP_intAudioMixingTransition:\
+             M4VSS3GPP_intClipJumpAudioAt() returns 0x%x!", err);
+            return err;
+        }
+    }
+    else
+    {
+        /**< don't try to pre-decode if clip is at its beginning... */
+        if( iCurrentCts > 0 )
+        {
+            /**
+            * Get the output AU to write into */
+            err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                &pC->ewc.WriterAudioAU);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingTransition:\
+                    pWriterDataFcts->pStartAU(audio) returns 0x%x!",
+                    err);
+                return err;
+            }
+
+            /**
+            * Jump a few AUs backward */
+            iTargetCts = iCurrentCts - M4VSS3GPP_NB_AU_PREFETCH
+                * pC->ewc.iSilenceFrameDuration;
+
+            if( iTargetCts < 0 )
+            {
+                iTargetCts = 0; /**< Sanity check */
+            }
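+            /*
+            * Illustrative note (values assumed for illustration): if M4VSS3GPP_NB_AU_PREFETCH
+            * were 2 and the silence frame duration 20 ms in the writer timescale, the target
+            * would be 40 ms before the current position, so a couple of AUs get decoded and
+            * re-encoded below purely to warm up the audio encoder.
+            */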
+
+            err = M4VSS3GPP_intClipJumpAudioAt(pC->pInputClipCtxt, &iTargetCts);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingTransition: DECODE_ENCODE-prefetch:\
+                    M4VSS3GPP_intClipJumpAudioAt returns 0x%x!",
+                    err);
+                return err;
+            }
+
+            /**
+            * Decode/encode up to the wanted position */
+            while( pC->pInputClipCtxt->iAudioFrameCts < iCurrentCts )
+            {
+                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pInputClipCtxt);
+
+                M4OSA_TRACE2_3("M .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                    pC->pInputClipCtxt->iAudioFrameCts
+                    / pC->pInputClipCtxt->scale_audio,
+                    pC->pInputClipCtxt->iAoffset
+                    / pC->pInputClipCtxt->scale_audio,
+                    pC->pInputClipCtxt->uiAudioFrameSize);
+
+                if( M4OSA_ERR_IS_ERROR(err) )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingTransition: DECODE_ENCODE-prefetch:\
+                        M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(
+                    pC->pInputClipCtxt);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingTransition: DECODE_ENCODE-prefetch:\
+                        M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /* [Mono] or [Stereo interleaved] : all is in one buffer */
+                pEncInBuffer.pTableBuffer[0] =
+                    pC->pInputClipCtxt->AudioDecBufferOut.m_dataAddress;
+                pEncInBuffer.pTableBufferSize[0] =
+                    pC->pInputClipCtxt->AudioDecBufferOut.m_bufferSize;
+                pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+                pEncInBuffer.pTableBufferSize[1] = 0;
+
+                /* Time in ms from data size, because it is PCM16 samples */
+                frameTimeDelta =
+                    pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+                    / pC->ewc.uiNbChannels;
+
+                /**
+                * Prepare output buffer */
+                pEncOutBuffer.pTableBuffer[0] =
+                    (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+                pEncOutBuffer.pTableBufferSize[0] = 0;
+
+                M4OSA_TRACE2_0("N **** pre-encode");
+
+                /**
+                * Encode the PCM audio */
+                err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+                    pC->ewc.pAudioEncCtxt, &pEncInBuffer, &pEncOutBuffer);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingTransition():\
+                        pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                        err);
+                    return err;
+                }
+            }
+
+            /**
+            * Set AU cts and size */
+            pC->ewc.WriterAudioAU.size =
+                pEncOutBuffer.pTableBufferSize[0]; /**< Get the size of encoded data */
+            pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
+
+            /**
+            * Write the AU */
+            M4OSA_TRACE2_2("O ---- write : cts  = %ld [ 0x%x ]",
+                (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+                pC->ewc.WriterAudioAU.size);
+
+            err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+                pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                &pC->ewc.WriterAudioAU);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingTransition:\
+                    pWriterDataFcts->pProcessAU returns 0x%x!", err);
+                return err;
+            }
+
+            /**
+            * Increment the audio CTS for the next step */
+            pC->ewc.dATo += pC->ewc.iSilenceFrameDuration / pC->ewc.scale_audio;
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCreateVideoEncoder()
+ * @brief    Creates the video encoder
+ * @note
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingCreateVideoEncoder(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err;
+    M4ENCODER_AdvancedParams EncParams;
+
+    /**
+    * Simulate a writer interface with our specific function */
+    pC->ewc.OurWriterDataInterface.pProcessAU =
+        M4VSS3GPP_intProcessAU; /**< This function is VSS 3GPP specific,
+                                but it follows the writer interface */
+    pC->ewc.OurWriterDataInterface.pStartAU =
+        M4VSS3GPP_intStartAU; /**< This function is VSS 3GPP specific,
+                              but it follows the writer interface */
+    pC->ewc.OurWriterDataInterface.pWriterContext =
+        (M4WRITER_Context)
+        pC; /**< We give the internal context as writer context */
+
+    /**
+    * Get the encoder interface, if not already done */
+    if( M4OSA_NULL == pC->ShellAPI.pVideoEncoderGlobalFcts )
+    {
+        err = M4VSS3GPP_setCurrentVideoEncoder(&pC->ShellAPI,
+            pC->ewc.VideoStreamType);
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingCreateVideoEncoder: setCurrentEncoder returns 0x%x",
+            err);
+        M4ERR_CHECK_RETURN(err);
+    }
+
+    /**
+    * Set encoder shell parameters according to VSS settings */
+
+    /* Common parameters */
+    EncParams.InputFormat = M4ENCODER_kIYUV420;
+    EncParams.FrameWidth = pC->ewc.uiVideoWidth;
+    EncParams.FrameHeight = pC->ewc.uiVideoHeight;
+    EncParams.uiTimeScale = pC->ewc.uiVideoTimeScale;
+
+    /* No strict regulation in video editor */
+    /* Because of the effects and transitions we should allow more flexibility */
+    /* It also prevents dropping important frames
+       (which would hurt scheduling and cause block effects) */
+    EncParams.bInternalRegulation = M4OSA_FALSE;
+    EncParams.FrameRate = M4ENCODER_kVARIABLE_FPS;
+
+    /**
+    * Other encoder settings (defaults) */
+    EncParams.uiHorizontalSearchRange = 0;     /* use default */
+    EncParams.uiVerticalSearchRange = 0;       /* use default */
+    EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
+    EncParams.uiIVopPeriod = 0;                /* use default */
+    EncParams.uiMotionEstimationTools = 0;     /* M4V_MOTION_EST_TOOLS_ALL */
+    EncParams.bAcPrediction = M4OSA_TRUE;      /* use AC prediction */
+    EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
+    EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */
+
+    switch( pC->ewc.VideoStreamType )
+    {
+        case M4SYS_kH263:
+
+            EncParams.Format = M4ENCODER_kH263;
+
+            EncParams.uiStartingQuantizerValue = 10;
+            EncParams.uiRateFactor = 1; /* default */
+
+            EncParams.bErrorResilience = M4OSA_FALSE;
+            EncParams.bDataPartitioning = M4OSA_FALSE;
+            break;
+
+        case M4SYS_kMPEG_4:
+
+            EncParams.Format = M4ENCODER_kMPEG4;
+
+            EncParams.uiStartingQuantizerValue = 8;
+            EncParams.uiRateFactor = 1;
+
+            if( M4OSA_FALSE == pC->ewc.bVideoDataPartitioning )
+            {
+                EncParams.bErrorResilience = M4OSA_FALSE;
+                EncParams.bDataPartitioning = M4OSA_FALSE;
+            }
+            else
+            {
+                EncParams.bErrorResilience = M4OSA_TRUE;
+                EncParams.bDataPartitioning = M4OSA_TRUE;
+            }
+            break;
+
+        case M4SYS_kH264:
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intAudioMixingCreateVideoEncoder: M4SYS_H264");
+
+            EncParams.Format = M4ENCODER_kH264;
+
+            EncParams.uiStartingQuantizerValue = 10;
+            EncParams.uiRateFactor = 1; /* default */
+
+            EncParams.bErrorResilience = M4OSA_FALSE;
+            EncParams.bDataPartitioning = M4OSA_FALSE;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCreateVideoEncoder: Unknown videoStreamType 0x%x",
+                pC->ewc.VideoStreamType);
+            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
+    }
+
+    /* In case of EMP we overwrite certain parameters */
+    if( M4OSA_TRUE == pC->ewc.bActivateEmp )
+    {
+        EncParams.uiHorizontalSearchRange = 15;    /* set value */
+        EncParams.uiVerticalSearchRange = 15;      /* set value */
+        EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
+        EncParams.uiIVopPeriod = 15; /* one I frame every 15 frames */
+        EncParams.uiMotionEstimationTools = 1; /* M4V_MOTION_EST_TOOLS_NO_4MV */
+        EncParams.bAcPrediction = M4OSA_FALSE;     /* no AC prediction */
+        EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
+        EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */
+    }
+
+    EncParams.Bitrate =
+        pC->pInputClipCtxt->pSettings->ClipProperties.uiVideoBitrate;
+
+    M4OSA_TRACE1_0(
+        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: calling encoder pFctInit");
+    /**
+    * Init the video encoder (advanced settings version of the encoder Open function) */
+    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctInit(&pC->ewc.pEncContext,
+        &pC->ewc.OurWriterDataInterface, M4VSS3GPP_intVPP, pC,
+        pC->ShellAPI.pCurrentVideoEncoderExternalAPI,
+        pC->ShellAPI.pCurrentVideoEncoderUserData);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingCreateVideoEncoder:\
+            pVideoEncoderGlobalFcts->pFctInit returns 0x%x",
+            err);
+        return err;
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
+    M4OSA_TRACE1_0(
+        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: calling encoder pFctOpen");
+
+    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctOpen(pC->ewc.pEncContext,
+        &pC->ewc.WriterVideoAU, &EncParams);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intAudioMixingCreateVideoEncoder:\
+            pVideoEncoderGlobalFcts->pFctOpen returns 0x%x",
+            err);
+        return err;
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
+    M4OSA_TRACE1_0(
+        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: calling encoder pFctStart");
+
+    if( M4OSA_NULL != pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart )
+    {
+        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart(
+            pC->ewc.pEncContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingCreateVideoEncoder:\
+                pVideoEncoderGlobalFcts->pFctStart returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderRunning;
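+    /*
+    * Note: the encoder lifecycle mirrored by encoderState is pFctInit -> kEncoderClosed,
+    * pFctOpen -> kEncoderStopped, pFctStart -> kEncoderRunning; the destroy function
+    * below walks the same states back down.
+    */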
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intAudioMixingCreateVideoEncoder: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingDestroyVideoEncoder()
+ * @brief    Destroy the video encoder
+ * @note
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioMixingDestroyVideoEncoder(
+    M4VSS3GPP_InternalAudioMixingContext *pC )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    if( M4OSA_NULL != pC->ewc.pEncContext )
+    {
+        if( M4VSS3GPP_kEncoderRunning == pC->ewc.encoderState )
+        {
+            if( pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
+            {
+                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop(
+                    pC->ewc.pEncContext);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intAudioMixingDestroyVideoEncoder:\
+                        pVideoEncoderGlobalFcts->pFctStop returns 0x%x",
+                        err);
+                }
+            }
+
+            pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
+        }
+
+        /* Has the encoder actually been opened? Don't close it if that's not the case. */
+        if( M4VSS3GPP_kEncoderStopped == pC->ewc.encoderState )
+        {
+            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctClose(
+                pC->ewc.pEncContext);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intAudioMixingDestroyVideoEncoder:\
+                    pVideoEncoderGlobalFcts->pFctClose returns 0x%x",
+                    err);
+            }
+
+            pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
+        }
+
+        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctCleanup(
+            pC->ewc.pEncContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioMixingDestroyVideoEncoder:\
+                pVideoEncoderGlobalFcts->pFctCleanup returns 0x%x!",
+                err);
+            /**< We do not return the error here because we still have stuff to free */
+        }
+
+        pC->ewc.encoderState = M4VSS3GPP_kNoEncoder;
+        /**
+        * Reset variable */
+        pC->ewc.pEncContext = M4OSA_NULL;
+    }
+
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_intAudioMixingDestroyVideoEncoder: returning 0x%x", err);
+    return err;
+}
+
+M4OSA_Bool M4VSS3GPP_isThresholdBreached( M4OSA_Int32 *averageValue,
+                                         M4OSA_Int32 storeCount, M4OSA_Int32 thresholdValue )
+{
+    M4OSA_Bool result = 0;
+    int i;
+    int finalValue = 0;
+
+    for ( i = 0; i < storeCount; i++ )
+        finalValue += averageValue[i];
+
+    finalValue = finalValue / storeCount;
+
+
+    if( finalValue > thresholdValue )
+        result = M4OSA_TRUE;
+    else
+        result = M4OSA_FALSE;
+
+    return result;
+}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_Clip.c b/libvideoeditor/vss/src/M4VSS3GPP_Clip.c
new file mode 100755
index 0000000..0a3b737
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_Clip.c
@@ -0,0 +1,2035 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_Clip.c
+ * @brief    Implementation of functions related to input clip management.
+ * @note    All functions in this file are static, i.e. non public
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ *    Our headers */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+
+/**
+ *    OSAL headers */
+#include "M4OSA_Memory.h" /* OSAL memory management */
+#include "M4OSA_Debug.h"  /* OSAL debug management */
+
+
+/**
+ * Common headers (for aac) */
+#include "M4_Common.h"
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#include "M4VD_EXTERNAL_Interface.h"
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+/* OSAL header file */
+#include "M4OSA_CharStar.h"
+
+/**
+ ******************************************************************************
+ * define    Static function prototypes
+ ******************************************************************************
+ */
+
+static M4OSA_ERR M4VSS3GPP_intClipPrepareAudioDecoder(
+    M4VSS3GPP_ClipContext *pClipCtxt );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipOpen()
+ * @brief    Open a clip. Creates a clip context.
+ * @note
+ * @param   hClipCtxt            (OUT) Return the internal clip context
+ * @param   pClipSettings        (IN) Edit settings of this clip. The module will keep a
+ *                               reference to this pointer
+ * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
+ * @param    bSkipAudioTrack        (IN) If true, do not open the audio track
+ * @param    bFastOpenMode        (IN) If true, use the fast mode of the 3gpp reader
+ *                             (only the first AU is read)
+ * @return    M4NO_ERROR:                No error
+ * @return    M4ERR_ALLOC:            There is no more available memory
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4VSS3GPP_intClipInit( M4VSS3GPP_ClipContext ** hClipCtxt,
+                                M4OSA_FileReadPointer *pFileReadPtrFct )
+{
+    M4VSS3GPP_ClipContext *pClipCtxt;
+    M4OSA_ERR err;
+
+    M4OSA_DEBUG_IF2((M4OSA_NULL == hClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipInit: hClipCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipInit: pFileReadPtrFct is M4OSA_NULL");
+
+    /**
+    * Allocate the clip context */
+    *hClipCtxt =
+        (M4VSS3GPP_ClipContext *)M4OSA_malloc(sizeof(M4VSS3GPP_ClipContext),
+        M4VSS3GPP, (M4OSA_Char *)"M4VSS3GPP_ClipContext");
+
+    if( M4OSA_NULL == *hClipCtxt )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intClipInit(): unable to allocate M4VSS3GPP_ClipContext,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+    M4OSA_TRACE3_1("M4VSS3GPP_intClipInit(): clipCtxt=0x%x", *hClipCtxt);
+
+
+    /**
+    * Use this shortcut to simplify the code */
+    pClipCtxt = *hClipCtxt;
+
+    /* Initialization of context variables */
+    M4OSA_memset((M4OSA_MemAddr8)pClipCtxt, sizeof(M4VSS3GPP_ClipContext), 0);
+
+    pClipCtxt->pSettings = M4OSA_NULL;
+
+    /**
+    * Init the clip context */
+    pClipCtxt->Vstatus = M4VSS3GPP_kClipStatus_READ;
+    pClipCtxt->Astatus = M4VSS3GPP_kClipStatus_READ;
+
+    pClipCtxt->pReaderContext = M4OSA_NULL;
+    pClipCtxt->pVideoStream = M4OSA_NULL;
+    pClipCtxt->pAudioStream = M4OSA_NULL;
+    pClipCtxt->VideoAU.m_dataAddress = M4OSA_NULL;
+    pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
+
+    pClipCtxt->pViDecCtxt = M4OSA_NULL;
+    pClipCtxt->bVideoAuAvailable = M4OSA_FALSE;
+    pClipCtxt->bFirstAuWritten = M4OSA_FALSE;
+
+    pClipCtxt->bMpeg4GovState = M4OSA_FALSE;
+
+    pClipCtxt->bAudioFrameAvailable = M4OSA_FALSE;
+    pClipCtxt->pAudioFramePtr = M4OSA_NULL;
+    pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+
+    pClipCtxt->pFileReadPtrFct = pFileReadPtrFct;
+
+    /*
+    * Reset pointers for media and codecs interfaces */
+    err = M4VSS3GPP_clearInterfaceTables(&pClipCtxt->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /*
+    *  Call the media and codecs subscription module */
+    err = M4VSS3GPP_subscribeMediaAndCodec(&pClipCtxt->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    return M4NO_ERROR;
+}
+
+/* Note: if the clip is opened in fast mode, it can only be used for analysis and nothing else. */
+M4OSA_ERR M4VSS3GPP_intClipOpen( M4VSS3GPP_ClipContext *pClipCtxt,
+                                M4VSS3GPP_ClipSettings *pClipSettings, M4OSA_Bool bSkipAudioTrack,
+                                M4OSA_Bool bFastOpenMode, M4OSA_Bool bAvoidOpeningVideoDec )
+{
+    M4OSA_ERR err;
+    M4READER_MediaFamily mediaFamily;
+    M4_StreamHandler *pStreamHandler;
+    M4OSA_Int32 iDuration;
+    M4OSA_Void *decoderUserData;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    M4DECODER_MPEG4_DecoderConfigInfo dummy;
+    M4DECODER_VideoSize videoSizeFromDSI;
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    M4DECODER_OutputFilter FilterOption;
+    M4OSA_Char pTempFile[100];
+
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipOpen: pClipCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipOpen: pClipSettings is M4OSA_NULL");
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_intClipOpen: called with pClipCtxt: 0x%x, bAvoidOpeningVideoDec=0x%x",
+        pClipCtxt, bAvoidOpeningVideoDec);
+    /**
+    * Keep a pointer to the clip settings. Remember that we don't possess it! */
+    pClipCtxt->pSettings = pClipSettings;
+
+    /**
+    * Get the correct reader interface */
+    err = M4VSS3GPP_setCurrentReader(&pClipCtxt->ShellAPI,
+        pClipCtxt->pSettings->FileType);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Init the 3GPP or MP3 reader */
+    err =
+        pClipCtxt->ShellAPI.m_pReader->m_pFctCreate(&pClipCtxt->pReaderContext);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctCreate returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Link the reader interface to the reader context (used by the decoder to know the reader) */
+    pClipCtxt->ShellAPI.m_pReaderDataIt->m_readerContext =
+        pClipCtxt->pReaderContext;
+
+    /**
+    * Set the OSAL read function set */
+    err = pClipCtxt->ShellAPI.m_pReader->m_pFctSetOption(
+        pClipCtxt->pReaderContext,
+        M4READER_kOptionID_SetOsaFileReaderFctsPtr,
+        (M4OSA_DataOption)(pClipCtxt->pFileReadPtrFct));
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctSetOption returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Set the fast open mode if asked (3GPP only) */
+    if( M4VIDEOEDITING_kFileType_3GPP == pClipCtxt->pSettings->FileType )
+    {
+        if( M4OSA_TRUE == bFastOpenMode )
+        {
+            err = pClipCtxt->ShellAPI.m_pReader->m_pFctSetOption(
+                pClipCtxt->pReaderContext,
+                M4READER_3GP_kOptionID_FastOpenMode, M4OSA_NULL);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipOpen():\
+                    m_pReader->m_pFctSetOption(FastOpenMode) returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+
+        /**
+        * Set the skip audio option if asked */
+        if( M4OSA_TRUE == bSkipAudioTrack )
+        {
+            err = pClipCtxt->ShellAPI.m_pReader->m_pFctSetOption(
+                pClipCtxt->pReaderContext,
+                M4READER_3GP_kOptionID_VideoOnly, M4OSA_NULL);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctSetOption(VideoOnly) returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+    }
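+
+    /*
+    * For PCM background tracks, the reader is handed a raw file whose name encodes the
+    * sampling frequency and the channel count. Illustrative example (hypothetical file
+    * name, not from the original sources): a 16 kHz mono clip named "bg" would be opened
+    * as "bg_16000_1.pcm" by the code below.
+    */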
+    if( pClipCtxt->pSettings->FileType == M4VIDEOEDITING_kFileType_PCM )
+    {
+        M4OSA_chrNCopy(pTempFile, pClipSettings->pFile,
+            M4OSA_chrLength(pClipSettings->pFile));
+
+        switch( pClipCtxt->pSettings->ClipProperties.uiSamplingFrequency )
+        {
+            case 8000:
+                M4OSA_chrNCat(pTempFile, (M4OSA_Char *)"_8000", 6);
+                break;
+            case 11025:
+                M4OSA_chrNCat(pTempFile, (M4OSA_Char *)"_11025", 6);
+                break;
+            case 12000:
+                M4OSA_chrNCat(pTempFile, (M4OSA_Char *)"_12000", 6);
+                break;
+            case 16000:
+                M4OSA_chrNCat(pTempFile, (M4OSA_Char *)"_16000", 6);
+                break;
+            case 22050:
+                M4OSA_chrNCat(pTempFile, (M4OSA_Char *)"_22050", 6);
+                break;
+            case 24000:
+                M4OSA_chrNCat(pTempFile, (M4OSA_Char *)"_24000", 6);
+                break;
+            case 32000:
+                M4OSA_chrNCat(pTempFile, (M4OSA_Char *)"_32000", 6);
+                break;
+            case 44100:
+                M4OSA_chrNCat(pTempFile, (M4OSA_Char *)"_44100", 6);
+                break;
+            case 48000:
+                M4OSA_chrNCat(pTempFile, (M4OSA_Char *)"_48000", 6);
+                break;
+            default:
+                M4OSA_TRACE1_1("M4VSS3GPP_intClipOpen: invalid input for BG track sampling \
+                    frequency (%d Hz), returning M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY",
+                    pClipCtxt->pSettings->ClipProperties.uiSamplingFrequency);
+                return M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY;
+        }
+
+        //M4OSA_chrNCat(pTempFile,
+        //    itoa(pClipCtxt->pSettings->ClipProperties.uiSamplingFrequency),5);
+        switch( pClipCtxt->pSettings->ClipProperties.uiNbChannels )
+        {
+            case 1:
+                M4OSA_chrNCat(pTempFile, (M4OSA_Char *)"_1.pcm", 6);
+                break;
+            case 2:
+                M4OSA_chrNCat(pTempFile, (M4OSA_Char *)"_2.pcm", 6);
+                break;
+            default:
+                M4OSA_TRACE1_1("M4VSS3GPP_intClipOpen: invalid input for BG track no.\
+                    of channels (%d), returning M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS",
+                    pClipCtxt->pSettings->ClipProperties.uiNbChannels);
+                return M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS;
+        }
+        //M4OSA_chrNCat(pTempFile,itoa(pClipCtxt->pSettings->ClipProperties.uiNbChannels),1);
+
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctOpen( pClipCtxt->pReaderContext, pTempFile);
+
+    }
+    else
+    {
+        /**
+        * Open the 3GPP/MP3 clip file */
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctOpen( pClipCtxt->pReaderContext,
+            pClipSettings->pFile);
+    }
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_UInt32 uiDummy, uiCoreId;
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctOpen returns 0x%x", err);
+
+        /**
+        * If the error is from the core reader, we change it to a public VSS3GPP error */
+        M4OSA_ERR_SPLIT(err, uiDummy, uiCoreId, uiDummy);
+
+        if( M4MP4_READER == uiCoreId )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intClipOpen(): returning M4VSS3GPP_ERR_INVALID_3GPP_FILE");
+            return M4VSS3GPP_ERR_INVALID_3GPP_FILE;
+        }
+        return err;
+    }
+
+    /**
+    * Get the audio and video streams */
+    while( err == M4NO_ERROR )
+    {
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctGetNextStream(
+            pClipCtxt->pReaderContext, &mediaFamily, &pStreamHandler);
+
+        /*in case we found a BIFS stream or something else...*/
+        if( ( err == ((M4OSA_UInt32)M4ERR_READER_UNKNOWN_STREAM_TYPE))
+            || (err == ((M4OSA_UInt32)M4WAR_TOO_MUCH_STREAMS)) )
+        {
+            err = M4NO_ERROR;
+            continue;
+        }
+
+        if( M4NO_ERROR == err ) /**< One stream found */
+        {
+            /**
+            * Found a video stream */
+            if( ( mediaFamily == M4READER_kMediaFamilyVideo)
+                && (M4OSA_NULL == pClipCtxt->pVideoStream) )
+            {
+                if( ( M4DA_StreamTypeVideoH263 == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeVideoMpeg4
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeVideoMpeg4Avc
+                    == pStreamHandler->m_streamType) )
+                {
+                    M4OSA_TRACE3_1(
+                        "M4VSS3GPP_intClipOpen():\
+                        Found a H263 or MPEG-4 or H264 video stream in input 3gpp clip; %d",
+                        pStreamHandler->m_streamType);
+
+                    /**
+                    * Keep pointer to the video stream */
+                    pClipCtxt->pVideoStream =
+                        (M4_VideoStreamHandler *)pStreamHandler;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+
+                    /**
+                    * Reset the stream reader */
+                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctReset(
+                        pClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pClipCtxt->pVideoStream);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctReset(video) returns 0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Initializes an access Unit */
+                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctFillAuStruct(
+                        pClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pClipCtxt->pVideoStream,
+                        &pClipCtxt->VideoAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipOpen():\
+                            m_pReader->m_pFctFillAuStruct(video) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+                else /**< Not H263, MPEG-4, or H264 */
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS_editClipOpen():\
+                        Found an unsupported video stream (0x%x) in input 3gpp clip",
+                        pStreamHandler->m_streamType);
+
+                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+                }
+            }
+            /**
+            * Found an audio stream */
+            else if( ( mediaFamily == M4READER_kMediaFamilyAudio)
+                && (M4OSA_NULL == pClipCtxt->pAudioStream) )
+            {
+                if( ( M4DA_StreamTypeAudioAmrNarrowBand
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioAac == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioMp3
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioEvrc
+                    == pStreamHandler->m_streamType)
+                    || (M4DA_StreamTypeAudioPcm
+                    == pStreamHandler->m_streamType) )
+                {
+                    M4OSA_TRACE3_1(
+                        "M4VSS3GPP_intClipOpen(): \
+                        Found a supported audio stream (AMR-NB, AAC, MP3, EVRC or PCM) in input clip; %d",
+                        pStreamHandler->m_streamType);
+
+                    /**
+                    * Keep pointer to the audio stream */
+                    pClipCtxt->pAudioStream =
+                        (M4_AudioStreamHandler *)pStreamHandler;
+                    pStreamHandler->m_bStreamIsOK = M4OSA_TRUE;
+
+                    /**
+                    * Reset the stream reader */
+                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctReset(
+                        pClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pClipCtxt->pAudioStream);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctReset(audio) returns 0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Initializes an access Unit */
+                    err = pClipCtxt->ShellAPI.m_pReader->m_pFctFillAuStruct(
+                        pClipCtxt->pReaderContext,
+                        (M4_StreamHandler *)pClipCtxt->pAudioStream,
+                        &pClipCtxt->AudioAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipOpen():\
+                            m_pReader->m_pFctFillAuStruct(audio) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+                else /**< Not AMR-NB, AAC, MP3, EVRC or PCM (e.g. AMR-WB): unsupported audio format */
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intClipOpen():\
+                        Found an unsupported audio stream (0x%x) in input 3gpp/mp3 clip",
+                        pStreamHandler->m_streamType);
+
+                    pStreamHandler->m_bStreamIsOK = M4OSA_FALSE;
+                }
+            }
+        }
+        else if( M4OSA_ERR_IS_ERROR(err) )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipOpen(): m_pReader->m_pFctGetNextStream() returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Init Video decoder */
+    if( M4OSA_NULL != pClipCtxt->pVideoStream )
+    {
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+  /* When external decoders are enabled, avoid opening the decoder if the clip is only going
+  to be used for analysis: the analysis does not use the decoder in that case, and at this
+  point there may be no decoder registered yet, or a HW decoder may be present that we do not
+  want to open just for analysis. See the comments in intBuildAnalysis for more details. */
+
+  /* CHANGEME Temporarily only do this for MPEG4, since for now only MPEG4 external decoders
+  are supported, and the code below would not work for H263; otherwise a build where external
+  decoders are possible but not used would break H263 clips. */
+
+        if( bAvoidOpeningVideoDec && M4DA_StreamTypeVideoMpeg4
+            == pClipCtxt->pVideoStream->m_basicProperties.m_streamType )
+        {
+            /* Oops! The mere act of opening the decoder also results in the image size being
+            filled in the video stream! Compensate for this by using ParseVideoDSI to fill
+            this info. */
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipOpen: Mpeg4 stream; vid dec not started");
+            err = M4DECODER_EXTERNAL_ParseVideoDSI(pClipCtxt->pVideoStream->
+                m_basicProperties.m_pDecoderSpecificInfo,
+                pClipCtxt->pVideoStream->
+                m_basicProperties.m_decoderSpecificInfoSize,
+                &dummy, &videoSizeFromDSI);
+
+            pClipCtxt->pVideoStream->m_videoWidth = videoSizeFromDSI.m_uiWidth;
+            pClipCtxt->pVideoStream->m_videoHeight =
+                videoSizeFromDSI.m_uiHeight;
+        }
+        else
+        {
+
+#endif
+
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipOpen: Mp4/H263/H264 stream; set current vid dec");
+            err = M4VSS3GPP_setCurrentVideoDecoder(&pClipCtxt->ShellAPI,
+                pClipCtxt->pVideoStream->m_basicProperties.m_streamType);
+            M4ERR_CHECK_RETURN(err);
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+            decoderUserData =
+                pClipCtxt->ShellAPI.m_pCurrentVideoDecoderUserData;
+
+#else
+
+            decoderUserData = M4OSA_NULL;
+
+#endif
+
+            err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctCreate(
+                &pClipCtxt->pViDecCtxt,
+                &pClipCtxt->pVideoStream->m_basicProperties,
+                pClipCtxt->ShellAPI.m_pReaderDataIt,
+                &pClipCtxt->VideoAU, decoderUserData);
+
+            if( ( ((M4OSA_UInt32)M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED) == err)
+                || (((M4OSA_UInt32)M4ERR_DECODER_H263_NOT_BASELINE) == err) )
+            {
+                /**
+                * Our decoder is not compatible with H263 profiles other than 0,
+                * so it returns this internal error code.
+                * We translate it to our own error code */
+                return M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED;
+            }
+            else if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipOpen: m_pVideoDecoder->m_pFctCreate returns 0x%x",
+                    err);
+                return err;
+            }
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intClipOpen: Vid dec started; pViDecCtxt=0x%x",
+                pClipCtxt->pViDecCtxt);
+
+            if( M4DA_StreamTypeVideoMpeg4Avc
+                == pClipCtxt->pVideoStream->m_basicProperties.m_streamType )
+            {
+                FilterOption.m_pFilterFunction =
+                    (M4OSA_Void *) &M4VIFI_ResizeBilinearYUV420toYUV420;
+                FilterOption.m_pFilterUserData = M4OSA_NULL;
+                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctSetOption(
+                    pClipCtxt->pViDecCtxt, M4DECODER_kOptionID_OutputFilter,
+                    (M4OSA_DataOption) &FilterOption);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intClipOpen: m_pVideoDecoder->m_pFctSetOption returns 0x%x",
+                        err);
+                    return err;
+                }
+                else
+                {
+                    M4OSA_TRACE3_0(
+                        "M4VSS3GPP_intClipOpen: m_pVideoDecoder->m_pFctSetOption\
+                        M4DECODER_kOptionID_OutputFilter OK");
+                }
+            }
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+        }
+
+#endif
+
+    }
+
+    /**
+    * Init Audio decoder */
+    if( M4OSA_NULL != pClipCtxt->pAudioStream )
+    {
+        err = M4VSS3GPP_intClipPrepareAudioDecoder(pClipCtxt);
+        M4ERR_CHECK_RETURN(err);
+        M4OSA_TRACE3_1("M4VSS3GPP_intClipOpen: Audio dec started; context=0x%x",
+            pClipCtxt->pAudioDecCtxt);
+    }
+    else
+    {
+        pClipCtxt->AudioAU.m_streamID = 0;
+        pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
+        pClipCtxt->AudioAU.m_size = 0;
+        pClipCtxt->AudioAU.m_CTS = 0;
+        pClipCtxt->AudioAU.m_DTS = 0;
+        pClipCtxt->AudioAU.m_attribute = 0;
+        pClipCtxt->AudioAU.m_maxsize = 0;
+        pClipCtxt->AudioAU.m_structSize = sizeof(pClipCtxt->AudioAU);
+    }
+
+    /**
+    * Get the duration of the longest stream */
+    if( M4OSA_TRUE == pClipCtxt->pSettings->ClipProperties.bAnalysed )
+    {
+        /* If already computed, reuse the previous value: fast open and full open can
+           return different durations, which could mismatch the user settings */
+        /* The video track takes precedence over the audio track (if the video track is
+           shorter than the audio track, using the audio duration could lead to a cut
+           larger than expected) */
+        iDuration = pClipCtxt->pSettings->ClipProperties.uiClipVideoDuration;
+
+        if( iDuration == 0 )
+        {
+            iDuration = pClipCtxt->pSettings->ClipProperties.uiClipDuration;
+        }
+    }
+    else
+    {
+        /* Else compute it from streams */
+        iDuration = 0;
+
+        if( M4OSA_NULL != pClipCtxt->pVideoStream )
+        {
+            iDuration = (M4OSA_Int32)(
+                pClipCtxt->pVideoStream->m_basicProperties.m_duration);
+        }
+
+        if( ( M4OSA_NULL != pClipCtxt->pAudioStream) && ((M4OSA_Int32)(
+            pClipCtxt->pAudioStream->m_basicProperties.m_duration)
+            > iDuration) && iDuration == 0 )
+        {
+            iDuration = (M4OSA_Int32)(
+                pClipCtxt->pAudioStream->m_basicProperties.m_duration);
+        }
+    }
+
+    /**
+    * If end time is not used, we set it to the video track duration */
+    if( 0 == pClipCtxt->pSettings->uiEndCutTime )
+    {
+        pClipCtxt->pSettings->uiEndCutTime = (M4OSA_UInt32)iDuration;
+    }
+
+    pClipCtxt->iEndTime = pClipCtxt->pSettings->uiEndCutTime;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intClipOpen(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack()
+ * @brief    Delete the audio track. Clip will be like if it had no audio track
+ * @note
+ * @param   pClipCtxt            (IN) Internal clip context
+ ******************************************************************************
+ */
+M4OSA_Void M4VSS3GPP_intClipDeleteAudioTrack( M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    /**
+    * We do not need to free the audio stream itself: it will be freed by the reader when it is closed */
+    pClipCtxt->pAudioStream = M4OSA_NULL;
+
+    /**
+    * We will return a constant silence AMR AU.
+    * We set it here once, instead of at each read audio step. */
+    pClipCtxt->pAudioFramePtr = (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
+    pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
+
+    /**
+    * Free the decoded audio buffer (it will be re-allocated later, if needed, to hold the
+      silence frame) */
+    if( M4OSA_NULL != pClipCtxt->AudioDecBufferOut.m_dataAddress )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pClipCtxt->AudioDecBufferOut.m_dataAddress);
+        pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+    }
+
+    return;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCts()
+ * @brief    Jump to the previous RAP and decode up to the given video CTS
+ * @param   pClipCtxt    (IN) Internal clip context
+ * @param   iCts        (IN) Target CTS
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipDecodeVideoUpToCts( M4VSS3GPP_ClipContext *pClipCtxt,
+                                              M4OSA_Int32 iCts )
+{
+    M4OSA_Int32 iRapCts, iClipCts;
+    M4_MediaTime dDecodeTime;
+    M4OSA_Bool bClipJump = M4OSA_FALSE;
+    M4OSA_ERR err;
+
+    /**
+    * Compute the time in the clip base */
+    iClipCts = iCts - pClipCtxt->iVoffset;
+
+    /**
+    * If we were reading the clip, we must jump to the previous RAP
+    * to decode from that point. */
+    if( M4VSS3GPP_kClipStatus_READ == pClipCtxt->Vstatus )
+    {
+        /**
+        * Jump to the previous RAP in the clip (first get the time, then jump) */
+        iRapCts = iClipCts;
+
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctGetPrevRapTime(
+            pClipCtxt->pReaderContext,
+            (M4_StreamHandler *)pClipCtxt->pVideoStream, &iRapCts);
+
+        if( M4WAR_READER_INFORMATION_NOT_PRESENT == err )
+        {
+            /* No RAP table, jump backward and predecode */
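+            /* (M4VSS3GPP_NO_STSS_JUMP_POINT is a fixed backward offset, in ms; the assumption
+               is that jumping that far back is enough to pre-decode a clean picture before the
+               target time) */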
+            iRapCts = iClipCts - M4VSS3GPP_NO_STSS_JUMP_POINT;
+
+            if( iRapCts < 0 )
+                iRapCts = 0;
+        }
+        else if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipDecodeVideoUpToCts: m_pFctGetPrevRapTime returns 0x%x!",
+                err);
+            return err;
+        }
+
+        err =
+            pClipCtxt->ShellAPI.m_pReader->m_pFctJump(pClipCtxt->pReaderContext,
+            (M4_StreamHandler *)pClipCtxt->pVideoStream, &iRapCts);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipDecodeVideoUpToCts: m_pFctJump returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * The decoder must be told that we jumped */
+        bClipJump = M4OSA_TRUE;
+        pClipCtxt->iVideoDecCts = iRapCts;
+
+        /**
+        * Remember the clip reading state */
+        pClipCtxt->Vstatus = M4VSS3GPP_kClipStatus_DECODE_UP_TO;
+    }
+
+    /**
+    * If we are in decodeUpTo() process, check if we need to do
+    one more step or if decoding is finished */
+    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pClipCtxt->Vstatus )
+    {
+        /* Do a step of 500 ms decoding */
+        pClipCtxt->iVideoDecCts += 500;
+
+        if( pClipCtxt->iVideoDecCts > iClipCts )
+        {
+            /* Target time reached, we switch back to DECODE mode */
+            pClipCtxt->iVideoDecCts = iClipCts;
+            pClipCtxt->Vstatus = M4VSS3GPP_kClipStatus_DECODE;
+        }
+
+        M4OSA_TRACE2_1("c ,,,, decode up to : %ld", pClipCtxt->iVideoDecCts);
+    }
+    else
+    {
+        /* Just decode at current clip cts */
+        pClipCtxt->iVideoDecCts = iClipCts;
+
+        M4OSA_TRACE2_1("d ,,,, decode up to : %ld", pClipCtxt->iVideoDecCts);
+    }
+
+    /**
+    * Decode up to the target */
+    dDecodeTime = (M4OSA_Double)pClipCtxt->iVideoDecCts;
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_intClipDecodeVideoUpToCts: Decoding upTo CTS %.3f, pClipCtxt=0x%x",
+        dDecodeTime, pClipCtxt);
+
+    pClipCtxt->isRenderDup = M4OSA_FALSE;
+    err =
+        pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctDecode(pClipCtxt->pViDecCtxt,
+        &dDecodeTime, bClipJump);
+
+    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err)
+        && (err != M4WAR_VIDEORENDERER_NO_NEW_FRAME) )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipDecodeVideoUpToCts: m_pFctDecode returns 0x%x!",
+            err);
+        return err;
+    }
+
+    if( err == M4WAR_VIDEORENDERER_NO_NEW_FRAME )
+    {
+        pClipCtxt->isRenderDup = M4OSA_TRUE;
+    }
+
+    /**
+    * Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intClipDecodeVideoUpToCts: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame()
+ * @brief    Read one AU frame in the clip
+ * @note
+ * @param   pClipCtxt            (IN) Internal clip context
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipReadNextAudioFrame(
+    M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err;
+
+    /* ------------------------------ */
+    /* ---------- NO AUDIO ---------- */
+    /* ------------------------------ */
+
+    if( M4OSA_NULL == pClipCtxt->pAudioStream )
+    {
+        /* If there is no audio track, we return silence AUs */
+        pClipCtxt->pAudioFramePtr =
+            (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
+        pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
+        pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
+
+        M4OSA_TRACE2_0("b #### blank track");
+    }
+
+    /* ---------------------------------- */
+    /* ---------- AMR-NB, EVRC ---------- */
+    /* ---------------------------------- */
+
+    else if( ( M4VIDEOEDITING_kAMR_NB
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType)
+        || (M4VIDEOEDITING_kEVRC
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType) )
+    {
+        if( M4OSA_FALSE == pClipCtxt->bAudioFrameAvailable )
+        {
+            /**
+            * No AU available, so we must read one from the original track reader */
+            err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                pClipCtxt->pReaderContext,
+                (M4_StreamHandler *)pClipCtxt->pAudioStream,
+                &pClipCtxt->AudioAU);
+
+            if( M4NO_ERROR == err )
+            {
+                /**
+                * Set the current AMR frame position at the beginning of the read AU */
+                pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
+
+                /**
+                * Set the AMR frame CTS */
+                pClipCtxt->iAudioFrameCts =
+                    (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS
+                    * pClipCtxt->scale_audio + 0.5);
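+                /* (scale_audio is the audio time-scale conversion factor applied to the
+                   reader CTS; the + 0.5 rounds to the nearest integer instead of truncating) */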
+            }
+            else if( ( M4WAR_NO_MORE_AU == err) && (M4VIDEOEDITING_kAMR_NB
+                == pClipCtxt->pSettings->ClipProperties.AudioStreamType) )
+            {
+                /**
+                * If there is less audio than the stream duration indicated,
+                * we return silence at the end of the stream. */
+                pClipCtxt->pAudioFramePtr =
+                    (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
+                pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
+                pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
+
+                M4OSA_TRACE2_0("a #### silence AU");
+
+                /**
+                * Return with M4WAR_NO_MORE_AU */
+                M4OSA_TRACE3_0(
+                    "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: \
+                    returning M4WAR_NO_MORE_AU (silence)");
+                return M4WAR_NO_MORE_AU;
+            }
+            else /**< fatal error (or no silence in EVRC) */
+            {
+                M4OSA_TRACE3_1(
+                    "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: m_pFctGetNextAu() returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+        else /* bAudioFrameAvailable */
+        {
+            /**
+            * Go to the next AMR frame in the AU */
+            pClipCtxt->pAudioFramePtr += pClipCtxt->uiAudioFrameSize;
+
+            /**
+            * Increment CTS: one AMR frame is 20 ms long */
+            pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
+        }
+
+        /**
+        * Get the size of the pointed AMR frame */
+        switch( pClipCtxt->pSettings->ClipProperties.AudioStreamType )
+        {
+            case M4VIDEOEDITING_kAMR_NB:
+                pClipCtxt->uiAudioFrameSize =
+                    (M4OSA_UInt16)M4VSS3GPP_intGetFrameSize_AMRNB(
+                    pClipCtxt->pAudioFramePtr);
+                break;
+
+            case M4VIDEOEDITING_kEVRC:
+                pClipCtxt->uiAudioFrameSize =
+                    (M4OSA_UInt16)M4VSS3GPP_intGetFrameSize_EVRC(
+                    pClipCtxt->pAudioFramePtr);
+                break;
+            default:
+                break;
+        }
+
+        if( 0 == pClipCtxt->uiAudioFrameSize )
+        {
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: AU frame size == 0,\
+                returning M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU");
+            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+        }
+        else if( pClipCtxt->uiAudioFrameSize > pClipCtxt->AudioAU.m_size )
+        {
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-AMR: AU frame size greater than AU size!,\
+                returning M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU");
+            return M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU;
+        }
+
+        /**
+        * Check if the end of the current AU has been reached or not */
+        if( ( pClipCtxt->pAudioFramePtr + pClipCtxt->uiAudioFrameSize)
+            < (pClipCtxt->AudioAU.m_dataAddress + pClipCtxt->AudioAU.m_size) )
+        {
+            pClipCtxt->bAudioFrameAvailable = M4OSA_TRUE;
+        }
+        else
+        {
+            pClipCtxt->bAudioFrameAvailable =
+                M4OSA_FALSE; /**< will be used for next call */
+        }
+    }
+
+    /* ------------------------- */
+    /* ---------- AAC ---------- */
+    /* ------------------------- */
+
+    else if( ( M4VIDEOEDITING_kAAC
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType)
+        || (M4VIDEOEDITING_kAACplus
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType)
+        || (M4VIDEOEDITING_keAACplus
+        == pClipCtxt->pSettings->ClipProperties.AudioStreamType) )
+    {
+        err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+            pClipCtxt->pReaderContext,
+            (M4_StreamHandler *)pClipCtxt->pAudioStream,
+            &pClipCtxt->AudioAU);
+
+        if( M4NO_ERROR == err )
+        {
+            pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
+            pClipCtxt->uiAudioFrameSize =
+                (M4OSA_UInt16)pClipCtxt->AudioAU.m_size;
+            pClipCtxt->iAudioFrameCts =
+                (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS * pClipCtxt->scale_audio
+                + 0.5);
+
+            /* Patch because m_CTS is unfortunately rounded in 3gp reader shell */
+            /* (cts is not an integer with frequency 24 kHz for example) */
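+            /* Illustrative example: a 1024-sample AAC AU at 24 kHz lasts ~42.67 ms, so the
+               integer CTS returned by the reader drifts; re-aligning it on a multiple of
+               iSilenceFrameDuration keeps the audio timeline regular */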
+            pClipCtxt->iAudioFrameCts = ( ( pClipCtxt->iAudioFrameCts
+                + pClipCtxt->iSilenceFrameDuration / 2)
+                / pClipCtxt->iSilenceFrameDuration)
+                * pClipCtxt->iSilenceFrameDuration;
+        }
+        else if( M4WAR_NO_MORE_AU == err )
+        {
+            /**
+            * If there is less audio than the stream duration indicated,
+            * we return silence at the end of the stream. */
+            pClipCtxt->pAudioFramePtr =
+                (M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData;
+            pClipCtxt->uiAudioFrameSize = pClipCtxt->uiSilenceFrameSize;
+            pClipCtxt->iAudioFrameCts += pClipCtxt->iSilenceFrameDuration;
+
+            M4OSA_TRACE2_0("a #### silence AU");
+
+            /**
+            * Return with M4WAR_NO_MORE_AU */
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-AAC:\
+                returning M4WAR_NO_MORE_AU (silence)");
+            return M4WAR_NO_MORE_AU;
+        }
+        else /**< fatal error */
+        {
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-AAC: m_pFctGetNextAu() returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /* --------------------------------- */
+    /* ---------- MP3, others ---------- */
+    /* --------------------------------- */
+
+    else
+    {
+        err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+            pClipCtxt->pReaderContext,
+            (M4_StreamHandler *)pClipCtxt->pAudioStream,
+            &pClipCtxt->AudioAU);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intClipReadNextAudioFrame()-MP3: m_pFctGetNextAu() returns 0x%x",
+                err);
+            return err;
+        }
+
+        pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
+        pClipCtxt->uiAudioFrameSize = (M4OSA_UInt16)pClipCtxt->AudioAU.m_size;
+        pClipCtxt->iAudioFrameCts =
+            (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS * pClipCtxt->scale_audio
+            + 0.5);
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intClipReadNextAudioFrame(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipPrepareAudioDecoder()
+ * @brief    Creates and initialize the audio decoder for the clip.
+ * @note
+ * @param   pClipCtxt        (IN) internal clip context
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intClipPrepareAudioDecoder(
+    M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4_StreamType audiotype;
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+    M4_AACType iAacType = 0;
+
+#endif
+
+    /**
+    * Set the proper audio decoder */
+
+    audiotype = pClipCtxt->pAudioStream->m_basicProperties.m_streamType;
+
+    //EVRC: no decoder is supported yet, but null encoding is still allowed
+    if( M4DA_StreamTypeAudioEvrc != audiotype )
+    {
+        err = M4VSS3GPP_setCurrentAudioDecoder(&pClipCtxt->ShellAPI, audiotype);
+    }
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Creates the audio decoder */
+    if( M4OSA_NULL == pClipCtxt->ShellAPI.m_pAudioDecoder )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intClipPrepareAudioDecoder(): Fails to initiate the audio decoder.");
+        return M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED;
+    }
+
+    if( M4OSA_NULL == pClipCtxt->pAudioDecCtxt )
+    {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+        if( M4OSA_TRUE == pClipCtxt->ShellAPI.bAllowFreeingOMXCodecInterface )
+        {
+            /* NXP SW codec interface is used*/
+            if( M4DA_StreamTypeAudioAac == audiotype )
+                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+                &(pClipCtxt->AacProperties));
+            else
+                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+                M4OSA_NULL /* to be changed with HW interfaces */);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipPrepareAudioDecoder: m_pAudioDecoder->m_pFctCreateAudioDec\
+                    returns 0x%x", err);
+                return err;
+            }
+        }
+        else
+        {
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                Creating external audio decoder of type 0x%x", audiotype);
+            /* External OMX codecs are used*/
+            if( M4DA_StreamTypeAudioAac == audiotype )
+            {
+                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                    &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+                    pClipCtxt->ShellAPI.pCurrentAudioDecoderUserData);
+
+                if( M4NO_ERROR == err )
+                {
+                    /* AAC properties*/
+                    /* Temporary: taken from the reader until the audio decoder shell API
+                       exposes the AAC properties */
+                    pClipCtxt->AacProperties.aNumChan =
+                        pClipCtxt->pAudioStream->m_nbChannels;
+                    pClipCtxt->AacProperties.aSampFreq =
+                        pClipCtxt->pAudioStream->m_samplingFrequency;
+
+                    err = pClipCtxt->ShellAPI.m_pAudioDecoder->
+                        m_pFctGetOptionAudioDec(pClipCtxt->pAudioDecCtxt,
+                        M4AD_kOptionID_StreamType,
+                        (M4OSA_DataOption) &iAacType);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                            m_pAudioDecoder->m_pFctGetOptionAudioDec returns err 0x%x", err);
+                        iAacType = M4_kAAC; //set to default
+                        err = M4NO_ERROR;
+                    }
+                    else {
+                        M4OSA_TRACE3_1(
+                        "M4VSS3GPP_intClipPrepareAudioDecoder: \
+                        m_pAudioDecoder->m_pFctGetOptionAudioDec returns streamType %d",
+                        iAacType);
+                       }
+                    switch( iAacType )
+                    {
+                        case M4_kAAC:
+                            pClipCtxt->AacProperties.aSBRPresent = 0;
+                            pClipCtxt->AacProperties.aPSPresent = 0;
+                            break;
+
+                        case M4_kAACplus:
+                            pClipCtxt->AacProperties.aSBRPresent = 1;
+                            pClipCtxt->AacProperties.aPSPresent = 0;
+                            pClipCtxt->AacProperties.aExtensionSampFreq =
+                                pClipCtxt->pAudioStream->m_samplingFrequency;
+                            break;
+
+                        case M4_keAACplus:
+                            pClipCtxt->AacProperties.aSBRPresent = 1;
+                            pClipCtxt->AacProperties.aPSPresent = 1;
+                            pClipCtxt->AacProperties.aExtensionSampFreq =
+                                pClipCtxt->pAudioStream->m_samplingFrequency;
+                            break;
+                        default:
+                            break;
+                    }
+                    M4OSA_TRACE3_2(
+                        "M4VSS3GPP_intClipPrepareAudioDecoder: AAC NBChans=%d, SamplFreq=%d",
+                        pClipCtxt->AacProperties.aNumChan,
+                        pClipCtxt->AacProperties.aSampFreq);
+                }
+            }
+            else
+                err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+                &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+                pClipCtxt->ShellAPI.pCurrentAudioDecoderUserData);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                    m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+
+#else
+        /* Trick, I use pUserData to retrieve aac properties,
+           waiting for some better implementation... */
+
+        if( M4DA_StreamTypeAudioAac == audiotype )
+            err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+            &pClipCtxt->pAudioDecCtxt,
+            pClipCtxt->pAudioStream, &(pClipCtxt->AacProperties));
+        else
+            err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctCreateAudioDec(
+            &pClipCtxt->pAudioDecCtxt, pClipCtxt->pAudioStream,
+            M4OSA_NULL /* to be changed with HW interfaces */);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                m_pAudioDecoder->m_pFctCreateAudioDec returns 0x%x",
+                err);
+            return err;
+        }
+
+#endif
+
+    }
+
+    if( M4DA_StreamTypeAudioAmrNarrowBand == audiotype ) {
+        /* AMR DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else if( M4DA_StreamTypeAudioEvrc == audiotype ) {
+        /* EVRC DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else if( M4DA_StreamTypeAudioMp3 == audiotype ) {
+        /* MP3 DECODER CONFIGURATION */
+
+        /* nothing specific to do */
+    }
+    else if( M4DA_StreamTypeAudioAac == audiotype )
+    {
+        /* AAC DECODER CONFIGURATION */
+
+        /* Decode high quality aac but disable PS and SBR */
+        /* We may have to mix different kinds of AAC streams, so we align on the lowest
+           common capability */
+        /* This was not needed in the MCS, where there is only one stream */
+        M4_AacDecoderConfig AacDecParam;
+
+        AacDecParam.m_AACDecoderProfile = AAC_kAAC;
+        AacDecParam.m_DownSamplingMode = AAC_kDS_OFF;
+
+        if( M4ENCODER_kMono == pClipCtxt->pAudioStream->m_nbChannels )
+        {
+            AacDecParam.m_OutputMode = AAC_kMono;
+        }
+        else
+        {
+            AacDecParam.m_OutputMode = AAC_kStereo;
+        }
+
+        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctSetOptionAudioDec(
+            pClipCtxt->pAudioDecCtxt,
+            M4AD_kOptionID_UserParam, (M4OSA_DataOption) &AacDecParam);
+    }
+
+    if( M4OSA_NULL != pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStartAudioDec )
+    {
+        /* Not implemented in all decoders */
+        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStartAudioDec(
+            pClipCtxt->pAudioDecCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipPrepareAudioDecoder:\
+                m_pAudioDecoder->m_pFctStartAudioDec returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Allocate output buffer for the audio decoder */
+    pClipCtxt->AudioDecBufferOut.m_bufferSize =
+        pClipCtxt->pAudioStream->m_byteFrameLength
+        * pClipCtxt->pAudioStream->m_byteSampleSize
+        * pClipCtxt->pAudioStream->m_nbChannels;
+    pClipCtxt->AudioDecBufferOut.m_dataAddress =
+        (M4OSA_MemAddr8)M4OSA_malloc(pClipCtxt->AudioDecBufferOut.m_bufferSize
+        * sizeof(M4OSA_Int16),
+        M4VSS3GPP, (M4OSA_Char *)"AudioDecBufferOut.m_bufferSize");
+
+    if( M4OSA_NULL == pClipCtxt->AudioDecBufferOut.m_dataAddress )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intClipPrepareAudioDecoder():\
+            unable to allocate AudioDecBufferOut.m_dataAddress, returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame()
+ * @brief    Decode the current AUDIO frame.
+ * @note
+ * @param   pClipCtxt        (IN) internal clip context
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipDecodeCurrentAudioFrame(
+    M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err;
+
+    /**
+    * Silence mode */
+    if( pClipCtxt->pSilenceFrameData
+        == (M4OSA_UInt8 *)pClipCtxt->pAudioFramePtr )
+    {
+        if( pClipCtxt->AudioDecBufferOut.m_dataAddress == M4OSA_NULL )
+        {
+            /**
+            * Allocate output buffer for the audio decoder */
+            pClipCtxt->AudioDecBufferOut.m_bufferSize =
+                pClipCtxt->uiSilencePcmSize;
+            pClipCtxt->AudioDecBufferOut.m_dataAddress =
+                (M4OSA_MemAddr8)M4OSA_malloc(
+                pClipCtxt->AudioDecBufferOut.m_bufferSize
+                * sizeof(M4OSA_Int16),
+                M4VSS3GPP,(M4OSA_Char *) "AudioDecBufferOut.m_bufferSize");
+
+            if( M4OSA_NULL == pClipCtxt->AudioDecBufferOut.m_dataAddress )
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intClipDecodeCurrentAudioFrame():\
+                    unable to allocate AudioDecBufferOut.m_dataAddress, returning M4ERR_ALLOC");
+                return M4ERR_ALLOC;
+            }
+        }
+
+        /* Fill it with 0 (= pcm silence) */
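+        /* (m_bufferSize counts 16-bit samples, as in the allocation above, hence the
+           sizeof(M4OSA_Int16) factor to obtain the byte count passed to memset) */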
+        M4OSA_memset(pClipCtxt->AudioDecBufferOut.m_dataAddress,
+             pClipCtxt->AudioDecBufferOut.m_bufferSize * sizeof(M4OSA_Int16), 0);
+    }
+    else if (pClipCtxt->pSettings->FileType == M4VIDEOEDITING_kFileType_PCM)
+    {
+        pClipCtxt->AudioDecBufferIn.m_dataAddress = (M4OSA_MemAddr8) pClipCtxt->pAudioFramePtr;
+        pClipCtxt->AudioDecBufferIn.m_bufferSize  = pClipCtxt->uiAudioFrameSize;
+
+        M4OSA_memcpy(pClipCtxt->AudioDecBufferOut.m_dataAddress,
+            pClipCtxt->AudioDecBufferIn.m_dataAddress, pClipCtxt->AudioDecBufferIn.m_bufferSize);
+        pClipCtxt->AudioDecBufferOut.m_bufferSize = pClipCtxt->AudioDecBufferIn.m_bufferSize;
+        /**
+        * Return with no error */
+
+        M4OSA_TRACE3_0("M4VSS3GPP_intClipDecodeCurrentAudioFrame(): returning M4NO_ERROR");
+        return M4NO_ERROR;
+    }
+    /**
+    * Standard decoding mode */
+    else
+    {
+        /**
+        * Decode the current audio frame */
+        pClipCtxt->AudioDecBufferIn.m_dataAddress =
+            (M4OSA_MemAddr8)pClipCtxt->pAudioFramePtr;
+        pClipCtxt->AudioDecBufferIn.m_bufferSize = pClipCtxt->uiAudioFrameSize;
+
+        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctStepAudioDec(
+            pClipCtxt->pAudioDecCtxt,
+            &pClipCtxt->AudioDecBufferIn, &pClipCtxt->AudioDecBufferOut,
+            M4OSA_FALSE);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipDecodeCurrentAudioFrame():\
+                m_pAudioDecoder->m_pFctStepAudio returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intClipDecodeCurrentAudioFrame(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt()
+ * @brief    Jump in the audio track of the clip.
+ * @note
+ * @param   pClipCtxt            (IN) internal clip context
+ * @param   pJumpCts            (IN/OUT) in:target CTS, out: reached CTS
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipJumpAudioAt( M4VSS3GPP_ClipContext *pClipCtxt,
+                                       M4OSA_Int32 *pJumpCts )
+{
+    M4OSA_ERR err;
+    M4OSA_Int32 iTargetCts;
+    M4OSA_Int32 iJumpCtsMs;
+
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipJumpAudioAt: pClipCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pJumpCts), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipJumpAudioAt: pJumpCts is M4OSA_NULL");
+
+    iTargetCts = *pJumpCts;
+
+    /**
+    * If there is no audio stream, we simulate a jump at the target jump CTS */
+    if( M4OSA_NULL == pClipCtxt->pAudioStream )
+    {
+        /**
+        * the target CTS will be reached at the next ReadFrame call (hence the subtraction
+        of one silence frame duration) */
+        *pJumpCts = iTargetCts - pClipCtxt->iSilenceFrameDuration;
+
+        /* Patch because m_CTS is unfortunately rounded in 3gp reader shell */
+        /* (cts is not an integer with frequency 24 kHz for example) */
+        *pJumpCts = ( ( *pJumpCts + pClipCtxt->iSilenceFrameDuration / 2)
+            / pClipCtxt->iSilenceFrameDuration)
+            * pClipCtxt->iSilenceFrameDuration;
+        pClipCtxt->iAudioFrameCts =
+            *pJumpCts; /* simulate a read at the jump position for later silence AUs */
+    }
+    else
+    {
+        M4OSA_Int32 current_time = 0;
+        M4OSA_Int32 loop_counter = 0;
+
+        if( (M4DA_StreamTypeAudioMp3
+            == pClipCtxt->pAudioStream->m_basicProperties.m_streamType) )
+        {
+            while( ( loop_counter < M4VSS3GPP_MP3_JUMPED_AU_NUMBER_MAX)
+                && (current_time < iTargetCts) )
+            {
+                err = pClipCtxt->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                    pClipCtxt->pReaderContext,
+                    (M4_StreamHandler *)pClipCtxt->pAudioStream,
+                    &pClipCtxt->AudioAU);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE3_1(
+                        "M4VSS3GPP_intClipJumpAudioAt: m_pFctGetNextAu() returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                current_time = (M4OSA_Int32)pClipCtxt->AudioAU.m_CTS;
+                loop_counter++;
+            }
+
+            /**
+            * The current AU is stored */
+            pClipCtxt->pAudioFramePtr = pClipCtxt->AudioAU.m_dataAddress;
+            pClipCtxt->uiAudioFrameSize =
+                (M4OSA_UInt16)pClipCtxt->AudioAU.m_size;
+            pClipCtxt->iAudioFrameCts =
+                (M4OSA_Int32)(pClipCtxt->AudioAU.m_CTS * pClipCtxt->scale_audio
+                + 0.5);
+
+            *pJumpCts = pClipCtxt->iAudioFrameCts;
+        }
+        else
+        {
+            /**
+            * Jump in the audio stream */
+            iJumpCtsMs =
+                (M4OSA_Int32)(*pJumpCts / pClipCtxt->scale_audio + 0.5);
+
+            err = pClipCtxt->ShellAPI.m_pReader->m_pFctJump(
+                pClipCtxt->pReaderContext,
+                (M4_StreamHandler *)pClipCtxt->pAudioStream,
+                &iJumpCtsMs);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipJumpAudioAt(): m_pFctJump() returns 0x%x",
+                    err);
+                return err;
+            }
+
+            *pJumpCts =
+                (M4OSA_Int32)(iJumpCtsMs * pClipCtxt->scale_audio + 0.5);
+
+            /* Patch because m_CTS is unfortunately rounded in 3gp reader shell */
+            /* (cts is not an integer with frequency 24 kHz for example) */
+            *pJumpCts = ( ( *pJumpCts + pClipCtxt->iSilenceFrameDuration / 2)
+                / pClipCtxt->iSilenceFrameDuration)
+                * pClipCtxt->iSilenceFrameDuration;
+            pClipCtxt->iAudioFrameCts = 0; /* No frame read yet */
+
+            /**
+            * To catch potential bugs more easily, reset all these fields after a jump */
+            pClipCtxt->bAudioFrameAvailable = M4OSA_FALSE;
+            pClipCtxt->pAudioFramePtr = M4OSA_NULL;
+
+            /**
+            * In AMR we have to handle multi-frame AUs, and in AAC the jump can land
+            one AU too far backward */
+            if( *pJumpCts < iTargetCts )
+            {
+                /**
+                * Jump doesn't read any AU, we must read at least one */
+                err = M4VSS3GPP_intClipReadNextAudioFrame(pClipCtxt);
+
+                if( M4OSA_ERR_IS_ERROR(err) )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intClipJumpAudioAt():\
+                        M4VSS3GPP_intClipReadNextAudioFrame(a) returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Read AU frames as long as we reach the AU before the target CTS
+                * (so the target will be reached when the user call ReadNextAudioFrame). */
+                while( pClipCtxt->iAudioFrameCts
+                    < (iTargetCts - pClipCtxt->iSilenceFrameDuration) )
+                {
+                    err = M4VSS3GPP_intClipReadNextAudioFrame(pClipCtxt);
+
+                    if( M4OSA_ERR_IS_ERROR(err) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intClipJumpAudioAt():\
+                            M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Return the CTS that will be reached at next ReadFrame */
+                *pJumpCts = pClipCtxt->iAudioFrameCts
+                    + pClipCtxt->iSilenceFrameDuration;
+            }
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intClipJumpAudioAt(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipClose()
+ * @brief    Close a clip. Destroy the context.
+ * @note
+ * @param   pClipCtxt            (IN) Internal clip context
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intClipClose( M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err;
+
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipClose: pClipCtxt is M4OSA_NULL");
+
+    /**
+    * Free the video decoder context */
+    if( M4OSA_NULL != pClipCtxt->pViDecCtxt )
+    {
+        pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctDestroy(
+            pClipCtxt->pViDecCtxt);
+        pClipCtxt->pViDecCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the audio decoder context  */
+    if( M4OSA_NULL != pClipCtxt->pAudioDecCtxt )
+    {
+        err = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctDestroyAudioDec(
+            pClipCtxt->pAudioDecCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipClose: m_pAudioDecoder->m_pFctDestroyAudioDec returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pClipCtxt->pAudioDecCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the decoded audio buffer */
+    if( M4OSA_NULL != pClipCtxt->AudioDecBufferOut.m_dataAddress )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pClipCtxt->AudioDecBufferOut.m_dataAddress);
+        pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+    }
+
+    /**
+    * The audio AU is normally allocated by the reader.
+    * If there is no audio track, the VSS points the audio AU at a static 'silence' (SID) frame.
+    * In that case the AU data must not be freed, so the pointer is reset to M4OSA_NULL here
+    before the reader is closed */
+    if( ( (M4OSA_MemAddr8)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048
+        == pClipCtxt->AudioAU.m_dataAddress)
+        || ((M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData
+        == pClipCtxt->AudioAU.m_dataAddress) )
+    {
+        pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pClipCtxt->pReaderContext )
+    {
+        /**
+        * Close the 3GPP or MP3 reader */
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctClose(
+            pClipCtxt->pReaderContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipClose(): m_pReader->m_pFctClose returns 0x%x",
+                err);
+        }
+
+        /**
+        * Destroy the 3GPP or MP3 reader context */
+        err = pClipCtxt->ShellAPI.m_pReader->m_pFctDestroy(
+            pClipCtxt->pReaderContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipClose(): m_pReader->m_pFctDestroy returns 0x%x",
+                err);
+        }
+
+        pClipCtxt->pReaderContext = M4OSA_NULL;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_1("M4VSS3GPP_intClipClose(Ctxt=0x%x): returning M4NO_ERROR",
+        pClipCtxt);
+    return M4NO_ERROR;
+}
+
+M4OSA_ERR M4VSS3GPP_intClipCleanUp( M4VSS3GPP_ClipContext *pClipCtxt )
+{
+    M4OSA_ERR err = M4NO_ERROR, err2;
+
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipCtxt), M4ERR_PARAMETER,
+        "M4VSS3GPP_intClipCleanUp: pClipCtxt is M4OSA_NULL");
+
+    /**
+    * Free the video decoder context */
+    if( M4OSA_NULL != pClipCtxt->pViDecCtxt )
+    {
+        pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctDestroy(
+            pClipCtxt->pViDecCtxt);
+        pClipCtxt->pViDecCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the audio decoder context  */
+    if( M4OSA_NULL != pClipCtxt->pAudioDecCtxt )
+    {
+        err2 = pClipCtxt->ShellAPI.m_pAudioDecoder->m_pFctDestroyAudioDec(
+            pClipCtxt->pAudioDecCtxt);
+
+        if( M4NO_ERROR != err2 )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipCleanUp: m_pAudioDecoder->m_pFctDestroyAudioDec returns 0x%x",
+                err2);
+            /**< don't return, we still have stuff to free */
+            if( M4NO_ERROR == err )
+                err = err2;
+        }
+
+        pClipCtxt->pAudioDecCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the decoded audio buffer */
+    if( M4OSA_NULL != pClipCtxt->AudioDecBufferOut.m_dataAddress )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pClipCtxt->AudioDecBufferOut.m_dataAddress);
+        pClipCtxt->AudioDecBufferOut.m_dataAddress = M4OSA_NULL;
+    }
+
+    /**
+    * The audio AU is normally allocated by the reader.
+    * If there is no audio track, the VSS points the audio AU at a static 'silence' (SID) frame.
+    * In that case the AU data must not be freed, so the pointer is reset to M4OSA_NULL here
+    before the reader is closed */
+    if( ( (M4OSA_MemAddr8)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048
+        == pClipCtxt->AudioAU.m_dataAddress)
+        || ((M4OSA_MemAddr8)pClipCtxt->pSilenceFrameData
+        == pClipCtxt->AudioAU.m_dataAddress) )
+    {
+        pClipCtxt->AudioAU.m_dataAddress = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pClipCtxt->pReaderContext )
+    {
+        /**
+        * Close the 3GPP or MP3 reader */
+        err2 = pClipCtxt->ShellAPI.m_pReader->m_pFctClose(
+            pClipCtxt->pReaderContext);
+
+        if( M4NO_ERROR != err2 )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipCleanUp(): m_pReader->m_pFctClose returns 0x%x",
+                err2);
+
+            if( M4NO_ERROR == err )
+                err = err2;
+        }
+
+        /**
+        * Destroy the 3GPP or MP3 reader context */
+        err2 = pClipCtxt->ShellAPI.m_pReader->m_pFctDestroy(
+            pClipCtxt->pReaderContext);
+
+        if( M4NO_ERROR != err2 )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipCleanUp(): m_pReader->m_pFctDestroy returns 0x%x",
+                err2);
+
+            if( M4NO_ERROR == err )
+                err = err2;
+        }
+
+        pClipCtxt->pReaderContext = M4OSA_NULL;
+    }
+
+    /**
+    * Free the shells interfaces */
+    M4VSS3GPP_unRegisterAllWriters(&pClipCtxt->ShellAPI);
+    M4VSS3GPP_unRegisterAllEncoders(&pClipCtxt->ShellAPI);
+    M4VSS3GPP_unRegisterAllReaders(&pClipCtxt->ShellAPI);
+    M4VSS3GPP_unRegisterAllDecoders(&pClipCtxt->ShellAPI);
+
+    M4OSA_TRACE3_1("M4VSS3GPP_intClipCleanUp: pClipCtxt=0x%x", pClipCtxt);
+    /**
+    * Free the clip context */
+    M4OSA_free((M4OSA_MemAddr32)pClipCtxt);
+
+    return err;
+}
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+M4OSA_ERR
+M4VSS3GPP_intClipRegisterExternalVideoDecoder( M4VSS3GPP_ClipContext *pClipCtxt,
+                                              M4VD_VideoType decoderType,
+                                              M4VD_Interface *pDecoderInterface,
+                                              M4OSA_Void *pUserData )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4DECODER_VideoInterface *shellInterface;
+    M4DECODER_VideoType nativeType;
+    M4DECODER_EXTERNAL_UserDataType shellUserData;
+
+    switch( decoderType )
+    {
+        case M4VD_kMpeg4VideoDec:
+        case M4VD_kH263VideoDec:
+            nativeType = M4DECODER_kVideoTypeMPEG4;
+            break;
+
+        case M4VD_kH264VideoDec:
+            nativeType = M4DECODER_kVideoTypeAVC;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intClipRegisterExternalVideoDecoder: unknown decoderType %d",
+                decoderType);
+            return M4ERR_PARAMETER;
+            break;
+    }
+
+    shellUserData =
+        (M4DECODER_EXTERNAL_UserDataType)M4OSA_malloc(sizeof(*shellUserData),
+        M4VSS3GPP, (M4OSA_Char *)"userData structure for the external shell decoder");
+
+    if( M4OSA_NULL == shellUserData )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intClipRegisterExternalVideoDecoder:\
+            failed to allocate userData structure for the external shell decoder");
+        return M4ERR_ALLOC;
+    }
+
+    shellUserData->externalFuncs = pDecoderInterface;
+    shellUserData->externalUserData = pUserData;
+
+    err = M4DECODER_EXTERNAL_getInterface(&shellInterface);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipRegisterExternalVideoDecoder:\
+            M4DECODER_EXTERNAL_getInterface failed with error 0x%08X",
+            err);
+        M4OSA_free((M4OSA_MemAddr32)shellUserData);
+        return err;
+    }
+
+    err = M4VSS3GPP_registerVideoDecoder(&(pClipCtxt->ShellAPI), nativeType,
+        shellInterface);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intClipRegisterExternalVideoDecoder:\
+            M4VSS3GPP_registerVideoDecoder failed with error 0x%08X",
+            err);
+        M4OSA_free((M4OSA_MemAddr32)shellInterface);
+        M4OSA_free((M4OSA_MemAddr32)shellUserData);
+        return err;
+    }
+
+    pClipCtxt->ShellAPI.m_pVideoDecoderUserDataTable[nativeType] =
+        shellUserData;
+
+    return M4NO_ERROR;
+}
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB()
+ * @brief   Return the length, in bytes, of the AMR Narrow-Band frame contained in the given buffer
+ * @note
+ * @param   pAudioFrame   (IN) AMRNB frame
+ * @return  Frame length in bytes (including the frame-type header byte), or 0 if the frame is corrupted
+ ******************************************************************************
+ */
+
+M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_AMRNB( M4OSA_MemAddr8 pAudioFrame )
+{
+    M4OSA_UInt32 frameSize = 0;
+    M4OSA_UInt32 frameType = ( ( *pAudioFrame) &(0xF << 3)) >> 3;
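+    /* In the AMR storage format (RFC 4867), each frame starts with a ToC byte laid out as
+       P | FT(4 bits) | Q | padding, so the mask and shift above extract the frame type from
+       bits 6..3 */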
+
+    switch( frameType )
+    {
+        case 0:
+            frameSize = 95;
+            break; /*  4750 bps */
+
+        case 1:
+            frameSize = 103;
+            break; /*  5150 bps */
+
+        case 2:
+            frameSize = 118;
+            break; /*  5900 bps */
+
+        case 3:
+            frameSize = 134;
+            break; /*  6700 bps */
+
+        case 4:
+            frameSize = 148;
+            break; /*  7400 bps */
+
+        case 5:
+            frameSize = 159;
+            break; /*  7950 bps */
+
+        case 6:
+            frameSize = 204;
+            break; /* 10200 bps */
+
+        case 7:
+            frameSize = 244;
+            break; /* 12200 bps */
+
+        case 8:
+            frameSize = 39;
+            break; /* SID (Silence) */
+
+        case 15:
+            frameSize = 0;
+            break; /* No data */
+
+        default:
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intGetFrameSize_AMRNB(): Corrupted AMR frame! returning 0.");
+            return 0;
+    }
+
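+    /* Worked example (illustrative): for frame type 7 (12.2 kbps), frameSize is 244 bits, so
+       the function returns 1 + ((244 + 7) / 8) = 32 bytes, i.e. the ToC byte plus 31 octets of
+       speech data; for a SID frame (type 8, 39 bits) it returns 1 + 5 = 6 bytes */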
+    return (1 + (( frameSize + 7) / 8));
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC()
+ * @brief   Return the length, in bytes, of the EVRC frame contained in the given buffer
+ * @note
+ *     0 1 2 3
+ *    +-+-+-+-+
+ *    |fr type|              RFC 3558
+ *    +-+-+-+-+
+ *
+ * Frame Type: 4 bits
+ *    The frame type indicates the type of the corresponding codec data
+ *    frame in the RTP packet.
+ *
+ * For EVRC and SMV codecs, the frame type values and size of the
+ * associated codec data frame are described in the table below:
+ *
+ * Value   Rate      Total codec data frame size (in octets)
+ * ---------------------------------------------------------
+ *   0     Blank      0    (0 bit)
+ *   1     1/8        2    (16 bits)
+ *   2     1/4        5    (40 bits; not valid for EVRC)
+ *   3     1/2       10    (80 bits)
+ *   4     1         22    (171 bits; 5 padded at end with zeros)
+ *   5     Erasure    0    (SHOULD NOT be transmitted by sender)
+ *
+ * @param   pAudioFrame   (IN) EVRC frame
+ * @return  Frame length in bytes (including the frame-type header byte), or 0 if the frame is corrupted
+ ******************************************************************************
+ */
+M4OSA_UInt32 M4VSS3GPP_intGetFrameSize_EVRC( M4OSA_MemAddr8 pAudioFrame )
+{
+    M4OSA_UInt32 frameSize = 0;
+    M4OSA_UInt32 frameType = ( *pAudioFrame) &0x0F;
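+    /* The frame type is carried in the low 4 bits of the leading header byte (see the
+       RFC 3558 table above). Illustrative example: a full-rate frame (type 4) maps to 171 bits
+       below, so the function returns 1 + ((171 + 7) / 8) = 23 bytes, i.e. the header byte plus
+       the 22 codec octets from the table */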
+
+    switch( frameType )
+    {
+        case 0:
+            frameSize = 0;
+            break; /*  blank */
+
+        case 1:
+            frameSize = 16;
+            break; /*  1/8 */
+
+        case 2:
+            frameSize = 40;
+            break; /*  1/4 */
+
+        case 3:
+            frameSize = 80;
+            break; /*  1/2 */
+
+        case 4:
+            frameSize = 171;
+            break; /*  1 */
+
+        case 5:
+            frameSize = 0;
+            break; /*  erasure */
+
+        default:
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_intGetFrameSize_EVRC(): Corrupted EVRC frame! returning 0.");
+            return 0;
+    }
+
+    return (1 + (( frameSize + 7) / 8));
+}
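+
+/* Illustrative sketch, not part of the original change and compiled out: dumps, for every
+ * EVRC frame type of the RFC 3558 table above, the size returned by the helper, making the
+ * "1 frame-type byte + ceil(payload bits / 8)" convention visible. The function name is made
+ * up for the example. */
+#if 0
+static M4OSA_Void M4VSS3GPP_exampleDumpEvrcFrameSizes( void )
+{
+    M4OSA_UInt8 frameType;
+
+    for ( frameType = 0; frameType <= 5; frameType++ )
+    {
+        /* A one-byte frame header carrying only the frame type in its lower nibble */
+        M4OSA_Int8 header = (M4OSA_Int8)frameType;
+
+        M4OSA_TRACE3_2("EVRC frame type %d -> %d byte(s)", frameType,
+            M4VSS3GPP_intGetFrameSize_EVRC(&header));
+    }
+}
+#endif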
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_ClipAnalysis.c b/libvideoeditor/vss/src/M4VSS3GPP_ClipAnalysis.c
new file mode 100755
index 0000000..471cb6d
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_ClipAnalysis.c
@@ -0,0 +1,1388 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_ClipAnalysis.c
+ * @brief    Implementation of functions related to analysis of input clips
+ * @note    Contains the clip analysis API entry points as well as the related internal functions
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ *    Our headers */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#include "M4VD_EXTERNAL_Interface.h"
+
+#endif
+
+/**
+ *    OSAL headers */
+#include "M4OSA_Memory.h" /* OSAL memory management */
+#include "M4OSA_Debug.h"  /* OSAL debug management */
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editAnalyseClip()
+ * @brief    This function allows checking if a clip is compatible with VSS 3GPP editing
+ * @note    It also fills a ClipAnalysis structure, which can be used to check if two
+ *        clips are compatible
+ * @param    pClip                (IN) File descriptor of the input 3GPP/MP3 clip file.
+ * @param    FileType            (IN) Type of the input file (.3gp, .amr, .mp3)
+ * @param    pClipProperties        (OUT) Pointer to a valid ClipProperties structure, filled by the analysis.
+ * @param    pFileReadPtrFct        (IN) Pointer to the OSAL file reader functions.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return   M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED
+ * @return   M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return   M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED
+ * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE
+ * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE
+ * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC
+ * @return   M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
+ * @return   M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE
+ * @return   M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT
+ * @return   M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editAnalyseClip( M4OSA_Void *pClip,
+                                    M4VIDEOEDITING_FileType FileType,
+                                    M4VIDEOEDITING_ClipProperties *pClipProperties,
+                                    M4OSA_FileReadPointer *pFileReadPtrFct )
+{
+    M4OSA_ERR err;
+    M4VSS3GPP_ClipContext *pClipContext;
+    M4VSS3GPP_ClipSettings ClipSettings;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_editAnalyseClip called with pClip=0x%x, pClipProperties=0x%x",
+        pClip, pClipProperties);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClip), M4ERR_PARAMETER,
+        "M4VSS3GPP_editAnalyseClip: pClip is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipProperties), M4ERR_PARAMETER,
+        "M4VSS3GPP_editAnalyseClip: pClipProperties is M4OSA_NULL");
+
+    /**
+    * Build dummy clip settings, in order to use the editClipOpen function */
+    ClipSettings.pFile = pClip;
+    ClipSettings.FileType = FileType;
+    ClipSettings.uiBeginCutTime = 0;
+    ClipSettings.uiEndCutTime = 0;
+
+    /* Clip properties not built yet, set at least this flag */
+    ClipSettings.ClipProperties.bAnalysed = M4OSA_FALSE;
+
+    /**
+    * Open the clip in fast open mode */
+    err = M4VSS3GPP_intClipInit(&pClipContext, pFileReadPtrFct);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editAnalyseClip: M4VSS3GPP_intClipInit() returns 0x%x!",
+            err);
+
+        /**
+        * Free the clip */
+        if( M4OSA_NULL != pClipContext )
+        {
+            M4VSS3GPP_intClipCleanUp(pClipContext);
+        }
+        return err;
+    }
+
+    err = M4VSS3GPP_intClipOpen(pClipContext, &ClipSettings, M4OSA_FALSE,
+        M4OSA_TRUE, M4OSA_TRUE);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editAnalyseClip: M4VSS3GPP_intClipOpen() returns 0x%x!",
+            err);
+
+        M4VSS3GPP_intClipCleanUp(pClipContext);
+
+        /**
+        * Here it is better to return the Editing specific error code */
+        if( ( ((M4OSA_UInt32)M4ERR_DECODER_H263_PROFILE_NOT_SUPPORTED) == err)
+            || (((M4OSA_UInt32)M4ERR_DECODER_H263_NOT_BASELINE) == err) )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editAnalyseClip:\
+                M4VSS3GPP_intClipOpen() returns M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED");
+            return M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED;
+        }
+        return err;
+    }
+
+    /**
+    * Analyse the clip */
+    err = M4VSS3GPP_intBuildAnalysis(pClipContext, pClipProperties);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editAnalyseClip: M4VSS3GPP_intBuildAnalysis() returns 0x%x!",
+            err);
+
+        /**
+        * Free the clip */
+        M4VSS3GPP_intClipCleanUp(pClipContext);
+        return err;
+    }
+
+    /**
+    * Free the clip */
+    err = M4VSS3GPP_intClipClose(pClipContext);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editAnalyseClip: M4VSS_intClipClose() returns 0x%x!",
+            err);
+        M4VSS3GPP_intClipCleanUp(pClipContext);
+        return err;
+    }
+
+    M4VSS3GPP_intClipCleanUp(pClipContext);
+
+    /**
+    * Check the clip is compatible with VSS editing */
+    err = M4VSS3GPP_intCheckClipCompatibleWithVssEditing(pClipProperties);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editAnalyseClip:\
+            M4VSS3GPP_intCheckClipCompatibleWithVssEditing() returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_editAnalyseClip(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
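+
+/* Illustrative usage sketch, not part of the original change and compiled out: a typical
+ * integrator-side sequence that analyses two 3GPP clips and then checks whether they can be
+ * assembled together. The clip descriptors and the OSAL file-read pointer are assumed to be
+ * provided by the caller; the example function name is made up. Note that audio mismatches
+ * are reported through M4VSS3GPP_WAR_* codes rather than hard errors. */
+#if 0
+static M4OSA_ERR M4VSS3GPP_exampleCheckTwoClips( M4OSA_Void *pClip1, M4OSA_Void *pClip2,
+                                                 M4OSA_FileReadPointer *pFileReadPtr )
+{
+    M4VIDEOEDITING_ClipProperties clip1Properties;
+    M4VIDEOEDITING_ClipProperties clip2Properties;
+    M4OSA_ERR err;
+
+    err = M4VSS3GPP_editAnalyseClip(pClip1, M4VIDEOEDITING_kFileType_3GPP,
+        &clip1Properties, pFileReadPtr);
+
+    if( M4NO_ERROR != err )
+        return err;
+
+    err = M4VSS3GPP_editAnalyseClip(pClip2, M4VIDEOEDITING_kFileType_3GPP,
+        &clip2Properties, pFileReadPtr);
+
+    if( M4NO_ERROR != err )
+        return err;
+
+    /* clip1 acts as the master clip; audio incompatibilities come back as warnings */
+    return M4VSS3GPP_editCheckClipCompatibility(&clip1Properties,
+        &clip2Properties);
+}
+#endif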
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility()
+ * @brief    This function allows checking whether two clips are compatible with each other for
+ *        the VSS 3GPP editing assembly feature.
+ * @note
+ * @param    pClip1Properties        (IN) Clip analysis of the first clip
+ * @param    pClip2Properties        (IN) Clip analysis of the second clip
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return    M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return    M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT
+ * @return    M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE
+ * @return    M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE
+ * @return    M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING
+ * @return  M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY
+ * @return  M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editCheckClipCompatibility( M4VIDEOEDITING_ClipProperties *pClip1Properties,
+                                                M4VIDEOEDITING_ClipProperties *pClip2Properties )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_ERR video_err = M4NO_ERROR;
+    M4OSA_ERR audio_err = M4NO_ERROR;
+
+    M4OSA_Bool bClip1IsAAC = M4OSA_FALSE;
+    M4OSA_Bool bClip2IsAAC = M4OSA_FALSE;
+
+    M4OSA_TRACE3_2("M4VSS3GPP_editCheckClipCompatibility called with pClip1Analysis=0x%x,\
+                   pClip2Analysis=0x%x", pClip1Properties, pClip2Properties);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClip1Properties), M4ERR_PARAMETER,
+        "M4VSS3GPP_editCheckClipCompatibility: pClip1Properties is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClip2Properties), M4ERR_PARAMETER,
+        "M4VSS3GPP_editCheckClipCompatibility: pClip2Properties is M4OSA_NULL");
+
+    /**
+    * Check that each clip is, on its own, compatible with VSS 3GPP.
+    *
+    * Note: if a clip is not compatible with VSS 3GPP, M4VSS3GPP_editAnalyseClip()
+    * already returned an error to the integrator, who should therefore not call
+    * M4VSS3GPP_editCheckClipCompatibility with that clip analysis.
+    * The test is nevertheless redone here as a safety net, since
+    * M4VSS3GPP_intCheckClipCompatibleWithVssEditing is cheap to execute. */
+    err = M4VSS3GPP_intCheckClipCompatibleWithVssEditing(pClip1Properties);
+
+    if( err != M4NO_ERROR )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editCheckClipCompatibility: Clip1 not compatible with VSS3GPP,\
+            returning 0x%x", err);
+        return err;
+    }
+    err = M4VSS3GPP_intCheckClipCompatibleWithVssEditing(pClip2Properties);
+
+    if( err != M4NO_ERROR )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editCheckClipCompatibility: Clip2 not compatible with VSS3GPP,\
+            returning 0x%x", err);
+        return err;
+    }
+
+    if( ( M4VIDEOEDITING_kFileType_MP3 == pClip1Properties->FileType)
+        || (M4VIDEOEDITING_kFileType_AMR == pClip1Properties->FileType) )
+    {
+        if( pClip1Properties != pClip2Properties )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editCheckClipCompatibility: MP3 CAN ONLY BE CUT,\
+                returning M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY");
+            return M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY;
+        }
+        else
+        {
+            /* We are in VSS Splitter mode */
+            goto audio_analysis;
+        }
+    }
+
+    /********** Video ************/
+
+    /**
+    * Check both clips have same video stream type */
+    if( pClip1Properties->VideoStreamType != pClip2Properties->VideoStreamType )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_editCheckClipCompatibility: Clips don't have the same video format");
+        video_err = M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT;
+        goto audio_analysis;
+    }
+
+    /**
+    * Check both clips have the same video frame size */
+    if( ( pClip1Properties->uiVideoWidth != pClip2Properties->uiVideoWidth)
+        || (pClip1Properties->uiVideoHeight
+        != pClip2Properties->uiVideoHeight) )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_editCheckClipCompatibility: Clips don't have the same video frame size");
+        video_err = M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE;
+        goto audio_analysis;
+    }
+
+    switch( pClip1Properties->VideoStreamType )
+    {
+        case M4VIDEOEDITING_kH263:
+        case M4VIDEOEDITING_kH264:
+            /**< nothing to check here */
+            break;
+
+        case M4VIDEOEDITING_kMPEG4_EMP:
+        case M4VIDEOEDITING_kMPEG4:
+            /**
+            * Check both streams have the same time scale */
+            if( pClip1Properties->uiVideoTimeScale
+                != pClip2Properties->uiVideoTimeScale )
+            {
+                M4OSA_TRACE1_2(
+                    "M4VSS3GPP_editCheckClipCompatibility: Clips don't have the same video time\
+                    scale (%d != %d), returning M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE",
+                    pClip1Properties->uiVideoTimeScale,
+                    pClip2Properties->uiVideoTimeScale);
+                video_err = M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE;
+                goto audio_analysis;
+            }
+            /**
+            * Check both streams have the same use of data partitioning */
+            if( pClip1Properties->bMPEG4dataPartition
+                != pClip2Properties->bMPEG4dataPartition )
+            {
+                M4OSA_TRACE1_2(
+                    "M4VSS3GPP_editCheckClipCompatibility:\
+                    Clips don't have the same use of data partitioning (%d != %d),\
+                    returning M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING",
+                    pClip1Properties->bMPEG4dataPartition,
+                    pClip2Properties->bMPEG4dataPartition);
+                video_err = M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING;
+                goto audio_analysis;
+            }
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editCheckClipCompatibility: unknown video stream type (0x%x),\
+                returning M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT",
+                pClip1Properties->VideoStreamType);
+            video_err =
+                M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT; /**< this error should never happen,
+                                                              it's here for code safety only... */
+            goto audio_analysis;
+    }
+
+    pClip2Properties->bVideoIsCompatibleWithMasterClip = M4OSA_TRUE;
+
+    /********** Audio ************/
+
+audio_analysis:
+    if( M4VIDEOEDITING_kNoneAudio != pClip1Properties->
+        AudioStreamType ) /**< if there is an audio stream */
+    {
+        /**
+        * Remember whether the audio format belongs to the AAC family */
+        switch( pClip1Properties->AudioStreamType )
+        {
+            case M4VIDEOEDITING_kAAC:
+            case M4VIDEOEDITING_kAACplus:
+            case M4VIDEOEDITING_keAACplus:
+                bClip1IsAAC = M4OSA_TRUE;
+                break;
+            default:
+                break;
+        }
+    }
+
+    if( M4VIDEOEDITING_kNoneAudio != pClip2Properties->
+        AudioStreamType ) /**< if there is an audio stream */
+    {
+        /**
+        * Remember whether the audio format belongs to the AAC family */
+        switch( pClip2Properties->AudioStreamType )
+        {
+            case M4VIDEOEDITING_kAAC:
+            case M4VIDEOEDITING_kAACplus:
+            case M4VIDEOEDITING_keAACplus:
+                bClip2IsAAC = M4OSA_TRUE;
+                break;
+            default:
+                break;
+        }
+    }
+
+    /**
+    * If at least one clip has no audio, the clips are considered compatible */
+    if( ( pClip1Properties->AudioStreamType != M4VIDEOEDITING_kNoneAudio)
+        && (pClip2Properties->AudioStreamType != M4VIDEOEDITING_kNoneAudio) )
+    {
+        /**
+        * Check both clips have the same audio stream type;
+        * AAC, AAC+ and eAAC+ are considered mixable with each other */
+        if( ( pClip1Properties->AudioStreamType
+            != pClip2Properties->AudioStreamType)
+            && (( M4OSA_FALSE == bClip1IsAAC) || (M4OSA_FALSE == bClip2IsAAC)) )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editCheckClipCompatibility:\
+                Clips don't have the same Audio Stream Type");
+
+            audio_err = M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE;
+            goto analysis_done;
+        }
+
+        /**
+        * Check both clips have same number of channels */
+        if( pClip1Properties->uiNbChannels != pClip2Properties->uiNbChannels )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editCheckClipCompatibility: Clips don't have the same Nb of Channels");
+            audio_err = M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS;
+            goto analysis_done;
+        }
+
+        /**
+        * Check both clips have same sampling frequency */
+        if( pClip1Properties->uiSamplingFrequency
+            != pClip2Properties->uiSamplingFrequency )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editCheckClipCompatibility:\
+                Clips don't have the same Sampling Frequency");
+            audio_err = M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY;
+            goto analysis_done;
+        }
+    }
+
+    pClip2Properties->bAudioIsCompatibleWithMasterClip = M4OSA_TRUE;
+
+    /**
+    * Return with no error */
+
+analysis_done:
+    if( video_err != M4NO_ERROR )
+        return video_err;
+
+    if( audio_err != M4NO_ERROR )
+        return audio_err;
+
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_editCheckClipCompatibility(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intBuildAnalysis()
+ * @brief    Get video and audio properties from the clip streams
+ * @note    This function must return fatal errors only (errors that should not happen
+ *        in the final integrated product).
+ * @param   pClipCtxt            (IN) internal clip context
+ * @param    pClipProperties        (OUT) Pointer to a valid ClipProperties structure.
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intBuildAnalysis( M4VSS3GPP_ClipContext *pClipCtxt,
+                                     M4VIDEOEDITING_ClipProperties *pClipProperties )
+{
+    M4OSA_ERR err;
+    M4DECODER_MPEG4_DecoderConfigInfo DecConfigInfo;
+    M4DECODER_VideoSize dummySize;
+    M4DECODER_AVCProfileLevel AVCProfle;
+
+    pClipProperties->bAnalysed = M4OSA_FALSE;
+
+    /**
+    * Reset video characteristics */
+    pClipProperties->VideoStreamType = M4VIDEOEDITING_kNoneVideo;
+    pClipProperties->uiClipVideoDuration = 0;
+    pClipProperties->uiVideoBitrate = 0;
+    pClipProperties->uiVideoMaxAuSize = 0;
+    pClipProperties->uiVideoWidth = 0;
+    pClipProperties->uiVideoHeight = 0;
+    pClipProperties->uiVideoTimeScale = 0;
+    pClipProperties->fAverageFrameRate = 0.0;
+    pClipProperties->ProfileAndLevel =
+        M4VIDEOEDITING_kProfile_and_Level_Out_Of_Range;
+    pClipProperties->uiH263level = 0;
+    pClipProperties->uiVideoProfile = 0;
+    pClipProperties->bMPEG4dataPartition = M4OSA_FALSE;
+    pClipProperties->bMPEG4rvlc = M4OSA_FALSE;
+    pClipProperties->bMPEG4resynchMarker = M4OSA_FALSE;
+
+    M4OSA_memset((M4OSA_MemAddr8) &pClipProperties->ftyp,
+        sizeof(pClipProperties->ftyp), 0);
+
+    /**
+    * Video Analysis */
+    if( M4OSA_NULL != pClipCtxt->pVideoStream )
+    {
+        pClipProperties->uiVideoWidth = pClipCtxt->pVideoStream->m_videoWidth;
+        pClipProperties->uiVideoHeight = pClipCtxt->pVideoStream->m_videoHeight;
+        pClipProperties->fAverageFrameRate =
+            pClipCtxt->pVideoStream->m_averageFrameRate;
+
+        switch( pClipCtxt->pVideoStream->m_basicProperties.m_streamType )
+        {
+            case M4DA_StreamTypeVideoMpeg4:
+
+                pClipProperties->VideoStreamType = M4VIDEOEDITING_kMPEG4;
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+   /* A clip can be analysed outside of any edit context (for instance, two clips can be
+   checked for compatibility before an edit context is allocated for editing them). There is
+   therefore no way to pass an external video decoder to this function, since external
+   decoders work by being registered in an existing context. Besides, a full decoder (let
+   alone a hardware one) is overkill just to retrieve the clip config info: for a HW decoder
+   the shell builds that info itself, so only that detached piece of functionality is needed,
+   not the decoder. So when HW/external decoders may be present, the DSI parsing function of
+   the shell HW decoder (known to be present, since HW decoders are possible) is used directly
+   to get the config info. This path is taken even when the software decoder is present and
+   will end up being used: parsing the config involves no actual decoding and nothing specific
+   to a particular decoder (only the fact that the stream is MPEG-4 matters), so it is
+   functionally equivalent to the previous behaviour and light enough not to affect
+   performance. */
+
+                err = M4DECODER_EXTERNAL_ParseVideoDSI(pClipCtxt->pVideoStream->
+                    m_basicProperties.m_pDecoderSpecificInfo,
+                    pClipCtxt->pVideoStream->
+                    m_basicProperties.m_decoderSpecificInfoSize,
+                    &DecConfigInfo, &dummySize);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intBuildAnalysis():\
+                        M4DECODER_EXTERNAL_ParseVideoDSI returns 0x%08X", err);
+                    return err;
+                }
+
+    #else /* an external decoder cannot be present, so we can rely on the
+                software decoder to be installed already */
+                /* Get MPEG-4 decoder config. */
+
+                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctGetOption(
+                    pClipCtxt->pViDecCtxt,
+                    M4DECODER_MPEG4_kOptionID_DecoderConfigInfo,
+                    &DecConfigInfo);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intBuildAnalysis(): m_pFctGetOption(DecConfigInfo)\
+                        returns 0x%x", err);
+                    return err;
+                }
+
+    #endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+                pClipProperties->uiVideoProfile = DecConfigInfo.uiProfile;
+                pClipProperties->uiVideoTimeScale = DecConfigInfo.uiTimeScale;
+                pClipProperties->bMPEG4dataPartition =
+                    DecConfigInfo.bDataPartition;
+                pClipProperties->bMPEG4rvlc = DecConfigInfo.bUseOfRVLC;
+                pClipProperties->bMPEG4resynchMarker =
+                    DecConfigInfo.uiUseOfResynchMarker;
+
+                /* Supported enum value for profile and level */
+                switch( pClipProperties->uiVideoProfile )
+                {
+                    case 0x08:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_0;
+                        break;
+
+                    case 0x09:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_0b;
+                        break;
+
+                    case 0x01:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_1;
+                        break;
+
+                    case 0x02:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_2;
+                        break;
+
+                    case 0x03:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_3;
+                        break;
+
+                    case 0x04:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_4a;
+                        break;
+
+                    case 0x05:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kMPEG4_SP_Level_5;
+                        break;
+                }
+                break;
+
+            case M4DA_StreamTypeVideoH263:
+
+                pClipProperties->VideoStreamType = M4VIDEOEDITING_kH263;
+
+                /* Get H263 level, which is sixth byte in the DSI */
+                pClipProperties->uiH263level = pClipCtxt->pVideoStream->
+                    m_basicProperties.m_pDecoderSpecificInfo[5];
+                /* Get H263 profile, which is the seventh byte in the DSI */
+                pClipProperties->uiVideoProfile = pClipCtxt->pVideoStream->
+                    m_basicProperties.m_pDecoderSpecificInfo[6];
+                /* H263 time scale is always 30000 */
+                pClipProperties->uiVideoTimeScale = 30000;
+
+                /* Supported enum value for profile and level */
+                if( pClipProperties->uiVideoProfile == 0 )
+                {
+                    switch( pClipProperties->uiH263level )
+                    {
+                        case 10:
+                            pClipProperties->ProfileAndLevel =
+                                M4VIDEOEDITING_kH263_Profile_0_Level_10;
+                            break;
+
+                        case 20:
+                            pClipProperties->ProfileAndLevel =
+                                M4VIDEOEDITING_kH263_Profile_0_Level_20;
+                            break;
+
+                        case 30:
+                            pClipProperties->ProfileAndLevel =
+                                M4VIDEOEDITING_kH263_Profile_0_Level_30;
+                            break;
+
+                        case 40:
+                            pClipProperties->ProfileAndLevel =
+                                M4VIDEOEDITING_kH263_Profile_0_Level_40;
+                            break;
+
+                        case 45:
+                            pClipProperties->ProfileAndLevel =
+                                M4VIDEOEDITING_kH263_Profile_0_Level_45;
+                            break;
+                    }
+                }
+                break;
+
+            case M4DA_StreamTypeVideoMpeg4Avc:
+
+                pClipProperties->VideoStreamType = M4VIDEOEDITING_kH264;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+                err = M4DECODER_EXTERNAL_ParseAVCDSI(pClipCtxt->pVideoStream->
+                    m_basicProperties.m_pDecoderSpecificInfo,
+                    pClipCtxt->pVideoStream->
+                    m_basicProperties.m_decoderSpecificInfoSize,
+                    &AVCProfle);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intBuildAnalysis(): \
+                         M4DECODER_EXTERNAL_ParseAVCDSI returns 0x%08X",
+                         err);
+                    return err;
+                }
+
+#else /* an external decoder cannot be present, so we can rely on the
+                software decoder to be installed already */
+
+                err = pClipCtxt->ShellAPI.m_pVideoDecoder->m_pFctGetOption(
+                    pClipCtxt->pViDecCtxt,
+                    M4DECODER_kOptionID_AVCProfileAndLevel, &AVCProfle);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intBuildAnalysis(): m_pFctGetOption(AVCProfileInfo)\
+                            returns 0x%x", err);
+                    return err;
+                }
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+                switch( AVCProfle )
+                {
+                    case M4DECODER_AVC_kProfile_0_Level_1:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_1;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_1b:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_1b;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_1_1:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_1_1;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_1_2:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_1_2;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_1_3:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_1_3;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_2:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_2;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_2_1:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_2_1;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_2_2:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_2_2;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_3:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_3;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_3_1:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_3_1;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_3_2:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_3_2;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_4:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_4;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_4_1:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_4_1;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_4_2:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_4_2;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_5:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_5;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_0_Level_5_1:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kH264_Profile_0_Level_5_1;
+                        break;
+
+                    case M4DECODER_AVC_kProfile_and_Level_Out_Of_Range:
+                    default:
+                        pClipProperties->ProfileAndLevel =
+                            M4VIDEOEDITING_kProfile_and_Level_Out_Of_Range;
+                }
+
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intBuildAnalysis: unknown input video format (0x%x),\
+                    returning M4NO_ERROR",pClipCtxt->pVideoStream->m_basicProperties.m_streamType);
+                return
+                    M4NO_ERROR; /**< We do not return an error here.
+                                The video format compatibility check will be done later */
+        }
+
+        pClipProperties->uiClipVideoDuration =
+            (M4OSA_UInt32)pClipCtxt->pVideoStream->m_basicProperties.m_duration;
+        pClipProperties->uiVideoMaxAuSize =
+            pClipCtxt->pVideoStream->m_basicProperties.m_maxAUSize;
+
+        /* if the video bitrate is not available, retrieve an estimate of the overall bitrate */
+        pClipProperties->uiVideoBitrate =
+            (M4OSA_UInt32)pClipCtxt->pVideoStream->
+            m_basicProperties.m_averageBitRate;
+
+        if( 0 == pClipProperties->uiVideoBitrate )
+        {
+            pClipCtxt->ShellAPI.m_pReader->m_pFctGetOption(
+                pClipCtxt->pReaderContext, M4READER_kOptionID_Bitrate,
+                &pClipProperties->uiVideoBitrate);
+
+            if( M4OSA_NULL != pClipCtxt->pAudioStream )
+            {
+                /* we got the overall bitrate; subtract the audio bitrate, if any */
+                pClipProperties->uiVideoBitrate -=
+                    pClipCtxt->pAudioStream->m_basicProperties.m_averageBitRate;
+            }
+        }
+    }
+
+    /**
+    * Reset audio characteristics */
+    pClipProperties->AudioStreamType = M4VIDEOEDITING_kNoneAudio;
+    pClipProperties->uiClipAudioDuration = 0;
+    pClipProperties->uiAudioBitrate = 0;
+    pClipProperties->uiAudioMaxAuSize = 0;
+    pClipProperties->uiNbChannels = 0;
+    pClipProperties->uiSamplingFrequency = 0;
+    pClipProperties->uiExtendedSamplingFrequency = 0;
+    pClipProperties->uiDecodedPcmSize = 0;
+
+    /**
+    * Audio Analysis */
+    if( M4OSA_NULL != pClipCtxt->pAudioStream )
+    {
+        switch( pClipCtxt->pAudioStream->m_basicProperties.m_streamType )
+        {
+            case M4DA_StreamTypeAudioAmrNarrowBand:
+
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_kAMR_NB;
+                break;
+
+            case M4DA_StreamTypeAudioAac:
+
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_kAAC;
+                break;
+
+            case M4DA_StreamTypeAudioMp3:
+
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_kMP3;
+                break;
+
+            case M4DA_StreamTypeAudioEvrc:
+
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_kEVRC;
+                break;
+
+            case M4DA_StreamTypeAudioPcm:
+
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_kPCM;
+                break;
+
+            default:
+
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intBuildAnalysis: unknown input audio format (0x%x),\
+                    returning M4NO_ERROR!",
+                    pClipCtxt->pAudioStream->m_basicProperties.m_streamType);
+                return
+                    M4NO_ERROR; /**< We do not return an error here.
+                                The audio format compatibility check will be done later */
+        }
+
+        pClipProperties->uiAudioMaxAuSize =
+            pClipCtxt->pAudioStream->m_basicProperties.m_maxAUSize;
+        pClipProperties->uiClipAudioDuration =
+            (M4OSA_UInt32)pClipCtxt->pAudioStream->m_basicProperties.m_duration;
+
+        pClipProperties->uiNbChannels = pClipCtxt->pAudioStream->m_nbChannels;
+        pClipProperties->uiSamplingFrequency =
+            pClipCtxt->pAudioStream->m_samplingFrequency;
+        pClipProperties->uiDecodedPcmSize =
+            pClipCtxt->pAudioStream->m_byteFrameLength
+            * pClipCtxt->pAudioStream->m_byteSampleSize
+            * pClipCtxt->pAudioStream->m_nbChannels;
+
+        /**
+        * Bugfix P4ME00001128: With some IMTC files, the AMR bit rate is 0 kbps
+        according to the GetProperties function */
+        pClipProperties->uiAudioBitrate =
+            (M4OSA_UInt32)pClipCtxt->pAudioStream->
+            m_basicProperties.m_averageBitRate;
+
+        if( 0 == pClipProperties->uiAudioBitrate )
+        {
+            if( M4VIDEOEDITING_kAMR_NB == pClipProperties->AudioStreamType )
+            {
+                /**
+                * Better to return a guessed 12.2 kbps value than a surely-wrong 0 kbps value! */
+                pClipProperties->uiAudioBitrate = M4VSS3GPP_AMR_DEFAULT_BITRATE;
+            }
+            else if( M4VIDEOEDITING_kEVRC == pClipProperties->AudioStreamType )
+            {
+                /**
+                * Better to return a guessed 9.2 kbps value than a surely-wrong 0 kbps value! */
+                pClipProperties->uiAudioBitrate =
+                    M4VSS3GPP_EVRC_DEFAULT_BITRATE;
+            }
+            else
+            {
+                pClipCtxt->ShellAPI.m_pReader->m_pFctGetOption(
+                    pClipCtxt->pReaderContext, M4READER_kOptionID_Bitrate,
+                    &pClipProperties->uiAudioBitrate);
+
+                if( M4OSA_NULL != pClipCtxt->pVideoStream )
+                {
+                    /* we got the overall bitrate; subtract the video bitrate, if any */
+                    pClipProperties->uiAudioBitrate -= pClipCtxt->pVideoStream->
+                        m_basicProperties.m_averageBitRate;
+                }
+            }
+        }
+
+        /* New aac properties */
+        if( M4DA_StreamTypeAudioAac
+            == pClipCtxt->pAudioStream->m_basicProperties.m_streamType )
+        {
+            pClipProperties->uiNbChannels = pClipCtxt->AacProperties.aNumChan;
+            pClipProperties->uiSamplingFrequency =
+                pClipCtxt->AacProperties.aSampFreq;
+
+            if( pClipCtxt->AacProperties.aSBRPresent )
+            {
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_kAACplus;
+                pClipProperties->uiExtendedSamplingFrequency =
+                    pClipCtxt->AacProperties.aExtensionSampFreq;
+            }
+
+            if( pClipCtxt->AacProperties.aPSPresent )
+            {
+                pClipProperties->AudioStreamType = M4VIDEOEDITING_keAACplus;
+            }
+        }
+    }
+
+    /* Get 'ftyp' atom */
+    err = pClipCtxt->ShellAPI.m_pReader->m_pFctGetOption(
+        pClipCtxt->pReaderContext,
+        M4READER_kOptionID_3gpFtypBox, &pClipProperties->ftyp);
+
+    if( M4NO_ERROR == err )
+    {
+        M4OSA_UInt8 i;
+
+        for ( i = 0; i < pClipProperties->ftyp.nbCompatibleBrands; i++ )
+            if( M4VIDEOEDITING_BRAND_EMP
+                == pClipProperties->ftyp.compatible_brands[i] )
+                pClipProperties->VideoStreamType = M4VIDEOEDITING_kMPEG4_EMP;
+    }
+
+    /**
+    * We write the VSS 3GPP version in the clip analysis to be sure the integrator doesn't
+    * mix older analysis results with newer libraries */
+    pClipProperties->Version[0] = M4VIDEOEDITING_VERSION_MAJOR;
+    pClipProperties->Version[1] = M4VIDEOEDITING_VERSION_MINOR;
+    pClipProperties->Version[2] = M4VIDEOEDITING_VERSION_REVISION;
+
+    pClipProperties->FileType = pClipCtxt->pSettings->FileType;
+
+    if( pClipProperties->uiClipVideoDuration
+        > pClipProperties->uiClipAudioDuration )
+        pClipProperties->uiClipDuration = pClipProperties->uiClipVideoDuration;
+    else
+        pClipProperties->uiClipDuration = pClipProperties->uiClipAudioDuration;
+
+    /* Reset compatibility chart */
+    pClipProperties->bVideoIsEditable = M4OSA_FALSE;
+    pClipProperties->bAudioIsEditable = M4OSA_FALSE;
+    pClipProperties->bVideoIsCompatibleWithMasterClip = M4OSA_FALSE;
+    pClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
+
+    /* Analysis successfully completed */
+    pClipProperties->bAnalysed = M4OSA_TRUE;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intBuildAnalysis(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
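+
+/* Illustrative sketch, not part of the original change and compiled out: the H263 branch of
+ * M4VSS3GPP_intBuildAnalysis() reads the level at DSI offset 5 and the profile at offset 6,
+ * which corresponds to the 'd263' sample-entry layout of 3GPP files (vendor: 4 bytes, decoder
+ * version: 1 byte, level: 1 byte, profile: 1 byte). The helper below simply restates that
+ * layout; its name is made up for the example. */
+#if 0
+static M4OSA_Void M4VSS3GPP_exampleParseH263Dsi( const M4OSA_UInt8 *pDsi,
+                                                 M4OSA_UInt32 dsiSize,
+                                                 M4OSA_UInt8 *pLevel,
+                                                 M4OSA_UInt8 *pProfile )
+{
+    if( ( M4OSA_NULL == pDsi) || (dsiSize < 7) )
+    {
+        *pLevel = 0;
+        *pProfile = 0;
+        return;
+    }
+
+    /* pDsi[0..3] = vendor code, pDsi[4] = decoder version */
+    *pLevel = pDsi[5];
+    *pProfile = pDsi[6];
+}
+#endif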
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing()
+ * @brief    Check if the clip is compatible with VSS editing
+ * @note
+ * @param    pClipProperties     (IN/OUT) Pointer to a valid, filled ClipProperties structure.
+ * @return    M4NO_ERROR:            No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intCheckClipCompatibleWithVssEditing(
+    M4VIDEOEDITING_ClipProperties *pClipProperties )
+{
+    M4OSA_UInt32 uiNbOfValidStreams = 0;
+    M4OSA_ERR video_err = M4NO_ERROR;
+    M4OSA_ERR audio_err = M4NO_ERROR;
+
+    /**
+    * Check that analysis has been generated by this version of the VSS3GPP library */
+    if( ( pClipProperties->Version[0] != M4VIDEOEDITING_VERSION_MAJOR)
+        || (pClipProperties->Version[1] != M4VIDEOEDITING_VERSION_MINOR)
+        || (pClipProperties->Version[2]
+    != M4VIDEOEDITING_VERSION_REVISION) )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing: The clip analysis has been generated\
+            by another version, returning M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION");
+        return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION;
+    }
+
+    /********* file type *********/
+
+    if( M4VIDEOEDITING_kFileType_AMR == pClipProperties->FileType )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing:\
+            returning M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED");
+        return M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED;
+    }
+
+    if( M4VIDEOEDITING_kFileType_MP3 == pClipProperties->FileType )
+    {
+        M4OSA_TRACE3_0(
+            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): returning M4NO_ERROR");
+        return M4NO_ERROR;
+    }
+
+    /********* Video *********/
+
+    if( M4VIDEOEDITING_kNoneVideo
+        != pClipProperties->VideoStreamType ) /**< if there is a video stream */
+    {
+        /**
+        * Check video format is MPEG-4 or H263 */
+        switch( pClipProperties->VideoStreamType )
+        {
+            case M4VIDEOEDITING_kH263:
+                if( M4VIDEOEDITING_kProfile_and_Level_Out_Of_Range
+                    == pClipProperties->ProfileAndLevel )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_intCheckClipCompatibleWithVssEditing():\
+                        unsupported H263 profile");
+                    video_err = M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE;
+                    break;
+                }
+                uiNbOfValidStreams++;
+                pClipProperties->bVideoIsEditable = M4OSA_TRUE;
+                break;
+
+            case M4VIDEOEDITING_kMPEG4_EMP:
+            case M4VIDEOEDITING_kMPEG4:
+                if( M4VIDEOEDITING_kProfile_and_Level_Out_Of_Range
+                    == pClipProperties->ProfileAndLevel )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_intCheckClipCompatibleWithVssEditing():\
+                        unsupported MPEG-4 profile");
+                    video_err = M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE;
+                    break;
+                }
+
+                if( M4OSA_TRUE == pClipProperties->bMPEG4rvlc )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_intCheckClipCompatibleWithVssEditing():\
+                        unsupported MPEG-4 RVLC tool");
+                    video_err = M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC;
+                    break;
+                }
+                uiNbOfValidStreams++;
+                pClipProperties->bVideoIsEditable = M4OSA_TRUE;
+                break;
+
+            case M4VIDEOEDITING_kH264:
+
+                uiNbOfValidStreams++;
+                pClipProperties->bVideoIsEditable = M4OSA_TRUE;
+                break;
+
+            default: /*< KO, we return error */
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): unsupported video format");
+                video_err = M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
+                break;
+        }
+    }
+    else
+    {
+        /**
+        * Audio-only streams are currently not supported by the VSS editing feature
+        (except in the MP3 case) */
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): No video stream in clip");
+        video_err = M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE;
+    }
+
+    /********* Audio *********/
+    if( M4VIDEOEDITING_kNoneAudio != pClipProperties->
+        AudioStreamType ) /**< if there is an audio stream */
+    {
+        /**
+        * Check audio format is AMR-NB, EVRC or AAC */
+        switch( pClipProperties->AudioStreamType )
+        {
+            case M4VIDEOEDITING_kAMR_NB:
+                pClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                uiNbOfValidStreams++;
+                break;
+
+            case M4VIDEOEDITING_kAAC:
+            case M4VIDEOEDITING_kAACplus:
+            case M4VIDEOEDITING_keAACplus:
+                switch( pClipProperties->uiSamplingFrequency )
+                {
+                case 8000:
+                case 16000:
+                case 22050:
+                case 24000:
+                case 32000:
+                case 44100:
+                case 48000:
+                    pClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                    break;
+
+                default:
+                    break;
+                }
+                uiNbOfValidStreams++;
+                break;
+
+            case M4VIDEOEDITING_kEVRC:
+                /*< OK, we proceed, no return */
+                uiNbOfValidStreams++;
+                break;
+
+            default: /*< KO, we return error */
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): unsupported audio format");
+                audio_err = M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
+                break;
+        }
+    }
+    else
+    {
+        /* Silence is always editable */
+        pClipProperties->bAudioIsEditable = M4OSA_TRUE;
+    }
+
+    /**
+    * Check there is at least one valid stream in the file... */
+    if( video_err != M4NO_ERROR )
+        return video_err;
+
+    if( audio_err != M4NO_ERROR )
+        return audio_err;
+
+    if( 0 == uiNbOfValidStreams )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): File contains no supported stream,\
+            returning M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE");
+        return M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intCheckClipCompatibleWithVssEditing(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioMixingCompatibility()
+ * @brief    This function allows checking whether two clips are compatible with each other for
+ *        the VSS 3GPP audio mixing feature.
+ * @note
+ * @param    pC                            (IN) Context of the audio mixer
+ * @param    pInputClipProperties        (IN) Clip analysis of the first clip
+ * @param    pAddedClipProperties        (IN) Clip analysis of the second clip
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return    M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION
+ * @return  M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP
+ * @return  M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM
+ ******************************************************************************
+ */
+M4OSA_ERR
+M4VSS3GPP_intAudioMixingCompatibility( M4VSS3GPP_InternalAudioMixingContext
+                                      *pC, M4VIDEOEDITING_ClipProperties *pInputClipProperties,
+                                      M4VIDEOEDITING_ClipProperties *pAddedClipProperties )
+{
+    M4OSA_Bool bClip1IsAAC = M4OSA_FALSE;
+    M4OSA_Bool bClip2IsAAC = M4OSA_FALSE;
+
+    /**
+    * Reset settings */
+    pInputClipProperties->bAudioIsEditable = M4OSA_FALSE;
+    pAddedClipProperties->bAudioIsEditable = M4OSA_FALSE;
+    pInputClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
+    pAddedClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_FALSE;
+
+    /**
+    * Check that analysis has been generated by this version of the VSS3GPP library */
+    if( ( pInputClipProperties->Version[0] != M4VIDEOEDITING_VERSION_MAJOR)
+        || (pInputClipProperties->Version[1] != M4VIDEOEDITING_VERSION_MINOR)
+        || (pInputClipProperties->Version[2]
+    != M4VIDEOEDITING_VERSION_REVISION) )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAudioMixingCompatibility: The clip analysis has been generated\
+            by another version, returning M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION");
+        return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION;
+    }
+
+    if( ( pAddedClipProperties->Version[0] != M4VIDEOEDITING_VERSION_MAJOR)
+        || (pAddedClipProperties->Version[1] != M4VIDEOEDITING_VERSION_MINOR)
+        || (pAddedClipProperties->Version[2]
+    != M4VIDEOEDITING_VERSION_REVISION) )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAudioMixingCompatibility: The clip analysis has been generated\
+            by another version, returning M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION");
+        return M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION;
+    }
+
+    /********* input file type *********/
+
+    if( M4VIDEOEDITING_kFileType_3GPP != pInputClipProperties->FileType )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAudioMixingCompatibility:\
+            returning M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP");
+        return M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP;
+    }
+
+    /********* input audio *********/
+
+    if( M4VIDEOEDITING_kNoneAudio != pInputClipProperties->
+        AudioStreamType ) /**< if there is an audio stream */
+    {
+        /**
+        * Check audio format is AMR-NB or AAC */
+        switch( pInputClipProperties->AudioStreamType )
+        {
+            case M4VIDEOEDITING_kAMR_NB:
+                pInputClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                break;
+
+            case M4VIDEOEDITING_kAAC:
+            case M4VIDEOEDITING_kAACplus:
+            case M4VIDEOEDITING_keAACplus:
+                switch( pInputClipProperties->uiSamplingFrequency )
+                {
+                case 8000:
+                case 16000:
+                case 22050:
+                case 24000:
+                case 32000:
+                case 44100:
+                case 48000:
+                    pInputClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                    break;
+
+                default:
+                    break;
+            }
+                bClip1IsAAC = M4OSA_TRUE;
+                break;
+
+            default:
+                break;
+        }
+    }
+    else
+    {
+        /* Silence is always editable */
+        pInputClipProperties->bAudioIsEditable = M4OSA_TRUE;
+    }
+
+    /********* added audio *********/
+
+    if( M4VIDEOEDITING_kNoneAudio != pAddedClipProperties->
+        AudioStreamType ) /**< if there is an audio stream */
+    {
+        /**
+        * Check audio format is AMR-NB or AAC */
+        switch( pAddedClipProperties->AudioStreamType )
+        {
+            case M4VIDEOEDITING_kAMR_NB:
+                pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+                    M4OSA_TRUE; /* this field is used to know whether silence is supported */
+                break;
+
+            case M4VIDEOEDITING_kAAC:
+            case M4VIDEOEDITING_kAACplus:
+            case M4VIDEOEDITING_keAACplus:
+                switch( pAddedClipProperties->uiSamplingFrequency )
+                {
+                case 8000:
+                case 16000:
+                case 22050:
+                case 24000:
+                case 32000:
+                case 44100:
+                case 48000:
+                    pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                    break;
+
+                default:
+                    break;
+                }
+                pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+                    M4OSA_TRUE; /* this field is used to know whether silence is supported */
+                bClip2IsAAC = M4OSA_TRUE;
+                break;
+
+            case M4VIDEOEDITING_kEVRC:
+                break;
+
+            case M4VIDEOEDITING_kPCM:
+                pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+                    M4OSA_TRUE; /* this field is used to know whether silence is supported */
+
+                if( pAddedClipProperties->uiSamplingFrequency == 16000 )
+                {
+                    bClip2IsAAC = M4OSA_TRUE;
+                }
+                break;
+
+            case M4VIDEOEDITING_kMP3: /*RC*/
+                pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+                pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+                    M4OSA_TRUE; /* this field is used to know whether silence is supported */
+                break;
+
+            default:
+                /* The writer cannot write this format into a 3GPP file */
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intAudioMixingCompatibility:\
+                    returning M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM");
+                return M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM;
+        }
+    }
+    else
+    {
+        /* Silence is always editable */
+        pAddedClipProperties->bAudioIsEditable = M4OSA_TRUE;
+        pAddedClipProperties->bAudioIsCompatibleWithMasterClip =
+            M4OSA_TRUE; /* this field is used to know whether silence is supported */
+    }
+
+    if( pC->bRemoveOriginal == M4OSA_FALSE )
+    {
+        if( pInputClipProperties->uiSamplingFrequency
+            != pAddedClipProperties->uiSamplingFrequency )
+        {
+            /* The SSRC must be called to align the audio sampling frequencies
+               and/or the number of channels */
+            /* Moreover, the audio encoder may be needed in case of audio replacement */
+            pC->b_SSRCneeded = M4OSA_TRUE;
+        }
+
+        if( pInputClipProperties->uiNbChannels
+            < pAddedClipProperties->uiNbChannels )
+        {
+            /* Stereo to Mono */
+            pC->ChannelConversion = 1;
+        }
+        else if( pInputClipProperties->uiNbChannels
+            > pAddedClipProperties->uiNbChannels )
+        {
+            /* Mono to Stereo */
+            pC->ChannelConversion = 2;
+        }
+    }
+
+    pInputClipProperties->bAudioIsCompatibleWithMasterClip = M4OSA_TRUE;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intAudioMixingCompatibility(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
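+
+/* Illustrative sketch, not part of the original change and compiled out: restates the decision
+ * taken above when the original audio track is kept (bRemoveOriginal == M4OSA_FALSE). The SSRC
+ * is needed as soon as the two sampling frequencies differ, and the channel conversion value
+ * encodes the direction of the adaptation (1 = stereo to mono, 2 = mono to stereo, 0 = none).
+ * The helper name and its out-parameters are made up for the example. */
+#if 0
+static M4OSA_Void M4VSS3GPP_exampleAudioMixingDecision(
+    const M4VIDEOEDITING_ClipProperties *pInputClipProperties,
+    const M4VIDEOEDITING_ClipProperties *pAddedClipProperties,
+    M4OSA_Bool *pbSsrcNeeded, M4OSA_UInt8 *pChannelConversion )
+{
+    *pbSsrcNeeded = ( pInputClipProperties->uiSamplingFrequency
+        != pAddedClipProperties->uiSamplingFrequency) ? M4OSA_TRUE : M4OSA_FALSE;
+
+    if( pInputClipProperties->uiNbChannels < pAddedClipProperties->uiNbChannels )
+    {
+        /* The added stream has more channels than the original: stereo to mono */
+        *pChannelConversion = 1;
+    }
+    else if( pInputClipProperties->uiNbChannels
+        > pAddedClipProperties->uiNbChannels )
+    {
+        /* The added stream has fewer channels than the original: mono to stereo */
+        *pChannelConversion = 2;
+    }
+    else
+    {
+        /* Same number of channels: no conversion needed */
+        *pChannelConversion = 0;
+    }
+}
+#endif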
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_Codecs.c b/libvideoeditor/vss/src/M4VSS3GPP_Codecs.c
new file mode 100755
index 0000000..547b099
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_Codecs.c
@@ -0,0 +1,1052 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *************************************************************************
+ * @file   M4VSS3GPP_Codecs.c
+ * @brief  VSS implementation
+ * @note   This file contains all functions related to audio/video
+ *            codec manipulations.
+ *************************************************************************
+ */
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4OSA_Debug.h"             /**< Include for OSAL debug services */
+#include "M4VSS3GPP_ErrorCodes.h"
+#include "M4VSS3GPP_InternalTypes.h" /**< Internal types of the VSS */
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_clearInterfaceTables()
+ * @brief    Clear the encoder, decoder, reader and writer interface tables
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    The context is null
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_clearInterfaceTables( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+    M4OSA_UInt8 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+
+    /* Initialization so that double registration can be detected later */
+    pC->pWriterGlobalFcts = M4OSA_NULL;
+    pC->pWriterDataFcts = M4OSA_NULL;
+    pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
+    pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
+    pC->pCurrentAudioEncoderUserData = M4OSA_NULL;
+    pC->pCurrentAudioDecoderUserData = M4OSA_NULL;
+
+    pC->pCurrentVideoEncoderExternalAPI = M4OSA_NULL;
+    pC->pCurrentVideoEncoderUserData = M4OSA_NULL;
+
+    for ( i = 0; i < M4WRITER_kType_NB; i++ )
+    {
+        pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
+        pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
+    }
+
+    for ( i = 0; i < M4ENCODER_kVideo_NB; i++ )
+    {
+        pC->pVideoEncoderInterface[i] = M4OSA_NULL;
+        pC->pVideoEncoderExternalAPITable[i] = M4OSA_NULL;
+        pC->pVideoEncoderUserDataTable[i] = M4OSA_NULL;
+    }
+
+    for ( i = 0; i < M4ENCODER_kAudio_NB; i++ )
+    {
+        pC->pAudioEncoderInterface[i] = M4OSA_NULL;
+        pC->pAudioEncoderFlag[i] = M4OSA_FALSE;
+        pC->pAudioEncoderUserDataTable[i] = M4OSA_NULL;
+    }
+
+    /* Initialization so that double registration can be detected later */
+    pC->m_pReader = M4OSA_NULL;
+    pC->m_pReaderDataIt = M4OSA_NULL;
+    pC->m_uiNbRegisteredReaders = 0;
+
+    for ( i = 0; i < M4READER_kMediaType_NB; i++ )
+    {
+        pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
+        pC->m_pReaderDataItTable[i] = M4OSA_NULL;
+    }
+
+    pC->m_pVideoDecoder = M4OSA_NULL;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    pC->m_pCurrentVideoDecoderUserData = M4OSA_NULL;
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    pC->m_uiNbRegisteredVideoDec = 0;
+
+    for ( i = 0; i < M4DECODER_kVideoType_NB; i++ )
+    {
+        pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+        pC->m_pVideoDecoderUserDataTable[i] = M4OSA_NULL;
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    }
+
+    pC->m_pAudioDecoder = M4OSA_NULL;
+
+    for ( i = 0; i < M4AD_kType_NB; i++ )
+    {
+        pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
+        pC->m_pAudioDecoderFlagTable[i] = M4OSA_FALSE;
+        pC->pAudioDecoderUserDataTable[i] = M4OSA_NULL;
+    }
+
+    return M4NO_ERROR;
+}
+
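+/**
+ * Usage sketch (illustrative only): the tables are cleared once, then the
+ * media/codec shells are registered, and finally the setCurrent functions pick
+ * the entries to use for a given clip or output file. The subscription helper
+ * below is the one used by M4VSS3GPP_editInit(); a standalone caller like this
+ * is an assumption.
+ *
+ *   M4VSS3GPP_MediaAndCodecCtxt shellCtxt;
+ *   M4OSA_ERR                   err;
+ *
+ *   err = M4VSS3GPP_clearInterfaceTables(&shellCtxt);
+ *   if( M4NO_ERROR != err ) return err;
+ *
+ *   err = M4VSS3GPP_subscribeMediaAndCodec(&shellCtxt);
+ *   if( M4NO_ERROR != err ) return err;
+ */
+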
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerWriter()
+ * @brief    This function will register a specific file format writer.
+ * @note    Depending on the media type, this function stores the given writer
+ *        interfaces in the internal context.
+ * @param    pContext:    (IN) Execution context.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER    pContext,pWtrGlobalInterface or pWtrDataInterface is M4OSA_NULL
+ *                          (debug only), or invalid MediaType
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerWriter( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                   M4WRITER_OutputFileType MediaType,
+                                   M4WRITER_GlobalInterface *pWtrGlobalInterface,
+                                   M4WRITER_DataInterface *pWtrDataInterface )
+{
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((pC == M4OSA_NULL), M4ERR_PARAMETER,
+        "VSS: context is M4OSA_NULL in M4VSS3GPP_registerWriter");
+    M4OSA_DEBUG_IF2((pWtrGlobalInterface == M4OSA_NULL), M4ERR_PARAMETER,
+        "pWtrGlobalInterface is M4OSA_NULL in M4VSS3GPP_registerWriter");
+    M4OSA_DEBUG_IF2((pWtrDataInterface == M4OSA_NULL), M4ERR_PARAMETER,
+        "pWtrDataInterface is M4OSA_NULL in M4VSS3GPP_registerWriter");
+
+    M4OSA_TRACE3_3(
+        "VSS: M4VSS3GPP_registerWriter called with pContext=0x%x, pWtrGlobalInterface=0x%x,\
+        pWtrDataInterface=0x%x",
+        pC, pWtrGlobalInterface, pWtrDataInterface);
+
+    if( ( MediaType == M4WRITER_kUnknown) || (MediaType >= M4WRITER_kType_NB) )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->WriterInterface[MediaType].pGlobalFcts != M4OSA_NULL )
+    {
+        /* a writer corresponding to this media type has already been registered !*/
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "This media type has already been registered");
+        return M4ERR_PARAMETER;
+    }
+
+    /*
+    * Save writer interface in context */
+    pC->WriterInterface[MediaType].pGlobalFcts = pWtrGlobalInterface;
+    pC->WriterInterface[MediaType].pDataFcts = pWtrDataInterface;
+
+    return M4NO_ERROR;
+}
+
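+/**
+ * Usage sketch (illustrative only): registering the 3GPP writer. The interface
+ * retrieval function shown here, M4WRITER_3GP_getInterfaces(), is assumed to be
+ * the 3GP writer shell factory; the exact name may differ.
+ *
+ *   M4WRITER_OutputFileType  writerType;
+ *   M4WRITER_GlobalInterface *pWtrGlobalFcts = M4OSA_NULL;
+ *   M4WRITER_DataInterface   *pWtrDataFcts   = M4OSA_NULL;
+ *
+ *   err = M4WRITER_3GP_getInterfaces(&writerType, &pWtrGlobalFcts, &pWtrDataFcts);
+ *   if( M4NO_ERROR == err )
+ *   {
+ *       err = M4VSS3GPP_registerWriter(&shellCtxt, writerType,
+ *                                      pWtrGlobalFcts, pWtrDataFcts);
+ *   }
+ */
+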
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerVideoEncoder()
+ * @brief    This function will register a specific video encoder.
+ * @note    Depending on the media type, this function stores the given encoder
+ *        interface in the internal context.
+ * @param    pContext:    (IN) Execution context.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER    pContext or pEncGlobalInterface is M4OSA_NULL (debug only),
+ *                          or invalid MediaType
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerVideoEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                         M4ENCODER_Format MediaType,
+                                         M4ENCODER_GlobalInterface *pEncGlobalInterface )
+{
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((pC == M4OSA_NULL), M4ERR_PARAMETER,
+        "VSS: context is M4OSA_NULL in M4VSS3GPP_registerVideoEncoder");
+    M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL), M4ERR_PARAMETER,
+        "pEncGlobalInterface is M4OSA_NULL in M4VSS3GPP_registerVideoEncoder");
+
+    M4OSA_TRACE3_3(
+        "VSS: M4VSS3GPP_registerEncoder called with pContext=0x%x, pEncGlobalInterface=0x%x,\
+        MediaType=0x%x",
+        pC, pEncGlobalInterface, MediaType);
+
+    if( MediaType >= M4ENCODER_kVideo_NB )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "Invalid video encoder type");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->pVideoEncoderInterface[MediaType] != M4OSA_NULL )
+    {
+        /* can be legitimate, in cases where we have one version that can use external encoders
+        but which still has the built-in one to be able to work without an external encoder; in
+        this case the new encoder simply replaces the old one (i.e. we unregister it first). */
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+        if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+        {
+
+#endif
+
+            M4OSA_free((M4OSA_MemAddr32)pC->pVideoEncoderInterface[MediaType]);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+        }
+
+#endif
+
+        pC->pVideoEncoderInterface[MediaType] = M4OSA_NULL;
+    }
+
+    /*
+    * Save encoder interface in context */
+    pC->pVideoEncoderInterface[MediaType] = pEncGlobalInterface;
+    /* The actual userData and external API will be set by the registration function in the case
+    of an external encoder (add it as a parameter to this function in the long run?) */
+    pC->pVideoEncoderUserDataTable[MediaType] = M4OSA_NULL;
+    pC->pVideoEncoderExternalAPITable[MediaType] = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerAudioEncoder()
+ * @brief    This function will register a specific audio encoder.
+ * @note    Depending on the media type, this function stores the given encoder
+ *        interface in the internal context.
+ * @param    pContext:                (IN) Execution context.
+ * @param    mediaType:                (IN) The media type.
+ * @param    pEncGlobalInterface:    (OUT) the encoder interface functions.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER: pContext or pEncGlobalInterface is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerAudioEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                         M4ENCODER_AudioFormat MediaType,
+                                         M4ENCODER_AudioGlobalInterface *pEncGlobalInterface )
+{
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((pC == M4OSA_NULL), M4ERR_PARAMETER,
+        "VSS: context is M4OSA_NULL in M4VSS3GPP_registerAudioEncoder");
+    M4OSA_DEBUG_IF2((pEncGlobalInterface == M4OSA_NULL), M4ERR_PARAMETER,
+        "pEncGlobalInterface is M4OSA_NULL in M4VSS3GPP_registerAudioEncoder");
+
+    M4OSA_TRACE3_3(
+        "VSS: M4VSS3GPP_registerAudioEncoder called pContext=0x%x, pEncGlobalInterface=0x%x,\
+        MediaType=0x%x",
+        pC, pEncGlobalInterface, MediaType);
+
+    if( MediaType >= M4ENCODER_kAudio_NB )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "Invalid audio encoder type");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->pAudioEncoderInterface[MediaType] != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pAudioEncoderInterface[MediaType]);
+        pC->pAudioEncoderInterface[MediaType] = M4OSA_NULL;
+    }
+    /*
+    * Save encoder interface in context */
+    pC->pAudioEncoderInterface[MediaType] = pEncGlobalInterface;
+    pC->pAudioEncoderFlag[MediaType] = M4OSA_FALSE; /* internal encoder */
+    pC->pAudioEncoderUserDataTable[MediaType] = M4OSA_NULL;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_registerAudioEncoder: pC->pAudioEncoderInterface[0x%x] = 0x%x",
+        MediaType, pC->pAudioEncoderInterface[MediaType]);
+
+    return M4NO_ERROR;
+}
+
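+/**
+ * Usage sketch (illustrative only): registering an internal AMR-NB audio
+ * encoder. The interface pointer is assumed to be obtained from the encoder
+ * shell (retrieval not shown); once registered as internal, it is freed by
+ * M4VSS3GPP_unRegisterAllEncoders().
+ *
+ *   M4ENCODER_AudioGlobalInterface *pAmrNbEncFcts = ...;  (obtained from the shell)
+ *
+ *   err = M4VSS3GPP_registerAudioEncoder(&shellCtxt, M4ENCODER_kAMRNB,
+ *                                        pAmrNbEncFcts);
+ */
+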
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerReader()
+ * @brief    Register reader.
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerReader( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                   M4READER_MediaType mediaType,
+                                   M4READER_GlobalInterface *pRdrGlobalInterface,
+                                   M4READER_DataInterface *pRdrDataInterface )
+{
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrGlobalInterface), M4ERR_PARAMETER,
+        "M4VSS3GPP_registerReader: invalid pointer on global interface");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pRdrDataInterface), M4ERR_PARAMETER,
+        "M4VSS3GPP_registerReader: invalid pointer on data interface");
+
+    if( mediaType == M4READER_kMediaTypeUnknown
+        || mediaType >= M4READER_kMediaType_NB )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER, "Invalid media type");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->m_pReaderGlobalItTable[mediaType] != M4OSA_NULL )
+    {
+        /* a reader corresponding to this media type has already been registered !*/
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "This media type has already been registered");
+        return M4ERR_PARAMETER;
+    }
+
+    pC->m_pReaderGlobalItTable[mediaType] = pRdrGlobalInterface;
+    pC->m_pReaderDataItTable[mediaType] = pRdrDataInterface;
+
+    pC->m_uiNbRegisteredReaders++;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerVideoDecoder()
+ * @brief    Register video decoder
+ * @param    pContext                (IN/OUT) VSS context.
+ * @param    decoderType            (IN) Decoder type
+ * @param    pDecoderInterface    (IN) Decoder interface.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only),
+ *                                or the decoder type is invalid
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerVideoDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                         M4DECODER_VideoType decoderType,
+                                         M4DECODER_VideoInterface *pDecoderInterface )
+{
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
+        "M4VSS3GPP_registerVideoDecoder: invalid pointer on decoder interface");
+
+    if( decoderType >= M4DECODER_kVideoType_NB )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "Invalid video decoder type");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pC->m_pVideoDecoderItTable[decoderType] != M4OSA_NULL )
+    {
+#ifndef M4VSS_ENABLE_EXTERNAL_DECODERS
+        /* a decoder corresponding to this media type has already been registered !*/
+
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "Decoder has already been registered");
+        return M4ERR_PARAMETER;
+
+#else /* external decoders are possible */
+        /* can be legitimate, in cases where we have one version that can use external decoders
+        but which still has the built-in one to be able to work without an external decoder; in
+        this case the new decoder simply replaces the old one (i.e. we unregister it first). */
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+        if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+        {
+
+#endif
+
+            M4OSA_free(
+                (M4OSA_MemAddr32)pC->m_pVideoDecoderItTable[decoderType]);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+        }
+
+#endif
+
+        pC->m_pVideoDecoderItTable[decoderType] = M4OSA_NULL;
+        /* oh, and don't forget the user data, too. */
+        if( pC->m_pVideoDecoderUserDataTable[decoderType] != M4OSA_NULL )
+        {
+            M4OSA_free(
+                (M4OSA_MemAddr32)pC->m_pVideoDecoderUserDataTable[decoderType]);
+            pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
+        }
+#endif /* are external decoders possible? */
+
+    }
+
+    pC->m_pVideoDecoderItTable[decoderType] = pDecoderInterface;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    pC->m_pVideoDecoderUserDataTable[decoderType] = M4OSA_NULL;
+    /* The actual userData will be set by the registration function in the case
+    of an external decoder (add it as a parameter to this function in the long run?) */
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    pC->m_uiNbRegisteredVideoDec++;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_registerAudioDecoder()
+ * @brief    Register audio decoder
+ * @note    This function is used internally by the VSS to register the NXP audio decoders.
+ * @param    context                (IN/OUT) VSS context.
+ * @param    decoderType            (IN) Audio decoder type
+ * @param    pDecoderInterface    (IN) Audio decoder interface.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:   A parameter is null, or the decoder type is invalid(in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_registerAudioDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                         M4AD_Type decoderType, M4AD_Interface *pDecoderInterface)
+{
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pDecoderInterface), M4ERR_PARAMETER,
+        "M4VSS3GPP_registerAudioDecoder: invalid pointer on decoder interface");
+
+    if( decoderType >= M4AD_kType_NB )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4ERR_PARAMETER,
+            "Invalid audio decoder type");
+        return M4ERR_PARAMETER;
+    }
+    if(M4OSA_NULL != pC->m_pAudioDecoderItTable[decoderType])
+    {
+        /* A decoder of this type is already registered: free it before replacing it */
+        M4OSA_free((M4OSA_MemAddr32)pC->m_pAudioDecoderItTable[decoderType]);
+        pC->m_pAudioDecoderItTable[decoderType] = M4OSA_NULL;
+    }
+
+    pC->m_pAudioDecoderItTable[decoderType] = pDecoderInterface;
+    pC->m_pAudioDecoderFlagTable[decoderType] =
+        M4OSA_FALSE; /* internal decoder */
+    pC->pAudioDecoderUserDataTable[decoderType] = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_unRegisterAllWriters()
+ * @brief    Unregister all writers
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_unRegisterAllWriters( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+    M4OSA_Int32 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+
+    for ( i = 0; i < M4WRITER_kType_NB; i++ )
+    {
+        if( pC->WriterInterface[i].pGlobalFcts != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->WriterInterface[i].pGlobalFcts);
+            pC->WriterInterface[i].pGlobalFcts = M4OSA_NULL;
+        }
+
+        if( pC->WriterInterface[i].pDataFcts != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->WriterInterface[i].pDataFcts);
+            pC->WriterInterface[i].pDataFcts = M4OSA_NULL;
+        }
+    }
+
+    pC->pWriterGlobalFcts = M4OSA_NULL;
+    pC->pWriterDataFcts = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_unRegisterAllEncoders()
+ * @brief    Unregister the encoders
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_unRegisterAllEncoders( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+    M4OSA_Int32 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_TRACE3_1("M4VSS3GPP_unRegisterAllEncoders: pC=0x%x", pC);
+
+    for ( i = 0; i < M4ENCODER_kVideo_NB; i++ )
+    {
+        if( pC->pVideoEncoderInterface[i] != M4OSA_NULL )
+        {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+            {
+
+#endif
+
+                M4OSA_free((M4OSA_MemAddr32)pC->pVideoEncoderInterface[i]);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            }
+
+#endif
+
+            pC->pVideoEncoderInterface[i] = M4OSA_NULL;
+        }
+    }
+
+    for ( i = 0; i < M4ENCODER_kAudio_NB; i++ )
+    {
+        if( pC->pAudioEncoderInterface[i] != M4OSA_NULL )
+        {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+            {
+
+#endif
+                /*Don't free external audio encoders interfaces*/
+
+                if( M4OSA_FALSE == pC->pAudioEncoderFlag[i] )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)pC->pAudioEncoderInterface[i]);
+                }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            }
+
+#endif
+
+            pC->pAudioEncoderInterface[i] = M4OSA_NULL;
+        }
+    }
+
+    pC->pVideoEncoderGlobalFcts = M4OSA_NULL;
+    pC->pAudioEncoderGlobalFcts = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_unRegisterAllReaders()
+ * @brief    Unregister all readers
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_unRegisterAllReaders( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+    M4OSA_Int32 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+
+    for ( i = 0; i < M4READER_kMediaType_NB; i++ )
+    {
+        if( pC->m_pReaderGlobalItTable[i] != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pReaderGlobalItTable[i]);
+            pC->m_pReaderGlobalItTable[i] = M4OSA_NULL;
+        }
+
+        if( pC->m_pReaderDataItTable[i] != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pReaderDataItTable[i]);
+            pC->m_pReaderDataItTable[i] = M4OSA_NULL;
+        }
+    }
+
+    pC->m_uiNbRegisteredReaders = 0;
+    pC->m_pReader = M4OSA_NULL;
+    pC->m_pReaderDataIt = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_unRegisterAllDecoders()
+ * @brief    Unregister the decoders
+ * @param    pContext            (IN/OUT) VSS context.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_unRegisterAllDecoders( M4VSS3GPP_MediaAndCodecCtxt *pC )
+{
+    M4OSA_Int32 i;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_TRACE3_1("M4VSS3GPP_unRegisterAllDecoders: pC=0x%x", pC);
+
+    for ( i = 0; i < M4DECODER_kVideoType_NB; i++ )
+    {
+        if( pC->m_pVideoDecoderItTable[i] != M4OSA_NULL )
+        {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+            {
+
+#endif
+
+                M4OSA_free((M4OSA_MemAddr32)pC->m_pVideoDecoderItTable[i]);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            }
+
+#endif
+
+            pC->m_pVideoDecoderItTable[i] = M4OSA_NULL;
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+#if 0 /* This is to avoid freeing OMX core context, passed as user data */
+
+            if( pC->m_pVideoDecoderUserDataTable[i] != M4OSA_NULL )
+            {
+                M4OSA_free(
+                    (M4OSA_MemAddr32)pC->m_pVideoDecoderUserDataTable[i]);
+                /* there ought to be a better pattern... right? */
+                pC->m_pVideoDecoderUserDataTable[i] = M4OSA_NULL;
+            }
+
+#endif
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+        }
+    }
+
+    for ( i = 0; i < M4AD_kType_NB; i++ )
+    {
+        if( pC->m_pAudioDecoderItTable[i] != M4OSA_NULL )
+        {
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            if( M4OSA_TRUE == pC->bAllowFreeingOMXCodecInterface )
+            {
+
+#endif
+                /*Don't free external audio decoders interfaces*/
+
+                if( M4OSA_FALSE == pC->m_pAudioDecoderFlagTable[i] )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)pC->m_pAudioDecoderItTable[i]);
+                }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+            }
+
+#endif
+
+            pC->m_pAudioDecoderItTable[i] = M4OSA_NULL;
+        }
+    }
+
+    pC->m_uiNbRegisteredVideoDec = 0;
+    pC->m_pVideoDecoder = M4OSA_NULL;
+
+    pC->m_pAudioDecoder = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentWriter()
+ * @brief    Set current writer
+ * @param    pContext            (IN/OUT) VSS context.
+ * @param    mediaType            (IN) Media type.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4VSS3GPP_ERR_INVALID_FILE_TYPE:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentWriter( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                     M4VIDEOEDITING_FileType mediaType )
+{
+    M4WRITER_OutputFileType writerType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+
+    switch( mediaType )
+    {
+        case M4VIDEOEDITING_kFileType_3GPP:
+            writerType = M4WRITER_k3GPP;
+            break;
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
+                "Writer type not supported");
+            return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
+    }
+
+    pC->pWriterGlobalFcts = pC->WriterInterface[writerType].pGlobalFcts;
+    pC->pWriterDataFcts = pC->WriterInterface[writerType].pDataFcts;
+
+    if( pC->pWriterGlobalFcts == M4OSA_NULL
+        || pC->pWriterDataFcts == M4OSA_NULL )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
+            "Writer type not supported");
+        M4OSA_TRACE1_0("Writer type not supported");
+        return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
+    }
+
+    pC->pWriterDataFcts->pWriterContext = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
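+/**
+ * Usage sketch (illustrative only): selecting the writer for a 3GPP output
+ * file once the writers have been registered. On success the selected
+ * interfaces are available through pC->pWriterGlobalFcts / pC->pWriterDataFcts.
+ *
+ *   err = M4VSS3GPP_setCurrentWriter(&shellCtxt, M4VIDEOEDITING_kFileType_3GPP);
+ *   if( M4VSS3GPP_ERR_INVALID_FILE_TYPE == err )
+ *   {
+ *       ... handle the error: no writer registered for this output file type ...
+ *   }
+ */
+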
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentVideoEncoder()
+ * @brief    Set a video encoder
+ * @param    pContext            (IN/OUT) VSS context.
+ * @param    MediaType           (IN) Encoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentVideoEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                           M4SYS_StreamType mediaType )
+{
+    M4ENCODER_Format encoderType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_TRACE3_2("M4VSS3GPP_setCurrentVideoEncoder: pC=0x%x, mediaType=0x%x",
+        pC, mediaType);
+
+    switch( mediaType )
+    {
+        case M4SYS_kH263:
+            encoderType = M4ENCODER_kH263;
+            break;
+
+        case M4SYS_kMPEG_4:
+            encoderType = M4ENCODER_kMPEG4;
+            break;
+
+        case M4SYS_kH264:
+            encoderType = M4ENCODER_kH264;
+            break;
+
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE,
+                M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT,
+                "Video encoder type not supported");
+            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
+    }
+
+    pC->pVideoEncoderGlobalFcts = pC->pVideoEncoderInterface[encoderType];
+    pC->pCurrentVideoEncoderExternalAPI =
+        pC->pVideoEncoderExternalAPITable[encoderType];
+    pC->pCurrentVideoEncoderUserData =
+        pC->pVideoEncoderUserDataTable[encoderType];
+
+    if( pC->pVideoEncoderGlobalFcts == M4OSA_NULL )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE,
+            M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT,
+            "Video encoder type not supported");
+        M4OSA_TRACE1_0("Video encoder type not supported");
+        return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentAudioEncoder()
+ * @brief    Set an audio encoder
+ * @param    context            (IN/OUT) VSS context.
+ * @param    MediaType        (IN) Encoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentAudioEncoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                           M4SYS_StreamType mediaType )
+{
+    M4ENCODER_AudioFormat encoderType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_TRACE3_2("M4VSS3GPP_setCurrentAudioEncoder: pC=0x%x, mediaType=0x%x",
+        pC, mediaType);
+
+    switch( mediaType )
+    {
+        case M4SYS_kAMR:
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_setCurrentAudioEncoder: encoder type AMR");
+            encoderType = M4ENCODER_kAMRNB;
+            break;
+
+        case M4SYS_kAAC:
+            M4OSA_TRACE3_0(
+                "M4VSS3GPP_setCurrentAudioEncoder: encoder type AAC");
+            encoderType = M4ENCODER_kAAC;
+            break;
+
+       default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE,
+                M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT,
+                "Audio encoder type not supported");
+            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
+    }
+
+    pC->pAudioEncoderGlobalFcts = pC->pAudioEncoderInterface[encoderType];
+    pC->pCurrentAudioEncoderUserData =
+        pC->pAudioEncoderUserDataTable[encoderType];
+
+    M4OSA_TRACE3_3(
+        "M4VSS3GPP_setCurrentAudioEncoder: pC->pAudioEncoderInterface[0x%x]=0x%x,\
+        pC->pAudioEncoderGlobalFcts = 0x%x",
+        encoderType, pC->pAudioEncoderInterface[encoderType],
+        pC->pAudioEncoderGlobalFcts);
+
+    if( pC->pAudioEncoderGlobalFcts == M4OSA_NULL )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE,
+            M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT,
+            "Audio encoder type not supported");
+        M4OSA_TRACE1_0("Audio encoder type not supported");
+        return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentReader()
+ * @brief    Set current reader
+ * @param    pContext            (IN/OUT) VSS context.
+ * @param    mediaType            (IN) Media type.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4VSS3GPP_ERR_INVALID_FILE_TYPE:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentReader( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                     M4VIDEOEDITING_FileType mediaType )
+{
+    M4READER_MediaType readerType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+
+    switch( mediaType )
+    {
+        case M4VIDEOEDITING_kFileType_3GPP:
+
+        case M4VIDEOEDITING_kFileType_MP4:
+            readerType = M4READER_kMediaType3GPP;
+            break;
+
+        case M4VIDEOEDITING_kFileType_AMR:
+            readerType = M4READER_kMediaTypeAMR;
+            break;
+
+        case M4VIDEOEDITING_kFileType_MP3:
+            readerType = M4READER_kMediaTypeMP3;
+            break;
+
+        case M4VIDEOEDITING_kFileType_PCM:
+            readerType = M4READER_kMediaTypePCM;
+            break;
+
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
+                "Reader type not supported");
+            return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
+    }
+
+    pC->m_pReader = pC->m_pReaderGlobalItTable[readerType];
+    pC->m_pReaderDataIt = pC->m_pReaderDataItTable[readerType];
+
+    if( pC->m_pReader == M4OSA_NULL || pC->m_pReaderDataIt == M4OSA_NULL )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE, M4VSS3GPP_ERR_INVALID_FILE_TYPE,
+            "Reader type not supported");
+        M4OSA_TRACE1_0("Reader type not supported");
+        return M4VSS3GPP_ERR_INVALID_FILE_TYPE;
+    }
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentVideoDecoder()
+ * @brief    Set a video decoder
+ * @param    pContext            (IN/OUT) VSS context.
+ * @param    decoderType        (IN) Decoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:                    A parameter is null (in DEBUG only)
+ * @return    M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT:    Media type not supported
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentVideoDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                           M4_StreamType mediaType )
+{
+    M4DECODER_VideoType decoderType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_TRACE3_2("M4VSS3GPP_setCurrentVideoDecoder: pC=0x%x, mediaType=0x%x",
+        pC, mediaType);
+
+    switch( mediaType )
+    {
+        case M4DA_StreamTypeVideoMpeg4:
+        case M4DA_StreamTypeVideoH263:
+            decoderType = M4DECODER_kVideoTypeMPEG4;
+            break;
+
+        case M4DA_StreamTypeVideoMpeg4Avc:
+            decoderType = M4DECODER_kVideoTypeAVC;
+            break;
+
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE,
+                M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT,
+                "Video decoder type not supported");
+            return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
+    }
+
+    pC->m_pVideoDecoder = pC->m_pVideoDecoderItTable[decoderType];
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    pC->m_pCurrentVideoDecoderUserData =
+        pC->m_pVideoDecoderUserDataTable[decoderType];
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    if( pC->m_pVideoDecoder == M4OSA_NULL )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE,
+            M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT,
+            "Video decoder type not supported");
+        M4OSA_TRACE1_0("Video decoder type not supported");
+        return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ************************************************************************
+ * M4OSA_ERR   M4VSS3GPP_setCurrentAudioDecoder()
+ * @brief    Set an audio decoder
+ * @param    context            (IN/OUT) VSS context.
+ * @param    decoderType        (IN) Decoder type
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    A parameter is null (in DEBUG only)
+ ************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_setCurrentAudioDecoder( M4VSS3GPP_MediaAndCodecCtxt *pC,
+                                           M4_StreamType mediaType )
+{
+    M4AD_Type decoderType;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_TRACE3_2("M4VSS3GPP_setCurrentAudioDecoder: pC=0x%x, mediaType=0x%x",
+        pC, mediaType);
+
+    switch( mediaType )
+    {
+        case M4DA_StreamTypeAudioAmrNarrowBand:
+            decoderType = M4AD_kTypeAMRNB;
+            break;
+
+        case M4DA_StreamTypeAudioAac:
+        case M4DA_StreamTypeAudioAacADTS:
+        case M4DA_StreamTypeAudioAacADIF:
+            decoderType = M4AD_kTypeAAC;
+            break;
+
+        case M4DA_StreamTypeAudioMp3:
+            decoderType = M4AD_kTypeMP3;
+            break;
+
+        case M4DA_StreamTypeAudioPcm:
+            decoderType = M4AD_kTypePCM;
+            break;
+
+        default:
+            M4OSA_DEBUG_IF1(M4OSA_TRUE,
+                M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT,
+                "Audio decoder type not supported");
+            return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT;
+    }
+
+    pC->m_pAudioDecoder = pC->m_pAudioDecoderItTable[decoderType];
+    pC->pCurrentAudioDecoderUserData =
+        pC->pAudioDecoderUserDataTable[decoderType];
+
+    if( pC->m_pAudioDecoder == M4OSA_NULL )
+    {
+        M4OSA_DEBUG_IF1(M4OSA_TRUE,
+            M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT,
+            "Audio decoder type not supported");
+        M4OSA_TRACE1_0("Audio decoder type not supported");
+        return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT;
+    }
+
+    return M4NO_ERROR;
+}
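+
+/**
+ * Usage sketch (illustrative only): per-clip decoder selection. Once a clip is
+ * opened with the selected reader, the stream types it reports are mapped to
+ * the registered decoders. The stream handler fields used below are an
+ * assumption about the reader's stream descriptors.
+ *
+ *   err = M4VSS3GPP_setCurrentVideoDecoder(&shellCtxt,
+ *             pVideoStream->m_basicProperties.m_streamType);
+ *   if( M4NO_ERROR != err ) return err;
+ *
+ *   err = M4VSS3GPP_setCurrentAudioDecoder(&shellCtxt,
+ *             pAudioStream->m_basicProperties.m_streamType);
+ *   if( M4NO_ERROR != err ) return err;
+ */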
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_Edit.c b/libvideoeditor/vss/src/M4VSS3GPP_Edit.c
new file mode 100755
index 0000000..7bd1a99
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_Edit.c
@@ -0,0 +1,4130 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_Edit.c
+ * @brief    Video Studio Service 3GPP edit API implementation.
+ * @note
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our headers */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h"   /**< OSAL memory management */
+#include "M4OSA_Debug.h"    /**< OSAL debug management */
+#include "M4OSA_CharStar.h" /**< OSAL string management */
+
+#ifdef WIN32
+#include "string.h"         /**< for strcpy (Don't want to get dependencies
+                                 with M4OSA_String...) */
+
+#endif                      /* WIN32 */
+
+/************************************************************************/
+/* Static local functions                                               */
+/************************************************************************/
+static M4OSA_ERR M4VSS3GPP_intClipSettingsSanityCheck(
+    M4VSS3GPP_ClipSettings *pClip );
+static M4OSA_ERR M4VSS3GPP_intTransitionSettingsSanityCheck(
+    M4VSS3GPP_TransitionSettings *pTransition );
+static M4OSA_Void M4VSS3GPP_intFreeSettingsList(
+    M4VSS3GPP_InternalEditContext *pC );
+static M4OSA_ERR
+M4VSS3GPP_intCreateMP3OutputFile( M4VSS3GPP_InternalEditContext *pC,
+                                 M4OSA_Void *pOutputFile );
+static M4OSA_ERR M4VSS3GPP_intSwitchToNextClip(
+    M4VSS3GPP_InternalEditContext *pC );
+static M4OSA_ERR
+M4VSS3GPP_intComputeOutputVideoAndAudioDsi( M4VSS3GPP_InternalEditContext *pC,
+                                           M4OSA_UInt8 uiMasterClip );
+static M4OSA_Void M4VSS3GPP_intComputeOutputAverageVideoBitrate(
+    M4VSS3GPP_InternalEditContext *pC );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_GetVersion()
+ * @brief    Get the VSS 3GPP version.
+ * @note    Can be called at any time. Does not need any context.
+ * @param    pVersionInfo        (OUT) Pointer to a version info structure
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pVersionInfo is M4OSA_NULL (If Debug Level >= 2)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_GetVersion( M4_VersionInfo *pVersionInfo )
+{
+    M4OSA_TRACE3_1("M4VSS3GPP_GetVersion called with pVersionInfo=0x%x",
+        pVersionInfo);
+
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pVersionInfo), M4ERR_PARAMETER,
+        "M4VSS3GPP_GetVersion: pVersionInfo is M4OSA_NULL");
+
+    pVersionInfo->m_major = M4VSS_VERSION_MAJOR;
+    pVersionInfo->m_minor = M4VSS_VERSION_MINOR;
+    pVersionInfo->m_revision = M4VSS_VERSION_REVISION;
+
+    return M4NO_ERROR;
+}
+
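+/**
+ * Usage sketch (illustrative only): querying the VSS 3GPP version; no context
+ * is needed.
+ *
+ *   M4_VersionInfo version;
+ *   M4OSA_ERR      err;
+ *
+ *   err = M4VSS3GPP_GetVersion(&version);
+ *   if( M4NO_ERROR == err )
+ *   {
+ *       M4OSA_TRACE1_3("VSS 3GPP version %d.%d.%d",
+ *           version.m_major, version.m_minor, version.m_revision);
+ *   }
+ */
+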
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editInit()
+ * @brief    Initializes the VSS 3GPP edit operation (allocates an execution context).
+ * @note
+ * @param    pContext            (OUT) Pointer to the VSS 3GPP edit context to allocate
+ * @param    pFileReadPtrFct        (IN) Pointer to OSAL file reader functions
+ * @param   pFileWritePtrFct    (IN) Pointer to OSAL file writer functions
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editInit( M4VSS3GPP_EditContext *pContext,
+                             M4OSA_FileReadPointer *pFileReadPtrFct,
+                             M4OSA_FileWriterPointer *pFileWritePtrFct )
+{
+    M4VSS3GPP_InternalEditContext *pC;
+    M4OSA_ERR err;
+    M4OSA_UInt32 i;
+
+    M4OSA_TRACE3_3(
+        "M4VSS3GPP_editInit called with pContext=0x%x, \
+        pFileReadPtrFct=0x%x, pFileWritePtrFct=0x%x",
+        pContext, pFileReadPtrFct, pFileWritePtrFct);
+
+    /**
+    * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_editInit: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileReadPtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_editInit: pFileReadPtrFct is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pFileWritePtrFct), M4ERR_PARAMETER,
+        "M4VSS3GPP_editInit: pFileWritePtrFct is M4OSA_NULL");
+
+    /**
+    * Allocate the VSS context and return it to the user */
+    pC = (M4VSS3GPP_InternalEditContext
+        *)M4OSA_malloc(sizeof(M4VSS3GPP_InternalEditContext),
+        M4VSS3GPP, (M4OSA_Char *)"M4VSS3GPP_InternalContext");
+
+    if( M4OSA_NULL == pC )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_editInit(): unable to allocate M4VSS3GPP_InternalContext,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    *pContext = pC;
+    /* Initialization of context variables (allocation is checked before touching the block) */
+    M4OSA_memset((M4OSA_MemAddr8)pC, sizeof(M4VSS3GPP_InternalEditContext), 0);
+
+
+    /* Init the context. */
+    pC->pClipList = M4OSA_NULL;
+    pC->pTransitionList = M4OSA_NULL;
+    pC->pEffectsList = M4OSA_NULL;
+    pC->pActiveEffectsList = M4OSA_NULL;
+    pC->pActiveEffectsList1 = M4OSA_NULL;
+    pC->pC1 = M4OSA_NULL;
+    pC->pC2 = M4OSA_NULL;
+    pC->yuv1[0].pac_data = pC->yuv1[1].pac_data = pC->
+        yuv1[2].pac_data = M4OSA_NULL;
+    pC->yuv2[0].pac_data = pC->yuv2[1].pac_data = pC->
+        yuv2[2].pac_data = M4OSA_NULL;
+    pC->yuv3[0].pac_data = pC->yuv3[1].pac_data = pC->
+        yuv3[2].pac_data = M4OSA_NULL;
+    pC->yuv4[0].pac_data = pC->yuv4[1].pac_data = pC->
+        yuv4[2].pac_data = M4OSA_NULL;
+    pC->bClip1AtBeginCut = M4OSA_FALSE;
+    pC->bTransitionEffect = M4OSA_FALSE;
+    pC->bSupportSilence = M4OSA_FALSE;
+
+    /**
+    * Init PC->ewc members */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
+    pC->ewc.bVideoDataPartitioning = M4OSA_FALSE;
+    pC->ewc.pVideoOutputDsi = M4OSA_NULL;
+    pC->ewc.bActivateEmp = M4OSA_FALSE;
+    pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+    pC->ewc.uiNbChannels = 1;
+    pC->ewc.pAudioOutputDsi = M4OSA_NULL;
+    pC->ewc.pAudioEncCtxt = M4OSA_NULL;
+    pC->ewc.pAudioEncDSI.pInfo = M4OSA_NULL;
+    pC->ewc.pSilenceFrameData = M4OSA_NULL;
+    pC->ewc.pEncContext = M4OSA_NULL;
+    pC->ewc.pDummyAuBuffer = M4OSA_NULL;
+    pC->ewc.encoderState = M4VSS3GPP_kNoEncoder;
+    pC->ewc.p3gpWriterContext = M4OSA_NULL;
+    /**
+    * Keep the OSAL file functions pointer set in our context */
+    pC->pOsaFileReadPtr = pFileReadPtrFct;
+    pC->pOsaFileWritPtr = pFileWritePtrFct;
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    for ( i = 0; i < M4VD_kVideoType_NB; i++ )
+    {
+        pC->registeredExternalDecs[i].pDecoderInterface = M4OSA_NULL;
+        pC->registeredExternalDecs[i].pUserData = M4OSA_NULL;
+        pC->registeredExternalDecs[i].registered = M4OSA_FALSE;
+    }
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+    for ( i = 0; i < M4VSS3GPP_kCodecType_NB; i++ )
+    {
+        pC->m_codecInterface[i] = M4OSA_NULL;
+    }
+    pC->pOMXUserData = M4OSA_NULL;
+#endif /* M4VSS_SUPPORT_OMX_CODECS */
+    /*
+    * Reset pointers for media and codecs interfaces */
+
+    err = M4VSS3GPP_clearInterfaceTables(&pC->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /*
+    *  Call the media and codecs subscription module */
+    err = M4VSS3GPP_subscribeMediaAndCodec(&pC->ShellAPI);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Update main state automaton */
+    pC->State = M4VSS3GPP_kEditState_CREATED;
+    pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+    pC->Astate = M4VSS3GPP_kEditAudioState_READ_WRITE;
+
+    pC->bIsMMS = M4OSA_FALSE;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_editInit(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
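+/**
+ * Usage sketch (illustrative only): creating an edit context. The OSAL file
+ * reader/writer function pointer structures are assumed to be filled by the
+ * integration layer before this call.
+ *
+ *   M4VSS3GPP_EditContext   editCtxt = M4OSA_NULL;
+ *   M4OSA_FileReadPointer   fileReadFcts;    (filled elsewhere)
+ *   M4OSA_FileWriterPointer fileWriteFcts;   (filled elsewhere)
+ *
+ *   err = M4VSS3GPP_editInit(&editCtxt, &fileReadFcts, &fileWriteFcts);
+ *   if( M4NO_ERROR != err )
+ *   {
+ *       ... handle M4ERR_ALLOC or a media/codec subscription error ...
+ *   }
+ */
+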
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCreateClipSettings()
+ * @brief    Allows filling a clip settings structure with default values
+ *
+ * @note    WARNING: pClipSettings->pFile will be allocated in this function.
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param   pFile               (IN) Clip file name
+ * @param   filePathSize        (IN) Clip path size (needed for UTF 16 conversion)
+ * @param    nbEffects           (IN) Nb of effect settings to allocate
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR
+M4VSS3GPP_editCreateClipSettings( M4VSS3GPP_ClipSettings *pClipSettings,
+                                 M4OSA_Void *pFile, M4OSA_UInt32 filePathSize,
+                                 M4OSA_UInt8 nbEffects )
+{
+    M4OSA_UInt8 uiFx;
+
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_editCreateClipSettings called with pClipSettings=0x%p",
+        pClipSettings);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_editCreateClipSettings: pClipSettings is NULL");
+
+    /**
+    * Set the clip settings to default */
+    pClipSettings->pFile = M4OSA_NULL;        /**< no file */
+    pClipSettings->FileType =
+        M4VIDEOEDITING_kFileType_Unsupported; /**< undefined */
+
+    if( M4OSA_NULL != pFile )
+    {
+        //pClipSettings->pFile = (M4OSA_Char*) M4OSA_malloc(M4OSA_chrLength(pFile)+1, M4VSS3GPP,
+        // "pClipSettings->pFile");
+        /*FB: add clip path size because of utf 16 conversion*/
+        pClipSettings->pFile =
+            (M4OSA_Void *)M4OSA_malloc(filePathSize + 1, M4VSS3GPP,
+            (M4OSA_Char *)"pClipSettings->pFile");
+
+        if( M4OSA_NULL == pClipSettings->pFile )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editCreateClipSettings : ERROR allocating filename");
+            return M4ERR_ALLOC;
+        }
+        //M4OSA_memcpy(pClipSettings->pFile, pFile, M4OSA_chrLength(pFile)+1);
+        /*FB: add clip path size because of utf 16 conversion*/
+        M4OSA_memcpy(pClipSettings->pFile, pFile, filePathSize + 1);
+    }
+
+    /*FB: add file path size to support UTF16 conversion*/
+    pClipSettings->filePathSize = filePathSize + 1;
+    /**/
+    pClipSettings->ClipProperties.bAnalysed = M4OSA_FALSE;
+    pClipSettings->ClipProperties.FileType = 0;
+    pClipSettings->ClipProperties.Version[0] = 0;
+    pClipSettings->ClipProperties.Version[1] = 0;
+    pClipSettings->ClipProperties.Version[2] = 0;
+    pClipSettings->ClipProperties.uiClipDuration = 0;
+
+    pClipSettings->uiBeginCutTime = 0; /**< no begin cut */
+    pClipSettings->uiEndCutTime = 0;   /**< no end cut */
+
+    /**
+    * Reset video characteristics */
+    pClipSettings->ClipProperties.VideoStreamType = M4VIDEOEDITING_kNoneVideo;
+    pClipSettings->ClipProperties.uiClipVideoDuration = 0;
+    pClipSettings->ClipProperties.uiVideoBitrate = 0;
+    pClipSettings->ClipProperties.uiVideoMaxAuSize = 0;
+    pClipSettings->ClipProperties.uiVideoWidth = 0;
+    pClipSettings->ClipProperties.uiVideoHeight = 0;
+    pClipSettings->ClipProperties.uiVideoTimeScale = 0;
+    pClipSettings->ClipProperties.fAverageFrameRate = 0.0;
+    pClipSettings->ClipProperties.ProfileAndLevel =
+        M4VIDEOEDITING_kProfile_and_Level_Out_Of_Range;
+    pClipSettings->ClipProperties.uiH263level = 0;
+    pClipSettings->ClipProperties.uiVideoProfile = 0;
+    pClipSettings->ClipProperties.bMPEG4dataPartition = M4OSA_FALSE;
+    pClipSettings->ClipProperties.bMPEG4rvlc = M4OSA_FALSE;
+    pClipSettings->ClipProperties.bMPEG4resynchMarker = M4OSA_FALSE;
+
+    /**
+    * Reset audio characteristics */
+    pClipSettings->ClipProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
+    pClipSettings->ClipProperties.uiClipAudioDuration = 0;
+    pClipSettings->ClipProperties.uiAudioBitrate = 0;
+    pClipSettings->ClipProperties.uiAudioMaxAuSize = 0;
+    pClipSettings->ClipProperties.uiNbChannels = 0;
+    pClipSettings->ClipProperties.uiSamplingFrequency = 0;
+    pClipSettings->ClipProperties.uiExtendedSamplingFrequency = 0;
+    pClipSettings->ClipProperties.uiDecodedPcmSize = 0;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_editCreateClipSettings(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
+
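+/**
+ * Usage sketch (illustrative only): filling a clip settings structure for a
+ * 3GPP input clip and releasing it when done. The file path and cut times are
+ * example values.
+ *
+ *   M4VSS3GPP_ClipSettings clip;
+ *   M4OSA_Char *pClipFile = (M4OSA_Char *)"/sdcard/input.3gp";
+ *
+ *   err = M4VSS3GPP_editCreateClipSettings(&clip, pClipFile,
+ *             M4OSA_chrLength(pClipFile), 0);
+ *   clip.FileType       = M4VIDEOEDITING_kFileType_3GPP;
+ *   clip.uiBeginCutTime = 0;      begin cut in ms (0 = no cut)
+ *   clip.uiEndCutTime   = 10000;  end cut in ms
+ *
+ *   ... use the clip in an M4VSS3GPP_EditSettings, then ...
+ *   M4VSS3GPP_editFreeClipSettings(&clip);
+ */
+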
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editDuplicateClipSettings()
+ * @brief    Duplicates a clip settings structure, performing allocations if required
+ *
+ * @param    pClipSettingsDest    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param    pClipSettingsOrig    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param   bCopyEffects        (IN) Flag to know if we have to duplicate effects
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR
+M4VSS3GPP_editDuplicateClipSettings( M4VSS3GPP_ClipSettings *pClipSettingsDest,
+                                    M4VSS3GPP_ClipSettings *pClipSettingsOrig,
+                                    M4OSA_Bool bCopyEffects )
+{
+    M4OSA_UInt8 uiFx;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_editDuplicateClipSettings called with dest=0x%p src=0x%p",
+        pClipSettingsDest, pClipSettingsOrig);
+
+    /* Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsDest), M4ERR_PARAMETER,
+        "M4VSS3GPP_editDuplicateClipSettings: pClipSettingsDest is NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsOrig), M4ERR_PARAMETER,
+        "M4VSS3GPP_editDuplicateClipSettings: pClipSettingsOrig is NULL");
+
+    /* Copy plain structure */
+    M4OSA_memcpy((M4OSA_MemAddr8)pClipSettingsDest,
+        (M4OSA_MemAddr8)pClipSettingsOrig, sizeof(M4VSS3GPP_ClipSettings));
+
+    /* Duplicate filename */
+    if( M4OSA_NULL != pClipSettingsOrig->pFile )
+    {
+        //pClipSettingsDest->pFile =
+        // (M4OSA_Char*) M4OSA_malloc(M4OSA_chrLength(pClipSettingsOrig->pFile)+1, M4VSS3GPP,
+        // "pClipSettingsDest->pFile");
+        /*FB: clip path size is needed for utf 16 conversion*/
+        /*FB 2008/10/16: bad allocation size which raises a crash*/
+        pClipSettingsDest->pFile =
+            (M4OSA_Char *)M4OSA_malloc(pClipSettingsOrig->filePathSize + 1,
+            M4VSS3GPP, (M4OSA_Char *)"pClipSettingsDest->pFile");
+
+        if( M4OSA_NULL == pClipSettingsDest->pFile )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editDuplicateClipSettings : ERROR allocating filename");
+            return M4ERR_ALLOC;
+        }
+        /*FB: clip path size is needed for utf 16 conversion*/
+        //M4OSA_memcpy(pClipSettingsDest->pFile, pClipSettingsOrig->pFile,
+        // M4OSA_chrLength(pClipSettingsOrig->pFile)+1);
+        /*FB 2008/10/16: bad allocation size which raises a crash*/
+        M4OSA_memcpy(pClipSettingsDest->pFile, pClipSettingsOrig->pFile,
+            pClipSettingsOrig->filePathSize/*+1*/);
+        ( (M4OSA_Char
+            *)pClipSettingsDest->pFile)[pClipSettingsOrig->filePathSize] = '\0';
+    }
+
+    /* Duplicate effects */
+#if 0
+
+    if( M4OSA_TRUE == bCopyEffects )
+    {
+        if( pClipSettingsOrig->nbEffects > 0 )
+        {
+            pClipSettingsDest->Effects = (M4VSS3GPP_EffectSettings
+                *)M4OSA_malloc(sizeof(M4VSS3GPP_EffectSettings)
+                * pClipSettingsOrig->nbEffects,
+                M4VSS3GPP, "pClipSettingsDest->Effects");
+
+            if( M4OSA_NULL == pClipSettingsDest->Effects )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editDuplicateClipSettings : ERROR allocating effects, nb=%lu",
+                    pClipSettingsOrig->nbEffects);
+                pClipSettingsDest->nbEffects = 0;
+                return M4ERR_ALLOC;
+            }
+
+            for ( uiFx = 0; uiFx < pClipSettingsOrig->nbEffects; uiFx++ )
+            {
+                /* Copy plain structure */
+                M4OSA_memcpy(
+                    (M4OSA_MemAddr8) &(pClipSettingsDest->Effects[uiFx]),
+                    (M4OSA_MemAddr8) &(pClipSettingsOrig->Effects[uiFx]),
+                    sizeof(M4VSS3GPP_EffectSettings));
+            }
+        }
+    }
+    else
+    {
+        pClipSettingsDest->nbEffects = 0;
+        pClipSettingsDest->Effects = M4OSA_NULL;
+    }
+
+#endif /* RC */
+    /* Return with no error */
+
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_editDuplicateClipSettings(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editFreeClipSettings()
+ * @brief    Free the pointers allocated in the ClipSetting structure (pFile, Effects).
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editFreeClipSettings(
+    M4VSS3GPP_ClipSettings *pClipSettings )
+{
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_editFreeClipSettings: pClipSettings is NULL");
+
+    /* free filename */
+    if( M4OSA_NULL != pClipSettings->pFile )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pClipSettings->pFile);
+        pClipSettings->pFile = M4OSA_NULL;
+    }
+
+    /* free effects settings */
+    /*    if(M4OSA_NULL != pClipSettings->Effects)
+    {
+    M4OSA_free((M4OSA_MemAddr32)pClipSettings->Effects);
+    pClipSettings->Effects = M4OSA_NULL;
+    pClipSettings->nbEffects = 0;
+    } RC */
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editOpen()
+ * @brief     Set the VSS input and output files.
+ * @note      It opens the input file, but the output file may not be created yet.
+ * @param     pContext           (IN) VSS edit context
+ * @param     pSettings           (IN) Edit settings
+ * @return    M4NO_ERROR:       No error
+ * @return    M4ERR_PARAMETER:  At least one parameter is M4OSA_NULL (debug only)
+ * @return    M4ERR_STATE:      VSS is not in an appropriate state for this function to be called
+ * @return    M4ERR_ALLOC:      There is no more available memory
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editOpen( M4VSS3GPP_EditContext pContext,
+                             M4VSS3GPP_EditSettings *pSettings )
+{
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+
+    M4OSA_ERR err;
+    M4OSA_Int32 i;
+    M4VIDEOEDITING_FileType outputFileType =
+        M4VIDEOEDITING_kFileType_Unsupported; /**< 3GPP or MP3 (we don't do AMR output) */
+    M4OSA_UInt32 uiC1duration, uiC2duration;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_editOpen called with pContext=0x%x, pSettings=0x%x",
+        pContext, pSettings);
+
+    /**
+    *    Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_editOpen: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pSettings), M4ERR_PARAMETER,
+        "M4VSS3GPP_editOpen: pSettings is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pSettings->pClipList), M4ERR_PARAMETER,
+        "M4VSS3GPP_editOpen: pSettings->pClipList is M4OSA_NULL");
+    M4OSA_DEBUG_IF2(( pSettings->uiClipNumber > 1)
+        && (M4OSA_NULL == pSettings->pTransitionList), M4ERR_PARAMETER,
+        "M4VSS3GPP_editOpen: pSettings->pTransitionList is M4OSA_NULL");
+
+    /**
+    * Check state automaton */
+    if( ( pC->State != M4VSS3GPP_kEditState_CREATED)
+        && (pC->State != M4VSS3GPP_kEditState_CLOSED) )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editOpen: State error (0x%x)! Returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /**
+    * Free any previously allocated internal settings list */
+    M4VSS3GPP_intFreeSettingsList(pC);
+
+    /**
+    * Copy the user settings in our context */
+    pC->uiClipNumber = pSettings->uiClipNumber;
+
+    /**
+    * Copy the clip list */
+    pC->pClipList =
+        (M4VSS3GPP_ClipSettings *)M4OSA_malloc(sizeof(M4VSS3GPP_ClipSettings)
+        * pC->uiClipNumber, M4VSS3GPP, (M4OSA_Char *)"pC->pClipList");
+
+    if( M4OSA_NULL == pC->pClipList )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_editOpen: unable to allocate pC->pClipList,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    for ( i = 0; i < pSettings->uiClipNumber; i++ )
+    {
+        M4VSS3GPP_editDuplicateClipSettings(&(pC->pClipList[i]),
+            pSettings->pClipList[i], M4OSA_TRUE);
+    }
+
+    /**
+    * Copy effects list RC */
+
+    /*FB bug fix 19.03.2008: a zero number of effects used to cause a crash*/
+    if( pSettings->nbEffects > 0 )
+    {
+        pC->nbEffects = pSettings->nbEffects;
+        pC->pEffectsList = (M4VSS3GPP_EffectSettings
+            *)M4OSA_malloc(sizeof(M4VSS3GPP_EffectSettings) * pC->nbEffects,
+            M4VSS3GPP, (M4OSA_Char *)"pC->pEffectsList");
+
+        if( M4OSA_NULL == pC->pEffectsList )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editOpen: unable to allocate pC->pEffectsList, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+        for ( i = 0; i < pC->nbEffects; i++ )
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8) &(pC->pEffectsList[i]),
+                (M4OSA_MemAddr8) &(pSettings->Effects[i]),
+                sizeof(M4VSS3GPP_EffectSettings));
+        }
+
+        /**
+        * Allocate active effects list RC */
+        pC->pActiveEffectsList =
+            (M4OSA_UInt8 *)M4OSA_malloc(sizeof(M4OSA_UInt8) * pC->nbEffects,
+            M4VSS3GPP, (M4OSA_Char *)"pC->pActiveEffectsList");
+
+        if( M4OSA_NULL == pC->pActiveEffectsList )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editOpen: unable to allocate pC->pActiveEffectsList,\
+                returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        /**
+         * Allocate the second active effects list */
+        pC->pActiveEffectsList1 =
+            (M4OSA_UInt8 *)M4OSA_malloc(sizeof(M4OSA_UInt8) * pC->nbEffects,
+            M4VSS3GPP, (M4OSA_Char *)"pC->pActiveEffectsList1");
+        if (M4OSA_NULL == pC->pActiveEffectsList1)
+        {
+            M4OSA_TRACE1_0("M4VSS3GPP_editOpen: unable to allocate pC->pActiveEffectsList, \
+                           returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+    }
+    else
+    {
+        pC->nbEffects = 0;
+        pC->nbActiveEffects = 0;
+        pC->nbActiveEffects1 = 0;
+        pC->pEffectsList = M4OSA_NULL;
+        pC->pActiveEffectsList = M4OSA_NULL;
+        pC->pActiveEffectsList1 = M4OSA_NULL;
+    }
+
+    /**
+    * Check the clip analysis data; if it is not provided, analyse the clips ourselves. */
+    for ( i = 0; i < pC->uiClipNumber; i++ )
+    {
+        if( M4OSA_FALSE == pC->pClipList[i].ClipProperties.bAnalysed )
+        {
+            /**< Analysis not provided by the integrator */
+            err = M4VSS3GPP_editAnalyseClip(pC->pClipList[i].pFile,
+                pC->pClipList[i].FileType, &pC->pClipList[i].ClipProperties,
+                pC->pOsaFileReadPtr);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editOpen: M4VSS3GPP_editAnalyseClip returns 0x%x!",
+                    err);
+                return err;
+            }
+        }
+    }
+
+    /**
+    * Check clip compatibility */
+    for ( i = 0; i < pC->uiClipNumber; i++ )
+    {
+        /**
+        * Check that all the clips are compatible with VSS 3GPP */
+        err = M4VSS3GPP_intCheckClipCompatibleWithVssEditing(
+            &pC->pClipList[i].ClipProperties);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_2(
+                "M4VSS3GPP_editOpen:\
+                M4VSS3GPP_intCheckClipCompatibleWithVssEditing(%d) returns 0x%x!",
+                i, err);
+            return err;
+        }
+
+        /**
+        * Check the master clip against all the other ones
+        (including the master clip against itself, otherwise the variables
+        for the master clip are not properly set) */
+        err = M4VSS3GPP_editCheckClipCompatibility(
+            &pC->pClipList[pSettings->uiMasterClip].ClipProperties,
+            &pC->pClipList[i].ClipProperties);
+        /* in case of warning regarding audio incompatibility, editing continues */
+        if( M4OSA_ERR_IS_ERROR(err) )
+        {
+            M4OSA_TRACE1_2(
+                "M4VSS3GPP_editOpen: M4VSS3GPP_editCheckClipCompatibility(%d) returns 0x%x!",
+                i, err);
+            return err;
+        }
+    }
+
+    /* Search for audio tracks that cannot be edited:
+    *   - delete all audio effects for the clip
+    *   - if the master clip is editable, keep the transition
+    *     (the bad track will be replaced later with silence)
+    *   - if the master clip is not editable, switch to a dummy transition (copy/paste only) */
+    for ( i = 0; i < pC->uiClipNumber; i++ )
+    {
+        if( M4OSA_FALSE == pC->pClipList[i].ClipProperties.bAudioIsEditable )
+        {
+            M4OSA_UInt8 uiFx;
+
+            for ( uiFx = 0; uiFx < pC->nbEffects; uiFx++ )
+            {
+                pC->pEffectsList[uiFx].AudioEffectType
+                    = M4VSS3GPP_kAudioEffectType_None;
+            }
+
+            if( ( i < (pC->uiClipNumber - 1))
+                && (M4OSA_NULL != pSettings->pTransitionList[i])
+                && (M4OSA_FALSE == pC->pClipList[pSettings->
+                uiMasterClip].ClipProperties.bAudioIsEditable) )
+            {
+                pSettings->pTransitionList[i]->AudioTransitionType
+                    = M4VSS3GPP_kAudioTransitionType_None;
+            }
+        }
+    }
+
+    /**
+    * We add a transition of duration 0 at the end of the last clip.
+    * It will suppress a whole bunch of tests later in the processing... */
+    pC->pTransitionList = (M4VSS3GPP_TransitionSettings
+        *)M4OSA_malloc(sizeof(M4VSS3GPP_TransitionSettings)
+        * (pC->uiClipNumber), M4VSS3GPP, (M4OSA_Char *)"pC->pTransitionList");
+
+    if( M4OSA_NULL == pC->pTransitionList )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_editOpen: unable to allocate pC->pTransitionList,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /**< copy transition settings */
+    for ( i = 0; i < (pSettings->uiClipNumber - 1); i++ )
+    {
+        M4OSA_memcpy((M4OSA_MemAddr8) &(pC->pTransitionList[i]),
+            (M4OSA_MemAddr8)pSettings->pTransitionList[i],
+            sizeof(M4VSS3GPP_TransitionSettings));
+    }
+
+    /**< We fill the last "dummy" transition */
+    pC->pTransitionList[pC->uiClipNumber - 1].uiTransitionDuration = 0;
+    pC->pTransitionList[pC->uiClipNumber
+        - 1].VideoTransitionType = M4VSS3GPP_kVideoTransitionType_None;
+    pC->pTransitionList[pC->uiClipNumber
+        - 1].AudioTransitionType = M4VSS3GPP_kAudioTransitionType_None;
+
+    /**
+    * Avoid weird clip settings */
+    for ( i = 0; i < pSettings->uiClipNumber; i++ )
+    {
+        err = M4VSS3GPP_intClipSettingsSanityCheck(&pC->pClipList[i]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editOpen: M4VSS3GPP_intClipSettingsSanityCheck returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    for ( i = 0; i < (pSettings->uiClipNumber - 1); i++ )
+    {
+        /**
+        * Maximum transition duration between clip n and clip n+1 is the duration
+        * of the shortest clip */
+        if( 0 == pC->pClipList[i].uiEndCutTime )
+        {
+            uiC1duration = pC->pClipList[i].ClipProperties.uiClipVideoDuration;
+        }
+        else
+        {
+            /**< duration of clip n is the end cut time */
+            uiC1duration = pC->pClipList[i].uiEndCutTime;
+        }
+
+        /**< Subtract the begin cut */
+        uiC1duration -= pC->pClipList[i].uiBeginCutTime;
+
+        /**< Check that the transition is shorter than clip n */
+        if( pC->pTransitionList[i].uiTransitionDuration > uiC1duration )
+        {
+            pC->pTransitionList[i].uiTransitionDuration = uiC1duration - 1;
+        }
+
+        if( 0 == pC->pClipList[i + 1].uiEndCutTime )
+        {
+            uiC2duration =
+                pC->pClipList[i + 1].ClipProperties.uiClipVideoDuration;
+        }
+        else
+        {
+            /**< duration of clip n+1 is the end cut time */
+            uiC2duration = pC->pClipList[i + 1].uiEndCutTime;
+        }
+
+        /**< Subtract the begin cut */
+        uiC2duration -= pC->pClipList[i + 1].uiBeginCutTime;
+
+        /**< Check that the transition is shorter than clip n+1 */
+        if( pC->pTransitionList[i].uiTransitionDuration > uiC2duration )
+        {
+            pC->pTransitionList[i].uiTransitionDuration = uiC2duration - 1;
+        }
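+        /* Example (for illustration only): if clip n lasts 3000 ms after its cuts
+           and clip n+1 lasts 1500 ms, a requested 2000 ms transition is clamped
+           to 1499 ms so that it stays strictly shorter than both clips. */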
+
+        /**
+        * Avoid weird transition settings */
+        err =
+            M4VSS3GPP_intTransitionSettingsSanityCheck(&pC->pTransitionList[i]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editOpen: M4VSS3GPP_intTransitionSettingsSanityCheck returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Check that two transitions are not overlapping
+          (no overlapping possible for first clip) */
+        if( i > 0 )
+        {
+            /**
+            * There is a transition overlap if the sum of the durations of
+            * two consecutive transitions is higher than the duration of the
+            * clip in-between. */
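+            /* Example (for illustration only): with a 2000 ms transition before
+               and a 1500 ms transition after a clip whose effective duration is
+               3000 ms, 2000 + 1500 >= 3000, so the settings are rejected. */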
+            if( ( pC->pTransitionList[i - 1].uiTransitionDuration
+                + pC->pTransitionList[i].uiTransitionDuration) >= uiC1duration )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editOpen: Overlapping transitions on clip %d,\
+                    returning M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS",
+                    i);
+                return M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS;
+            }
+        }
+    }
+
+    /**
+    * Output clip duration */
+    for ( i = 0; i < pC->uiClipNumber; i++ )
+    {
+        /**
+        * Compute the sum of the clip durations */
+        if( 0 == pC->pClipList[i].uiEndCutTime )
+        {
+            /* Only the video track duration matters, to avoid a deviation
+               if the audio track is longer */
+            pC->ewc.iOutputDuration +=
+                pC->pClipList[i].ClipProperties.uiClipVideoDuration;
+        }
+        else
+        {
+            pC->ewc.iOutputDuration +=
+                pC->pClipList[i].uiEndCutTime; /**< Add end cut */
+        }
+
+        pC->ewc.iOutputDuration -=
+            pC->pClipList[i].uiBeginCutTime; /**< Remove begin cut */
+
+        /**
+        * Remove the duration of the transition (it is counted twice) */
+        pC->ewc.iOutputDuration -= pC->pTransitionList[i].uiTransitionDuration;
+    }
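+    /* Example (for illustration only): two clips of 8000 ms and 6000 ms, with a
+       1000 ms begin cut on the first clip, a 5000 ms end cut on the second clip
+       and a 500 ms transition between them, yield an output duration of
+       (8000 - 1000 - 500) + (5000 - 0 - 0) = 11500 ms (the dummy last transition
+       has a duration of 0). */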
+
+    /**
+    * Copy the video properties of the master clip to the output properties */
+    pC->ewc.uiVideoWidth =
+        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiVideoWidth;
+    pC->ewc.uiVideoHeight =
+        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiVideoHeight;
+    pC->ewc.uiVideoTimeScale =
+        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiVideoTimeScale;
+    pC->ewc.bVideoDataPartitioning = pC->pClipList[pSettings->
+        uiMasterClip].ClipProperties.bMPEG4dataPartition;
+
+    switch( pC->pClipList[pSettings->uiMasterClip].ClipProperties.VideoStreamType )
+    {
+        case M4VIDEOEDITING_kH263:
+            pC->ewc.VideoStreamType = M4SYS_kH263;
+            break;
+
+        case M4VIDEOEDITING_kMPEG4_EMP:
+            pC->ewc.bActivateEmp = M4OSA_TRUE; /* no break */
+
+        case M4VIDEOEDITING_kMPEG4:
+            pC->ewc.VideoStreamType = M4SYS_kMPEG_4;
+            break;
+
+        case M4VIDEOEDITING_kH264:
+            pC->ewc.VideoStreamType = M4SYS_kH264;
+            break;
+
+        default:
+            pC->ewc.VideoStreamType = M4SYS_kVideoUnknown;
+            break;
+    }
+
+    /**
+    * Copy the audio properties of the master clip to the output properties */
+    pC->ewc.uiNbChannels =
+        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiNbChannels;
+    pC->ewc.uiAudioBitrate =
+        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiAudioBitrate;
+    pC->ewc.uiSamplingFrequency = pC->pClipList[pSettings->
+        uiMasterClip].ClipProperties.uiSamplingFrequency;
+    pC->ewc.uiSilencePcmSize =
+        pC->pClipList[pSettings->uiMasterClip].ClipProperties.uiDecodedPcmSize;
+    pC->ewc.scale_audio = pC->ewc.uiSamplingFrequency / 1000.0;
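+    /* e.g. a 44100 Hz master clip gives scale_audio = 44.1, i.e. 44.1 audio
+       time units (samples) per millisecond */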
+
+    switch( pC->pClipList[pSettings->uiMasterClip].ClipProperties.AudioStreamType )
+    {
+        case M4VIDEOEDITING_kAMR_NB:
+            pC->ewc.AudioStreamType = M4SYS_kAMR;
+            pC->ewc.pSilenceFrameData =
+                (M4OSA_UInt8 *)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048;
+            pC->ewc.uiSilenceFrameSize =
+                M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE;
+            pC->ewc.iSilenceFrameDuration =
+                M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_DURATION;
+            pC->bSupportSilence = M4OSA_TRUE;
+            break;
+
+        case M4VIDEOEDITING_kAAC:
+        case M4VIDEOEDITING_kAACplus:
+        case M4VIDEOEDITING_keAACplus:
+            pC->ewc.AudioStreamType = M4SYS_kAAC;
+
+            if( pC->ewc.uiNbChannels == 1 )
+            {
+                pC->ewc.pSilenceFrameData =
+                    (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_MONO;
+                pC->ewc.uiSilenceFrameSize = M4VSS3GPP_AAC_AU_SILENCE_MONO_SIZE;
+                pC->bSupportSilence = M4OSA_TRUE;
+            }
+            else
+            {
+                pC->ewc.pSilenceFrameData =
+                    (M4OSA_UInt8 *)M4VSS3GPP_AAC_AU_SILENCE_STEREO;
+                pC->ewc.uiSilenceFrameSize =
+                    M4VSS3GPP_AAC_AU_SILENCE_STEREO_SIZE;
+                pC->bSupportSilence = M4OSA_TRUE;
+            }
+            pC->ewc.iSilenceFrameDuration =
+                1024; /* AAC is always 1024/Freq sample duration */
+            break;
+
+        case M4VIDEOEDITING_kMP3:
+            pC->ewc.AudioStreamType = M4SYS_kMP3;
+            pC->ewc.pSilenceFrameData = M4OSA_NULL;
+            pC->ewc.uiSilenceFrameSize = 0;
+            pC->ewc.iSilenceFrameDuration = 0;
+            /* Special case: the mp3 core reader returns a time in ms */
+            pC->ewc.scale_audio = 1.0;
+            break;
+
+        case M4VIDEOEDITING_kEVRC:
+            pC->ewc.AudioStreamType = M4SYS_kEVRC;
+            pC->ewc.pSilenceFrameData = M4OSA_NULL;
+            pC->ewc.uiSilenceFrameSize = 0;
+            pC->ewc.iSilenceFrameDuration = 160; /* EVRC frames are 20 ms at 8000 Hz
+                                             (makes it easier to factorize amr and evrc code) */
+            break;
+
+        default:
+            pC->ewc.AudioStreamType = M4SYS_kAudioUnknown;
+            break;
+    }
+
+    /**
+    * We produce a 3gpp file, unless it is mp3 */
+    if( M4VIDEOEDITING_kMP3 ==
+        pC->pClipList[pSettings->uiMasterClip].ClipProperties.AudioStreamType )
+        outputFileType = M4VIDEOEDITING_kFileType_MP3;
+    else
+        outputFileType = M4VIDEOEDITING_kFileType_3GPP;
+
+    /**
+    * Beware, a null duration would lead to a divide by zero error (better safe than sorry...) */
+    if( 0 == pC->ewc.iOutputDuration )
+    {
+        pC->ewc.iOutputDuration = 1;
+    }
+
+    /**
+    * Open first clip */
+    pC->uiCurrentClip = 0;
+
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    pC->ewc.dInputVidCts  = 0.0;
+    pC->ewc.dOutputVidCts = 0.0;
+    pC->ewc.dATo = 0.0;
+
+    err = M4VSS3GPP_intSwitchToNextClip(pC);
+    /* RC: to know when a file has been processed */
+    if( M4NO_ERROR != err && err != M4VSS3GPP_WAR_SWITCH_CLIP )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editOpen: M4VSS3GPP_intSwitchToNextClip() returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Do the video stuff in 3GPP Audio/Video case */
+    if( M4VIDEOEDITING_kFileType_3GPP == outputFileType )
+    {
+        /**
+        * Compute the Decoder Specific Info for the output video and audio streams */
+        err = M4VSS3GPP_intComputeOutputVideoAndAudioDsi(pC,
+            pSettings->uiMasterClip);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editOpen: M4VSS3GPP_intComputeOutputVideoAndAudioDsi() returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Compute the time increment for the transition file */
+        switch( pSettings->videoFrameRate )
+        {
+            case M4VIDEOEDITING_k5_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 5.0;
+                break;
+
+            case M4VIDEOEDITING_k7_5_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 7.5;
+                break;
+
+            case M4VIDEOEDITING_k10_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 10.0;
+                break;
+
+            case M4VIDEOEDITING_k12_5_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 12.5;
+                break;
+
+            case M4VIDEOEDITING_k15_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 15.0;
+                break;
+
+            case M4VIDEOEDITING_k20_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 20.0;
+                break;
+
+            case M4VIDEOEDITING_k25_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 25.0;
+                break;
+
+            case M4VIDEOEDITING_k30_FPS:
+                pC->dOutputFrameDuration = 1000.0 / 30.0;
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editOpen(): invalid videoFrameRate (0x%x),\
+                    returning M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE",
+                    pSettings->videoFrameRate);
+                return M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE;
+        }
+
+        if( M4SYS_kMPEG_4 == pC->ewc.VideoStreamType )
+        {
+            M4OSA_UInt32 uiAlpha;
+            /**
+            * MPEG-4 case.
+            * The time scale of the transition encoder must be the same as the
+            * time scale of the input files.
+            * So the frame duration must be compatible with this time scale,
+            * but without being too short.
+            * For that, we must compute alpha (integer) so that:
+            *             (alpha x 1000)/EncoderTimeScale > MinFrameDuration
+            **/
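+            /* Example (for illustration only): with a video time scale of 1000
+               and a 7.5 fps output (dOutputFrameDuration = 133.33 ms),
+               uiAlpha = round(133.33 * 1000 / 1000) = 133, and the frame
+               duration is snapped to 133 * 1000 / 1000 = 133 ms, i.e. an
+               integer number of time scale ticks. */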
+
+            uiAlpha = (M4OSA_UInt32)(( pC->dOutputFrameDuration
+                * pC->ewc.uiVideoTimeScale) / 1000.0 + 0.5);
+
+            if( uiAlpha > 0 )
+            {
+                pC->dOutputFrameDuration =
+                    ( uiAlpha * 1000.0) / pC->ewc.uiVideoTimeScale;
+            }
+        }
+        else if( M4SYS_kH263 == pC->ewc.VideoStreamType )
+        {
+            switch( pSettings->videoFrameRate )
+            {
+                case M4VIDEOEDITING_k12_5_FPS:
+                case M4VIDEOEDITING_k20_FPS:
+                case M4VIDEOEDITING_k25_FPS:
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_editOpen(): invalid videoFrameRate for H263,\
+                        returning M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE");
+                    return M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE;
+               default:
+                  break;
+            }
+        }
+    }
+
+    /**
+    * Create the MP3 output file */
+    if( M4VIDEOEDITING_kFileType_MP3 == outputFileType )
+    {
+        M4READER_Buffer mp3tagBuffer;
+        err = M4VSS3GPP_intCreateMP3OutputFile(pC, pSettings->pOutputFile);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editOpen: M4VSS3GPP_intCreateMP3OutputFile returns 0x%x",
+                err);
+            return err;
+        }
+
+        /* The ID3v2 tag can be located anywhere in the mp3 file.                          */
+        /* The mp3 reader only checks a few bytes at the beginning of the
+           stream to look for an ID3v2 tag. */
+        /* Consequently, if the ID3v2 tag is not at the beginning of the file,
+           the reader behaves as if there were no such metadata. */
+
+        /* Retrieve the data of the ID3v2 Tag */
+        err = pC->pC1->ShellAPI.m_pReader->m_pFctGetOption(
+            pC->pC1->pReaderContext, M4READER_kOptionID_Mp3Id3v2Tag,
+            (M4OSA_DataOption) &mp3tagBuffer);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1("M4VSS3GPP_editOpen: M4MP3R_getOption returns 0x%x",
+                err);
+            return err;
+        }
+
+        /* Write the data of the ID3v2 Tag in the output file */
+        if( 0 != mp3tagBuffer.m_uiBufferSize )
+        {
+            err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
+                (M4OSA_MemAddr8)mp3tagBuffer.m_pData, mp3tagBuffer.m_uiBufferSize);
+
+            /**
+            * Free before the error checking anyway */
+            M4OSA_free((M4OSA_MemAddr32)mp3tagBuffer.m_pData);
+
+            /**
+            * Error checking */
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editOpen: WriteData(ID3v2Tag) returns 0x%x",
+                    err);
+                return err;
+            }
+
+            mp3tagBuffer.m_uiBufferSize = 0;
+            mp3tagBuffer.m_pData = M4OSA_NULL;
+        }
+    }
+    /**
+    * Create the 3GPP output file */
+    else if( M4VIDEOEDITING_kFileType_3GPP == outputFileType )
+    {
+        /* Compute an average bitrate from mixed bitrates of the input clips */
+        M4VSS3GPP_intComputeOutputAverageVideoBitrate(pC);
+
+        /**
+        * 11/12/2008 CR3283 MMS use case in VideoArtist: Set max output file size if needed */
+        if( pC->bIsMMS == M4OSA_TRUE )
+        {
+            err = M4VSS3GPP_intCreate3GPPOutputFile(&pC->ewc, &pC->ShellAPI,
+                pC->pOsaFileWritPtr, pSettings->pOutputFile,
+                pC->pOsaFileReadPtr, pSettings->pTemporaryFile,
+                pSettings->xVSS.outputFileSize);
+        }
+        else
+        {
+            err = M4VSS3GPP_intCreate3GPPOutputFile(&pC->ewc, &pC->ShellAPI,
+                pC->pOsaFileWritPtr, pSettings->pOutputFile,
+                pC->pOsaFileReadPtr, pSettings->pTemporaryFile, 0);
+        }
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editOpen: M4VSS3GPP_intCreate3GPPOutputFile returns 0x%x",
+                err);
+            return err;
+        }
+    }
+    /**
+    * Default error case */
+    else
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editOpen: invalid outputFileType = 0x%x,\
+            returning M4VSS3GPP_ERR_OUTPUT_FILE_TYPE_ERROR",
+            outputFileType);
+        return
+            M4VSS3GPP_ERR_OUTPUT_FILE_TYPE_ERROR; /**< this is an internal error code
+                                                  unknown to the user */
+    }
+
+    /**
+    * Initialize state */
+    if( M4SYS_kMP3 == pC->ewc.AudioStreamType )
+    {
+        /**
+        * In the MP3 case we use a special audio state */
+        pC->State = M4VSS3GPP_kEditState_MP3_JUMP;
+    }
+    else
+    {
+        /**
+        * We start with the video processing */
+        pC->State = M4VSS3GPP_kEditState_VIDEO;
+    }
+
+    /**
+    * Initialize state.
+    * The first clip is independent of the "virtual previous clips",
+    * so it is as if we were in Read/Write mode before it. */
+    pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+    pC->Astate = M4VSS3GPP_kEditAudioState_READ_WRITE;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_editOpen(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editStep()
+ * @brief    Perform one step of editing.
+ * @note
+ * @param     pContext           (IN) VSS 3GPP edit context
+ * @param     pProgress          (OUT) Progress percentage (0 to 100) of the editing operation
+ * @return    M4NO_ERROR:        No error
+ * @return    M4ERR_PARAMETER:   pContext is M4OSA_NULL (debug only)
+ * @return    M4ERR_STATE:       VSS 3GPP is not in an appropriate state for this
+ *                               function to be called
+ * @return    M4VSS3GPP_WAR_EDITING_DONE: Editing is done; the user should now call
+ *            M4VSS3GPP_editClose()
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editStep( M4VSS3GPP_EditContext pContext,
+                             M4OSA_UInt8 *pProgress )
+{
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    M4OSA_UInt32 uiProgressAudio, uiProgressVideo, uiProgress;
+    M4OSA_ERR err;
+
+    M4OSA_TRACE3_1("M4VSS3GPP_editStep called with pContext=0x%x", pContext);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_editStep: pContext is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pProgress), M4ERR_PARAMETER,
+        "M4VSS3GPP_editStep: pProgress is M4OSA_NULL");
+
+    /**
+    * Check state automaton and select correct processing */
+    switch( pC->State )
+    {
+        case M4VSS3GPP_kEditState_VIDEO:
+            err = M4VSS3GPP_intEditStepVideo(pC);
+            break;
+
+        case M4VSS3GPP_kEditState_AUDIO:
+            err = M4VSS3GPP_intEditStepAudio(pC);
+            break;
+
+        case M4VSS3GPP_kEditState_MP3:
+            err = M4VSS3GPP_intEditStepMP3(pC);
+            break;
+
+        case M4VSS3GPP_kEditState_MP3_JUMP:
+            err = M4VSS3GPP_intEditJumpMP3(pC);
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editStep(): invalid internal state (0x%x), returning M4ERR_STATE",
+                pC->State);
+            return M4ERR_STATE;
+    }
+
+    /**
+    * Compute progress.
+    * We do the computation with 32-bit precision because in some (very) extreme cases
+    * we may get values higher than 256 (...) */
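+    /* Example (for illustration only): with dATo = 6000 ms of audio written,
+       dInputVidCts = 5000 ms of video encoded and iOutputDuration = 10000 ms,
+       uiProgressAudio = 60, uiProgressVideo = 50, and the reported progress is
+       their average, 55. */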
+    uiProgressAudio =
+        ( (M4OSA_UInt32)(pC->ewc.dATo * 100)) / pC->ewc.iOutputDuration;
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    uiProgressVideo = ((M4OSA_UInt32)(pC->ewc.dInputVidCts * 100)) / pC->ewc.iOutputDuration;
+
+    uiProgress = uiProgressAudio + uiProgressVideo;
+
+    if( ( pC->ewc.AudioStreamType != M4SYS_kAudioUnknown)
+        && (pC->ewc.VideoStreamType != M4SYS_kVideoUnknown) )
+        uiProgress /= 2;
+
+    /**
+    * Sanity check */
+    if( uiProgress > 100 )
+    {
+        *pProgress = 100;
+    }
+    else
+    {
+        *pProgress = (M4OSA_UInt8)uiProgress;
+    }
+
+    /**
+    * Return the error */
+    M4OSA_TRACE3_1("M4VSS3GPP_editStep(): returning 0x%x", err);
+    return err;
+}
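+
+/* Illustrative sketch (not part of the library): a minimal driving loop for the
+ * edit API, assuming the edit context has already been created and the settings
+ * filled in by the caller. The sketch_runEdit name is hypothetical; the block is
+ * guarded out so it is never compiled. */
+#if 0
+static M4OSA_ERR sketch_runEdit( M4VSS3GPP_EditContext pContext,
+                                M4VSS3GPP_EditSettings *pSettings )
+{
+    M4OSA_ERR err;
+    M4OSA_UInt8 uiProgress = 0;
+
+    /* Open the input clips and create the output file */
+    err = M4VSS3GPP_editOpen(pContext, pSettings);
+
+    if( M4NO_ERROR != err )
+        return err;
+
+    /* Step until the engine reports that editing is done */
+    do
+    {
+        err = M4VSS3GPP_editStep(pContext, &uiProgress);
+    } while( M4NO_ERROR == err );
+
+    if( M4VSS3GPP_WAR_EDITING_DONE != err )
+        return err; /* a real error occurred */
+
+    /* Finalize the output file */
+    return M4VSS3GPP_editClose(pContext);
+}
+#endif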
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editClose()
+ * @brief    Finish the VSS edit operation.
+ * @note    The output 3GPP file is ready to be played after this call
+ * @param    pContext           (IN) VSS edit context
+ * @return    M4NO_ERROR:       No error
+ * @return    M4ERR_PARAMETER:  pContext is M4OSA_NULL (debug only)
+ * @return    M4ERR_STATE:      VSS is not in an appropriate state for this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editClose( M4VSS3GPP_EditContext pContext )
+{
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    M4OSA_ERR err;
+    M4OSA_ERR returnedError = M4NO_ERROR;
+    M4OSA_UInt32 lastCTS;
+
+    M4OSA_TRACE3_1("M4VSS3GPP_editClose called with pContext=0x%x", pContext);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4VSS3GPP_editClose: pContext is M4OSA_NULL");
+
+    /**
+    * Check state automaton.
+    * In "theory", we should not authorize closing if we are in CREATED state.
+    * But in practice, in case the opening failed, it may have been partially done.
+    * In that case we have to free some opened resources by calling Close. */
+    if( M4VSS3GPP_kEditState_CLOSED == pC->State )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editClose: Wrong state (0x%x), returning M4ERR_STATE",
+            pC->State);
+        return M4ERR_STATE;
+    }
+
+    /**
+    * There may be an encoder to destroy */
+    err = M4VSS3GPP_intDestroyVideoEncoder(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editClose: M4VSS3GPP_intDestroyVideoEncoder() returns 0x%x!",
+            err);
+        /**< We do not return the error here because we still have stuff to free */
+        returnedError = err;
+    }
+
+    /**
+    * Close the output file */
+    if( M4SYS_kMP3 == pC->ewc.AudioStreamType )
+    {
+        /**
+        * MP3 case */
+        if( M4OSA_NULL != pC->ewc.p3gpWriterContext )
+        {
+            err = pC->pOsaFileWritPtr->closeWrite(pC->ewc.p3gpWriterContext);
+            pC->ewc.p3gpWriterContext = M4OSA_NULL;
+        }
+    }
+    else
+    {
+        /**
+        * Close the output 3GPP clip, if it has been opened */
+        if( M4OSA_NULL != pC->ewc.p3gpWriterContext )
+        {
+            /* Update last Video CTS */
+            lastCTS = pC->ewc.iOutputDuration;
+
+            err = pC->ShellAPI.pWriterGlobalFcts->pFctSetOption(
+                pC->ewc.p3gpWriterContext,
+                (M4OSA_UInt32)M4WRITER_kMaxFileDuration, &lastCTS);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editClose: SetOption(M4WRITER_kMaxFileDuration) returns 0x%x",
+                    err);
+            }
+
+            err = pC->ShellAPI.pWriterGlobalFcts->pFctCloseWrite(
+                pC->ewc.p3gpWriterContext);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_editClose: pFctCloseWrite(OUT) returns 0x%x!",
+                    err);
+                /**< We do not return the error here because we still have stuff to free */
+                if( M4NO_ERROR
+                    == returnedError ) /**< we return the first error that happened */
+                {
+                    returnedError = err;
+                }
+            }
+            pC->ewc.p3gpWriterContext = M4OSA_NULL;
+        }
+    }
+
+    /**
+    * Free the output video DSI, if it has been created */
+    if( M4OSA_NULL != pC->ewc.pVideoOutputDsi )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->ewc.pVideoOutputDsi);
+        pC->ewc.pVideoOutputDsi = M4OSA_NULL;
+    }
+
+    /**
+    * Free the output audio DSI, if it has been created */
+    if( M4OSA_NULL != pC->ewc.pAudioOutputDsi )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->ewc.pAudioOutputDsi);
+        pC->ewc.pAudioOutputDsi = M4OSA_NULL;
+    }
+
+    /**
+    * Close clip1, if needed */
+    if( M4OSA_NULL != pC->pC1 )
+    {
+        err = M4VSS3GPP_intClipCleanUp(pC->pC1);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editClose: M4VSS3GPP_intClipCleanUp(C1) returns 0x%x!",
+                err);
+            /**< We do not return the error here because we still have stuff to free */
+            if( M4NO_ERROR
+                == returnedError ) /**< we return the first error that happened */
+            {
+                returnedError = err;
+            }
+        }
+        pC->pC1 = M4OSA_NULL;
+    }
+
+    /**
+    * Close clip2, if needed */
+    if( M4OSA_NULL != pC->pC2 )
+    {
+        err = M4VSS3GPP_intClipCleanUp(pC->pC2);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editClose: M4VSS3GPP_intClipCleanUp(C2) returns 0x%x!",
+                err);
+            /**< We do not return the error here because we still have stuff to free */
+            if( M4NO_ERROR
+                == returnedError ) /**< we return the first error that happened */
+            {
+                returnedError = err;
+            }
+        }
+        pC->pC2 = M4OSA_NULL;
+    }
+
+    /**
+    * Free the temporary YUV planes */
+    if( M4OSA_NULL != pC->yuv1[0].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv1[0].pac_data);
+        pC->yuv1[0].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv1[1].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv1[1].pac_data);
+        pC->yuv1[1].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv1[2].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv1[2].pac_data);
+        pC->yuv1[2].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv2[0].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv2[0].pac_data);
+        pC->yuv2[0].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv2[1].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv2[1].pac_data);
+        pC->yuv2[1].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv2[2].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv2[2].pac_data);
+        pC->yuv2[2].pac_data = M4OSA_NULL;
+    }
+
+    /* RC */
+    if( M4OSA_NULL != pC->yuv3[0].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv3[0].pac_data);
+        pC->yuv3[0].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv3[1].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv3[1].pac_data);
+        pC->yuv3[1].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv3[2].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv3[2].pac_data);
+        pC->yuv3[2].pac_data = M4OSA_NULL;
+    }
+
+    /* RC */
+    if( M4OSA_NULL != pC->yuv4[0].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv4[0].pac_data);
+        pC->yuv4[0].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv4[1].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv4[1].pac_data);
+        pC->yuv4[1].pac_data = M4OSA_NULL;
+    }
+
+    if( M4OSA_NULL != pC->yuv4[2].pac_data )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->yuv4[2].pac_data);
+        pC->yuv4[2].pac_data = M4OSA_NULL;
+    }
+
+    /**
+    * RC Free effects list */
+    if( pC->pEffectsList != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pEffectsList);
+        pC->pEffectsList = M4OSA_NULL;
+    }
+
+    /**
+    * RC Free active effects list */
+    if( pC->pActiveEffectsList != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pActiveEffectsList);
+        pC->pActiveEffectsList = M4OSA_NULL;
+    }
+    /**
+     *  Free active effects list */
+    if(pC->pActiveEffectsList1 != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pActiveEffectsList1);
+        pC->pActiveEffectsList1 = M4OSA_NULL;
+    }
+    /**
+    * Update state automaton */
+    pC->State = M4VSS3GPP_kEditState_CLOSED;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_1("M4VSS3GPP_editClose(): returning 0x%x", returnedError);
+    return returnedError;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editCleanUp()
+ * @brief    Free all resources used by the VSS edit operation.
+ * @note    The context is no longer valid after this call
+ * @param    pContext            (IN) VSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pContext is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editCleanUp( M4VSS3GPP_EditContext pContext )
+{
+    M4OSA_ERR err;
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+
+    M4OSA_TRACE3_1("M4VSS3GPP_editCleanUp called with pContext=0x%x", pContext);
+
+    /**
+    *    Check input parameter */
+    if( M4OSA_NULL == pContext )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_editCleanUp(): pContext is M4OSA_NULL, returning M4ERR_PARAMETER");
+        return M4ERR_PARAMETER;
+    }
+
+    /**
+    * Close, if needed.
+    * In "theory", we should not close if we are in CREATED state.
+    * But in practice, in case the opening failed, it may have been partially done.
+    * In that case we have to free some opened resources by calling Close. */
+    if( M4VSS3GPP_kEditState_CLOSED != pC->State )
+    {
+        M4OSA_TRACE3_0("M4VSS3GPP_editCleanUp(): calling M4VSS3GPP_editClose");
+        err = M4VSS3GPP_editClose(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editCleanUp(): M4VSS3GPP_editClose returns 0x%x",
+                err);
+        }
+    }
+
+    /**
+    * Free the video encoder dummy AU */
+    if( M4OSA_NULL != pC->ewc.pDummyAuBuffer )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->ewc.pDummyAuBuffer);
+        pC->ewc.pDummyAuBuffer = M4OSA_NULL;
+    }
+
+    /**
+    * Free the Audio encoder context */
+    if( M4OSA_NULL != pC->ewc.pAudioEncCtxt )
+    {
+        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctClose(
+            pC->ewc.pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editCleanUp: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctCleanUp(
+            pC->ewc.pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editCleanUp: pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pC->ewc.pAudioEncCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Free the shells interfaces */
+    M4VSS3GPP_unRegisterAllWriters(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllEncoders(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllReaders(&pC->ShellAPI);
+    M4VSS3GPP_unRegisterAllDecoders(&pC->ShellAPI);
+
+    /**
+    * Free the settings copied in the internal context */
+    M4VSS3GPP_intFreeSettingsList(pC);
+
+    /**
+    * Finally, Free context */
+    M4OSA_free((M4OSA_MemAddr32)pC);
+    pC = M4OSA_NULL;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_editCleanUp(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+M4OSA_ERR
+M4VSS3GPP_editRegisterExternalVideoDecoder( M4VSS3GPP_EditContext pContext,
+                                           M4VD_VideoType decoderType,
+                                           M4VD_Interface *pDecoderInterface,
+                                           M4OSA_Void *pUserData )
+{
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    /* Here the situation is a bit special: we need to record the registrations that are made,
+    so that we can replay them for each clip we create. */
+
+    if( decoderType >= M4VD_kVideoType_NB )
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    pC->registeredExternalDecs[decoderType].pDecoderInterface
+        = pDecoderInterface;
+    pC->registeredExternalDecs[decoderType].pUserData = pUserData;
+    pC->registeredExternalDecs[decoderType].registered = M4OSA_TRUE;
+
+    /* Notice it overwrites any HW decoder that may already have been registered for this type;
+    this is normal.*/
+
+    return M4NO_ERROR;
+
+#else
+
+    return M4ERR_NOT_IMPLEMENTED;
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+}
+
+M4OSA_ERR
+M4VSS3GPP_editRegisterExternalVideoEncoder( M4VSS3GPP_EditContext pContext,
+                                           M4VE_EncoderType encoderType,
+                                           M4VE_Interface *pEncoderInterface,
+                                           M4OSA_Void *pUserData )
+{
+#ifdef M4VSS_ENABLE_EXTERNAL_ENCODERS
+
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4ENCODER_GlobalInterface *shellInterface;
+    M4ENCODER_Format nativeType;
+
+    switch( encoderType )
+    {
+        case M4VE_kH263VideoEnc:
+            err = M4EGE_H263_getInterfaces(&nativeType, &shellInterface,
+                M4ENCODER_OPEN_ADVANCED);
+
+            break;
+
+        case M4VE_kMpeg4VideoEnc:
+            err = M4EGE_MPEG4_getInterfaces(&nativeType, &shellInterface,
+                M4ENCODER_OPEN_ADVANCED);
+            break;
+
+        case M4VE_kH264VideoEnc:
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_editRegisterExternalVideoEncoder:\
+                H264 encoder type not implemented yet");
+            return M4ERR_NOT_IMPLEMENTED;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_editRegisterExternalVideoEncoder: unknown encoderType %d",
+                encoderType);
+            return M4ERR_PARAMETER;
+            break;
+    }
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editRegisterExternalVideoEncoder:\
+            M4EGE_getInterface failed with error 0x%08X",
+            err);
+        return err;
+    }
+
+    err = M4VSS3GPP_registerVideoEncoder(&(pC->ShellAPI), nativeType,
+        shellInterface);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editRegisterExternalVideoEncoder:\
+            M4VSS3GPP_registerVideoEncoder failed with error 0x%08X",
+            err);
+        M4OSA_free((M4OSA_MemAddr32)shellInterface);
+        return err;
+    }
+
+    pC->ShellAPI.pVideoEncoderExternalAPITable[nativeType] = pEncoderInterface;
+    pC->ShellAPI.pVideoEncoderUserDataTable[nativeType] = pUserData;
+
+    return M4NO_ERROR;
+
+#else
+
+    return M4ERR_NOT_IMPLEMENTED;
+
+#endif
+
+}
+
+#ifdef WIN32
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_GetErrorMessage()
+ * @brief    Return a string describing the given error code
+ * @note    The input string must already be allocated (and long enough!)
+ * @param    err                (IN) Error code to get the description from
+ * @param    sMessage        (IN/OUT) Allocated string in which the description will be copied
+ * @return    M4NO_ERROR:        Input error is from the VSS3GPP module
+ * @return    M4ERR_PARAMETER: Input error is not from the VSS3GPP module
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4VSS3GPP_GetErrorMessage( M4OSA_ERR err, M4OSA_Char *sMessage )
+{
+    switch( err )
+    {
+        case M4VSS3GPP_WAR_EDITING_DONE:
+            strcpy(sMessage, "M4VSS3GPP_WAR_EDITING_DONE");
+            break;
+
+        case M4VSS3GPP_WAR_END_OF_AUDIO_MIXING:
+            strcpy(sMessage, "M4VSS3GPP_WAR_END_OF_AUDIO_MIXING");
+            break;
+
+        case M4VSS3GPP_WAR_END_OF_EXTRACT_PICTURE:
+            strcpy(sMessage, "M4VSS3GPP_WAR_END_OF_EXTRACT_PICTURE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_FILE_TYPE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_FILE_TYPE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_EFFECT_KIND:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_EFFECT_KIND");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_VIDEO_ENCODING_FRAME_RATE");
+            break;
+
+        case M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL");
+            break;
+
+        case M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL");
+            break;
+
+        case M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION:
+            strcpy(sMessage, "M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION");
+            break;
+
+        case M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT");
+            break;
+
+        case M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS:
+            strcpy(sMessage, "M4VSS3GPP_ERR_OVERLAPPING_TRANSITIONS");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_3GPP_FILE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_3GPP_FILE");
+            break;
+
+        case M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT");
+            break;
+
+        case M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT");
+            break;
+
+        case M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED:
+            strcpy(sMessage, "M4VSS3GPP_ERR_AMR_EDITING_UNSUPPORTED");
+            break;
+
+        case M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE");
+            break;
+
+        case M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE");
+            break;
+
+        case M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INPUT_AUDIO_CORRUPTED_AU");
+            break;
+
+        case M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR:
+            strcpy(sMessage, "M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_H263_PROFILE");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_PROFILE");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_MPEG4_RVLC");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE:
+            strcpy(sMessage,
+                "M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_STREAM_IN_FILE");
+            break;
+
+        case M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE:
+            strcpy(sMessage,
+                "M4VSS3GPP_ERR_EDITING_NO_SUPPORTED_VIDEO_STREAM_IN_FILE");
+            break;
+
+        case M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INVALID_CLIP_ANALYSIS_VERSION");
+            break;
+
+        case M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FORMAT");
+            break;
+
+        case M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_FRAME_SIZE");
+            break;
+
+        case M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_TIME_SCALE");
+            break;
+
+        case M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING:
+            strcpy(sMessage,
+                "M4VSS3GPP_ERR_INCOMPATIBLE_VIDEO_DATA_PARTITIONING");
+            break;
+
+        case M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY:
+            strcpy(sMessage, "M4VSS3GPP_ERR_UNSUPPORTED_MP3_ASSEMBLY");
+            break;
+
+        case M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE:
+            strcpy(sMessage, "M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_STREAM_TYPE");
+            break;
+
+        case M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS:
+            strcpy(sMessage, "M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_NB_OF_CHANNELS");
+            break;
+
+        case M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY:
+            strcpy(sMessage,
+                "M4VSS3GPP_WAR_INCOMPATIBLE_AUDIO_SAMPLING_FREQUENCY");
+            break;
+
+        case M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_NO_SUPPORTED_STREAM_IN_FILE");
+            break;
+
+        case M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO:
+            strcpy(sMessage, "M4VSS3GPP_ERR_ADDVOLUME_EQUALS_ZERO");
+            break;
+
+        case M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION:
+            strcpy(sMessage, "M4VSS3GPP_ERR_ADDCTS_HIGHER_THAN_VIDEO_DURATION");
+            break;
+
+        case M4VSS3GPP_ERR_UNDEFINED_AUDIO_TRACK_FILE_FORMAT:
+            strcpy(sMessage, "M4VSS3GPP_ERR_UNDEFINED_AUDIO_TRACK_FILE_FORMAT");
+            break;
+
+        case M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM:
+            strcpy(sMessage, "M4VSS3GPP_ERR_UNSUPPORTED_ADDED_AUDIO_STREAM");
+            break;
+
+        case M4VSS3GPP_ERR_AUDIO_MIXING_UNSUPPORTED:
+            strcpy(sMessage, "M4VSS3GPP_ERR_AUDIO_MIXING_UNSUPPORTED");
+            break;
+
+        case M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK:
+            strcpy(sMessage,
+                "M4VSS3GPP_ERR_FEATURE_UNSUPPORTED_WITH_AUDIO_TRACK");
+            break;
+
+        case M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED:
+            strcpy(sMessage, "M4VSS3GPP_ERR_AUDIO_CANNOT_BE_MIXED");
+            break;
+
+        case M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP:
+            strcpy(sMessage, "M4VSS3GPP_ERR_INPUT_CLIP_IS_NOT_A_3GPP");
+            break;
+
+        case M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP:
+            strcpy(sMessage, "M4VSS3GPP_ERR_BEGINLOOP_HIGHER_ENDLOOP");
+            break;
+
+        case M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED:
+            strcpy(sMessage, "M4VSS3GPP_ERR_H263_PROFILE_NOT_SUPPORTED");
+            break;
+
+        case M4VSS3GPP_ERR_NO_SUPPORTED_VIDEO_STREAM_IN_FILE:
+            strcpy(sMessage, "M4VSS3GPP_ERR_NO_SUPPORTED_VIDEO_STREAM_IN_FILE");
+            break;
+
+        default: /**< Not a VSS3GPP error */
+            strcpy(sMessage, "");
+            return M4ERR_PARAMETER;
+    }
+    return M4NO_ERROR;
+}
+
+#endif /* WIN32 */
+
+/********************************************************/
+/********************************************************/
+/********************************************************/
+/****************   STATIC FUNCTIONS   ******************/
+/********************************************************/
+/********************************************************/
+/********************************************************/
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intClipSettingsSanityCheck()
+ * @brief    Simplify the given clip settings
+ * @note    This function may modify the given structure
+ * @param   pClip    (IN/OUT) Clip settings
+ * @return    M4NO_ERROR:            No error
+ * @return    M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL:
+ ******************************************************************************
+ */
+
+static M4OSA_ERR M4VSS3GPP_intClipSettingsSanityCheck(
+    M4VSS3GPP_ClipSettings *pClip )
+{
+    M4OSA_UInt8 uiFx;
+    M4OSA_UInt32
+        uiClipActualDuration; /**< the clip duration once the cuts are done */
+    M4OSA_UInt32 uiDuration;
+    M4VSS3GPP_EffectSettings *pFx;
+
+    /**
+    * If begin cut is too far, return an error */
+    uiDuration = pClip->ClipProperties.uiClipDuration;
+
+    if( pClip->uiBeginCutTime > uiDuration )
+    {
+        M4OSA_TRACE1_2(
+            "M4VSS3GPP_intClipSettingsSanityCheck: %d > %d,\
+            returning M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION",
+            pClip->uiBeginCutTime, uiDuration);
+        return M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_DURATION;
+    }
+
+    /**
+    * If end cut is too far, set to zero (it means no end cut) */
+    if( pClip->uiEndCutTime > uiDuration )
+    {
+        pClip->uiEndCutTime = 0;
+    }
+
+    /**
+    * Compute actual clip duration (once cuts are done) */
+    if( 0 == pClip->uiEndCutTime )
+    {
+        /**
+        * No end cut */
+        uiClipActualDuration = uiDuration - pClip->uiBeginCutTime;
+    }
+    else
+    {
+        if( pClip->uiBeginCutTime >= pClip->uiEndCutTime )
+        {
+            M4OSA_TRACE1_2(
+                "M4VSS3GPP_intClipSettingsSanityCheck: %d > %d,\
+                returning M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT",
+                pClip->uiBeginCutTime, pClip->uiEndCutTime);
+            return M4VSS3GPP_ERR_BEGIN_CUT_LARGER_THAN_END_CUT;
+        }
+        uiClipActualDuration = pClip->uiEndCutTime - pClip->uiBeginCutTime;
+    }
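+    /* Example (for illustration only): for a 10000 ms clip with a 2000 ms begin
+       cut and a 9000 ms end cut, the actual duration is 7000 ms; an end cut of
+       12000 ms would have been reset to 0 above (i.e. no end cut), giving
+       8000 ms. */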
+
+    if( M4VIDEOEDITING_kMP3 != pClip->ClipProperties.AudioStreamType )
+    {
+#if 0 /*RC*/
+        /**
+        * Check the three effects */
+
+        for ( uiFx = 0; uiFx < pClip->nbEffects; uiFx++ )
+        {
+            pFx = &(pClip->Effects[uiFx]); /**< shortcut */
+
+            /**
+            * No effect cases */
+            if( 0 == pFx->uiDuration )
+            {
+                pFx->VideoEffectType = M4VSS3GPP_kVideoEffectType_None;
+                pFx->AudioEffectType = M4VSS3GPP_kAudioEffectType_None;
+            }
+            else if( ( M4VSS3GPP_kVideoEffectType_None == pFx->VideoEffectType)
+                && (M4VSS3GPP_kAudioEffectType_None == pFx->AudioEffectType) )
+            {
+                pFx->uiStartTime = 0;
+                pFx->uiDuration = 0;
+            }
+
+            /**
+            * We convert all the effects into middle effects, computing the corresponding
+            * start time and duration */
+            if( M4VSS3GPP_kEffectKind_Begin == pFx->EffectKind )
+            {
+                pFx->uiStartTime = 0;
+            }
+            else if( M4VSS3GPP_kEffectKind_End == pFx->EffectKind )
+            {
+                /**
+                * Duration sanity check */
+                if( pFx->uiDuration > uiClipActualDuration )
+                {
+                    pFx->uiDuration = uiClipActualDuration;
+                }
+                /**
+                * Start time computing */
+                pFx->uiStartTime = uiClipActualDuration - pFx->uiDuration;
+            }
+            else if( M4VSS3GPP_kEffectKind_Middle == pFx->EffectKind )
+            {
+                /**
+                * Duration sanity check */
+                if( pFx->uiDuration + pFx->uiStartTime > uiClipActualDuration )
+                {
+                    pFx->uiDuration = uiClipActualDuration - pFx->uiStartTime;
+                }
+            }
+            else
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intClipSettingsSanityCheck: unknown effect kind (0x%x),\
+                    returning M4VSS3GPP_ERR_INVALID_EFFECT_KIND",
+                    pFx->EffectKind);
+                return M4VSS3GPP_ERR_INVALID_EFFECT_KIND;
+            }
+
+            /**
+            * Check external effect function is set */
+            if( ( pFx->VideoEffectType >= M4VSS3GPP_kVideoEffectType_External)
+                && (M4OSA_NULL == pFx->ExtVideoEffectFct) )
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intClipSettingsSanityCheck:\
+                    returning M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL");
+                return M4VSS3GPP_ERR_EXTERNAL_EFFECT_NULL;
+            }
+        }
+
+#endif
+
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intTransitionSettingsSanityCheck()
+ * @brief   Check and simplify the given transition settings
+ * @note     This function may modify the given structure
+ * @param    pTransition    (IN/OUT) Transition settings
+ * @return    M4NO_ERROR:            No error
+ * @return    M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL: The external transition function is not set
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intTransitionSettingsSanityCheck(
+    M4VSS3GPP_TransitionSettings *pTransition )
+{
+    /**
+    * No transition */
+    if( 0 == pTransition->uiTransitionDuration )
+    {
+        pTransition->VideoTransitionType = M4VSS3GPP_kVideoTransitionType_None;
+        pTransition->AudioTransitionType = M4VSS3GPP_kAudioTransitionType_None;
+    }
+    else if( ( M4VSS3GPP_kVideoTransitionType_None
+        == pTransition->VideoTransitionType)
+        && (M4VSS3GPP_kAudioTransitionType_None
+        == pTransition->AudioTransitionType) )
+    {
+        pTransition->uiTransitionDuration = 0;
+    }
+
+    /**
+    * Check external transition function is set */
+    if( ( pTransition->VideoTransitionType
+        >= M4VSS3GPP_kVideoTransitionType_External)
+        && (M4OSA_NULL == pTransition->ExtVideoTransitionFct) )
+    {
+        return M4VSS3GPP_ERR_EXTERNAL_TRANSITION_NULL;
+    }
+
+    /**
+    * Set minimal transition duration */
+    if( ( pTransition->uiTransitionDuration > 0)
+        && (pTransition->uiTransitionDuration
+        < M4VSS3GPP_MINIMAL_TRANSITION_DURATION) )
+    {
+        pTransition->uiTransitionDuration =
+            M4VSS3GPP_MINIMAL_TRANSITION_DURATION;
+    }
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intFreeSettingsList()
+ * @brief   Free the clip and transition settings lists copied into the internal context
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intFreeSettingsList(
+    M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_UInt32 i;
+
+    /**
+    * Free the settings list */
+    if( M4OSA_NULL != pC->pClipList )
+    {
+        for ( i = 0; i < pC->uiClipNumber; i++ )
+        {
+            M4VSS3GPP_editFreeClipSettings(&(pC->pClipList[i]));
+        }
+
+        M4OSA_free((M4OSA_MemAddr32)pC->pClipList);
+        pC->pClipList = M4OSA_NULL;
+    }
+
+    /**
+    * Free the transition list */
+    if( M4OSA_NULL != pC->pTransitionList )
+    {
+        M4OSA_free((M4OSA_MemAddr32)pC->pTransitionList);
+        pC->pTransitionList = M4OSA_NULL;
+    }
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreateMP3OutputFile()
+ * @brief   Creates and prepares the output MP3 file
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intCreateMP3OutputFile( M4VSS3GPP_InternalEditContext *pC,
+                                 M4OSA_Void *pOutputFile )
+{
+    M4OSA_ERR err;
+
+    err =
+        pC->pOsaFileWritPtr->openWrite(&pC->ewc.p3gpWriterContext, pOutputFile,
+        M4OSA_kFileWrite);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreateMP3OutputFile: WriteOpen returns 0x%x!", err);
+        return err;
+    }
+
+    return M4NO_ERROR;
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreate3GPPOutputFile()
+ * @brief   Creates and prepares the output 3GPP file
+ * @note    Creates the writer, creates the output file, adds the streams,
+ *          and readies the writing process
+ * @param   pC_ewc    (IN/OUT) Encode/Write context
+ ******************************************************************************
+ */
+M4OSA_ERR
+M4VSS3GPP_intCreate3GPPOutputFile( M4VSS3GPP_EncodeWriteContext *pC_ewc,
+                                  M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
+                                  M4OSA_FileWriterPointer *pOsaFileWritPtr,
+                                  M4OSA_Void *pOutputFile,
+                                  M4OSA_FileReadPointer *pOsaFileReadPtr,
+                                  M4OSA_Void *pTempFile,
+                                  M4OSA_UInt32 maxOutputFileSize )
+{
+    M4OSA_ERR err;
+    M4OSA_UInt32 uiVersion;
+    M4SYS_StreamIDValue temp;
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_intCreate3GPPOutputFile called with pC_ewc=0x%x, pOutputFile=0x%x",
+        pC_ewc, pOutputFile);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pC_ewc), M4ERR_PARAMETER,
+        "M4VSS3GPP_intCreate3GPPOutputFile: pC_ewc is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pOutputFile), M4ERR_PARAMETER,
+        "M4VSS3GPP_intCreate3GPPOutputFile: pOutputFile is M4OSA_NULL");
+
+    /* Set writer */
+    err =
+        M4VSS3GPP_setCurrentWriter(pC_ShellAPI, M4VIDEOEDITING_kFileType_3GPP);
+    M4ERR_CHECK_RETURN(err);
+
+    /**
+    * Create the output file */
+    err = pC_ShellAPI->pWriterGlobalFcts->pFctOpen(&pC_ewc->p3gpWriterContext,
+        pOutputFile, pOsaFileWritPtr, pTempFile, pOsaFileReadPtr);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreate3GPPOutputFile: pWriterGlobalFcts->pFctOpen returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Set the signature option of the writer */
+    err =
+        pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(pC_ewc->p3gpWriterContext,
+        M4WRITER_kEmbeddedString, (M4OSA_DataOption)"NXP-SW : VSS    ");
+
+    if( ( M4NO_ERROR != err) && (((M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+        != err) ) /* this option may not be implemented by some writers */
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreate3GPPOutputFile:\
+            pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedString) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /*11/12/2008 CR3283 MMS use case for VideoArtist:
+    Set the max output file size option in the writer so that the output file will be
+    smaller than the given file size limitation*/
+    if( maxOutputFileSize > 0 )
+    {
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+            pC_ewc->p3gpWriterContext,
+            M4WRITER_kMaxFileSize, &maxOutputFileSize);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                writer set option M4WRITER_kMaxFileSize returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Set the version option of the writer */
+    uiVersion =
+        (M4VIDEOEDITING_VERSION_MAJOR * 100 + M4VIDEOEDITING_VERSION_MINOR * 10
+        + M4VIDEOEDITING_VERSION_REVISION);
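+    /* For example, a hypothetical version 3.1.2 would be encoded as 3*100 + 1*10 + 2 = 312 */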
+    err =
+        pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(pC_ewc->p3gpWriterContext,
+        M4WRITER_kEmbeddedVersion, (M4OSA_DataOption) &uiVersion);
+
+    if( ( M4NO_ERROR != err) && (((M4OSA_UInt32)M4ERR_BAD_OPTION_ID)
+        != err) ) /* this option may not be implemented by some writers */
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreate3GPPOutputFile:\
+            pWriterGlobalFcts->pFctSetOption(M4WRITER_kEmbeddedVersion) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * In the EMP case, we have to explicitly give an EMP ftyp to the writer */
+    if( M4OSA_TRUE == pC_ewc->bActivateEmp )
+    {
+        M4VIDEOEDITING_FtypBox ftyp;
+
+        ftyp.major_brand = M4VIDEOEDITING_BRAND_3GP4;
+        ftyp.minor_version = M4VIDEOEDITING_BRAND_0000;
+        ftyp.nbCompatibleBrands = 2;
+        ftyp.compatible_brands[0] = M4VIDEOEDITING_BRAND_3GP4;
+        ftyp.compatible_brands[1] = M4VIDEOEDITING_BRAND_EMP;
+
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+            pC_ewc->p3gpWriterContext,
+            M4WRITER_kSetFtypBox, (M4OSA_DataOption) &ftyp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctSetOption(M4WRITER_kSetFtypBox) returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    if( M4SYS_kVideoUnknown != pC_ewc->VideoStreamType )
+    {
+        /**
+        * Set the video stream properties */
+        pC_ewc->WriterVideoStreamInfo.height = pC_ewc->uiVideoHeight;
+        pC_ewc->WriterVideoStreamInfo.width = pC_ewc->uiVideoWidth;
+        pC_ewc->WriterVideoStreamInfo.fps =
+            0.0; /**< Not used by the shell/core writer */
+        pC_ewc->WriterVideoStreamInfo.Header.pBuf =
+            pC_ewc->pVideoOutputDsi; /**< Previously computed output DSI */
+        pC_ewc->WriterVideoStreamInfo.Header.Size = pC_ewc->
+            uiVideoOutputDsiSize; /**< Previously computed output DSI size */
+
+        pC_ewc->WriterVideoStream.streamType = pC_ewc->VideoStreamType;
+
+        switch( pC_ewc->VideoStreamType )
+        {
+            case M4SYS_kMPEG_4:
+            case M4SYS_kH263:
+            case M4SYS_kH264:
+                /**< We HAVE to put a value here... */
+                pC_ewc->WriterVideoStream.averageBitrate =
+                    pC_ewc->uiVideoBitrate;
+                pC_ewc->WriterVideoStream.maxBitrate = pC_ewc->uiVideoBitrate;
+                break;
+
+            default:
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intCreate3GPPOutputFile: unknown input video format (0x%x),\
+                    returning M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT!",
+                    pC_ewc->VideoStreamType);
+                return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_VIDEO_FORMAT;
+        }
+
+        pC_ewc->WriterVideoStream.streamID = M4VSS3GPP_WRITER_VIDEO_STREAM_ID;
+        pC_ewc->WriterVideoStream.timeScale =
+            0; /**< Not used by the shell/core writer */
+        pC_ewc->WriterVideoStream.profileLevel =
+            0; /**< Not used by the shell/core writer */
+        pC_ewc->WriterVideoStream.duration =
+            0; /**< Not used by the shell/core writer */
+
+        pC_ewc->WriterVideoStream.decoderSpecificInfoSize =
+            sizeof(M4WRITER_StreamVideoInfos);
+        pC_ewc->WriterVideoStream.decoderSpecificInfo =
+            (M4OSA_MemAddr32) &(pC_ewc->WriterVideoStreamInfo);
+
+        /**
+        * Add the video stream */
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctAddStream(
+            pC_ewc->p3gpWriterContext, &pC_ewc->WriterVideoStream);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctAddStream(video) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Update AU properties for video stream */
+        pC_ewc->WriterVideoAU.attribute = AU_RAP;
+        pC_ewc->WriterVideoAU.CTS = 0;
+        pC_ewc->WriterVideoAU.DTS = 0;    /** Reset time */
+        pC_ewc->WriterVideoAU.frag = M4OSA_NULL;
+        pC_ewc->WriterVideoAU.nbFrag = 0; /** No fragment */
+        pC_ewc->WriterVideoAU.size = 0;
+        pC_ewc->WriterVideoAU.dataAddress = M4OSA_NULL;
+        pC_ewc->WriterVideoAU.stream = &(pC_ewc->WriterVideoStream);
+
+        /**
+        * Set the writer max video AU size */
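+        /* Rationale (assumed): width * height * 1.5 is the size in bytes of a raw YUV 4:2:0
+         * frame; scaling it by M4VSS3GPP_VIDEO_MIN_COMPRESSION_RATIO gives a conservative
+         * upper bound on the size of one encoded video AU */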
+        pC_ewc->uiVideoMaxAuSize = (M4OSA_UInt32)(1.5F
+            *(M4OSA_Float)(pC_ewc->WriterVideoStreamInfo.width
+            * pC_ewc->WriterVideoStreamInfo.height)
+            * M4VSS3GPP_VIDEO_MIN_COMPRESSION_RATIO);
+        temp.streamID = M4VSS3GPP_WRITER_VIDEO_STREAM_ID;
+        temp.value = pC_ewc->uiVideoMaxAuSize;
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+            pC_ewc->p3gpWriterContext, (M4OSA_UInt32)M4WRITER_kMaxAUSize,
+            (M4OSA_DataOption) &temp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, video) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Set the writer max video chunk size */
+        temp.streamID = M4VSS3GPP_WRITER_VIDEO_STREAM_ID;
+        temp.value = (M4OSA_UInt32)(pC_ewc->uiVideoMaxAuSize \
+            * M4VSS3GPP_VIDEO_AU_SIZE_TO_CHUNCK_SIZE_RATIO); /**< from max AU size to
+                                                                  max Chunck size */
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+            pC_ewc->p3gpWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
+            (M4OSA_DataOption) &temp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxChunckSize, video) returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    if( M4SYS_kAudioUnknown != pC_ewc->AudioStreamType )
+    {
+        M4WRITER_StreamAudioInfos streamAudioInfo;
+
+        streamAudioInfo.nbSamplesPerSec = 0; /**< unused by our shell writer */
+        streamAudioInfo.nbBitsPerSample = 0; /**< unused by our shell writer */
+        streamAudioInfo.nbChannels = 1;      /**< unused by our shell writer */
+
+        if( pC_ewc->pAudioOutputDsi != M4OSA_NULL )
+        {
+            /* If we copy the stream from the input, we copy its DSI */
+            streamAudioInfo.Header.Size = pC_ewc->uiAudioOutputDsiSize;
+            streamAudioInfo.Header.pBuf = pC_ewc->pAudioOutputDsi;
+        }
+        else
+        {
+            /* Writer will put a default DSI */
+            streamAudioInfo.Header.Size = 0;
+            streamAudioInfo.Header.pBuf = M4OSA_NULL;
+        }
+
+        pC_ewc->WriterAudioStream.streamID = M4VSS3GPP_WRITER_AUDIO_STREAM_ID;
+        pC_ewc->WriterAudioStream.streamType = pC_ewc->AudioStreamType;
+        pC_ewc->WriterAudioStream.duration =
+            0; /**< Not used by the shell/core writer */
+        pC_ewc->WriterAudioStream.profileLevel =
+            0; /**< Not used by the shell/core writer */
+        pC_ewc->WriterAudioStreamInfo.nbSamplesPerSec =
+            pC_ewc->uiSamplingFrequency;
+        pC_ewc->WriterAudioStream.timeScale = pC_ewc->uiSamplingFrequency;
+        pC_ewc->WriterAudioStreamInfo.nbChannels =
+            (M4OSA_UInt16)pC_ewc->uiNbChannels;
+        pC_ewc->WriterAudioStreamInfo.nbBitsPerSample =
+            0; /**< Not used by the shell/core writer */
+
+        /**
+        * Add the audio stream */
+        switch( pC_ewc->AudioStreamType )
+        {
+            case M4SYS_kAMR:
+                pC_ewc->WriterAudioStream.averageBitrate =
+                    0; /**< It is not used by the shell, the DSI is taken into account instead */
+                pC_ewc->WriterAudioStream.maxBitrate =
+                    0; /**< Not used by the shell/core writer */
+                break;
+
+            case M4SYS_kAAC:
+                pC_ewc->WriterAudioStream.averageBitrate =
+                    pC_ewc->uiAudioBitrate;
+                pC_ewc->WriterAudioStream.maxBitrate = pC_ewc->uiAudioBitrate;
+                break;
+
+            case M4SYS_kEVRC:
+                pC_ewc->WriterAudioStream.averageBitrate =
+                    0; /**< It is not used by the shell, the DSI is taken into account instead */
+                pC_ewc->WriterAudioStream.maxBitrate =
+                    0; /**< Not used by the shell/core writer */
+                break;
+
+            case M4SYS_kMP3: /**< there can't be an MP3 track in a 3GPP file -> error */
+            default:
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intCreate3GPPOutputFile: unknown output audio format (0x%x),\
+                    returning M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT!",
+                    pC_ewc->AudioStreamType);
+                return M4VSS3GPP_ERR_UNSUPPORTED_INPUT_AUDIO_FORMAT;
+        }
+
+        /**
+        * Our writer shell interface is a little tricky: we put M4WRITER_StreamAudioInfos
+        in the DSI pointer... */
+        pC_ewc->WriterAudioStream.decoderSpecificInfo =
+            (M4OSA_MemAddr32) &streamAudioInfo;
+
+        /**
+        * Link the AU and the stream */
+        pC_ewc->WriterAudioAU.stream = &(pC_ewc->WriterAudioStream);
+        pC_ewc->WriterAudioAU.dataAddress = M4OSA_NULL;
+        pC_ewc->WriterAudioAU.size = 0;
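+        /* The initial CTS is set to minus one silence frame duration, presumably so that
+         * the first audio AU actually written (one frame later) lands at time 0 */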
+        pC_ewc->WriterAudioAU.CTS =
+            -pC_ewc->iSilenceFrameDuration; /** Reset time */
+        pC_ewc->WriterAudioAU.DTS = 0;
+        pC_ewc->WriterAudioAU.attribute = 0;
+        pC_ewc->WriterAudioAU.nbFrag = 0; /** No fragment */
+        pC_ewc->WriterAudioAU.frag = M4OSA_NULL;
+
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctAddStream(
+            pC_ewc->p3gpWriterContext, &pC_ewc->WriterAudioStream);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctAddStream(audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Set the writer max audio AU size */
+        pC_ewc->uiAudioMaxAuSize = M4VSS3GPP_AUDIO_MAX_AU_SIZE;
+        temp.streamID = M4VSS3GPP_WRITER_AUDIO_STREAM_ID;
+        temp.value = pC_ewc->uiAudioMaxAuSize;
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+            pC_ewc->p3gpWriterContext, (M4OSA_UInt32)M4WRITER_kMaxAUSize,
+            (M4OSA_DataOption) &temp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxAUSize, audio) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * Set the writer max audio chunck size */
+        temp.streamID = M4VSS3GPP_WRITER_AUDIO_STREAM_ID;
+        temp.value = M4VSS3GPP_AUDIO_MAX_CHUNCK_SIZE;
+        err = pC_ShellAPI->pWriterGlobalFcts->pFctSetOption(
+            pC_ewc->p3gpWriterContext,
+            (M4OSA_UInt32)M4WRITER_kMaxChunckSize,
+            (M4OSA_DataOption) &temp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreate3GPPOutputFile:\
+                pWriterGlobalFcts->pFctSetOption(M4WRITER_kMaxChunckSize, audio) returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * All streams added, we're now ready to write */
+    err = pC_ShellAPI->pWriterGlobalFcts->pFctStartWriting(
+        pC_ewc->p3gpWriterContext);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreate3GPPOutputFile:\
+            pWriterGlobalFcts->pFctStartWriting() returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intCreate3GPPOutputFile(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR  M4VSS3GPP_intComputeOutputVideoAndAudioDsi()
+ * @brief   Generate an H263, MPEG-4 or H264 decoder specific info compatible with all
+ *          input video tracks, and copy the audio DSI from the master clip.
+ * @param   pC             (IN/OUT) Internal edit context
+ * @param   uiMasterClip   (IN)     Index of the master clip in the clip list
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intComputeOutputVideoAndAudioDsi( M4VSS3GPP_InternalEditContext *pC,
+                                           M4OSA_UInt8 uiMasterClip )
+{
+    M4OSA_UInt8 uiCurrentLevel, uiNewLevel;
+    M4OSA_UInt8 uiCurrentProf, uiNewProf;
+    M4OSA_Int32 iResynchMarkerDsiIndex;
+    M4_StreamHandler *pStreamForDsi;
+    M4VSS3GPP_ClipContext *pClip;
+    M4OSA_ERR err;
+    M4OSA_UInt32 i;
+
+    M4ENCODER_Header *encHeader;
+    M4SYS_StreamIDmemAddr streamHeader;
+
+    pStreamForDsi = M4OSA_NULL;
+    pClip = M4OSA_NULL;
+
+    /**
+    * H263 case */
+    if( M4SYS_kH263 == pC->ewc.VideoStreamType )
+    {
+        /**
+        * H263 output DSI is always 7 bytes */
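+        /* Layout, as filled in below: 4-byte vendor code, 1-byte decoder version,
+         * 1-byte level, 1-byte profile */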
+        pC->ewc.uiVideoOutputDsiSize = 7;
+        pC->ewc.pVideoOutputDsi =
+            (M4OSA_MemAddr8)M4OSA_malloc(pC->ewc.uiVideoOutputDsiSize,
+            M4VSS3GPP, (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (H263)");
+
+        if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
+                unable to allocate pVideoOutputDsi (H263), returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+
+        /**
+        * (We override the input vendor info.
+        * At least we know that nothing special will be tried with PHLP-stamped
+          edited streams...) */
+        pC->ewc.pVideoOutputDsi[0] = 'P';
+        pC->ewc.pVideoOutputDsi[1] = 'H';
+        pC->ewc.pVideoOutputDsi[2] = 'L';
+        pC->ewc.pVideoOutputDsi[3] = 'P';
+
+        /**
+        * Decoder version is 0 */
+        pC->ewc.pVideoOutputDsi[4] = 0;
+
+        /**
+        * We take the max level of all input streams, but 10 is the minimum */
+        uiCurrentLevel = 10;
+
+        for ( i = 0; i < pC->uiClipNumber; i++ )
+        {
+            uiNewLevel = pC->pClipList[i].ClipProperties.uiH263level;
+
+            if( uiNewLevel > uiCurrentLevel )
+            {
+                uiCurrentLevel = uiNewLevel;
+            }
+        }
+
+        /**
+        * Level is the sixth byte in the DSI */
+        pC->ewc.pVideoOutputDsi[5] = uiCurrentLevel;
+
+        /**
+        * Profile is always 0, and it's the seventh byte in the DSI */
+        pC->ewc.pVideoOutputDsi[6] = 0;
+    }
+
+    /**
+    * MPEG-4 case */
+    else if( M4SYS_kMPEG_4 == pC->ewc.VideoStreamType )
+    {
+        /**
+        * Profile combination rules:
+        *   8 and x -> x
+        *   1, 2 or 3 -> max
+        *   9 and 1 -> 2
+        *   9 and 2 -> 2
+        *   9 and 3 -> 3
+        */
+
+        /**
+        * Note:
+        *   The parts of the output video encoded by the VSS3GPP
+        *   have a profile of 8.
+        *   Since 8 is the least restrictive profile (8 and x --> x),
+        *   we only need to check the input clips to compute the
+        *   profile of the output combined clip.
+        */
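+        /* Assumption: these uiVideoProfile values look like MPEG-4 Visual
+         * profile_and_level_indication codes (e.g. 1..3 = Simple Profile L1..L3,
+         * 8 = Simple Profile L0, 9 = Simple Profile L0b) */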
+
+        /**
+        * Start with profile of the first clip */
+        uiCurrentProf = pC->pClipList[0].ClipProperties.uiVideoProfile;
+
+        /**
+        * Combine current profile with the one of the next clip */
+        for ( i = 1; i < pC->uiClipNumber; i++ )
+        {
+            uiNewProf = pC->pClipList[i].ClipProperties.uiVideoProfile;
+
+            switch( uiNewProf )
+            {
+                case 8:
+                    /**< 8 + x --> x */
+                    /**< uiCurrentProf is not updated */
+                    break;
+
+                case 1:
+                case 2:
+                case 3:
+                    switch( uiCurrentProf )
+                    {
+                        case 1:
+                        case 2:
+                        case 3:
+                        case 4:
+                        case 5:
+                            /**< 1, 2, 3, 4 or 5 -> max */
+                            uiCurrentProf = (uiCurrentProf > uiNewProf)
+                                ? uiCurrentProf : uiNewProf;
+                            break;
+
+                        case 8: /**< 8 + x -> x */
+                            uiCurrentProf = uiNewProf;
+                            break;
+
+                        case 9:
+                            /**< 9 and 1 -> 2 */
+                            /**< 9 and 2 -> 2 */
+                            /**< 9 and 3 -> 3 */
+                            /**< 9 and 4 -> 4 */
+                            /**< 9 and 5 -> 5 */
+                            uiCurrentProf = (uiNewProf > 2) ? uiNewProf : 2;
+                            break;
+                    }
+                    break;
+
+                case 9:
+                    switch( uiCurrentProf )
+                    {
+                        case 1:
+                        case 2:
+                        case 3:
+                            /**< 9 and 1 -> 2 */
+                            /**< 9 and 2 -> 2 */
+                            /**< 9 and 3 -> 3 */
+                            uiCurrentProf =
+                                (uiCurrentProf > 2) ? uiCurrentProf : 2;
+                            break;
+
+                        case 9: /**< 9 + x -> x */
+                        case 8: /**< 8 + x -> x */
+                            uiCurrentProf = uiNewProf;
+                            break;
+                    }
+                    break;
+            }
+        }
+
+        /**
+        * Look for the DSI of an input video stream which would use the Resynch. Marker tool */
+        i = 0;
+        iResynchMarkerDsiIndex =
+            0; /**< By default we take the first DSI (if we find no Resynch Marker DSI) */
+
+        while( i < pC->uiClipNumber )
+        {
+            if( M4OSA_TRUE
+                == pC->pClipList[i].ClipProperties.bMPEG4resynchMarker )
+            {
+                iResynchMarkerDsiIndex = i;
+                break; /**< we found it, get out of the while loop */
+            }
+            i++;
+        }
+
+        /**
+        * Get the DSI of the clip found. If it is the first clip, it is already opened.
+        * Else we must open it (and later close it...) */
+        if( 0 == iResynchMarkerDsiIndex )
+        {
+            pStreamForDsi = &(pC->pC1->pVideoStream->m_basicProperties);
+        }
+        else
+        {
+            /**
+            * We can use the fast open mode and the skip audio mode to get the DSI */
+            err = M4VSS3GPP_intClipInit(&pClip, pC->pOsaFileReadPtr);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intClipInit() returns 0x%x!",
+                    err);
+
+                if( M4OSA_NULL != pClip )
+                {
+                    M4VSS3GPP_intClipCleanUp(pClip);
+                }
+                return err;
+            }
+
+            err = M4VSS3GPP_intClipOpen(pClip,
+                &pC->pClipList[iResynchMarkerDsiIndex], M4OSA_TRUE,
+                M4OSA_TRUE, M4OSA_TRUE);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intClipOpen() returns 0x%x!",
+                    err);
+                M4VSS3GPP_intClipCleanUp(pClip);
+                return err;
+            }
+
+            pStreamForDsi = &(pClip->pVideoStream->m_basicProperties);
+        }
+
+        /**
+        * Allocate and copy the new DSI */
+        pC->ewc.pVideoOutputDsi = (M4OSA_MemAddr8)M4OSA_malloc(
+            pStreamForDsi->m_decoderSpecificInfoSize,
+            M4VSS3GPP, (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (MPEG4)");
+
+        if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
+                unable to allocate pVideoOutputDsi (MPEG4), returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->ewc.uiVideoOutputDsiSize =
+            (M4OSA_UInt16)pStreamForDsi->m_decoderSpecificInfoSize;
+        M4OSA_memcpy(pC->ewc.pVideoOutputDsi,
+            (M4OSA_MemAddr8)pStreamForDsi->m_pDecoderSpecificInfo,
+            pC->ewc.uiVideoOutputDsiSize);
+
+        /**
+        * We rewrite the profile in the output DSI because it may not be the correct one.
+        * The profile_and_level_indication byte is always at index 4 in the DSI
+        * (right after the 4-byte visual_object_sequence_start_code) */
+        (pC->ewc.pVideoOutputDsi)[4] = uiCurrentProf;
+
+        /**
+        * If a clip has been temporarily opened to get its DSI, close it */
+        if( M4OSA_NULL != pClip )
+        {
+            err = M4VSS3GPP_intClipCleanUp(pClip);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intClipCleanUp() returns 0x%x!",
+                    err);
+                return err;
+            }
+        }
+    }
+    else if( M4SYS_kH264 == pC->ewc.VideoStreamType )
+    {
+
+        /* For the H.264 encoder case:
+         * fetch the DSI from the shell video encoder and feed it to the writer
+         * before closing it. */
+
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intComputeOutputVideoAndAudioDsi: get DSI for H264 stream");
+
+        if( M4OSA_NULL == pC->ewc.pEncContext )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi: pC->ewc.pEncContext is NULL");
+            err = M4VSS3GPP_intCreateVideoEncoder(pC);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intCreateVideoEncoder returned error 0x%x",
+                    err);
+            }
+        }
+
+        if( M4OSA_NULL != pC->ewc.pEncContext )
+        {
+            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctGetOption(
+                pC->ewc.pEncContext, M4ENCODER_kOptionID_EncoderHeader,
+                (M4OSA_DataOption) &encHeader);
+
+            if( ( M4NO_ERROR != err) || (M4OSA_NULL == encHeader->pBuf) )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    failed to get the encoder header (err 0x%x)",
+                    err);
+                M4OSA_TRACE1_2(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi: encHeader->pBuf=0x%x, size=0x%x",
+                    encHeader->pBuf, encHeader->Size);
+            }
+            else
+            {
+                M4OSA_TRACE1_0(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    send DSI for H264 stream to 3GP writer");
+
+                /**
+                * Allocate and copy the new DSI */
+                pC->ewc.pVideoOutputDsi =
+                    (M4OSA_MemAddr8)M4OSA_malloc(encHeader->Size, M4VSS3GPP,
+                    (M4OSA_Char *)"pC->ewc.pVideoOutputDsi (H264)");
+
+                if( M4OSA_NULL == pC->ewc.pVideoOutputDsi )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
+                        unable to allocate pVideoOutputDsi (H264), returning M4ERR_ALLOC");
+                    return M4ERR_ALLOC;
+                }
+                pC->ewc.uiVideoOutputDsiSize = (M4OSA_UInt16)encHeader->Size;
+                M4OSA_memcpy(pC->ewc.pVideoOutputDsi, encHeader->pBuf,
+                    encHeader->Size);
+            }
+
+            err = M4VSS3GPP_intDestroyVideoEncoder(pC);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intDestroyVideoEncoder returned error 0x%x",
+                    err);
+            }
+        }
+        else
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                pC->ewc.pEncContext is NULL, cannot get the DSI");
+        }
+    }
+
+    pStreamForDsi = M4OSA_NULL;
+    pClip = M4OSA_NULL;
+
+    /* Compute Audio DSI */
+    if( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType )
+    {
+        if( uiMasterClip == 0 )
+        {
+            /* Clip is already opened */
+            pStreamForDsi = &(pC->pC1->pAudioStream->m_basicProperties);
+        }
+        else
+        {
+            /**
+            * We can use the fast open mode to get the DSI */
+            err = M4VSS3GPP_intClipInit(&pClip, pC->pOsaFileReadPtr);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intClipInit() returns 0x%x!",
+                    err);
+
+                if( pClip != M4OSA_NULL )
+                {
+                    M4VSS3GPP_intClipCleanUp(pClip);
+                }
+                return err;
+            }
+
+            err = M4VSS3GPP_intClipOpen(pClip, &pC->pClipList[uiMasterClip],
+                M4OSA_FALSE, M4OSA_TRUE, M4OSA_TRUE);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intClipOpen() returns 0x%x!",
+                    err);
+                M4VSS3GPP_intClipCleanUp(pClip);
+                return err;
+            }
+
+            pStreamForDsi = &(pClip->pAudioStream->m_basicProperties);
+        }
+
+        /**
+        * Allocate and copy the new DSI */
+        pC->ewc.pAudioOutputDsi = (M4OSA_MemAddr8)M4OSA_malloc(
+            pStreamForDsi->m_decoderSpecificInfoSize,
+            M4VSS3GPP, (M4OSA_Char *)"pC->ewc.pAudioOutputDsi");
+
+        if( M4OSA_NULL == pC->ewc.pAudioOutputDsi )
+        {
+            M4OSA_TRACE1_0(
+                "M4VSS3GPP_intComputeOutputVideoAndAudioDsi():\
+                unable to allocate pAudioOutputDsi, returning M4ERR_ALLOC");
+            return M4ERR_ALLOC;
+        }
+        pC->ewc.uiAudioOutputDsiSize =
+            (M4OSA_UInt16)pStreamForDsi->m_decoderSpecificInfoSize;
+        M4OSA_memcpy(pC->ewc.pAudioOutputDsi,
+            (M4OSA_MemAddr8)pStreamForDsi->m_pDecoderSpecificInfo,
+            pC->ewc.uiAudioOutputDsiSize);
+
+        /**
+        * If a clip has been temporarily opened to get its DSI, close it */
+        if( M4OSA_NULL != pClip )
+        {
+            err = M4VSS3GPP_intClipCleanUp(pClip);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intComputeOutputVideoAndAudioDsi:\
+                    M4VSS3GPP_intClipCleanUp() returns 0x%x!",
+                    err);
+                return err;
+            }
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0(
+        "M4VSS3GPP_intComputeOutputVideoAndAudioDsi(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intSwitchToNextClip()
+ * @brief    Switch from the current clip to the next one
+ * @param   pC            (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intSwitchToNextClip(
+    M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+
+    if( M4OSA_NULL != pC->pC1 )
+    {
+        /**
+        * Close the current first clip */
+        err = M4VSS3GPP_intClipCleanUp(pC->pC1);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSwitchToNextClip: M4VSS3GPP_intClipCleanUp(C1) returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        *  increment clip counter */
+        pC->uiCurrentClip++;
+    }
+
+    /**
+    * Check if we reached the last clip */
+    if( pC->uiCurrentClip >= pC->uiClipNumber )
+    {
+        pC->pC1 = M4OSA_NULL;
+        pC->State = M4VSS3GPP_kEditState_FINISHED;
+
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intSwitchToNextClip:\
+            last clip reached, returning M4VSS3GPP_WAR_EDITING_DONE");
+        return M4VSS3GPP_WAR_EDITING_DONE;
+    }
+
+    /**
+    * If the next clip has already been opened, set it as the first clip */
+    if( M4OSA_NULL != pC->pC2 )
+    {
+        pC->pC1 = pC->pC2;
+        pC->pC2 = M4OSA_NULL;
+    }
+    /**
+    * else open it */
+    else
+    {
+        err = M4VSS3GPP_intOpenClip(pC, &pC->pC1,
+            &pC->pClipList[pC->uiCurrentClip]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSwitchToNextClip: M4VSS3GPP_intOpenClip() returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * If the second clip has not been opened yet, it means there has been
+        * no transition, so both output video and audio times are OK and we
+        * can set both the video and audio offsets */
+
+        /**
+        * Add current video output CTS to the clip video offset */
+
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+        pC->pC1->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts;
+        /**
+        * Add current audio output CTS to the clip audio offset */
+        pC->pC1->iAoffset +=
+            (M4OSA_UInt32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+        /**
+        * 2005-03-24: Bug fix for audio-video synchronization:
+        * Up to a fraction of an audio AU duration of desynchronization may be introduced
+        * at each assembly, which becomes audible when many clips are assembled.
+        * This fix resynchronizes the audio track when the delta is higher
+        * than one audio AU duration:
+        * we step one AU forward in the clip and adjust the audio offset accordingly. */
+        if( ( pC->pC1->iAoffset
+            - (M4OSA_Int32)(pC->pC1->iVoffset *pC->pC1->scale_audio + 0.5))
+        > pC->ewc.iSilenceFrameDuration )
+        {
+            /**
+            * Advance one AMR frame */
+            err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+            if( M4OSA_ERR_IS_ERROR(err) )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intSwitchToNextClip:\
+                    M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
+                    err);
+                return err;
+            }
+            /**
+            * Update audio offset accordingly*/
+            pC->pC1->iAoffset -= pC->ewc.iSilenceFrameDuration;
+        }
+    }
+
+    /**
+    * Init starting state for this clip processing */
+    if( M4SYS_kMP3 == pC->ewc.AudioStreamType )
+    {
+        /**
+        * In the MP3 case we use a special audio state */
+        pC->State = M4VSS3GPP_kEditState_MP3_JUMP;
+    }
+    else
+    {
+        /**
+        * We start with the video processing */
+        pC->State = M4VSS3GPP_kEditState_VIDEO;
+
+        if( pC->Vstate != M4VSS3GPP_kEditVideoState_TRANSITION )
+        {
+            /* if not a transition then reset previous video state */
+            pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+
+            if( pC->bIsMMS == M4OSA_FALSE ) /* RC */
+            {
+                /* There may be an encoder to destroy */
+                err = M4VSS3GPP_intDestroyVideoEncoder(pC);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intSwitchToNextClip:\
+                        M4VSS3GPP_intDestroyVideoEncoder() returns 0x%x!",
+                        err);
+                    return err;
+                }
+            }
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intSwitchToNextClip(): returning M4NO_ERROR");
+    /* RC: to know when a file has been processed */
+    return M4VSS3GPP_WAR_SWITCH_CLIP;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo()
+ * @brief   Handle reaching the end of a clip's video track
+ * @note    If there is audio on the current clip, process it, else switch to the next clip
+ * @param   pC            (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intReachedEndOfVideo( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+
+    /**
+    * Video is done for this clip, now we do the audio */
+    if( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType )
+    {
+        pC->State = M4VSS3GPP_kEditState_AUDIO;
+    }
+    else
+    {
+        /**
+        * Clip done, do the next one */
+        err = M4VSS3GPP_intSwitchToNextClip(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intReachedEndOfVideo: M4VSS3GPP_intSwitchToNextClip() returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intReachedEndOfVideo(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio()
+ * @brief   Handle reaching the end of a clip's audio track
+ * @param   pC            (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intReachedEndOfAudio( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+
+    /**
+    * Clip done, do the next one */
+    err = M4VSS3GPP_intSwitchToNextClip(pC);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intReachedEndOfAudio: M4VSS3GPP_intSwitchToNextClip() returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Start with the video */
+    if( M4SYS_kVideoUnknown != pC->ewc.VideoStreamType )
+    {
+        pC->State = M4VSS3GPP_kEditState_VIDEO;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intReachedEndOfAudio(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intOpenClip()
+ * @brief   Open the next clip
+ * @param   pC             (IN/OUT) Internal edit context
+ * @param   hClip          (OUT)    Handle to the clip context to create and open
+ * @param   pClipSettings  (IN)     Settings of the clip to open
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intOpenClip( M4VSS3GPP_InternalEditContext *pC,
+                                M4VSS3GPP_ClipContext ** hClip,
+                                M4VSS3GPP_ClipSettings *pClipSettings )
+{
+    M4OSA_ERR err;
+    M4VSS3GPP_ClipContext *pClip; /**< shortcut */
+    M4VIDEOEDITING_ClipProperties *pClipProperties;
+    M4OSA_Int32 iCts;
+    M4OSA_UInt32 i;
+
+    M4OSA_TRACE2_1("M4VSS3GPP_intOpenClip: \"%s\"",
+        (M4OSA_Char *)pClipSettings->pFile);
+
+    err = M4VSS3GPP_intClipInit(hClip, pC->pOsaFileReadPtr);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipInit() returns 0x%x!",
+            err);
+
+        if( *hClip != M4OSA_NULL )
+        {
+            M4VSS3GPP_intClipCleanUp(*hClip);
+        }
+        return err;
+    }
+
+    /**
+    * Set shortcut */
+    pClip = *hClip;
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+    /* replay recorded external decoder registrations on the clip */
+
+    for ( i = 0; i < M4VD_kVideoType_NB; i++ )
+    {
+        if( pC->registeredExternalDecs[i].registered )
+        {
+            err = M4VSS3GPP_intClipRegisterExternalVideoDecoder(pClip, i,
+                pC->registeredExternalDecs[i].pDecoderInterface,
+                pC->registeredExternalDecs[i].pUserData);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intOpenClip:\
+                    M4VSS3GPP_intClipRegisterExternalVideoDecoder() returns 0x%x!",
+                    err);
+                M4VSS3GPP_intClipCleanUp(pClip);
+                return err;
+            }
+        }
+    }
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+    M4OSA_TRACE3_1("M4VSS3GPP_intOpenClip: pClip->ShellAPI = 0x%x",
+        &pClip->ShellAPI);
+    err = M4VSS3GPP_intSubscribeExternalCodecs((M4VSS3GPP_EditContext *)pC,
+        (M4OSA_Context) &pClip->ShellAPI);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intOpenClip: M4VSS3GPP_intSubscribeExternalCodecs returned err 0x%x",
+            err);
+    }
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_intOpenClip: M4VSS3GPP_intSubscribeExternalCodecs returned 0x%x",
+        err);
+#endif /* M4VSS_SUPPORT_OMX_CODECS */
+
+    err = M4VSS3GPP_intClipOpen(pClip, pClipSettings, M4OSA_FALSE, M4OSA_FALSE,
+        M4OSA_FALSE);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipOpen() returns 0x%x!",
+            err);
+        M4VSS3GPP_intClipCleanUp(pClip);
+        *hClip = M4OSA_NULL;
+        return err;
+    }
+
+    pClipProperties = &pClip->pSettings->ClipProperties;
+
+    /**
+    * Copy common 'silence frame stuff' to ClipContext */
+    pClip->uiSilencePcmSize = pC->ewc.uiSilencePcmSize;
+    pClip->pSilenceFrameData = pC->ewc.pSilenceFrameData;
+    pClip->uiSilenceFrameSize = pC->ewc.uiSilenceFrameSize;
+    pClip->iSilenceFrameDuration = pC->ewc.iSilenceFrameDuration;
+    pClip->scale_audio = pC->ewc.scale_audio;
+
+    pClip->iAudioFrameCts = -pClip->iSilenceFrameDuration; /* Reset time */
+
+    /**
+    * If the audio track is not compatible with the output audio format,
+    * we remove it. So it will be replaced by silence */
+    if( M4OSA_FALSE == pClipProperties->bAudioIsCompatibleWithMasterClip )
+    {
+        M4VSS3GPP_intClipDeleteAudioTrack(pClip);
+    }
+
+    /**
+    * Actual begin cut */
+    if( 0 == pClipSettings->uiBeginCutTime )
+    {
+        pClip->iVoffset = 0;
+        pClip->iAoffset = 0;
+        pClip->iActualVideoBeginCut = 0;
+        pClip->iActualAudioBeginCut = 0;
+    }
+    else
+    {
+        if( M4SYS_kVideoUnknown != pC->ewc.VideoStreamType )
+        {
+            /**
+            * Jump the video to the target begin cut to get the actual begin cut value */
+            pClip->iActualVideoBeginCut =
+                (M4OSA_Int32)pClipSettings->uiBeginCutTime;
+            iCts = pClip->iActualVideoBeginCut;
+
+            err = pClip->ShellAPI.m_pReader->m_pFctJump(pClip->pReaderContext,
+                (M4_StreamHandler *)pClip->pVideoStream, &iCts);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intOpenClip: m_pFctJump(V) returns 0x%x!", err);
+                return err;
+            }
+
+            /**
+            * Update clip offset with the video begin cut */
+            pClip->iVoffset = -pClip->iActualVideoBeginCut;
+        }
+
+        if( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType )
+        {
+            /**
+            * Jump the audio to the video actual begin cut */
+            if( M4VIDEOEDITING_kMP3 != pClipProperties->AudioStreamType )
+            {
+                pClip->iActualAudioBeginCut = pClip->iActualVideoBeginCut;
+                iCts = (M4OSA_Int32)(pClip->iActualAudioBeginCut
+                    * pClip->scale_audio + 0.5);
+
+                err = M4VSS3GPP_intClipJumpAudioAt(pClip, &iCts);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipJumpAudioAt(A) returns 0x%x!",
+                        err);
+                    return err;
+                }
+                /**
+                * Update clip offset with the audio begin cut */
+                pClip->iAoffset = -iCts;
+            }
+            else
+            {
+                /**
+                * For MP3, the jump is not done because, with VBR,
+                * it might not be accurate enough */
+                pClip->iActualAudioBeginCut =
+                    (M4OSA_Int32)pClipSettings->uiBeginCutTime;
+            }
+        }
+    }
+
+    if( M4SYS_kVideoUnknown != pC->ewc.VideoStreamType )
+    {
+        /**
+        * Read the first Video AU of the clip */
+        err = pClip->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+            pClip->pReaderContext,
+            (M4_StreamHandler *)pClip->pVideoStream, &pClip->VideoAU);
+
+        if( M4WAR_NO_MORE_AU == err )
+        {
+            /**
+            * If we have (already!) reached the end of the clip, we filter the error.
+            * It will be handled correctly at the first step. */
+            err = M4NO_ERROR;
+        }
+        else if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intOpenClip: m_pReaderDataIt->m_pFctGetNextAu() returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * The video is currently in reading mode */
+        pClip->Vstatus = M4VSS3GPP_kClipStatus_READ;
+    }
+
+    if( ( M4SYS_kAudioUnknown != pC->ewc.AudioStreamType)
+        && (M4VIDEOEDITING_kMP3 != pClipProperties->AudioStreamType) )
+    {
+        /**
+        * Read the first Audio AU of the clip */
+        err = M4VSS3GPP_intClipReadNextAudioFrame(pClip);
+
+        if( M4OSA_ERR_IS_ERROR(err) )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intOpenClip: M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
+                err);
+            return err;
+        }
+
+        /**
+        * The audio is currently in reading mode */
+        pClip->Astatus = M4VSS3GPP_kClipStatus_READ;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intOpenClip(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intComputeOutputAverageVideoBitrate()
+ * @brief   Compute the average video bitrate of the output file from the input
+ *          bitrates, durations, transitions and cuts.
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intComputeOutputAverageVideoBitrate(
+    M4VSS3GPP_InternalEditContext *pC )
+{
+    M4VSS3GPP_ClipSettings *pCS_0, *pCS_1, *pCS_2;
+    M4VSS3GPP_TransitionSettings *pT0, *pT2;
+    M4OSA_Int32 i;
+
+    M4OSA_UInt32 t0_duration, t2_duration;
+    M4OSA_UInt32 t0_bitrate, t2_bitrate;
+    M4OSA_UInt32 c1_duration;
+
+    M4OSA_UInt32 total_duration;
+    M4OSA_UInt32 total_bitsum;
+
+    total_duration = 0;
+    total_bitsum = 0;
+
+    /* Loop on the number of clips */
+    for ( i = 0; i < pC->uiClipNumber; i++ )
+    {
+        pCS_1 = &pC->pClipList[i];
+
+        t0_duration = 0;
+        t0_bitrate = pCS_1->ClipProperties.uiVideoBitrate;
+        t2_duration = 0;
+        t2_bitrate = pCS_1->ClipProperties.uiVideoBitrate;
+
+        /* Transition with the previous clip */
+        if( i > 0 )
+        {
+            pCS_0 = &pC->pClipList[i - 1];
+            pT0 = &pC->pTransitionList[i - 1];
+
+            if( pT0->VideoTransitionType
+                != M4VSS3GPP_kVideoTransitionType_None )
+            {
+                t0_duration = pT0->uiTransitionDuration;
+
+                if( pCS_0->ClipProperties.uiVideoBitrate > t0_bitrate )
+                {
+                    t0_bitrate = pCS_0->ClipProperties.uiVideoBitrate;
+                }
+            }
+        }
+
+        /* Transition with the next clip */
+        if( i < pC->uiClipNumber - 1 )
+        {
+            pCS_2 = &pC->pClipList[i + 1];
+            pT2 = &pC->pTransitionList[i];
+
+            if( pT2->VideoTransitionType
+                != M4VSS3GPP_kVideoTransitionType_None )
+            {
+                t2_duration = pT2->uiTransitionDuration;
+
+                if( pCS_2->ClipProperties.uiVideoBitrate > t2_bitrate )
+                {
+                    t2_bitrate = pCS_2->ClipProperties.uiVideoBitrate;
+                }
+            }
+        }
+
+        /* Check for cut times */
+        if( pCS_1->uiEndCutTime > 0 )
+            c1_duration = pCS_1->uiEndCutTime;
+        else
+            c1_duration = pCS_1->ClipProperties.uiClipVideoDuration;
+
+        if( pCS_1->uiBeginCutTime > 0 )
+            c1_duration -= pCS_1->uiBeginCutTime;
+
+        c1_duration -= t0_duration + t2_duration;
+
+        /* Compute bitsum and duration */
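+        /* Half of each transition duration is attributed to this clip, weighted with the
+         * higher bitrate of the two clips involved in that transition */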
+        total_duration += c1_duration + t0_duration / 2 + t2_duration / 2;
+
+        total_bitsum +=
+            c1_duration * (pCS_1->ClipProperties.uiVideoBitrate / 1000)
+            + (t0_bitrate / 1000) * t0_duration / 2
+            + (t2_bitrate / 1000) * t2_duration / 2;
+    }
+
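+    /* Bitrates were accumulated in kbit/s (divided by 1000), presumably to avoid 32-bit
+       overflow of total_bitsum; convert the result back to bit/s */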
+    pC->ewc.uiVideoBitrate = ( total_bitsum / total_duration) * 1000;
+}
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editRegisterExternalCodec(M4VSS3GPP_EditContext pContext,
+ *                                               M4VSS3GPP_codecType   codecType,
+ *                                               M4OSA_Context pCodecInterface,
+ *                                               M4OSA_Void* pUserData)
+ * @brief    Registers an external Video/Audio codec with VSS3GPP
+ * @note This differs from the other external codec registration APIs in order to
+ *      cope with the specific requirements of the OMX codec implementation.
+ *
+ * @param  pContext           (IN) VSS3GPP context
+ * @param  codecType        (IN) Type of codec (MPEG4 ...)
+ * @param  pCodecInterface  (IN) Codec interface
+ * @param  pUserData          (IN) Pointer to user data passed to the external codec
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:       VSS3GPP is not in an appropriate state for
+ *                             this function to be called
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4VSS3GPP_editRegisterExternalCodec( M4VSS3GPP_EditContext pContext,
+                                              M4VSS3GPP_codecType codecType,
+                                              M4OSA_Context pCodecInterface,
+                                              M4OSA_Void *pUserData )
+{
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    if( ( M4OSA_NULL == pContext) || (M4OSA_NULL == pCodecInterface) )
+    {
+        M4OSA_TRACE1_2(
+            "M4VSS3GPP_editRegisterExternalCodec: NULL input parameter; pContext=0x%x,\
+            pCodecInterface=0x%x",
+            pContext, pCodecInterface);
+        return M4ERR_PARAMETER;
+    }
+
+    if( codecType >= M4VSS3GPP_kCodecType_NB )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editRegisterExternalCodec: invalid codec Type; codecType=0x%x",
+            codecType);
+        return M4ERR_PARAMETER;
+    }
+
+    pC->m_codecInterface[codecType] = pCodecInterface;
+    pC->pOMXUserData = pUserData;
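+    /* Note: a single user-data pointer is shared by all codec types; the value
+     * passed in the most recent registration call is the one that is kept. */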
+
+    M4OSA_TRACE3_2(
+        "M4VSS3GPP_editRegisterExternalCodec: pC->m_codecInterface[%d] = 0x%x",
+        codecType, pCodecInterface);
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_editRegisterExternalCodec: pC->pOMXUserData = 0x%x",
+        pUserData);
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_editSubscribeExternalCodecs(M4VSS3GPP_EditContext    pContext)
+ * @brief    Subscribes to previously registered external Video/Audio codec
+ * @note    This API differs from the other external codec registration APIs to
+ *          accommodate the specific requirements of the OMX codec implementation.
+ *
+ * @param  pContext           (IN) VSS3GPP context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:       VSS3GPP is not in an appropriate state for
+ *                             this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_editSubscribeExternalCodecs(
+    M4VSS3GPP_EditContext pContext )
+{
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    if( M4OSA_NULL == pContext )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_editSubscribeExternalCodecs: NULL input parameter; pContext=0x%x",
+            pContext);
+        return M4ERR_PARAMETER;
+    }
+
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_editSubscribeExternalCodecs: &pC->ShellAPI = 0x%x",
+        &pC->ShellAPI);
+    err = M4VSS3GPP_intSubscribeExternalCodecs(pContext,
+        (M4OSA_Context) &pC->ShellAPI);
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_editSubscribeExternalCodecs:\
+        M4VSS3GPP_intSubscribeExternalCodecs returns 0x%x",
+        err);
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intSubscribeExternalCodecs(M4VSS3GPP_EditContext    pContext,
+ *                                                 M4OSA_Context pShellCtxt)
+ * @brief    Subscribes to previously registered external Video/Audio codec
+ * @note    This API differs from the other external codec registration APIs to
+ *          accommodate the specific requirements of the OMX codec implementation.
+ *
+ * @param  pContext           (IN) VSS3GPP context
+ * @param  pShellCtxt       (IN) Media and codec shell context
+ * @return  M4NO_ERROR:         No error
+ * @return  M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ * @return  M4ERR_STATE:       VSS3GPP is not in an appropriate state for
+ *                             this function to be called
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intSubscribeExternalCodecs( M4VSS3GPP_EditContext pContext,
+                                               M4OSA_Context pShellCtxt )
+{
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    M4VSS3GPP_MediaAndCodecCtxt *pShellContext =
+        (M4VSS3GPP_MediaAndCodecCtxt *)pShellCtxt;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    if( ( M4OSA_NULL == pContext) || (M4OSA_NULL == pShellContext) )
+    {
+        M4OSA_TRACE1_2(
+            "M4VSS3GPP_intSubscribeExternalCodecs: NULL input parameter; pContext=0x%x,\
+            pShellContext=0x%x",
+            pContext, pShellContext);
+        return M4ERR_PARAMETER;
+    }
+
+    M4OSA_TRACE3_1("M4VSS3GPP_intSubscribeExternalCodecs: pShellContext=0x%x",
+        pShellContext);
+
+#ifdef M4VSS_SUPPORT_VIDEC_3GP
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kVideoDecMPEG4] )
+    {
+        err = M4VSS3GPP_registerVideoDecoder(pShellContext,
+            M4DECODER_kVideoTypeMPEG4, (M4DECODER_VideoInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoDecMPEG4]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerVideoDecoder(Mpeg4) returned err 0x%x",
+                err);
+            return err;
+        }
+        /** Provide the application user data back to the interface functions. */
+        pShellContext->m_pVideoDecoderUserDataTable[M4DECODER_kVideoTypeMPEG4] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs:\
+             M4VSS3GPP_registerVideoDecoder(Mpeg4) OK: 0x%x",
+            (M4DECODER_VideoInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoDecMPEG4]);
+    }
+
+#endif /* M4VSS_SUPPORT_VIDEC_3GP */
+
+#ifdef M4VSS_SUPPORT_VIDEO_AVC
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kVideoDecH264] )
+    {
+        err = M4VSS3GPP_registerVideoDecoder(pShellContext,
+            M4DECODER_kVideoTypeAVC, (M4DECODER_VideoInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoDecH264]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerVideoDecoder(AVC) returned err 0x%x",
+                err);
+            return err;
+        }
+        /** Provide the application user data back to the interface functions. */
+        pShellContext->m_pVideoDecoderUserDataTable[M4DECODER_kVideoTypeAVC] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs: M4VSS3GPP_registerVideoDecoder(H264) OK: 0x%x",
+            (M4DECODER_VideoInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoDecH264]);
+    }
+
+#endif /* M4VSS_SUPPORT_VIDEO_AVC*/
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kVideoEncMPEG4] )
+    {
+        err = M4VSS3GPP_registerVideoEncoder(pShellContext, M4ENCODER_kMPEG4,
+            (M4ENCODER_GlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoEncMPEG4]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerVideoEncoder(Mpeg4) returned err 0x%x",
+                err);
+            return err;
+        }
+        /** Provide the application user data back to the interface functions. */
+        pShellContext->pVideoEncoderUserDataTable[M4ENCODER_kMPEG4] =
+            pC->pOMXUserData;
+        pShellContext->pVideoEncoderExternalAPITable[M4ENCODER_kMPEG4] =
+            pC->m_codecInterface[M4VSS3GPP_kVideoEncMPEG4];
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs:\
+            M4VSS3GPP_registerVideoEncoder(Mpeg4) OK: 0x%x",
+            (M4ENCODER_GlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoEncMPEG4]);
+    }
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kVideoEncH263] )
+    {
+        err = M4VSS3GPP_registerVideoEncoder(pShellContext, M4ENCODER_kH263,
+            (M4ENCODER_GlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoEncH263]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerVideoEncoder(H263) returned err 0x%x",
+                err);
+            return err;
+        }
+        /** Provide the application user data back to the interface functions. */
+        pShellContext->pVideoEncoderUserDataTable[M4ENCODER_kH263] =
+            pC->pOMXUserData;
+        pShellContext->pVideoEncoderExternalAPITable[M4ENCODER_kH263] =
+            pC->m_codecInterface[M4VSS3GPP_kVideoEncH263];
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs: M4VSS3GPP_registerVideoEncoder(H263) OK: 0x%x",
+            (M4ENCODER_GlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoEncH263]);
+    }
+
+#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AVC
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kVideoEncH264] )
+    {
+        err = M4VSS3GPP_registerVideoEncoder(pShellContext, M4ENCODER_kH264,
+            (M4ENCODER_GlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoEncH264]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerVideoEncoder(H264) returned err 0x%x",
+                err);
+            return err;
+        }
+        /** Provide the application user data back to the interface functions. */
+        pShellContext->pVideoEncoderUserDataTable[M4ENCODER_kH264] =
+            pC->pOMXUserData;
+        pShellContext->pVideoEncoderExternalAPITable[M4ENCODER_kH264] =
+            pC->m_codecInterface[M4VSS3GPP_kVideoEncH264];
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs: M4VSS3GPP_registerVideoEncoder(H264) OK: 0x%x",
+            (M4ENCODER_GlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kVideoEncH264]);
+    }
+
+#endif /* M4VSS_SUPPORT_ENCODER_AVC */
+
+#ifdef M4VSS_SUPPORT_AUDEC_AAC
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kAudioDecAAC] )
+    {
+        err = M4VSS3GPP_registerAudioDecoder(pShellContext, M4AD_kTypeAAC,
+            (M4AD_Interface *)pC->m_codecInterface[M4VSS3GPP_kAudioDecAAC]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerAudioDecoder(AAC) returned err 0x%x",
+                err);
+            return err;
+        }
+        pShellContext->pAudioDecoderUserDataTable[M4AD_kTypeAAC] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs: M4VSS3GPP_registerAudioDecoder(AAC) OK: 0x%x",
+            (M4AD_Interface *)pC->m_codecInterface[M4VSS3GPP_kAudioDecAAC]);
+    }
+
+#endif /* M4VSS_SUPPORT_AUDEC_AAC*/
+
+#ifdef M4VSS_SUPPORT_AUDEC_AMRNB
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kAudioDecAMRNB] )
+    {
+        err = M4VSS3GPP_registerAudioDecoder(pShellContext, M4AD_kTypeAMRNB,
+            (M4AD_Interface
+            *)pC->m_codecInterface[M4VSS3GPP_kAudioDecAMRNB]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerAudioDecoder(AMRNB) returned err 0x%x",
+                err);
+            return err;
+        }
+        pShellContext->pAudioDecoderUserDataTable[M4AD_kTypeAMRNB] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs:\
+            M4VSS3GPP_registerAudioDecoder(AMRNB) OK: 0x%x",
+            (M4AD_Interface *)pC->m_codecInterface[M4VSS3GPP_kAudioDecAMRNB]);
+    }
+
+#endif /* M4VSS_SUPPORT_AUDEC_AMRNB*/
+
+#ifdef M4VSS_SUPPORT_AUDEC_MP3
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kAudioDecMP3] )
+    {
+        err = M4VSS3GPP_registerAudioDecoder(pShellContext, M4AD_kTypeMP3,
+            (M4AD_Interface *)pC->m_codecInterface[M4VSS3GPP_kAudioDecMP3]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerAudioDecoder(MP3) returned err 0x%x",
+                err);
+            return err;
+        }
+        pShellContext->pAudioDecoderUserDataTable[M4AD_kTypeMP3] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs: M4VSS3GPP_registerAudioDecoder(MP3) OK: 0x%x",
+            (M4AD_Interface *)pC->m_codecInterface[M4VSS3GPP_kAudioDecMP3]);
+    }
+
+#endif /* M4VSS_SUPPORT_AUDEC_MP3*/
+
+#ifdef M4VSS_SUPPORT_ENCODER_AAC
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kAudioEncAAC] )
+    {
+        err = M4VSS3GPP_registerAudioEncoder(pShellContext, M4ENCODER_kAAC,
+            (M4ENCODER_AudioGlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kAudioEncAAC]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerAudioEncoder(AAC) returned err 0x%x",
+                err);
+            return err;
+        }
+        pShellContext->pAudioEncoderUserDataTable[M4ENCODER_kAAC] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs: M4VSS3GPP_registerAudioEncoder(AAC) OK: 0x%x",
+            (M4ENCODER_AudioGlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kAudioEncAAC]);
+    }
+
+#endif /* M4VSS_SUPPORT_ENCODER_AAC*/
+
+#ifdef M4VSS_SUPPORT_ENCODER_AMR
+
+    if( M4OSA_NULL != pC->m_codecInterface[M4VSS3GPP_kAudioEncAMRNB] )
+    {
+        err = M4VSS3GPP_registerAudioEncoder(pShellContext, M4ENCODER_kAMRNB,
+            (M4ENCODER_AudioGlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kAudioEncAMRNB]);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intSubscribeExternalCodecs:\
+                M4VSS3GPP_registerAudioEncoder(AMRNB) returned err 0x%x",
+                err);
+            return err;
+        }
+        pShellContext->pAudioEncoderUserDataTable[M4ENCODER_kAMRNB] =
+            pC->pOMXUserData;
+        M4OSA_TRACE3_1(
+            "M4VSS3GPP_intSubscribeExternalCodecs:\
+            M4VSS3GPP_registerAudioEncoder(AMRNB) OK: 0x%x",
+            (M4ENCODER_AudioGlobalInterface
+            *)pC->m_codecInterface[M4VSS3GPP_kAudioEncAMRNB]);
+    }
+
+#endif /* M4VSS_SUPPORT_ENCODER_AMR*/
+
+    if( M4OSA_NULL != pC->pOMXUserData )
+    {
+        /* If external OMX codecs have already been registered with the VSS3GPP
+         * internal context and are now being subscribed by the application, set
+         * this boolean to prevent the external codec interfaces from being
+         * unregistered, and thus reset, during the VSS3GPP step function.
+         * External OMX codecs are registered only once by the application, so
+         * the pointers must remain valid throughout the application's life
+         * cycle. */
+
+        pShellContext->bAllowFreeingOMXCodecInterface = M4OSA_FALSE;
+    }
+
+    return M4NO_ERROR;
+}
+#endif /* M4VSS_SUPPORT_OMX_CODECS */
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_EditAudio.c b/libvideoeditor/vss/src/M4VSS3GPP_EditAudio.c
new file mode 100755
index 0000000..b118244
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_EditAudio.c
@@ -0,0 +1,2020 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_EditAudio.c
+ * @brief    Video Studio Service 3GPP edit API implementation.
+ * @note
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our header */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /**< OSAL memory management */
+#include "M4OSA_Debug.h"  /**< OSAL debug management */
+
+#define PWR_FXP_FRACT_MAX            (32768)
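+/* PWR_FXP_FRACT_MAX is fixed-point unity (2^15); fractional gain values used by
+ * the audio effect and transition helpers below are expressed on this scale. */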
+
+/************************************************************************/
+/* Static local functions                                               */
+/************************************************************************/
+static M4OSA_ERR M4VSS3GPP_intCheckAudioMode( M4VSS3GPP_InternalEditContext
+                                             *pC );
+static M4OSA_Void M4VSS3GPP_intCheckAudioEffects( M4VSS3GPP_InternalEditContext
+                                                 *pC, M4OSA_UInt8 uiClipNumber );
+static M4OSA_ERR M4VSS3GPP_intApplyAudioEffect( M4VSS3GPP_InternalEditContext
+                                               *pC, M4OSA_UInt8 uiClip1orClip2,
+                                               M4OSA_Int16 *pPCMdata,
+                                               M4OSA_UInt32 uiPCMsize );
+static M4OSA_ERR M4VSS3GPP_intAudioTransition( M4VSS3GPP_InternalEditContext
+                                              *pC, M4OSA_Int16 *pPCMdata1,
+                                              M4OSA_Int16 *pPCMdata2,
+                                              M4OSA_UInt32 uiPCMsize );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditJumpMP3()
+ * @brief    One step of jumping processing for the MP3 clip.
+ * @note    On one step, the jump of several AU is done
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intEditJumpMP3( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+    M4VSS3GPP_ClipContext *pClip = pC->pC1; /**< shortcut */
+    M4OSA_Int32 JumpCts;
+
+    JumpCts = pClip->iActualAudioBeginCut;
+
+    err = M4VSS3GPP_intClipJumpAudioAt(pClip, &JumpCts);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intEditJumpMP3: M4VSS3GPP_intClipJumpAudioAt(A) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    if( JumpCts >= pClip->iActualAudioBeginCut )
+    {
+        pC->State = M4VSS3GPP_kEditState_MP3;
+
+        /**
+        * Update clip offset with the audio begin cut */
+        pClip->iAoffset = -JumpCts;
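+        /* The negative offset rebases the clip's audio CTS values so that the
+        output timeline starts at zero after the begin cut */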
+
+        /**
+        * The audio is currently in reading mode */
+        pClip->Astatus = M4VSS3GPP_kClipStatus_READ;
+    }
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepMP3()
+ * @brief    One step of audio processing for the MP3 clip
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intEditStepMP3( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+    M4VSS3GPP_ClipContext *pClip = pC->pC1; /**< shortcut */
+
+    /**
+    * Copy the input AU to the output AU */
+    err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
+        pClip->pAudioFramePtr, (M4OSA_UInt32)pClip->uiAudioFrameSize);
+
+    /**
+    * Read the next audio frame */
+    err = M4VSS3GPP_intClipReadNextAudioFrame(pClip);
+
+    if( M4OSA_ERR_IS_ERROR(err) )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intEditStepMP3: READ_WRITE:\
+            M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",    err);
+        return err;
+    }
+    else
+    {
+        /**
+        * Update current time (to=tc+T) */
+        pC->ewc.dATo =
+            ( pClip->iAudioFrameCts + pClip->iAoffset) / pClip->scale_audio;
+
+        if( (M4OSA_Int32)(pClip->iAudioFrameCts / pClip->scale_audio + 0.5)
+            >= pClip->iEndTime )
+        {
+            M4READER_Buffer mp3tagBuffer;
+
+            /**
+            * The duration is better respected if the first AU and last AU are both above
+            the cut time */
+            err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
+                pClip->pAudioFramePtr,
+                (M4OSA_UInt32)pClip->uiAudioFrameSize);
+
+            /* The ID3v1 tag is always at the end of the MP3 file, so we wait
+            until the end of the cutting process before writing the metadata
+            to the output file */
+
+            /* Retrieve the data of the ID3v1 Tag */
+            err = pClip->ShellAPI.m_pReader->m_pFctGetOption(
+                pClip->pReaderContext, M4READER_kOptionID_Mp3Id3v1Tag,
+                (M4OSA_DataOption) &mp3tagBuffer);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intEditStepMP3: M4MP3R_getOption returns 0x%x",
+                    err);
+                return err;
+            }
+
+            /* Write the data of the ID3v1 Tag in the output file */
+            if( 0 != mp3tagBuffer.m_uiBufferSize )
+            {
+                err = pC->pOsaFileWritPtr->writeData(pC->ewc.p3gpWriterContext,
+                    (M4OSA_MemAddr8)mp3tagBuffer.m_pData, mp3tagBuffer.m_uiBufferSize);
+                /**
+                * Free before the error checking anyway */
+                M4OSA_free((M4OSA_MemAddr32)mp3tagBuffer.m_pData);
+
+                /**
+                * Error checking */
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepMP3:\
+                        pOsaFileWritPtr->writeData(ID3v1Tag) returns 0x%x",    err);
+                    return err;
+                }
+
+                mp3tagBuffer.m_uiBufferSize = 0;
+                mp3tagBuffer.m_pData = M4OSA_NULL;
+            }
+
+            /* The End Cut has been reached */
+            err = M4VSS3GPP_intReachedEndOfAudio(pC);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intEditStepMP3 : M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+
+        if( ( M4WAR_NO_MORE_AU == err) && (M4OSA_FALSE
+            == pC->bSupportSilence) ) /**< Reached end of clip */
+        {
+            err = M4VSS3GPP_intReachedEndOfAudio(
+                pC); /**< Clip done, do the next one */
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intEditStepMP3: READ_WRITE:\
+                    M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
+                    err);
+                return err;
+            }
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intEditStepMP3: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepAudio()
+ * @brief    One step of audio processing
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intEditStepAudio( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+
+    M4ENCODER_AudioBuffer pEncInBuffer;  /**< Encoder input buffer for api */
+    M4ENCODER_AudioBuffer pEncOutBuffer; /**< Encoder output buffer for api */
+    M4OSA_Time
+        frameTimeDelta; /**< Duration of the encoded (then written) data */
+    M4OSA_Bool bStopAudio;
+
+    /**
+    * Check if we reached end cut */
+    if( ( pC->ewc.dATo - pC->pC1->iAoffset / pC->pC1->scale_audio + 0.5)
+        >= pC->pC1->iEndTime )
+    {
+        /**
+        * Audio is done for this clip */
+        err = M4VSS3GPP_intReachedEndOfAudio(pC);
+
+        /* RC: to know when a file has been processed */
+        if( M4NO_ERROR != err && err != M4VSS3GPP_WAR_SWITCH_CLIP )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intEditStepAudio: M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
+                err);
+        }
+
+        return err;
+    }
+
+    /**
+    * Check Audio Mode, depending on the current output CTS */
+    err = M4VSS3GPP_intCheckAudioMode(
+        pC); /**< This function changes the pC->Astate variable! */
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intEditStepAudio: M4VSS3GPP_intCheckAudioMode returns 0x%x!",
+            err);
+        return err;
+    }
+
+    M4OSA_TRACE2_3("  AUDIO step : dATo = %f  state = %d  offset = %ld",
+        pC->ewc.dATo, pC->Astate, pC->pC1->iAoffset);
+
+    bStopAudio = M4OSA_FALSE;
+
+    switch( pC->Astate )
+    {
+            /* _________________ */
+            /*|                 |*/
+            /*| READ_WRITE MODE |*/
+            /*|_________________|*/
+
+        case M4VSS3GPP_kEditAudioState_READ_WRITE:
+            {
+                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio READ_WRITE");
+
+                /**
+                * Get the output AU to write into */
+                err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                    &pC->ewc.WriterAudioAU);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio:\
+                        READ_WRITE: pWriterDataFcts->pStartAU returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Compute output audio CTS */
+                pC->ewc.WriterAudioAU.CTS =
+                    pC->pC1->iAudioFrameCts + pC->pC1->iAoffset;
+
+                /**
+                * BZZZ bug fix (read-write case):
+                * Replace the first AMR AU of the stream with a silence AU.
+                * It removes annoying "BZZZ" audio glitch.
+                * It is not needed if there is a begin cut.
+                * It is not needed for the first clip.
+                * Because of another bugfix (2005-03-24), the first AU written may be
+                * the second one, whose CTS is 20. Hence the cts<21 test.
+                * (the BZZZ effect occurs even with the second AU!) */
+                if( ( M4OSA_FALSE == pC->pC1->bFirstAuWritten)
+                    && (0 != pC->uiCurrentClip) && (pC->pC1->iAudioFrameCts
+                    < (pC->ewc.iSilenceFrameDuration + 1)) )
+                {
+                    /**
+                    * Copy a silence AU to the output */
+                    pC->ewc.WriterAudioAU.size = pC->ewc.uiSilenceFrameSize;
+                    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress,
+                        (M4OSA_MemAddr8)pC->ewc.pSilenceFrameData, pC->ewc.uiSilenceFrameSize);
+                    M4OSA_TRACE2_0("A #### silence AU");
+                }
+                else if( (M4OSA_UInt32)pC->pC1->uiAudioFrameSize
+                    < pC->ewc.uiAudioMaxAuSize )
+                {
+                    /**
+                    * Copy the input AU to the output AU */
+                    pC->ewc.WriterAudioAU.size =
+                        (M4OSA_UInt32)pC->pC1->uiAudioFrameSize;
+                    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress,
+                        pC->pC1->pAudioFramePtr, pC->ewc.WriterAudioAU.size);
+                }
+                else
+                {
+                    M4OSA_TRACE1_2(
+                        "M4VSS3GPP_intEditStepAudio: READ_WRITE: AU size greater than MaxAuSize \
+                        (%d>%d)! returning M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE",
+                        pC->pC1->uiAudioFrameSize, pC->ewc.uiAudioMaxAuSize);
+                    return M4VSS3GPP_ERR_INPUT_AUDIO_AU_TOO_LARGE;
+                }
+
+                /**
+                * This boolean is only used to fix the BZZ bug... */
+                pC->pC1->bFirstAuWritten = M4OSA_TRUE;
+
+                M4OSA_TRACE2_2("B ---- write : cts  = %ld [ 0x%x ]",
+                    (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+                    pC->ewc.WriterAudioAU.size);
+
+                /**
+                * Write the AU */
+                err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                    &pC->ewc.WriterAudioAU);
+
+                if( M4NO_ERROR != err )
+                {
+                    /* 11/12/2008 CR 3283 MMS use case for VideoArtist:
+                    the warning M4WAR_WRITER_STOP_REQ is returned when the
+                    targeted output file size is reached. The editing is then
+                    finished and the warning M4VSS3GPP_WAR_EDITING_DONE is
+                    returned. */
+                    if( M4WAR_WRITER_STOP_REQ == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepAudio: File was cut to avoid oversize");
+                        return M4VSS3GPP_WAR_EDITING_DONE;
+                    }
+                    else
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio:\
+                            READ_WRITE: pWriterDataFcts->pProcessAU returns 0x%x!",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Audio is now in read mode (an "if(status!=READ)" check could
+                be done here, but it is omitted for optimization) */
+                pC->pC1->Astatus = M4VSS3GPP_kClipStatus_READ;
+
+                /**
+                * Read the next audio frame */
+                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+                M4OSA_TRACE2_3("C .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                    pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                    pC->pC1->iAoffset / pC->pC1->scale_audio,
+                    pC->pC1->uiAudioFrameSize);
+
+                if( M4OSA_ERR_IS_ERROR(err) )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: READ_WRITE:\
+                        M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
+                        err);
+                    return err;
+                }
+                else
+                {
+                    /**
+                    * Update current time (to=tc+T) */
+                    pC->ewc.dATo = ( pC->pC1->iAudioFrameCts + pC->pC1->iAoffset)
+                        / pC->pC1->scale_audio;
+
+                    if( ( M4WAR_NO_MORE_AU == err)
+                        && (M4OSA_FALSE == pC->bSupportSilence) )
+                    {
+                        /**
+                        * If the output is neither AMR nor AAC (e.g. EVRC),
+                        * we cannot write silence into it, so we simply end here. */
+                        bStopAudio = M4OSA_TRUE;
+                    }
+                }
+            }
+            break;
+
+            /* ____________________ */
+            /*|                    |*/
+            /*| DECODE_ENCODE MODE |*/
+            /*|____________________|*/
+
+        case M4VSS3GPP_kEditAudioState_DECODE_ENCODE:
+            {
+                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio DECODE_ENCODE");
+
+                /**
+                * Get the output AU to write into */
+                err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                    &pC->ewc.WriterAudioAU);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+                        pWriterDataFcts->pStartAU returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /**
+                * If we were reading the clip, we must jump a few AU backward to decode/encode
+                (without writing result) from that point. */
+                if( M4VSS3GPP_kClipStatus_READ == pC->pC1->Astatus )
+                {
+                    M4OSA_Int32 iTargetCts, iCurrentCts;
+
+                    /** Don't try to pre-decode if the clip is at its beginning. */
+                    if( 0 != pC->pC1->iAudioFrameCts )
+                    {
+                        /**
+                        * Jump a few AUs backward */
+                        iCurrentCts = pC->pC1->iAudioFrameCts;
+                        iTargetCts = iCurrentCts - M4VSS3GPP_NB_AU_PREFETCH
+                            * pC->ewc.iSilenceFrameDuration;
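+                        /* Rewind by M4VSS3GPP_NB_AU_PREFETCH AU durations (one
+                        AU is assumed to last one silence-frame duration) so the
+                        decoder is fed enough history before re-reaching the
+                        current position; the warm-up frames are decoded and
+                        encoded below but their output is not written. */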
+
+                        if( iTargetCts < 0 )
+                        {
+                            iTargetCts = 0; /**< Sanity check */
+                        }
+
+                        err = M4VSS3GPP_intClipJumpAudioAt(pC->pC1, &iTargetCts);
+
+                        if( M4NO_ERROR != err )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch:\
+                                M4VSS3GPP_intClipJumpAudioAt returns 0x%x!",
+                                err);
+                            return err;
+                        }
+
+                        err = M4VSS3GPP_intClipReadNextAudioFrame(
+                            pC->pC1); /**< read AU where we jumped */
+
+                        M4OSA_TRACE2_3("D .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                            pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                            pC->pC1->iAoffset / pC->pC1->scale_audio,
+                            pC->pC1->uiAudioFrameSize);
+
+                        if( M4OSA_ERR_IS_ERROR(err) )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch:\
+                                M4VSS3GPP_intClipReadNextAudioFrame(a) returns 0x%x!",
+                                err);
+                            return err;
+                        }
+
+                        /**
+                        * Decode/encode up to the wanted position */
+                        while( pC->pC1->iAudioFrameCts < iCurrentCts )
+                        {
+                            err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
+
+                            if( M4NO_ERROR != err )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch: \
+                                    M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x!",
+                                    err);
+                                return err;
+                            }
+
+                            /* [Mono] or [Stereo interleaved] : all is in one buffer */
+                            pEncInBuffer.pTableBuffer[0] =
+                                pC->pC1->AudioDecBufferOut.m_dataAddress;
+                            pEncInBuffer.pTableBufferSize[0] =
+                                pC->pC1->AudioDecBufferOut.m_bufferSize;
+                            pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+                            pEncInBuffer.pTableBufferSize[1] = 0;
+
+                            /* Time in ms from data size, because it is PCM16 samples */
+                            frameTimeDelta =
+                                pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+                                / pC->ewc.uiNbChannels;
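+                            /* i.e. frameTimeDelta is the number of PCM sample
+                            frames in the decoded buffer: bytes / 2 (16-bit
+                            samples) / channel count */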
+
+                            /**
+                            * Prepare output buffer */
+                            pEncOutBuffer.pTableBuffer[0] =
+                                (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+                            pEncOutBuffer.pTableBufferSize[0] = 0;
+
+                            M4OSA_TRACE2_0("E **** pre-encode");
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+                            /* If an OMX audio decoder is used, the OMX audio
+                            * decoder shell buffers internally and may not
+                            * return a PCM buffer on every decode step. The PCM
+                            * buffer size may therefore be 0; in that case do
+                            * not call the encode step. */
+
+                            if( 0 != pEncInBuffer.pTableBufferSize[0] )
+                            {
+#endif
+                                /**
+                                * Encode the PCM audio */
+
+                                err =
+                                    pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+                                    pC->ewc.pAudioEncCtxt,
+                                    &pEncInBuffer, &pEncOutBuffer);
+
+                                if( ( M4NO_ERROR != err)
+                                    && (M4WAR_NO_MORE_AU != err) )
+                                {
+                                    M4OSA_TRACE1_1(
+                                        "M4VSS3GPP_intEditStepAudio():\
+                                        pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                                        err);
+                                    return err;
+                                }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                            } //if(0 != pEncInBuffer.pTableBufferSize[0])
+
+#endif
+
+                            err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+                            M4OSA_TRACE2_3(
+                                "F .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                                pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                                pC->pC1->iAoffset / pC->pC1->scale_audio,
+                                pC->pC1->uiAudioFrameSize);
+
+                            if( M4OSA_ERR_IS_ERROR(err) )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE-prefetch:\
+                                    M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x!",
+                                    err);
+                                return err;
+                            }
+                        }
+                    }
+
+                    /**
+                    * Audio is now OK for decoding */
+                    pC->pC1->Astatus = M4VSS3GPP_kClipStatus_DECODE;
+                }
+
+                /**
+                * Decode the input audio */
+                err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+                        M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Apply the effect */
+                if( pC->iClip1ActiveEffect >= 0 )
+                {
+                    err = M4VSS3GPP_intApplyAudioEffect(pC, 1, (M4OSA_Int16
+                        *)pC->pC1->AudioDecBufferOut.m_dataAddress,
+                        pC->pC1->AudioDecBufferOut.m_bufferSize);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+                            M4VSS3GPP_intEndAudioEffect returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Compute output audio CTS */
+                pC->ewc.WriterAudioAU.CTS =
+                    pC->pC1->iAudioFrameCts + pC->pC1->iAoffset;
+
+                /* May happen with corrupted input files (whose stts entries are
+                not a multiple of the silence frame duration) */
+                if( pC->ewc.WriterAudioAU.CTS < 0 )
+                {
+                    pC->ewc.WriterAudioAU.CTS = 0;
+                }
+
+                /**
+                * BZZZ bug fix (decode-encode case):
+                * (Yes, the Bzz bug may also occur when we re-encode. It doesn't
+                *  occur at the decode before the encode, but at the playback!)
+                * Replace the first AMR AU of the encoded stream with a silence AU.
+                * It removes annoying "BZZZ" audio glitch.
+                * It is not needed if there is a begin cut.
+                * It is not needed for the first clip.
+                * Because of another bugfix (2005-03-24), the first AU written may be
+                * the second one, whose CTS is 20. Hence the cts<21 test.
+                * (the BZZZ effect occurs even with the second AU!) */
+                if( ( M4OSA_FALSE == pC->pC1->bFirstAuWritten)
+                    && (0 != pC->uiCurrentClip) && (pC->pC1->iAudioFrameCts
+                    < (pC->ewc.iSilenceFrameDuration + 1)) )
+                {
+                    /**
+                    * Copy a silence AMR AU to the output */
+                    pC->ewc.WriterAudioAU.size = pC->ewc.uiSilenceFrameSize;
+                    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress,
+                        (M4OSA_MemAddr8)pC->ewc.pSilenceFrameData, pC->ewc.uiSilenceFrameSize);
+                    M4OSA_TRACE2_0("G #### silence AU");
+                }
+                else
+                {
+                    /**
+                    * Encode the filtered PCM audio directly into the output AU */
+
+                    /* [Mono] or [Stereo interleaved] : all is in one buffer */
+                    pEncInBuffer.pTableBuffer[0] =
+                        pC->pC1->AudioDecBufferOut.m_dataAddress;
+                    pEncInBuffer.pTableBufferSize[0] =
+                        pC->pC1->AudioDecBufferOut.m_bufferSize;
+                    pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+                    pEncInBuffer.pTableBufferSize[1] = 0;
+
+                    /* Time in ms from data size, because it is PCM16 samples */
+                    frameTimeDelta =
+                        pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+                        / pC->ewc.uiNbChannels;
+
+                    /**
+                    * Prepare output buffer */
+                    pEncOutBuffer.pTableBuffer[0] =
+                        (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+                    pEncOutBuffer.pTableBufferSize[0] = 0;
+
+                    M4OSA_TRACE2_0("H ++++ encode AU");
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+                    /* If an OMX audio decoder is used, the OMX audio decoder
+                    * shell buffers internally and may not return a PCM buffer
+                    * on every decode step. The PCM buffer size may therefore
+                    * be 0; in that case do not call the encode step. */
+
+                    if( 0 != pEncInBuffer.pTableBufferSize[0] )
+                    {
+
+#endif
+
+                        /**
+                        * Encode the PCM audio */
+
+                        err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+                            pC->ewc.pAudioEncCtxt,
+                            &pEncInBuffer, &pEncOutBuffer);
+
+                        if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio():\
+                                pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                                err);
+                            return err;
+                        }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    }
+
+#endif
+
+                    /**
+                    * Set AU size */
+
+                    pC->ewc.WriterAudioAU.size = pEncOutBuffer.pTableBufferSize[
+                        0]; /**< Get the size of encoded data */
+                }
+
+                /**
+                * This boolean is only used to fix the BZZ bug... */
+                pC->pC1->bFirstAuWritten = M4OSA_TRUE;
+
+                M4OSA_TRACE2_2("I ---- write : cts  = %ld [ 0x%x ]",
+                    (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+                    pC->ewc.WriterAudioAU.size);
+
+                /**
+                * Write the AU */
+                err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                    &pC->ewc.WriterAudioAU);
+
+                if( M4NO_ERROR != err )
+                {
+                    /* 11/12/2008 CR 3283 MMS use case for VideoArtist:
+                    the warning M4WAR_WRITER_STOP_REQ is returned when the
+                    targeted output file size is reached. The editing is then
+                    finished and the warning M4VSS3GPP_WAR_EDITING_DONE is
+                    returned. */
+                    if( M4WAR_WRITER_STOP_REQ == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepAudio: File was cut to avoid oversize");
+                        return M4VSS3GPP_WAR_EDITING_DONE;
+                    }
+                    else
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+                            pWriterDataFcts->pProcessAU returns 0x%x!",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Read the next audio frame */
+                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+                M4OSA_TRACE2_3("J .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                    pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                    pC->pC1->iAoffset / pC->pC1->scale_audio,
+                    pC->pC1->uiAudioFrameSize);
+
+                if( M4OSA_ERR_IS_ERROR(err) )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: DECODE_ENCODE:\
+                        M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
+                        err);
+                    return err;
+                }
+                else
+                {
+                    /**
+                    * Update current time (to=tc+T) */
+                    pC->ewc.dATo = ( pC->pC1->iAudioFrameCts + pC->pC1->iAoffset)
+                        / pC->pC1->scale_audio;
+
+                    if( ( M4WAR_NO_MORE_AU == err)
+                        && (M4OSA_FALSE == pC->bSupportSilence) )
+                    {
+                        /**
+                        * If the output is neither AMR nor AAC (e.g. EVRC),
+                        * we cannot write silence into it, so we simply end here. */
+                        bStopAudio = M4OSA_TRUE;
+                    }
+                }
+            }
+            break;
+
+            /* _________________ */
+            /*|                 |*/
+            /*| TRANSITION MODE |*/
+            /*|_________________|*/
+
+        case M4VSS3GPP_kEditAudioState_TRANSITION:
+            {
+                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio TRANSITION");
+
+                /**
+                * Get the output AU to write into */
+                err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                    pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                    &pC->ewc.WriterAudioAU);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                        pWriterDataFcts->pStartAU returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /**
+                * If we were reading the clip, we must jump a few AU backward to decode/encode
+                (without writing result) from that point. */
+                if( M4VSS3GPP_kClipStatus_READ == pC->pC1->Astatus )
+                {
+                    M4OSA_Int32 iTargetCts, iCurrentCts;
+
+                    /** Don't try to pre-decode if the clip is at its beginning. */
+                    if( 0 != pC->pC1->iAudioFrameCts )
+                    {
+                        /**
+                        * Jump a few AUs backward */
+                        iCurrentCts = pC->pC1->iAudioFrameCts;
+                        iTargetCts = iCurrentCts - M4VSS3GPP_NB_AU_PREFETCH
+                            * pC->ewc.iSilenceFrameDuration;
+
+                        if( iTargetCts < 0 )
+                        {
+                            iTargetCts = 0; /**< Sanity check */
+                        }
+
+                        err = M4VSS3GPP_intClipJumpAudioAt(pC->pC1, &iTargetCts);
+
+                        if( M4NO_ERROR != err )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
+                                M4VSS3GPP_intClipJumpAudioAt returns 0x%x!",
+                                err);
+                            return err;
+                        }
+
+                        err = M4VSS3GPP_intClipReadNextAudioFrame(
+                            pC->pC1); /**< read AU where we jumped */
+
+                        M4OSA_TRACE2_3("K .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                            pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                            pC->pC1->iAoffset / pC->pC1->scale_audio,
+                            pC->pC1->uiAudioFrameSize);
+
+                        if( M4OSA_ERR_IS_ERROR(err) )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
+                                M4VSS3GPP_intClipReadNextAudioFrame(a) returns 0x%x!",
+                                err);
+                            return err;
+                        }
+
+                        /**
+                        * Decode/encode up to the wanted position */
+                        while( pC->pC1->iAudioFrameCts < iCurrentCts )
+                        {
+                            err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
+
+                            if( M4NO_ERROR != err )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
+                                    M4VSS3GPP_intClipDecodeCurrentAudioFrame returns 0x%x!",
+                                    err);
+                                return err;
+                            }
+
+                            /* [Mono] or [Stereo interleaved] : all is in one buffer */
+                            pEncInBuffer.pTableBuffer[0] =
+                                pC->pC1->AudioDecBufferOut.m_dataAddress;
+                            pEncInBuffer.pTableBufferSize[0] =
+                                pC->pC1->AudioDecBufferOut.m_bufferSize;
+                            pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+                            pEncInBuffer.pTableBufferSize[1] = 0;
+
+                            /* Time in ms from data size, because it is PCM16 samples */
+                            frameTimeDelta =
+                                pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+                                / pC->ewc.uiNbChannels;
+
+                            /**
+                            * Prepare output buffer */
+                            pEncOutBuffer.pTableBuffer[0] =
+                                (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+                            pEncOutBuffer.pTableBufferSize[0] = 0;
+
+                            M4OSA_TRACE2_0("L **** pre-encode");
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+                            /* If an OMX audio decoder is used, the OMX audio
+                            * decoder shell buffers internally and may not
+                            * return a PCM buffer on every decode step. The PCM
+                            * buffer size may therefore be 0; in that case do
+                            * not call the encode step. */
+
+                            if( 0 != pEncInBuffer.pTableBufferSize[0] )
+                            {
+
+#endif
+                                /**
+                                * Encode the PCM audio */
+
+                                err =
+                                    pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+                                    pC->ewc.pAudioEncCtxt,
+                                    &pEncInBuffer, &pEncOutBuffer);
+
+                                if( ( M4NO_ERROR != err)
+                                    && (M4WAR_NO_MORE_AU != err) )
+                                {
+                                    M4OSA_TRACE1_1(
+                                        "M4VSS3GPP_intEditStepAudio():\
+                                        pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                                        err);
+                                    return err;
+                                }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                            }
+
+#endif
+
+                            err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+                            M4OSA_TRACE2_3(
+                                "M .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                                pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                                pC->pC1->iAoffset / pC->pC1->scale_audio,
+                                pC->pC1->uiAudioFrameSize);
+
+                            if( M4OSA_ERR_IS_ERROR(err) )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4VSS3GPP_intEditStepAudio: TRANSITION-prefetch:\
+                                    M4VSS3GPP_intClipReadNextAudioFrame(b) returns 0x%x!",
+                                    err);
+                                return err;
+                            }
+                        }
+                    }
+
+                    /**
+                    * Audio is now OK for decoding */
+                    pC->pC1->Astatus = M4VSS3GPP_kClipStatus_DECODE;
+                }
+
+                /**
+                * Decode the first input audio */
+                err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC1);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                        M4VSS3GPP_intClipDecodeCurrentAudioFrame(C1) returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Decode the second input audio */
+                err = M4VSS3GPP_intClipDecodeCurrentAudioFrame(pC->pC2);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                        M4VSS3GPP_intClipDecodeCurrentAudioFrame(C2) returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /**
+                * Check both clips decoded the same amount of PCM samples */
+                if( pC->pC1->AudioDecBufferOut.m_bufferSize
+                    != pC->pC2->AudioDecBufferOut.m_bufferSize )
+                {
+                    M4OSA_TRACE1_2(
+                        "ERR : AudioTransition: both clips AU must have the same decoded\
+                        PCM size! pc1 size=0x%x, pC2 size = 0x%x",
+                        pC->pC1->AudioDecBufferOut.m_bufferSize,
+                        pC->pC2->AudioDecBufferOut.m_bufferSize);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+                    /* OMX audio decoder used.
+                     * The OMX audio decoder shell does internal buffering and hence does
+                     * not return a PCM buffer for every decode step call, so the PCM buffer
+                     * sizes might be 0 or differ between clip1 and clip2.
+                     * Hence there is no need to return an error in this case. */
+
+                    M4OSA_TRACE1_2(
+                        "M4VSS3GPP_intEditStepAudio: , pc1 AudBuff size=0x%x,\
+                         pC2 AudBuff size = 0x%x",
+                        pC->pC1->AudioDecBufferOut.m_bufferSize,
+                        pC->pC2->AudioDecBufferOut.m_bufferSize);
+
+#else
+
+                    return M4VSS3GPP_ERR_AUDIO_DECODED_PCM_SIZE_ISSUE;
+
+#endif // M4VSS_SUPPORT_OMX_CODECS
+
+                }
+
+                /**
+                * Apply the audio effect on clip1 */
+                if( pC->iClip1ActiveEffect >= 0 )
+                {
+                    err = M4VSS3GPP_intApplyAudioEffect(pC, 1, (M4OSA_Int16
+                        *)pC->pC1->AudioDecBufferOut.m_dataAddress,
+                        pC->pC1->AudioDecBufferOut.m_bufferSize);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                            M4VSS3GPP_intApplyAudioEffect(C1) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Apply the audio effect on clip2 */
+                if( pC->iClip2ActiveEffect >= 0 )
+                {
+                    err = M4VSS3GPP_intApplyAudioEffect(pC, 2, (M4OSA_Int16
+                        *)pC->pC2->AudioDecBufferOut.m_dataAddress,
+                        pC->pC2->AudioDecBufferOut.m_bufferSize);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                            M4VSS3GPP_intApplyAudioEffect(C2) returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Apply the transition effect */
+                err = M4VSS3GPP_intAudioTransition(pC,
+                    (M4OSA_Int16 *)pC->pC1->AudioDecBufferOut.m_dataAddress,
+                    (M4OSA_Int16 *)pC->pC2->AudioDecBufferOut.m_dataAddress,
+                    pC->pC1->AudioDecBufferOut.m_bufferSize);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                        M4VSS3GPP_intAudioTransition returns 0x%x",
+                        err);
+                    return err;
+                }
+
+                /* [Mono] or [Stereo interleaved] : all is in one buffer */
+                pEncInBuffer.pTableBuffer[0] =
+                    pC->pC1->AudioDecBufferOut.m_dataAddress;
+                pEncInBuffer.pTableBufferSize[0] =
+                    pC->pC1->AudioDecBufferOut.m_bufferSize;
+                pEncInBuffer.pTableBuffer[1] = M4OSA_NULL;
+                pEncInBuffer.pTableBufferSize[1] = 0;
+
+                /* Frame duration derived from the data size, since these are PCM16 samples */
+                frameTimeDelta = pEncInBuffer.pTableBufferSize[0] / sizeof(short)
+                    / pC->ewc.uiNbChannels;
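+                /* Worked example (illustrative, assuming mono AMR-NB PCM at an 8 kHz
+                 * timescale): a 320-byte decoded buffer gives 320 / sizeof(short) / 1
+                 * = 160 samples, i.e. one 20 ms frame, which is the amount added to
+                 * WriterAudioAU.CTS below. */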
+
+                /**
+                * Prepare output buffer */
+                pEncOutBuffer.pTableBuffer[0] =
+                    (M4OSA_MemAddr8)pC->ewc.WriterAudioAU.dataAddress;
+                pEncOutBuffer.pTableBufferSize[0] = 0;
+
+                M4OSA_TRACE2_0("N **** blend AUs");
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+                /* OMX audio decoder used.
+                 * The OMX audio decoder shell does internal buffering and hence does not
+                 * return a PCM buffer for every decode step call, so the PCM buffer size
+                 * might be 0. In that case do not call the encode step. */
+
+                if( 0 != pEncInBuffer.pTableBufferSize[0] )
+                {
+
+#endif
+
+                    /**
+                    * Encode the PCM audio */
+
+                    err = pC->ShellAPI.pAudioEncoderGlobalFcts->pFctStep(
+                        pC->ewc.pAudioEncCtxt, &pEncInBuffer, &pEncOutBuffer);
+
+                    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio():\
+                            pAudioEncoderGlobalFcts->pFctStep returns 0x%x",
+                            err);
+                        return err;
+                    }
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                }
+
+#endif
+
+                /**
+                * Set AU cts and size */
+
+                pC->ewc.WriterAudioAU.size =
+                    pEncOutBuffer.pTableBufferSize[0]; /**< Get the size of encoded data */
+                pC->ewc.WriterAudioAU.CTS += frameTimeDelta;
+
+                    M4OSA_TRACE2_2("O ---- write : cts  = %ld [ 0x%x ]",
+                        (M4OSA_Int32)(pC->ewc.WriterAudioAU.CTS / pC->ewc.scale_audio),
+                        pC->ewc.WriterAudioAU.size);
+
+                    /**
+                    * Write the AU */
+                    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+                        pC->ewc.p3gpWriterContext, M4VSS3GPP_WRITER_AUDIO_STREAM_ID,
+                        &pC->ewc.WriterAudioAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        /* 11/12/2008 CR 3283 MMS use case for VideoArtist:
+                         * the warning M4WAR_WRITER_STOP_REQ is returned when the targeted
+                         * output file size is reached. The editing is then finished and
+                         * the warning M4VSS3GPP_WAR_EDITING_DONE is returned. */
+                        if( M4WAR_WRITER_STOP_REQ == err )
+                        {
+                            M4OSA_TRACE1_0(
+                                "M4VSS3GPP_intEditStepAudio: File was cut to avoid oversize");
+                            return M4VSS3GPP_WAR_EDITING_DONE;
+                        }
+                        else
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                                pWriterDataFcts->pProcessAU returns 0x%x!",
+                                err);
+                            return err;
+                        }
+                    }
+
+                    /**
+                    * Read the next audio frame */
+                    err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC1);
+
+                    M4OSA_TRACE2_3("P .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                        pC->pC1->iAudioFrameCts / pC->pC1->scale_audio,
+                        pC->pC1->iAoffset / pC->pC1->scale_audio,
+                        pC->pC1->uiAudioFrameSize);
+
+                    if( M4OSA_ERR_IS_ERROR(err) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                            M4VSS3GPP_intClipReadNextAudioFrame(C1) returns 0x%x!",
+                            err);
+                        return err;
+                    }
+                    else
+                    {
+                        M4OSA_ERR secondaryError;
+
+                        /**
+                        * Update current time (to=tc+T) */
+                        pC->ewc.dATo = ( pC->pC1->iAudioFrameCts + pC->pC1->iAoffset)
+                            / pC->pC1->scale_audio;
+
+                        /**
+                        * Read the next audio frame in the second clip */
+                        secondaryError = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC2);
+
+                        M4OSA_TRACE2_3("Q .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                            pC->pC2->iAudioFrameCts / pC->pC2->scale_audio,
+                            pC->pC2->iAoffset / pC->pC2->scale_audio,
+                            pC->pC2->uiAudioFrameSize);
+
+                        if( M4OSA_ERR_IS_ERROR(secondaryError) )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepAudio: TRANSITION:\
+                                M4VSS3GPP_intClipReadNextAudioFrame(C2) returns 0x%x!",
+                                secondaryError);
+                            return secondaryError;
+                        }
+
+                        if( ( ( M4WAR_NO_MORE_AU == err)
+                            || (M4WAR_NO_MORE_AU == secondaryError))
+                            && (M4OSA_FALSE == pC->bSupportSilence) )
+                        {
+                            /**
+                            * If the output is neither AMR nor AAC (e.g. EVRC),
+                            * we cannot write silence into it, so we simply end here. */
+                            bStopAudio = M4OSA_TRUE;
+                        }
+                    }
+            }
+            break;
+
+            /* ____________ */
+            /*|            |*/
+            /*| ERROR CASE |*/
+            /*|____________|*/
+
+        default:
+
+            M4OSA_TRACE3_1(
+                "M4VSS3GPP_intEditStepAudio: invalid internal state (0x%x), \
+                returning M4VSS3GPP_ERR_INTERNAL_STATE",
+                pC->Astate);
+            return M4VSS3GPP_ERR_INTERNAL_STATE;
+    }
+
+    /**
+    * Check if we are forced to stop audio */
+    if( M4OSA_TRUE == bStopAudio )
+    {
+        /**
+        * Audio is done for this clip */
+        err = M4VSS3GPP_intReachedEndOfAudio(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intEditStepAudio: M4VSS3GPP_intReachedEndOfAudio returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intEditStepAudio: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCheckAudioMode()
+ * @brief    Check which audio process mode we must use, depending on the output CTS.
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intCheckAudioMode( M4VSS3GPP_InternalEditContext
+                                             *pC )
+{
+    M4OSA_ERR err;
+    const M4OSA_Int32 TD = pC->pTransitionList[pC->
+        uiCurrentClip].uiTransitionDuration; /**< Transition duration */
+
+    const M4VSS3GPP_EditAudioState previousAstate = pC->Astate;
+
+    /**
+    * Check if Clip1 is on its begin cut, or in its begin effect or end effect zone */
+    M4VSS3GPP_intCheckAudioEffects(pC, 1);
+
+    /**
+    * Check if we are in the transition with next clip */
+    if( ( TD > 0) && ((M4OSA_Int32)(pC->ewc.dATo - pC->pC1->iAoffset
+        / pC->pC1->scale_audio + 0.5) >= (pC->pC1->iEndTime - TD)) )
+    {
+        /**
+        * We are in a transition */
+        pC->Astate = M4VSS3GPP_kEditAudioState_TRANSITION;
+        pC->bTransitionEffect = M4OSA_TRUE;
+
+        /**
+        * Do we enter the transition section ? */
+        if( M4VSS3GPP_kEditAudioState_TRANSITION != previousAstate )
+        {
+            /**
+            * Open second clip for transition, if not yet opened */
+            if( M4OSA_NULL == pC->pC2 )
+            {
+                err = M4VSS3GPP_intOpenClip(pC, &pC->pC2,
+                    &pC->pClipList[pC->uiCurrentClip + 1]);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intCheckAudioMode: M4VSS3GPP_intOpenClip() returns 0x%x!",
+                        err);
+                    return err;
+                }
+
+                /**
+                * In case of short transition and bad luck (...), there may be no video AU
+                * in the transition. In that case, the second clip has not been opened.
+                * So we must update the video offset here. */
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                /**< Add current video output CTS to the clip offset */
+                pC->pC2->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts;
+            }
+
+            /**
+            * Add current audio output CTS to the clip offset
+            * (video offset has already been set when doing the video transition) */
+            pC->pC2->iAoffset +=
+                (M4OSA_UInt32)(pC->ewc.dATo * pC->ewc.scale_audio + 0.5);
+
+            /**
+            * 2005-03-24: bug fix for audio-video synchronization:
+            * each assembly can introduce a desynchronization of up to one audio AU
+            * duration, which becomes audible when many clips are assembled.
+            * This fix resynchronizes the audio track whenever the delta grows larger
+            * than one audio AU duration: we step one AU in the second clip and adjust
+            * the audio offset accordingly. */
+            if( ( pC->pC2->iAoffset
+                - (M4OSA_Int32)(pC->pC2->iVoffset *pC->pC2->scale_audio + 0.5))
+                    > pC->ewc.iSilenceFrameDuration )
+            {
+                /**
+                * Advance one AMR frame */
+                err = M4VSS3GPP_intClipReadNextAudioFrame(pC->pC2);
+
+                M4OSA_TRACE2_3("Z .... read  : cts  = %.0f + %.0f [ 0x%x ]",
+                    pC->pC2->iAudioFrameCts / pC->pC2->scale_audio,
+                    pC->pC2->iAoffset / pC->pC2->scale_audio,
+                    pC->pC2->uiAudioFrameSize);
+
+                if( M4OSA_ERR_IS_ERROR(err) )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intCheckAudioMode:\
+                        M4VSS3GPP_intClipReadNextAudioFrame returns 0x%x!",
+                        err);
+                    return err;
+                }
+                /**
+                * Update audio offset accordingly*/
+                pC->pC2->iAoffset -= pC->ewc.iSilenceFrameDuration;
+            }
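+            /* Illustrative sketch, assuming AMR-NB where iSilenceFrameDuration
+             * corresponds to one 20 ms frame (160 ticks at an 8 kHz timescale):
+             * if the audio offset ends up, say, 200 ticks ahead of the video offset,
+             * one audio AU of clip2 is consumed and 160 is subtracted from iAoffset,
+             * bringing the audio/video drift back below one AU duration. */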
+        }
+
+        /**
+        * Check begin and end effects for clip2 */
+        M4VSS3GPP_intCheckAudioEffects(pC, 2);
+    }
+    else
+    {
+        /**
+        * We are not in a transition */
+        pC->bTransitionEffect = M4OSA_FALSE;
+
+        /**
+        * Check if current mode is Read/Write or Decode/Encode */
+        if( pC->iClip1ActiveEffect >= 0 )
+        {
+            pC->Astate = M4VSS3GPP_kEditAudioState_DECODE_ENCODE;
+        }
+        else
+        {
+            pC->Astate = M4VSS3GPP_kEditAudioState_READ_WRITE;
+        }
+    }
+
+    /**
+    * Check if we create/destroy an encoder */
+    if( ( M4VSS3GPP_kEditAudioState_READ_WRITE == previousAstate)
+        && /**< read mode */
+        (M4VSS3GPP_kEditAudioState_READ_WRITE != pC->Astate) ) /**< encode mode */
+    {
+        M4OSA_UInt32 uiAudioBitrate;
+
+        /* Compute max bitrate depending on input files bitrates and transitions */
+        if( pC->Astate == M4VSS3GPP_kEditAudioState_TRANSITION )
+        {
+            /* Max of the two blended files */
+            if( pC->pC1->pSettings->ClipProperties.uiAudioBitrate
+                > pC->pC2->pSettings->ClipProperties.uiAudioBitrate )
+                uiAudioBitrate =
+                pC->pC1->pSettings->ClipProperties.uiAudioBitrate;
+            else
+                uiAudioBitrate =
+                pC->pC2->pSettings->ClipProperties.uiAudioBitrate;
+        }
+        else
+        {
+            /* Same as input file */
+            uiAudioBitrate = pC->pC1->pSettings->ClipProperties.uiAudioBitrate;
+        }
+
+        /**
+        * Create the encoder */
+        err = M4VSS3GPP_intCreateAudioEncoder(&pC->ewc, &pC->ShellAPI,
+            uiAudioBitrate);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCheckAudioMode: M4VSS3GPP_intResetAudioEncoder() returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intCheckAudioMode(): returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intCheckAudioEffects()
+ * @brief    Check which audio effect must be applied at the current time
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intCheckAudioEffects( M4VSS3GPP_InternalEditContext
+                                                 *pC, M4OSA_UInt8 uiClipNumber )
+{
+    M4OSA_UInt8 uiClipIndex;
+    M4OSA_UInt8 uiFxIndex;
+    M4VSS3GPP_ClipContext *pClip;
+    M4VSS3GPP_EffectSettings *pFx;
+    M4OSA_Int32 BC, EC;
+    M4OSA_Int8 *piClipActiveEffect;
+    M4OSA_Int32 t;
+
+    if( 1 == uiClipNumber )
+    {
+        uiClipIndex = pC->uiCurrentClip;
+        pClip = pC->pC1;
+        piClipActiveEffect = &(pC->iClip1ActiveEffect);
+    }
+    else /**< (2 == uiClipNumber) */
+    {
+        uiClipIndex = pC->uiCurrentClip + 1;
+        pClip = pC->pC2;
+        piClipActiveEffect = &(pC->iClip2ActiveEffect);
+    }
+
+    /**
+    * Shortcuts for code readability */
+    BC = pClip->iActualAudioBeginCut;
+    EC = pClip->iEndTime;
+
+    /**
+    * Change the absolute time to clip-related time.
+    * RC version:
+    *   t = (M4OSA_Int32)(pC->ewc.dATo - pClip->iAoffset/pClip->scale_audio + 0.5); */
+    t = (M4OSA_Int32)(pC->ewc.dATo /*- pClip->iAoffset/pClip->scale_audio*/
+        + 0.5); /**< rounding */
+
+    /**
+    * Default: no effect active */
+    *piClipActiveEffect = -1;
+
+    /**
+    * Check the three effects */
+    // RC    for (uiFxIndex=0; uiFxIndex<pC->pClipList[uiClipIndex].nbEffects; uiFxIndex++)
+    for ( uiFxIndex = 0; uiFxIndex < pC->nbEffects; uiFxIndex++ )
+    {
+        /** Shortcut, reverse order because of priority between effects
+        ( EndEffect always clean ) */
+        pFx = &(pC->pEffectsList[pC->nbEffects - 1 - uiFxIndex]);
+
+        if( M4VSS3GPP_kAudioEffectType_None != pFx->AudioEffectType )
+        {
+            /**
+            * Check whether this audio effect is active at the current time */
+            if( ( t >= (M4OSA_Int32)(/*BC +*/pFx->uiStartTime))
+                && /**< Are we after the start time of the effect? */
+                (t < (M4OSA_Int32)(/*BC +*/pFx->uiStartTime + pFx->
+                uiDuration)) ) /**< Are we into the effect duration? */
+            {
+                /**
+                * Set the active effect */
+                *piClipActiveEffect = pC->nbEffects - 1 - uiFxIndex;
+
+                /**
+                * The first effect has the highest priority, then the second one,
+                * then the third one.
+                * Hence, as soon as we find an active effect, we can get out of this loop */
+                uiFxIndex = pC->nbEffects; /** get out of the for loop */
+            }
+            /**
+            * Bugfix: the duration of the end effect has been set according to the
+            * announced clip duration. If the announced duration is smaller than the
+            * real one, the end effect would not be applied at the very end of the clip.
+            * To solve this issue we force the end effect. */
+#if 0
+
+            else if( ( M4VSS3GPP_kEffectKind_End == pFx->EffectKind)
+                && (t >= (M4OSA_Int32)(BC + pFx->uiStartTime)) )
+            {
+                /**
+                * Set the active effect */
+                *piClipActiveEffect =
+                    pC->pClipList[uiClipIndex].nbEffects - 1 - uiFxIndex;
+
+                /**
+                * The third effect has the highest priority, then the second one,
+                   then the first one.
+                * Hence, as soon as we found an active effect, we can get out of this loop */
+                uiFxIndex = pC->
+                    pClipList[
+                        uiClipIndex].nbEffects; /** get out of the for loop */
+            }
+
+#endif                                                    /* RC */
+
+        }
+    }
+
+    return;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intApplyAudioEffect()
+ * @brief    Apply audio effect to pPCMdata
+ * @param   pC            (IN/OUT) Internal edit context
+ * @param   uiClip1orClip2    (IN/OUT) 1 for first clip, 2 for second clip
+ * @param    pPCMdata    (IN/OUT) Input and Output PCM audio data
+ * @param    uiPCMsize    (IN)     Size of pPCMdata
+ * @return    M4NO_ERROR:                        No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intApplyAudioEffect( M4VSS3GPP_InternalEditContext
+                                               *pC, M4OSA_UInt8 uiClip1orClip2,
+                                               M4OSA_Int16 *pPCMdata,
+                                               M4OSA_UInt32 uiPCMsize )
+{
+    M4VSS3GPP_ClipContext *pClip;
+    M4VSS3GPP_ClipSettings *pClipSettings;
+    M4VSS3GPP_EffectSettings *pFx;
+    M4OSA_Int32
+        i32sample; /**< we will cast each Int16 sample into this Int32 variable */
+    M4OSA_Int32 iPos;
+    M4OSA_Int32 iDur;
+
+    M4OSA_DEBUG_IF2(( 1 != uiClip1orClip2) && (2 != uiClip1orClip2),
+        M4ERR_PARAMETER,
+        "M4VSS3GPP_intApplyAudioEffect: uiClip1orClip2 invalid");
+
+    if( 1 == uiClip1orClip2 )
+    {
+        pClip = pC->pC1;
+        pClipSettings = &(pC->pClipList[pC->uiCurrentClip]); /**< Shortcut to the clip settings */
+        // RC        pFx = &(pClipSettings->Effects[pC->iClip1ActiveEffect]);/**< Get a shortcut
+        //                                                                to the active effect */
+        pFx = &(pC->pEffectsList[pC->iClip1ActiveEffect]); /**< Shortcut to the active effect */
+        M4OSA_DEBUG_IF2(( pC->iClip1ActiveEffect < 0)
+            || (pC->iClip1ActiveEffect > 2), M4ERR_PARAMETER,
+            "M4VSS3GPP_intApplyAudioEffect: iClip1ActiveEffect invalid");
+    }
+    else /**< if (2==uiClip1orClip2) */
+    {
+        pClip = pC->pC2;
+        pClipSettings = &(pC->pClipList[pC->uiCurrentClip + 1]); /**< Shortcut to the clip settings */
+        // RC        pFx = &(pClipSettings->Effects[pC->iClip2ActiveEffect]);/**< Get a shortcut
+        //                                                                to the active effect */
+        pFx = &(pC->pEffectsList[pC->iClip2ActiveEffect]); /**< Shortcut to the active effect */
+        M4OSA_DEBUG_IF2(( pC->iClip2ActiveEffect < 0)
+            || (pC->iClip2ActiveEffect > 2), M4ERR_PARAMETER,
+            "M4VSS3GPP_intApplyAudioEffect: iClip2ActiveEffect invalid");
+    }
+
+    iDur = (M4OSA_Int32)pFx->uiDuration;
+
+    /**
+    * Compute how far from the beginning of the effect we are, in clip-base time.
+    * It is done with integers because the offset and begin cut have been rounded already. */
+    iPos =
+        (M4OSA_Int32)(pC->ewc.dATo + 0.5 - pClip->iAoffset / pClip->scale_audio)
+        - pClip->iActualAudioBeginCut - pFx->uiStartTime;
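+    /* Illustrative example: if the output audio time corresponds to 5400 ms inside the
+     * clip, the actual audio begin cut is 0 and the effect starts at 5000 ms with a
+     * 1000 ms duration, then iPos = 400 and iDur = 1000, i.e. the effect is 40% done. */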
+
+    /**
+    * Sanity check */
+    if( iPos > iDur )
+    {
+        iPos = iDur;
+    }
+    else if( iPos < 0 )
+    {
+        iPos = 0;
+    }
+
+    /**
+    * At this point, iPos is the effect progress, in a 0 to iDur base */
+    switch( pFx->AudioEffectType )
+    {
+        case M4VSS3GPP_kAudioEffectType_FadeIn:
+
+            /**
+            * Original samples are signed 16 bits.
+            * We convert them to signed 32 bits and multiply them by iPos,
+            * so we must ensure that iPos is not higher than the 16-bit maximum.
+            * The maximum value of iPos is iDur, so we test iDur. */
+            while( iDur > PWR_FXP_FRACT_MAX )
+            {
+                iDur >>= 2; /**< dividing by 2 would be more logical (instead of 4),
+                            but we have enough dynamic range */
+                iPos >>= 2; /**< idem */
+            }
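+            /* Sketch of the fade-in ramp (illustrative values): the gain applied below
+             * is iPos/iDur, i.e. 0 at the start of the effect and 1 at its end. Halfway
+             * through a 2000 ms fade (iPos = 1000, iDur = 2000) each sample is scaled
+             * by 0.5. The >>2 scaling above is only there to keep sample * iPos within
+             * the 32-bit range; the iPos/iDur ratio, and thus the gain, is preserved
+             * up to rounding. */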
+
+            /**
+            * From buffer size (bytes) to number of samples (int16): divide by two */
+            uiPCMsize >>= 1;
+
+            /**
+            * Loop on samples */
+            while( uiPCMsize-- > 0 ) /**< decrementing to optimize */
+            {
+                i32sample = *pPCMdata;
+                i32sample *= iPos;
+                i32sample /= iDur;
+                *pPCMdata++ = (M4OSA_Int16)i32sample;
+            }
+
+            break;
+
+        case M4VSS3GPP_kAudioEffectType_FadeOut:
+
+            /**
+            * switch from 0->Dur to Dur->0 in order to do fadeOUT instead of fadeIN */
+            iPos = iDur - iPos;
+
+            /**
+            * Original samples are signed 16 bits.
+            * We convert them to signed 32 bits and multiply them by iPos,
+            * so we must ensure that iPos is not higher than the 16-bit maximum.
+            * The maximum value of iPos is iDur, so we test iDur. */
+            while( iDur > PWR_FXP_FRACT_MAX )
+            {
+                iDur >>= 2; /**< dividing by 2 would be more logical (instead of 4),
+                            but we have enough dynamic range */
+                iPos >>= 2; /**< idem */
+            }
+
+            /**
+            * From buffer size (bytes) to number of samples (int16): divide by two */
+            uiPCMsize >>= 1;
+
+            /**
+            * Loop on samples, apply the fade factor on each */
+            while( uiPCMsize-- > 0 ) /**< decrementing counter to optimize */
+            {
+                i32sample = *pPCMdata;
+                i32sample *= iPos;
+                i32sample /= iDur;
+                *pPCMdata++ = (M4OSA_Int16)i32sample;
+            }
+
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intApplyAudioEffect: unknown audio effect type (0x%x),\
+                returning M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE",
+                pFx->AudioEffectType);
+            return M4VSS3GPP_ERR_INVALID_AUDIO_EFFECT_TYPE;
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intApplyAudioEffect: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAudioTransition()
+ * @brief    Apply the transition effect to two PCM buffers
+ * @note    The result of the transition is put in the first buffer.
+ *          I know it's not beautiful, but it fits my current needs, and it's efficient!
+ *          So why bother with a third output buffer?
+ * @param   pC            (IN/OUT) Internal edit context
+ * @param    pPCMdata1    (IN/OUT) First input and Output PCM audio data
+ * @param    pPCMdata2    (IN) Second input PCM audio data
+ * @param    uiPCMsize    (IN) Size of both PCM buffers
+ * @return    M4NO_ERROR:                        No error
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAudioTransition( M4VSS3GPP_InternalEditContext
+                                              *pC, M4OSA_Int16 *pPCMdata1,
+                                              M4OSA_Int16 *pPCMdata2,
+                                              M4OSA_UInt32 uiPCMsize )
+{
+    M4OSA_Int32 i32sample1,
+        i32sample2; /**< we will cast each Int16 sample into this Int32 variable */
+    M4OSA_Int32 iPos1, iPos2;
+    M4OSA_Int32 iDur = (M4OSA_Int32)pC->
+        pTransitionList[pC->uiCurrentClip].uiTransitionDuration;
+
+    /**
+    * Compute how far from the end cut we are, in clip-base time.
+    * It is done with integers because the offset and begin cut have been rounded already. */
+    iPos1 = pC->pC1->iEndTime - (M4OSA_Int32)(pC->ewc.dATo
+        + 0.5 - pC->pC1->iAoffset / pC->pC1->scale_audio);
+
+    /**
+    * Sanity check */
+    if( iPos1 > iDur )
+    {
+        iPos1 = iDur;
+    }
+    else if( iPos1 < 0 )
+    {
+        iPos1 = 0;
+    }
+
+    /**
+    * Position of second clip in the transition */
+    iPos2 = iDur - iPos1;
+
+    /**
+    * At this point, iPos2 is the transition progress, in a 0 to iDur base.
+    * iPos1 is the transition progress, in an iDur to 0 base. */
+    switch( pC->pTransitionList[pC->uiCurrentClip].AudioTransitionType )
+    {
+        case M4VSS3GPP_kAudioTransitionType_CrossFade:
+
+            /**
+            * Original samples are signed 16 bits.
+            * We convert them to signed 32 bits and multiply them by iPos1/iPos2,
+            * so we must ensure these factors are not higher than the 16-bit maximum.
+            * Their maximum value is iDur, so we test iDur. */
+            while( iDur > PWR_FXP_FRACT_MAX )
+            {
+                iDur >>= 2; /**< dividing by 2 would be more logical (instead of 4),
+                            but we have enough dynamic range */
+                iPos1 >>= 2; /**< idem */
+                iPos2 >>= 2; /**< idem */
+            }
+
+            /**
+            * From buffer size (bytes) to number of samples (int16): divide by two */
+            uiPCMsize >>= 1;
+
+            /**
+            * Loop on samples, apply the fade factor on each */
+            while( uiPCMsize-- > 0 ) /**< decrementing counter to optimize */
+            {
+                i32sample1 = *pPCMdata1; /**< Get clip1 sample */
+                i32sample1 *= iPos1;     /**< multiply by fade numerator */
+                i32sample1 /= iDur;      /**< divide by fade denominator */
+
+                i32sample2 = *pPCMdata2; /**< Get clip2 sample */
+                i32sample2 *= iPos2;     /**< multiply by fade numerator */
+                i32sample2 /= iDur;      /**< divide by fade denominator */
+
+                *pPCMdata1++ = (M4OSA_Int16)(i32sample1
+                    + i32sample2); /**< mix the two samples */
+                pPCMdata2++; /**< don't forget to increment the second buffer */
+            }
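+            /* Illustrative note: the loop above implements a linear cross-fade,
+             * out = s1 * iPos1/iDur + s2 * iPos2/iDur, with iPos1 + iPos2 == iDur,
+             * so the two weights always sum to roughly 1. At the start of the transition
+             * clip1 has weight 1 and clip2 weight 0; at the midpoint both are ~0.5. */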
+            break;
+
+        case M4VSS3GPP_kAudioTransitionType_None:
+            /**
+            * This is a simple, non-optimized version of the None transition:
+            * we just copy the PCM frames */
+            if( iPos1 < (iDur >> 1) ) /**< second half of transition */
+            {
+                /**
+                * Copy the input PCM to the output buffer */
+                M4OSA_memcpy((M4OSA_MemAddr8)pPCMdata1,
+                    (M4OSA_MemAddr8)pPCMdata2, uiPCMsize);
+            }
+            /**
+            * the output must be put in the first buffer.
+            * For the first half of the non-transition it's already the case!
+            * So we have nothing to do here...
+            */
+
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intAudioTransition: unknown transition type (0x%x),\
+                returning M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE",
+                pC->pTransitionList[pC->uiCurrentClip].AudioTransitionType);
+            return M4VSS3GPP_ERR_INVALID_AUDIO_TRANSITION_TYPE;
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAudioTransition: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreateAudioEncoder()
+ * @brief    Reset the audio encoder (Create it if needed)
+ * @note
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intCreateAudioEncoder( M4VSS3GPP_EncodeWriteContext *pC_ewc,
+                                          M4VSS3GPP_MediaAndCodecCtxt *pC_ShellAPI,
+                                          M4OSA_UInt32 uiAudioBitrate )
+{
+    M4OSA_ERR err;
+
+    /**
+    * If an encoder already exists, we destroy it */
+    if( M4OSA_NULL != pC_ewc->pAudioEncCtxt )
+    {
+        err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctClose(
+            pC_ewc->pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intResetAudioEncoder: pAudioEncoderGlobalFcts->pFctClose returns 0x%x",
+                err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctCleanUp(
+            pC_ewc->pAudioEncCtxt);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intResetAudioEncoder:\
+                pAudioEncoderGlobalFcts->pFctCleanUp returns 0x%x",    err);
+            /**< don't return, we still have stuff to free */
+        }
+
+        pC_ewc->pAudioEncCtxt = M4OSA_NULL;
+    }
+
+    /**
+    * Creates a new encoder  */
+    switch( pC_ewc->AudioStreamType )
+    {
+            //EVRC
+            //        case M4SYS_kEVRC:
+            //
+            //            err = M4VSS3GPP_setCurrentAudioEncoder(&pC->ShellAPI,
+            //                                                   pC_ewc->AudioStreamType);
+            //            M4ERR_CHECK_RETURN(err);
+            //
+            //            pC_ewc->AudioEncParams.Format = M4ENCODER_kEVRC;
+            //            pC_ewc->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+            //            pC_ewc->AudioEncParams.ChannelNum = M4ENCODER_kMono;
+            //            pC_ewc->AudioEncParams.Bitrate = M4VSS3GPP_EVRC_DEFAULT_BITRATE;
+            //            break;
+
+        case M4SYS_kAMR:
+
+            err = M4VSS3GPP_setCurrentAudioEncoder(pC_ShellAPI,
+                pC_ewc->AudioStreamType);
+            M4ERR_CHECK_RETURN(err);
+
+            pC_ewc->AudioEncParams.Format = M4ENCODER_kAMRNB;
+            pC_ewc->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+            pC_ewc->AudioEncParams.ChannelNum = M4ENCODER_kMono;
+            pC_ewc->AudioEncParams.Bitrate = M4VSS3GPP_AMR_DEFAULT_BITRATE;
+            pC_ewc->AudioEncParams.SpecifParam.AmrSID = M4ENCODER_kAmrNoSID;
+            break;
+
+        case M4SYS_kAAC:
+
+            err = M4VSS3GPP_setCurrentAudioEncoder(pC_ShellAPI,
+                pC_ewc->AudioStreamType);
+            M4ERR_CHECK_RETURN(err);
+
+            pC_ewc->AudioEncParams.Format = M4ENCODER_kAAC;
+
+            switch( pC_ewc->uiSamplingFrequency )
+            {
+                case 8000:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k8000Hz;
+                    break;
+
+                case 16000:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k16000Hz;
+                    break;
+
+                case 22050:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k22050Hz;
+                    break;
+
+                case 24000:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k24000Hz;
+                    break;
+
+                case 32000:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k32000Hz;
+                    break;
+
+                case 44100:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k44100Hz;
+                    break;
+
+                case 48000:
+                    pC_ewc->AudioEncParams.Frequency = M4ENCODER_k48000Hz;
+                    break;
+
+                default:
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intCreateAudioEncoder: invalid input AAC sampling frequency\
+                        (%d Hz), returning M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED",
+                        pC_ewc->uiSamplingFrequency);
+                    return M4VSS3GPP_ERR_AUDIO_DECODER_INIT_FAILED;
+            }
+            pC_ewc->AudioEncParams.ChannelNum = (pC_ewc->uiNbChannels == 1)
+                ? M4ENCODER_kMono : M4ENCODER_kStereo;
+            pC_ewc->AudioEncParams.SpecifParam.AacParam.Regulation =
+                M4ENCODER_kAacRegulNone; //M4ENCODER_kAacBitReservoir
+            /* unused */
+            pC_ewc->AudioEncParams.SpecifParam.AacParam.bIS = M4OSA_FALSE;
+            pC_ewc->AudioEncParams.SpecifParam.AacParam.bMS = M4OSA_FALSE;
+            pC_ewc->AudioEncParams.SpecifParam.AacParam.bPNS = M4OSA_FALSE;
+            pC_ewc->AudioEncParams.SpecifParam.AacParam.bTNS = M4OSA_FALSE;
+            /* TODO change into highspeed asap */
+            pC_ewc->AudioEncParams.SpecifParam.AacParam.bHighSpeed =
+                M4OSA_FALSE;
+
+            /* Quantize the bitrate (round up to the next supported value) */
+            if( uiAudioBitrate <= 16000 )
+                pC_ewc->AudioEncParams.Bitrate = 16000;
+
+            else if( uiAudioBitrate <= 24000 )
+                pC_ewc->AudioEncParams.Bitrate = 24000;
+
+            else if( uiAudioBitrate <= 32000 )
+                pC_ewc->AudioEncParams.Bitrate = 32000;
+
+            else if( uiAudioBitrate <= 48000 )
+                pC_ewc->AudioEncParams.Bitrate = 48000;
+
+            else if( uiAudioBitrate <= 64000 )
+                pC_ewc->AudioEncParams.Bitrate = 64000;
+
+            else
+                pC_ewc->AudioEncParams.Bitrate = 96000;
+
+            /* Special requirement of our encoder */
+            if( ( pC_ewc->uiNbChannels == 2)
+                && (pC_ewc->AudioEncParams.Bitrate < 32000) )
+                pC_ewc->AudioEncParams.Bitrate = 32000;
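+            /* Example of the quantization above (illustrative): a 20000 bps input is
+             * rounded up to 24000; if the output is stereo, the 32000 bps floor then
+             * raises it to 32000. */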
+
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intResetAudioEncoder: Undefined output audio format (%d),\
+                returning M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT",
+                pC_ewc->AudioStreamType);
+            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_AUDIO_FORMAT;
+    }
+
+    /* Initialise the audio encoder */
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+    M4OSA_TRACE3_1(
+        "M4VSS3GPP_intResetAudioEncoder:\
+        pAudioEncoderGlobalFcts->pFctInit called with userdata 0x%x",
+        pC_ShellAPI->pCurrentAudioEncoderUserData);
+    err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctInit(&pC_ewc->pAudioEncCtxt,
+        pC_ShellAPI->pCurrentAudioEncoderUserData);
+
+#else
+
+    err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctInit(&pC_ewc->pAudioEncCtxt,
+        M4OSA_NULL /* no HW encoder */);
+
+#endif
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intResetAudioEncoder: pAudioEncoderGlobalFcts->pFctInit returns 0x%x",
+            err);
+        return err;
+    }
+
+    /* Open the audio encoder */
+    err = pC_ShellAPI->pAudioEncoderGlobalFcts->pFctOpen(pC_ewc->pAudioEncCtxt,
+        &pC_ewc->AudioEncParams, &pC_ewc->pAudioEncDSI,
+        M4OSA_NULL /* no grabbing */);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intResetAudioEncoder: pAudioEncoderGlobalFcts->pFctOpen returns 0x%x",
+            err);
+        return err;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intResetAudioEncoder: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c b/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
new file mode 100755
index 0000000..270453f
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_EditVideo.c
@@ -0,0 +1,2554 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4VSS3GPP_EditVideo.c
+ * @brief    Video Studio Service 3GPP edit API implementation.
+ * @note
+ ******************************************************************************
+ */
+
+/****************/
+/*** Includes ***/
+/****************/
+
+#include "NXPSW_CompilerSwitches.h"
+/**
+ * Our header */
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_InternalTypes.h"
+#include "M4VSS3GPP_InternalFunctions.h"
+#include "M4VSS3GPP_InternalConfig.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+// Stagefright encoders require the resolution to be a multiple of 16
+#include "M4ENCODER_common.h"
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h" /**< OSAL memory management */
+#include "M4OSA_Debug.h"  /**< OSAL debug management */
+
+/**
+ * component includes */
+#include "M4VFL_transition.h" /**< video effects */
+
+/*for transition behaviour*/
+#include <math.h>
+
+/************************************************************************/
+/* Static local functions                                               */
+/************************************************************************/
+
+static M4OSA_ERR M4VSS3GPP_intCheckVideoMode(
+    M4VSS3GPP_InternalEditContext *pC );
+static M4OSA_Void
+M4VSS3GPP_intCheckVideoEffects( M4VSS3GPP_InternalEditContext *pC,
+                               M4OSA_UInt8 uiClipNumber );
+static M4OSA_ERR
+M4VSS3GPP_intApplyVideoEffect( M4VSS3GPP_InternalEditContext *pC,/*M4OSA_UInt8 uiClip1orClip2,*/
+                              M4VIFI_ImagePlane *pPlaneIn, M4VIFI_ImagePlane *pPlaneOut );
+static M4OSA_ERR
+M4VSS3GPP_intVideoTransition( M4VSS3GPP_InternalEditContext *pC,
+                             M4VIFI_ImagePlane *pPlaneOut );
+
+static M4OSA_Void
+M4VSS3GPP_intUpdateTimeInfo( M4VSS3GPP_InternalEditContext *pC,
+                            M4SYS_AccessUnit *pAU );
+static M4OSA_Void M4VSS3GPP_intSetH263TimeCounter( M4OSA_MemAddr8 pAuDataBuffer,
+                                                  M4OSA_UInt8 uiCts );
+static M4OSA_Void M4VSS3GPP_intSetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
+                                           M4OSA_UInt32 uiCtsSec );
+static M4OSA_Void M4VSS3GPP_intGetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
+                                           M4OSA_UInt32 *pCtsSec );
+static M4OSA_ERR M4VSS3GPP_intAllocateYUV420( M4VIFI_ImagePlane *pPlanes,
+                                             M4OSA_UInt32 uiWidth, M4OSA_UInt32 uiHeight );
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intEditStepVideo()
+ * @brief    One step of video processing
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intEditStepVideo( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+    M4OSA_Int32 iCts, iNextCts;
+    M4ENCODER_FrameMode FrameMode;
+    M4OSA_Bool bSkipFrame;
+    M4OSA_UInt16 offset;
+
+    /**
+    * Check if we reached end cut */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    if ( ((M4OSA_Int32)(pC->ewc.dInputVidCts) - pC->pC1->iVoffset) >= pC->pC1->iEndTime )
+    {
+        /* Re-adjust video to precise cut time */
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+
+        /**
+        * Video is done for this clip */
+        err = M4VSS3GPP_intReachedEndOfVideo(pC);
+
+        /* RC: to know when a file has been processed */
+        if (M4NO_ERROR != err && err != M4VSS3GPP_WAR_SWITCH_CLIP)
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intEditStepVideo: M4VSS3GPP_intReachedEndOfVideo returns 0x%x",
+                err);
+        }
+
+        return err;
+    }
+
+    /* Don't change the states if we are in decodeUpTo() */
+    if ( (M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC1->Vstatus)
+        && (( pC->pC2 == M4OSA_NULL)
+        || (M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC2->Vstatus)) )
+    {
+        /**
+        * Check Video Mode, depending on the current output CTS */
+        err = M4VSS3GPP_intCheckVideoMode(
+            pC); /**< This function changes the pC->Vstate variable! */
+
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intEditStepVideo: M4VSS3GPP_intCheckVideoMode returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+
+    switch( pC->Vstate )
+    {
+        /* _________________ */
+        /*|                 |*/
+        /*| READ_WRITE MODE |*/
+        /*|_________________|*/
+
+        case M4VSS3GPP_kEditVideoState_READ_WRITE:
+        case M4VSS3GPP_kEditVideoState_AFTER_CUT:
+            {
+                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo READ_WRITE");
+
+                bSkipFrame = M4OSA_FALSE;
+
+                /**
+                * If we were decoding the clip, we must jump to make sure
+                * we get to the right position. */
+                if( M4VSS3GPP_kClipStatus_READ != pC->pC1->Vstatus )
+                {
+                    /**
+                    * Jump to target video time (tc = to-T) */
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    iCts = (M4OSA_Int32)(pC->ewc.dInputVidCts) - pC->pC1->iVoffset;
+                    err = pC->pC1->ShellAPI.m_pReader->m_pFctJump(
+                        pC->pC1->pReaderContext,
+                        (M4_StreamHandler *)pC->pC1->pVideoStream, &iCts);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo:\
+                            READ_WRITE: m_pReader->m_pFctJump(V1) returns 0x%x!",
+                            err);
+                        return err;
+                    }
+
+                    err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                        pC->pC1->pReaderContext,
+                        (M4_StreamHandler *)pC->pC1->pVideoStream,
+                        &pC->pC1->VideoAU);
+
+                    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo:\
+                            READ_WRITE: m_pReader->m_pFctGetNextAu returns 0x%x!",
+                            err);
+                        return err;
+                    }
+
+                    M4OSA_TRACE2_3("A .... read  : cts  = %.0f + %ld [ 0x%x ]",
+                        pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
+                        pC->pC1->VideoAU.m_size);
+
+                    /* This frame has already been written in the BEGIN CUT step -> skip it */
+                    if( pC->pC1->VideoAU.m_CTS == iCts
+                        && pC->pC1->iVideoRenderCts >= iCts )
+                    {
+                        bSkipFrame = M4OSA_TRUE;
+                    }
+                }
+
+                /* This frame has already been written in the BEGIN CUT step -> skip it */
+                if( ( pC->Vstate == M4VSS3GPP_kEditVideoState_AFTER_CUT)
+                    && (pC->pC1->VideoAU.m_CTS
+                    + pC->pC1->iVoffset <= pC->ewc.WriterVideoAU.CTS) )
+                {
+                    bSkipFrame = M4OSA_TRUE;
+                }
+
+                /**
+                * Remember the clip reading state */
+                pC->pC1->Vstatus = M4VSS3GPP_kClipStatus_READ;
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                // Rounding is to compensate reader imprecision (m_CTS is actually an integer)
+                iCts = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pC->pC1->iVoffset - 1;
+                iNextCts = iCts + ((M4OSA_Int32)pC->dOutputFrameDuration) + 1;
+                /* Avoid writing a last frame of duration 0 */
+                if( iNextCts > pC->pC1->iEndTime )
+                    iNextCts = pC->pC1->iEndTime;
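+                /* Illustrative example: with dInputVidCts = 1000.0, iVoffset = 0 and a
+                 * 33 ms output frame duration, iCts = 999 and iNextCts = 1033, so an AU
+                 * whose (integer) CTS falls in [999, 1033) is written below; the +/-1
+                 * widening compensates the reader's integer CTS rounding. */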
+
+                /**
+                * If the AU is good to be written, write it, else just skip it */
+                if( ( M4OSA_FALSE == bSkipFrame)
+                    && (( pC->pC1->VideoAU.m_CTS >= iCts)
+                    && (pC->pC1->VideoAU.m_CTS < iNextCts)
+                    && (pC->pC1->VideoAU.m_size > 0)) )
+                {
+                    /**
+                    * Get the output AU to write into */
+                    err = pC->ShellAPI.pWriterDataFcts->pStartAU(
+                        pC->ewc.p3gpWriterContext,
+                        M4VSS3GPP_WRITER_VIDEO_STREAM_ID,
+                        &pC->ewc.WriterVideoAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
+                            pWriterDataFcts->pStartAU(Video) returns 0x%x!",
+                            err);
+                        return err;
+                    }
+
+                    /**
+                    * Copy the input AU to the output AU */
+                    pC->ewc.WriterVideoAU.attribute = pC->pC1->VideoAU.m_attribute;
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    pC->ewc.WriterVideoAU.CTS = (M4OSA_Time)pC->pC1->VideoAU.m_CTS +
+                        (M4OSA_Time)pC->pC1->iVoffset;
+                    pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
+                    offset = 0;
+                    /* For an H.264 stream, skip the first 4 bytes, as they are
+                     * header indicators */
+                    if( pC->pC1->pVideoStream->m_basicProperties.m_streamType
+                        == M4DA_StreamTypeVideoMpeg4Avc )
+                        offset = 4;
+
+                    pC->ewc.WriterVideoAU.size = pC->pC1->VideoAU.m_size - offset;
+                    if( pC->ewc.WriterVideoAU.size > pC->ewc.uiVideoMaxAuSize )
+                    {
+                        M4OSA_TRACE1_2(
+                            "M4VSS3GPP_intEditStepVideo: READ_WRITE: AU size greater than\
+                             MaxAuSize (%d>%d)! returning M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE",
+                            pC->ewc.WriterVideoAU.size, pC->ewc.uiVideoMaxAuSize);
+                        return M4VSS3GPP_ERR_INPUT_VIDEO_AU_TOO_LARGE;
+                    }
+
+                    M4OSA_memcpy((M4OSA_MemAddr8)pC->ewc.WriterVideoAU.dataAddress,
+                        (pC->pC1->VideoAU.m_dataAddress + offset),
+                        (pC->ewc.WriterVideoAU.size));
+
+                    /**
+                    * Update time info for the Counter Time System to be equal to
+                    * the bitstream time */
+                    M4VSS3GPP_intUpdateTimeInfo(pC, &pC->ewc.WriterVideoAU);
+                    M4OSA_TRACE2_2("B ---- write : cts  = %lu [ 0x%x ]",
+                        pC->ewc.WriterVideoAU.CTS, pC->ewc.WriterVideoAU.size);
+
+                    /**
+                    * Write the AU */
+                    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(
+                        pC->ewc.p3gpWriterContext,
+                        M4VSS3GPP_WRITER_VIDEO_STREAM_ID,
+                        &pC->ewc.WriterVideoAU);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        /* The warning M4WAR_WRITER_STOP_REQ is returned when the targeted
+                         * output file size is reached. The editing is then finished and the
+                         * warning M4VSS3GPP_WAR_EDITING_DONE is returned. */
+                        if( M4WAR_WRITER_STOP_REQ == err )
+                        {
+                            M4OSA_TRACE1_0(
+                                "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
+                            return M4VSS3GPP_WAR_EDITING_DONE;
+                        }
+                        else
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
+                                pWriterDataFcts->pProcessAU(Video) returns 0x%x!",
+                                err);
+                            return err;
+                        }
+                    }
+
+                    /**
+                    * Read next AU for next step */
+                    err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                        pC->pC1->pReaderContext,
+                        (M4_StreamHandler *)pC->pC1->pVideoStream,
+                        &pC->pC1->VideoAU);
+
+                    if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
+                            m_pReaderDataIt->m_pFctGetNextAu returns 0x%x!",
+                            err);
+                        return err;
+                    }
+
+                    M4OSA_TRACE2_3("C .... read  : cts  = %.0f + %ld [ 0x%x ]",
+                        pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
+                        pC->pC1->VideoAU.m_size);
+                }
+                else
+                {
+                    /**
+                    * Decide whether to read the next AU or to increment the time */
+                    if( ( pC->pC1->VideoAU.m_size == 0)
+                        || (pC->pC1->VideoAU.m_CTS >= iNextCts) )
+                    {
+                        /* Increment time by the encoding period
+                         * (NO_MORE_AU or reader in advance) */
+                       // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                       pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
+
+                        /* Switch (from AFTER_CUT) to normal mode because time is
+                        no more frozen */
+                        pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+                    }
+                    else
+                    {
+                        /* In other cases (reader late), just let the reader catch up
+                         pC->ewc.dVTo */
+                        err = pC->pC1->ShellAPI.m_pReaderDataIt->m_pFctGetNextAu(
+                            pC->pC1->pReaderContext,
+                            (M4_StreamHandler *)pC->pC1->pVideoStream,
+                            &pC->pC1->VideoAU);
+
+                        if( ( M4NO_ERROR != err) && (M4WAR_NO_MORE_AU != err) )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4VSS3GPP_intEditStepVideo: READ_WRITE:\
+                                m_pReaderDataIt->m_pFctGetNextAu returns 0x%x!",
+                                err);
+                            return err;
+                        }
+
+                        M4OSA_TRACE2_3("D .... read  : cts  = %.0f + %ld [ 0x%x ]",
+                            pC->pC1->VideoAU.m_CTS, pC->pC1->iVoffset,
+                            pC->pC1->VideoAU.m_size);
+                    }
+                }
+            }
+            break;
+
+            /* ____________________ */
+            /*|                    |*/
+            /*| DECODE_ENCODE MODE |*/
+            /*|   BEGIN_CUT MODE   |*/
+            /*|____________________|*/
+
+        case M4VSS3GPP_kEditVideoState_DECODE_ENCODE:
+        case M4VSS3GPP_kEditVideoState_BEGIN_CUT:
+            {
+                M4OSA_TRACE3_0(
+                    "M4VSS3GPP_intEditStepVideo DECODE_ENCODE / BEGIN_CUT");
+
+                /**
+                * Decode the video up to the target time
+                (will jump to the previous RAP if needed) */
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC1, (M4OSA_Int32)pC->ewc.dInputVidCts);
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
+                        M4VSS3GPP_intDecodeVideoUpToCts returns err=0x%x",
+                        err);
+                    return err;
+                }
+
+                /* If the decoding is not completed, do one more step with time frozen */
+                if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus )
+                {
+                    return M4NO_ERROR;
+                }
+
+                /**
+                * Reset the video pre-processing error before calling the encoder */
+                pC->ewc.VppError = M4NO_ERROR;
+
+                M4OSA_TRACE2_0("E ++++ encode AU");
+
+                /**
+                * Encode the frame (rendering, filtering and writing will be done
+                in the encoder callbacks) */
+                if( pC->Vstate == M4VSS3GPP_kEditVideoState_BEGIN_CUT )
+                    FrameMode = M4ENCODER_kIFrame;
+                else
+                    FrameMode = M4ENCODER_kNormalFrame;
+
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctEncode(pC->ewc.pEncContext, M4OSA_NULL,
+                pC->ewc.dInputVidCts, FrameMode);
+                /**
+                * Check if we had a VPP error... */
+                if( M4NO_ERROR != pC->ewc.VppError )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
+                        pVideoEncoderGlobalFcts->pFctEncode, returning VppErr=0x%x",
+                        pC->ewc.VppError);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    if( M4WAR_VIDEORENDERER_NO_NEW_FRAME != pC->ewc.VppError )
+                    {
+#endif //M4VSS_SUPPORT_OMX_CODECS
+
+                        return pC->ewc.VppError;
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    }
+
+#endif                                   //M4VSS_SUPPORT_OMX_CODECS
+
+                }
+                else if( M4NO_ERROR != err ) /**< ...or an encoder error */
+                {
+                    if( ((M4OSA_UInt32)M4ERR_ALLOC) == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
+                            returning M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR");
+                        return M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR;
+                    }
+                    /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
+                    file size is reached
+                    The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE
+                    is returned*/
+                    else if( M4WAR_WRITER_STOP_REQ == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
+                        return M4VSS3GPP_WAR_EDITING_DONE;
+                    }
+                    else
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: DECODE_ENCODE:\
+                            pVideoEncoderGlobalFcts->pFctEncode returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Increment time by the encoding period (for begin cut, do not increment, so as
+                not to lose P-frames) */
+                if( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == pC->Vstate )
+                {
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
+                }
+            }
+            break;
+
+            /* _________________ */
+            /*|                 |*/
+            /*| TRANSITION MODE |*/
+            /*|_________________|*/
+
+        case M4VSS3GPP_kEditVideoState_TRANSITION:
+            {
+                M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo TRANSITION");
+
+                /* Don't decode more than needed */
+                if( !(( M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC1->Vstatus)
+                    && (M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC2->Vstatus)) )
+                {
+                    /**
+                    * Decode the clip1 video up to the target time
+                    (will jump to the previous RAP if needed) */
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC1,
+                         (M4OSA_Int32)pC->ewc.dInputVidCts);
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                            M4VSS3GPP_intDecodeVideoUpToCts(C1) returns err=0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /* If the decoding is not completed, do one more step with time frozen */
+                    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus )
+                    {
+                        return M4NO_ERROR;
+                    }
+                }
+
+                /* Don't decode more than needed */
+                if( !(( M4VSS3GPP_kClipStatus_DECODE_UP_TO != pC->pC2->Vstatus)
+                    && (M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC1->Vstatus)) )
+                {
+                    /**
+                    * Decode the clip2 video up to the target time
+                        (will jump to the previous RAP if needed) */
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    err = M4VSS3GPP_intClipDecodeVideoUpToCts(pC->pC2,
+                         (M4OSA_Int32)pC->ewc.dInputVidCts);
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                            M4VSS3GPP_intDecodeVideoUpToCts(C2) returns err=0x%x",
+                            err);
+                        return err;
+                    }
+
+                    /* If the decoding is not completed, do one more step with time frozen */
+                    if( M4VSS3GPP_kClipStatus_DECODE_UP_TO == pC->pC2->Vstatus )
+                    {
+                        return M4NO_ERROR;
+                    }
+                }
+
+                /**
+                * Reset the video pre-processing error before calling the encoder */
+                pC->ewc.VppError = M4NO_ERROR;
+
+                M4OSA_TRACE2_0("F **** blend AUs");
+
+                /**
+                * Encode the frame (rendering, filtering and writing will be done
+                in the encoder callbacks) */
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctEncode(pC->ewc.pEncContext, M4OSA_NULL,
+                    pC->ewc.dInputVidCts, M4ENCODER_kNormalFrame);
+
+                /**
+                * If encode returns a process frame error, it is likely to be a VPP error */
+                if( M4NO_ERROR != pC->ewc.VppError )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                        pVideoEncoderGlobalFcts->pFctEncode, returning VppErr=0x%x",
+                        pC->ewc.VppError);
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    if( M4WAR_VIDEORENDERER_NO_NEW_FRAME != pC->ewc.VppError )
+                    {
+
+#endif //M4VSS_SUPPORT_OMX_CODECS
+
+                        return pC->ewc.VppError;
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+
+                    }
+
+#endif //M4VSS_SUPPORT_OMX_CODECS
+
+                }
+                else if( M4NO_ERROR != err ) /**< ...or an encoder error */
+                {
+                    if( ((M4OSA_UInt32)M4ERR_ALLOC) == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                            returning M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR");
+                        return M4VSS3GPP_ERR_ENCODER_ACCES_UNIT_ERROR;
+                    }
+
+                    /* the warning M4WAR_WRITER_STOP_REQ is returned when the targeted output
+                     file size is reached
+                    The editing is then finished, the warning M4VSS3GPP_WAR_EDITING_DONE is
+                     returned*/
+                    else if( M4WAR_WRITER_STOP_REQ == err )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4VSS3GPP_intEditStepVideo: File was cut to avoid oversize");
+                        return M4VSS3GPP_WAR_EDITING_DONE;
+                    }
+                    else
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intEditStepVideo: TRANSITION:\
+                            pVideoEncoderGlobalFcts->pFctEncode returns 0x%x",
+                            err);
+                        return err;
+                    }
+                }
+
+                /**
+                * Increment time by the encoding period */
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                pC->ewc.dInputVidCts += pC->dOutputFrameDuration;
+            }
+            break;
+
+            /* ____________ */
+            /*|            |*/
+            /*| ERROR CASE |*/
+            /*|____________|*/
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intEditStepVideo: invalid internal state (0x%x),\
+                returning M4VSS3GPP_ERR_INTERNAL_STATE",
+                pC->Vstate);
+            return M4VSS3GPP_ERR_INTERNAL_STATE;
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intEditStepVideo: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCheckVideoMode()
+ * @brief    Check which video process mode we must use, depending on the output CTS.
+ * @param   pC    (IN/OUT) Internal edit context
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intCheckVideoMode(
+    M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    const M4OSA_Int32  t = (M4OSA_Int32)pC->ewc.dInputVidCts;
+    /**< Transition duration */
+    const M4OSA_Int32 TD = pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration;
+
+    M4OSA_Int32 iTmp;
+
+    const M4VSS3GPP_EditVideoState previousVstate = pC->Vstate;
+
+    /**
+    * Check if Clip1 is on its begin cut, or in an effect zone */
+    M4VSS3GPP_intCheckVideoEffects(pC, 1);
+
+    /**
+    * Check if we are in the transition with next clip */
+    if( ( TD > 0) && (( t - pC->pC1->iVoffset) >= (pC->pC1->iEndTime - TD)) )
+    {
+        /**
+        * We are in a transition */
+        pC->Vstate = M4VSS3GPP_kEditVideoState_TRANSITION;
+        pC->bTransitionEffect = M4OSA_TRUE;
+
+        /**
+        * Open second clip for transition, if not yet opened */
+        if( M4OSA_NULL == pC->pC2 )
+        {
+            err = M4VSS3GPP_intOpenClip(pC, &pC->pC2,
+                &pC->pClipList[pC->uiCurrentClip + 1]);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_editOpenClip returns 0x%x!",
+                    err);
+                return err;
+            }
+
+            /**
+            * Add current video output CTS to the clip offset
+            * (audio output CTS is not yet at the transition, so audio
+            *  offset can't be updated yet). */
+            // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            pC->pC2->iVoffset += (M4OSA_UInt32)pC->ewc.dInputVidCts;
+
+            /**
+            * 2005-03-24: BugFix for audio-video synchro:
+            * Update transition duration due to the actual video transition beginning time.
+            * It will avoid desynchronization when doing the audio transition. */
+           // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            iTmp = ((M4OSA_Int32)pC->ewc.dInputVidCts)\
+             - (pC->pC1->iEndTime - TD + pC->pC1->iVoffset);
+            if (iTmp < (M4OSA_Int32)pC->pTransitionList[pC->uiCurrentClip].uiTransitionDuration)
+            /**< Test in case of a very short transition */
+            {
+                pC->pTransitionList[pC->
+                    uiCurrentClip].uiTransitionDuration -= iTmp;
+
+                /**
+                * Don't forget to also correct the total duration used for the progress bar
+                * (it was computed with the original transition duration). */
+                pC->ewc.iOutputDuration += iTmp;
+            }
+            /**< No "else" here because it's hard to predict the effect of a 0-duration transition... */
+        }
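+        /* Worked example of the correction above (illustrative values only,
+         * not taken from a real clip): with iEndTime = 10000 ms, TD = 2000 ms,
+         * iVoffset = 0 and dInputVidCts = 9000 ms, the transition nominally
+         * starts at 8000 ms, so iTmp = 9000 - 8000 = 1000. The remaining
+         * transition duration becomes 2000 - 1000 = 1000 ms, and
+         * iOutputDuration grows by the same 1000 ms so the progress
+         * computation stays consistent. */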
+
+        /**
+        * Check effects for clip2 */
+        M4VSS3GPP_intCheckVideoEffects(pC, 2);
+    }
+    else
+    {
+        /**
+        * We are not in a transition */
+        pC->bTransitionEffect = M4OSA_FALSE;
+
+        /* If there is an effect we go to decode/encode mode */
+        if ((pC->nbActiveEffects > 0) ||(pC->nbActiveEffects1 > 0))
+        {
+            pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE;
+        }
+        /* We do a begin cut, except if already done (time is not progressing because we want
+        to catch all P-frames after the cut) */
+        else if( M4OSA_TRUE == pC->bClip1AtBeginCut )
+        {
+            if( ( M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate)
+                || (M4VSS3GPP_kEditVideoState_AFTER_CUT == previousVstate) )
+                pC->Vstate = M4VSS3GPP_kEditVideoState_AFTER_CUT;
+            else
+                pC->Vstate = M4VSS3GPP_kEditVideoState_BEGIN_CUT;
+        }
+        /* Else we are in default copy/paste mode */
+        else
+        {
+            if( ( M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate)
+                || (M4VSS3GPP_kEditVideoState_AFTER_CUT == previousVstate) )
+            {
+                pC->Vstate = M4VSS3GPP_kEditVideoState_AFTER_CUT;
+            }
+            else if( pC->bIsMMS == M4OSA_TRUE )
+            {
+                M4OSA_UInt32 currentBitrate;
+                M4OSA_ERR err = M4NO_ERROR;
+
+                /* Do we need to re-encode the video to downgrade the bitrate? */
+                /* Compute the current bitrate of the edited clip */
+                err = pC->pC1->ShellAPI.m_pReader->m_pFctGetOption(
+                    pC->pC1->pReaderContext,
+                    M4READER_kOptionID_Bitrate, &currentBitrate);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intCheckVideoMode:\
+                        Error when getting the bitrate of the edited clip: 0x%x",
+                        err);
+                    return err;
+                }
+
+                /* Remove the audio bitrate (AMR-NB at 12.2 kbps) */
+                currentBitrate -= 12200;
+
+                /* Test if we go into copy/paste mode or into decode/encode mode */
+                if( currentBitrate > pC->uiMMSVideoBitrate )
+                {
+                    pC->Vstate = M4VSS3GPP_kEditVideoState_DECODE_ENCODE;
+                }
+                else
+                {
+                    pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+                }
+            }
+            else
+            {
+                pC->Vstate = M4VSS3GPP_kEditVideoState_READ_WRITE;
+            }
+        }
+    }
+
+    /**
+    * Check if we create an encoder */
+    if( ( ( M4VSS3GPP_kEditVideoState_READ_WRITE == previousVstate)
+        || (M4VSS3GPP_kEditVideoState_AFTER_CUT
+        == previousVstate)) /**< read mode */
+        && (( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == pC->Vstate)
+        || (M4VSS3GPP_kEditVideoState_BEGIN_CUT == pC->Vstate)
+        || (M4VSS3GPP_kEditVideoState_TRANSITION
+        == pC->Vstate)) /**< encode mode */
+        && pC->bIsMMS == M4OSA_FALSE )
+    {
+        /**
+        * Create the encoder */
+        err = M4VSS3GPP_intCreateVideoEncoder(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intCreateVideoEncoder returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+    else if( pC->bIsMMS == M4OSA_TRUE && pC->ewc.pEncContext == M4OSA_NULL )
+    {
+        /**
+        * Create the encoder */
+        err = M4VSS3GPP_intCreateVideoEncoder(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intCreateVideoEncoder returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * When we go from filtering to read/write, we must act like a begin cut,
+    * because the last filtered image may be different than the original image. */
+    else if( ( ( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == previousVstate)
+        || (M4VSS3GPP_kEditVideoState_TRANSITION
+        == previousVstate)) /**< encode mode */
+        && (M4VSS3GPP_kEditVideoState_READ_WRITE == pC->Vstate) /**< read mode */
+        )
+    {
+        pC->Vstate = M4VSS3GPP_kEditVideoState_BEGIN_CUT;
+    }
+
+    /**
+    * Check if we destroy an encoder */
+    else if( ( ( M4VSS3GPP_kEditVideoState_DECODE_ENCODE == previousVstate)
+        || (M4VSS3GPP_kEditVideoState_BEGIN_CUT == previousVstate)
+        || (M4VSS3GPP_kEditVideoState_TRANSITION
+        == previousVstate)) /**< encode mode */
+        && (( M4VSS3GPP_kEditVideoState_READ_WRITE == pC->Vstate)
+        || (M4VSS3GPP_kEditVideoState_AFTER_CUT
+        == pC->Vstate)) /**< read mode */
+        && pC->bIsMMS == M4OSA_FALSE )
+    {
+        /**
+        * Destroy the previously created encoder */
+        err = M4VSS3GPP_intDestroyVideoEncoder(pC);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCheckVideoMode: M4VSS3GPP_intDestroyVideoEncoder returns 0x%x!",
+                err);
+            return err;
+        }
+    }
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4VSS3GPP_intCheckVideoMode: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
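+
+/* Condensed reading of the decision above (illustrative summary only, not part
+ * of the state machine itself):
+ *
+ *   inside the transition window                 -> TRANSITION
+ *   else an effect is active on the frame        -> DECODE_ENCODE
+ *   else clip1 is at its begin cut               -> BEGIN_CUT (then AFTER_CUT)
+ *   else MMS output with a too-high clip bitrate -> DECODE_ENCODE
+ *   else                                         -> READ_WRITE (or AFTER_CUT
+ *                                                   right after a cut)
+ *
+ * The video encoder is created on a read -> encode state change (and, in the
+ * MMS case, created once and never torn down here). A switch from an encode
+ * state back to READ_WRITE is first downgraded to BEGIN_CUT so that the next
+ * written frame is an I-frame; the encoder is destroyed once the edit is back
+ * in a pure read state. */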
+
+/******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intStartAU()
+ * @brief    StartAU writer-like interface used for the VSS 3GPP only
+ * @note
+ * @param    pContext: (IN) It is the VSS 3GPP context in our case
+ * @param    streamID: (IN) Id of the stream to which the Access Unit is related.
+ * @param    pAU:      (IN/OUT) Access Unit to be prepared.
+ * @return    M4NO_ERROR: there is no error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intStartAU( M4WRITER_Context pContext,
+                               M4SYS_StreamID streamID, M4SYS_AccessUnit *pAU )
+{
+    M4OSA_ERR err;
+    M4OSA_UInt32 uiMaxAuSize;
+
+    /**
+    * Given context is actually the VSS3GPP context */
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+
+    /**
+    * Get the output AU to write into */
+    err = pC->ShellAPI.pWriterDataFcts->pStartAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, pAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intStartAU: pWriterDataFcts->pStartAU(Video) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intStartAU: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intProcessAU()
+ * @brief    ProcessAU writer-like interface used for the VSS 3GPP only
+ * @note
+ * @param    pContext: (IN) It is the VSS 3GPP context in our case
+ * @param    streamID: (IN) Id of the stream to which the Access Unit is related.
+ * @param    pAU:      (IN/OUT) Access Unit to be written
+ * @return    M4NO_ERROR: there is no error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intProcessAU( M4WRITER_Context pContext,
+                                 M4SYS_StreamID streamID, M4SYS_AccessUnit *pAU )
+{
+    M4OSA_ERR err;
+
+    /**
+    * Given context is actually the VSS3GPP context */
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+
+    /**
+    * Fix the encoded AU time */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    pC->ewc.dOutputVidCts = pAU->CTS;
+    /**
+    * Update time info for the Counter Time System to be equal to the bit-stream time */
+    M4VSS3GPP_intUpdateTimeInfo(pC, pAU);
+
+    /**
+    * Write the AU */
+    err = pC->ShellAPI.pWriterDataFcts->pProcessAU(pC->ewc.p3gpWriterContext,
+        M4VSS3GPP_WRITER_VIDEO_STREAM_ID, pAU);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intProcessAU: pWriterDataFcts->pProcessAU(Video) returns 0x%x!",
+            err);
+        return err;
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intProcessAU: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
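+
+/* Minimal usage sketch of the two writer-like hooks above, as seen from the
+ * encoder side (illustrative only; the local variable names are made up):
+ *
+ *     M4SYS_AccessUnit au;
+ *     err = M4VSS3GPP_intStartAU(pVssCtxt, M4VSS3GPP_WRITER_VIDEO_STREAM_ID, &au);
+ *     // ... the encoder fills au.dataAddress, au.size and au.CTS ...
+ *     err = M4VSS3GPP_intProcessAU(pVssCtxt, M4VSS3GPP_WRITER_VIDEO_STREAM_ID, &au);
+ *
+ * M4VSS3GPP_intProcessAU copies the encoded CTS into pC->ewc.dOutputVidCts and
+ * patches the bit-stream time info before handing the AU to the real 3GP
+ * writer. */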
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intVPP()
+ * @brief    We implement our own VideoPreProcessing function
+ * @note    It is called by the video encoder
+ * @param    pContext    (IN) VPP context, which actually is the VSS 3GPP context in our case
+ * @param    pPlaneIn    (IN) Input image planes (not used by this implementation)
+ * @param    pPlaneOut    (IN/OUT) Pointer to an array of 3 planes that will contain the output
+ *                                  YUV420 image
+ * @return    M4NO_ERROR:    No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intVPP( M4VPP_Context pContext, M4VIFI_ImagePlane *pPlaneIn,
+                           M4VIFI_ImagePlane *pPlaneOut )
+{
+    M4OSA_ERR err;
+    M4_MediaTime t;
+    M4VIFI_ImagePlane *pTmp = M4OSA_NULL;
+    M4VIFI_ImagePlane pTemp1[3],pTemp2[3];
+    M4OSA_UInt32  i =0;
+    /**
+    * VPP context is actually the VSS3GPP context */
+    M4VSS3GPP_InternalEditContext *pC =
+        (M4VSS3GPP_InternalEditContext *)pContext;
+    pTemp1[0].pac_data = pTemp2[0].pac_data = M4OSA_NULL;
+    /**
+    * Reset VPP error remembered in context */
+    pC->ewc.VppError = M4NO_ERROR;
+
+    /**
+    * At the end of the editing, we may be called when no more clip is loaded.
+    * (because to close the encoder properly it must be stepped once or twice...) */
+    if( M4OSA_NULL == pC->pC1 )
+    {
+        /**
+        * We must fill the input of the encoder with a dummy image, because
+        * encoding noise leads to a huge video AU, and thus a writer buffer overflow. */
+        M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[0].pac_data,
+            pPlaneOut[0].u_stride * pPlaneOut[0].u_height, 0);
+        M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[1].pac_data,
+            pPlaneOut[1].u_stride * pPlaneOut[1].u_height, 0);
+        M4OSA_memset((M4OSA_MemAddr8)pPlaneOut[2].pac_data,
+            pPlaneOut[2].u_stride * pPlaneOut[2].u_height, 0);
+
+        M4OSA_TRACE3_0("M4VSS3GPP_intVPP: returning M4NO_ERROR (abort)");
+        return M4NO_ERROR;
+    }
+
+    /**
+    **************** Transition case ****************/
+    if( M4OSA_TRUE == pC->bTransitionEffect )
+    {
+        if (M4OSA_NULL == pTemp1[0].pac_data)
+        {
+            err = M4VSS3GPP_intAllocateYUV420(pTemp1, pC->ewc.uiVideoWidth,
+                                              pC->ewc.uiVideoHeight);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(1) returns 0x%x, \
+                               returning M4NO_ERROR", err);
+                pC->ewc.VppError = err;
+                return M4NO_ERROR; /**< Return no error to the encoder core
+                                   (else it may leak in some situations...) */
+            }
+        }
+        if (M4OSA_NULL == pTemp2[0].pac_data)
+        {
+            err = M4VSS3GPP_intAllocateYUV420(pTemp2, pC->ewc.uiVideoWidth,
+                                              pC->ewc.uiVideoHeight);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(2) returns 0x%x, \
+                               returning M4NO_ERROR", err);
+                pC->ewc.VppError = err;
+                return M4NO_ERROR; /**< Return no error to the encoder core
+                                  (else it may leak in some situations...) */
+            }
+        }
+        /**
+        * We need two intermediate planes */
+        if( M4OSA_NULL == pC->yuv1[0].pac_data )
+        {
+            err = M4VSS3GPP_intAllocateYUV420(pC->yuv1, pC->ewc.uiVideoWidth,
+                pC->ewc.uiVideoHeight);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(3) returns 0x%x,\
+                    returning M4NO_ERROR",
+                    err);
+                pC->ewc.VppError = err;
+                return
+                    M4NO_ERROR; /**< Return no error to the encoder core
+                                (else it may leak in some situations...) */
+            }
+        }
+
+        if( M4OSA_NULL == pC->yuv2[0].pac_data )
+        {
+            err = M4VSS3GPP_intAllocateYUV420(pC->yuv2, pC->ewc.uiVideoWidth,
+                pC->ewc.uiVideoHeight);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(4) returns 0x%x,\
+                    returning M4NO_ERROR",
+                    err);
+                pC->ewc.VppError = err;
+                return
+                    M4NO_ERROR; /**< Return no error to the encoder core
+                                (else it may leak in some situations...) */
+            }
+        }
+
+        /**
+        * Allocate new temporary plane if needed */
+        if( M4OSA_NULL == pC->yuv3[0].pac_data )
+        {
+            err = M4VSS3GPP_intAllocateYUV420(pC->yuv3, pC->ewc.uiVideoWidth,
+                pC->ewc.uiVideoHeight);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420(5) returns 0x%x,\
+                    returning M4NO_ERROR",
+                    err);
+                pC->ewc.VppError = err;
+                return
+                    M4NO_ERROR; /**< Return no error to the encoder core
+                                (else it may leak in some situations...) */
+            }
+        }
+
+        /**
+        * Compute the time in the clip1 base: t = to - Offset */
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+        t = pC->ewc.dInputVidCts - pC->pC1->iVoffset;
+
+        /**
+        * Render Clip1 */
+        if( pC->pC1->isRenderDup == M4OSA_FALSE )
+        {
+            if(pC->nbActiveEffects > 0)
+            {
+                err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC1->pViDecCtxt,
+                                                                      &t, pTemp1,
+                                                                      M4OSA_TRUE);
+                if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C1) returns 0x%x, \
+                                    returning M4NO_ERROR", err);
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR; /**< Return no error to the encoder core
+                                       (else it may leak in some situations...) */
+                }
+                pC->bIssecondClip = M4OSA_FALSE;
+                err = M4VSS3GPP_intApplyVideoEffect(pC, pTemp1, pC->yuv1);
+                if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intApplyVideoEffect(1) returns 0x%x, \
+                                    returning M4NO_ERROR", err);
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR; /**< Return no error to the encoder core
+                                       (else it may leak in some situations...) */
+                }
+                pC->pC1->lastDecodedPlane = pTemp1;
+            }
+            else
+            {
+                err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC1->pViDecCtxt,
+                                                                      &t, pC->yuv1,
+                                                                      M4OSA_TRUE);
+                if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C1) returns 0x%x, \
+                                    returning M4NO_ERROR", err);
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR; /**< Return no error to the encoder core
+                                      (else it may leak in some situations...) */
+                }
+                pC->pC1->lastDecodedPlane = pC->yuv1;
+            }
+            pC->pC1->iVideoRenderCts = (M4OSA_Int32)t;
+        }
+        else
+        {
+            /* Copy the last decoded clip1 plane into the clip1 blending input
+               (pTmp is not yet assigned at this point, so use pC->yuv1) */
+            pTmp = pC->yuv1;
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[0].pac_data,
+                (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[0].pac_data,
+                (pTmp[0].u_height * pTmp[0].u_width));
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[1].pac_data,
+                (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[1].pac_data,
+                (pTmp[1].u_height * pTmp[1].u_width));
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[2].pac_data,
+                (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[2].pac_data,
+                (pTmp[2].u_height * pTmp[2].u_width));
+            pC->pC1->lastDecodedPlane = pTmp;
+        }
+
+        /**
+        * Compute the time in the clip2 base: t = to - Offset */
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+        t = pC->ewc.dInputVidCts - pC->pC2->iVoffset;
+        /**
+        * Render Clip2 */
+        if( pC->pC2->isRenderDup == M4OSA_FALSE )
+        {
+            if(pC->nbActiveEffects1 > 0)
+            {
+                err = pC->pC2->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC2->pViDecCtxt,
+                                                                      &t, pTemp2,
+                                                                      M4OSA_TRUE);
+                if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C2) returns 0x%x, \
+                                   returning M4NO_ERROR", err);
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR; /**< Return no error to the encoder core
+                                       (else it may leak in some situations...) */
+                }
+
+                pC->bIssecondClip = M4OSA_TRUE;
+                err = M4VSS3GPP_intApplyVideoEffect(pC, pTemp2, pC->yuv2);
+                if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: M4VSS3GPP_intApplyVideoEffect(2) returns 0x%x, \
+                                    returning M4NO_ERROR", err);
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR; /**< Return no error to the encoder core
+                                       (else it may leak in some situations...) */
+                }
+                pC->pC2->lastDecodedPlane = pTemp2;
+            }
+            else
+            {
+                err = pC->pC2->ShellAPI.m_pVideoDecoder->m_pFctRender(pC->pC2->pViDecCtxt,
+                                                                      &t, pC->yuv2,
+                                                                      M4OSA_TRUE);
+                if (M4NO_ERROR != err)
+                {
+                    M4OSA_TRACE1_1("M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender(C2) returns 0x%x, \
+                                    returning M4NO_ERROR", err);
+                    pC->ewc.VppError = err;
+                    return M4NO_ERROR; /**< Return no error to the encoder core
+                                       (else it may leak in some situations...) */
+                }
+                pC->pC2->lastDecodedPlane = pC->yuv2;
+            }
+            pC->pC2->iVideoRenderCts = (M4OSA_Int32)t;
+        }
+        else
+        {
+            /* Copy the last decoded clip2 plane into the clip2 blending input
+               (pTmp is not yet assigned at this point, so use pC->yuv2) */
+            pTmp = pC->yuv2;
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[0].pac_data,
+                (M4OSA_MemAddr8)pC->pC2->lastDecodedPlane[0].pac_data,
+                (pTmp[0].u_height * pTmp[0].u_width));
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[1].pac_data,
+                (M4OSA_MemAddr8)pC->pC2->lastDecodedPlane[1].pac_data,
+                (pTmp[1].u_height * pTmp[1].u_width));
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[2].pac_data,
+                (M4OSA_MemAddr8)pC->pC2->lastDecodedPlane[2].pac_data,
+                (pTmp[2].u_height * pTmp[2].u_width));
+            pC->pC2->lastDecodedPlane = pTmp;
+        }
+
+
+        pTmp = pPlaneOut;
+        err = M4VSS3GPP_intVideoTransition(pC, pTmp);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intVPP: M4VSS3GPP_intVideoTransition returns 0x%x,\
+                returning M4NO_ERROR",
+                err);
+            pC->ewc.VppError = err;
+            return  M4NO_ERROR; /**< Return no error to the encoder core
+                                (else it may leak in some situations...) */
+        }
+        for (i=0; i < 3; i++)
+        {
+            if (pTemp2[i].pac_data != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pTemp2[i].pac_data);
+                pTemp2[i].pac_data = M4OSA_NULL;
+            }
+
+
+            if (pTemp1[i].pac_data != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pTemp1[i].pac_data);
+                pTemp1[i].pac_data = M4OSA_NULL;
+            }
+        }
+    }
+    /**
+    **************** No Transition case ****************/
+    else
+    {
+        /**
+        * Check if there is a filter */
+        if( pC->nbActiveEffects > 0 )
+        {
+            /**
+            * If we do modify the image, we need an intermediate image plane */
+            if( M4OSA_NULL == pC->yuv1[0].pac_data )
+            {
+                err =
+                    M4VSS3GPP_intAllocateYUV420(pC->yuv1, pC->ewc.uiVideoWidth,
+                    pC->ewc.uiVideoHeight);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intVPP: M4VSS3GPP_intAllocateYUV420 returns 0x%x,\
+                        returning M4NO_ERROR",
+                        err);
+                    pC->ewc.VppError = err;
+                    return
+                        M4NO_ERROR; /**< Return no error to the encoder core
+                                    (else it may leak in some situations...) */
+                }
+            }
+            /**
+            * The image is rendered in the intermediate image plane */
+            pTmp = pC->yuv1;
+        }
+        else
+        {
+            /**
+            * No filter, the image is directly rendered in pPlaneOut */
+            pTmp = pPlaneOut;
+        }
+
+        /**
+        * Compute the time in the clip base: t = to - Offset */
+        // Decorrelate input and output encoding timestamp to handle encoder prefetch
+        t = pC->ewc.dInputVidCts - pC->pC1->iVoffset;
+
+        if( pC->pC1->isRenderDup == M4OSA_FALSE )
+        {
+            err = pC->pC1->ShellAPI.m_pVideoDecoder->m_pFctRender(
+                pC->pC1->pViDecCtxt, &t, pTmp, M4OSA_TRUE);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVPP: m_pVideoDecoder->m_pFctRender returns 0x%x,\
+                    returning M4NO_ERROR",
+                    err);
+                pC->ewc.VppError = err;
+                return
+                    M4NO_ERROR; /**< Return no error to the encoder core
+                                (else it may leak in some situations...) */
+            }
+            pC->pC1->lastDecodedPlane = pTmp;
+            pC->pC1->iVideoRenderCts = (M4OSA_Int32)t;
+        }
+        else
+        {
+            /* Copy last decoded plane to output plane */
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[0].pac_data,
+                (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[0].pac_data,
+                (pTmp[0].u_height * pTmp[0].u_width));
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[1].pac_data,
+                (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[1].pac_data,
+                (pTmp[1].u_height * pTmp[1].u_width));
+            M4OSA_memcpy((M4OSA_MemAddr8)pTmp[2].pac_data,
+                (M4OSA_MemAddr8)pC->pC1->lastDecodedPlane[2].pac_data,
+                (pTmp[2].u_height * pTmp[2].u_width));
+            pC->pC1->lastDecodedPlane = pTmp;
+        }
+
+        M4OSA_TRACE3_1("M4VSS3GPP_intVPP: Rendered at CTS %.3f", t);
+
+        /**
+        * Apply the clip1 effect */
+        //        if (pC->iClip1ActiveEffect >= 0)
+        if( pC->nbActiveEffects > 0 )
+        {
+            err = M4VSS3GPP_intApplyVideoEffect(pC,/*1,*/ pC->yuv1, pPlaneOut);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVPP: M4VSS3GPP_intApplyVideoEffect(1) returns 0x%x,\
+                    returning M4NO_ERROR",
+                    err);
+                pC->ewc.VppError = err;
+                return
+                    M4NO_ERROR; /**< Return no error to the encoder core
+                                (else it may leak in some situations...) */
+            }
+        }
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intVPP: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
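+
+/* The intermediate planes handled above (yuv1..yuv4, pTemp1, pTemp2) are plain
+ * YUV420 triplets. A minimal allocation sketch, assuming the usual 4:2:0
+ * layout for M4VSS3GPP_intAllocateYUV420 (an assumption for illustration, not
+ * the actual helper):
+ *
+ *     // Y plane: full resolution; U and V planes: half width, half height
+ *     for (i = 0; i < 3; i++) {
+ *         plane[i].u_width   = (0 == i) ? width  : width  / 2;
+ *         plane[i].u_height  = (0 == i) ? height : height / 2;
+ *         plane[i].u_stride  = plane[i].u_width;
+ *         plane[i].u_topleft = 0;
+ *         plane[i].pac_data  = ...allocate u_stride * u_height bytes...
+ *     }
+ */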
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intApplyVideoEffect()
+ * @brief    Apply video effect from pPlaneIn to pPlaneOut
+ * @param   pC            (IN/OUT) Internal edit context
+ * @param   pPlaneIn      (IN) Input raw YUV420 image
+ * @param   pPlaneOut     (IN/OUT) Output raw YUV420 image
+ * @return    M4NO_ERROR:                        No error
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intApplyVideoEffect( M4VSS3GPP_InternalEditContext *pC,
+                               M4VIFI_ImagePlane *pPlaneIn,
+                               M4VIFI_ImagePlane *pPlaneOut )
+{
+    M4OSA_ERR err;
+
+    M4VSS3GPP_ClipContext *pClip;
+    M4VSS3GPP_EffectSettings *pFx;
+    M4VFL_CurtainParam curtainParams;
+    M4VSS3GPP_ExternalProgress extProgress;
+
+    M4OSA_Double VideoEffectTime;
+    M4OSA_Double PercentageDone;
+    M4OSA_Int32 tmp;
+
+    M4VIFI_ImagePlane *pPlaneTempIn;
+    M4VIFI_ImagePlane *pPlaneTempOut;
+    M4OSA_UInt8 i;
+    M4OSA_UInt8 NumActiveEffects =0;
+
+
+    pClip = pC->pC1;
+    if (pC->bIssecondClip == M4OSA_TRUE)
+    {
+        NumActiveEffects = pC->nbActiveEffects1;
+    }
+    else
+    {
+        NumActiveEffects = pC->nbActiveEffects;
+    }
+
+    /**
+    * Allocate a temporary plane if needed */
+    if (M4OSA_NULL == pC->yuv4[0].pac_data && NumActiveEffects  > 1)
+    {
+        err = M4VSS3GPP_intAllocateYUV420(pC->yuv4, pC->ewc.uiVideoWidth,
+            pC->ewc.uiVideoHeight);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intApplyVideoEffect: M4VSS3GPP_intAllocateYUV420(4) returns 0x%x,\
+                returning M4NO_ERROR",
+                err);
+            pC->ewc.VppError = err;
+            return
+                M4NO_ERROR; /**< Return no error to the encoder core
+                            (else it may leak in some situations...) */
+        }
+    }
+
+    if (NumActiveEffects  % 2 == 0)
+    {
+        pPlaneTempIn = pPlaneIn;
+        pPlaneTempOut = pC->yuv4;
+    }
+    else
+    {
+        pPlaneTempIn = pPlaneIn;
+        pPlaneTempOut = pPlaneOut;
+    }
+
+    for (i=0; i<NumActiveEffects; i++)
+    {
+        if (pC->bIssecondClip == M4OSA_TRUE)
+        {
+
+
+            pFx = &(pC->pEffectsList[pC->pActiveEffectsList1[i]]);
+            /* Compute how far from the beginning of the effect we are, in clip-base time. */
+            // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) +
+                              pC->pTransitionList[pC->uiCurrentClip].
+                              uiTransitionDuration- pFx->uiStartTime;
+        }
+        else
+        {
+            pFx = &(pC->pEffectsList[pC->pActiveEffectsList[i]]);
+            /* Compute how far from the beginning of the effect we are, in clip-base time. */
+            // Decorrelate input and output encoding timestamp to handle encoder prefetch
+            VideoEffectTime = ((M4OSA_Int32)pC->ewc.dInputVidCts) - pFx->uiStartTime;
+        }
+
+
+
+        /* To calculate the %, subtract timeIncrement because the effect should finish on the */
+        /* last frame, which is presented from CTS = eof-timeIncrement till CTS = eof */
+        PercentageDone = VideoEffectTime
+            / ((M4OSA_Float)pFx->uiDuration/*- pC->dOutputFrameDuration*/);
+
+        if( PercentageDone < 0.0 )
+            PercentageDone = 0.0;
+
+        if( PercentageDone > 1.0 )
+            PercentageDone = 1.0;
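+        /* Worked example (illustrative values only): for an effect with
+         * uiStartTime = 5000 ms and uiDuration = 2000 ms, a frame at
+         * dInputVidCts = 6000 ms gives VideoEffectTime = 1000 and
+         * PercentageDone = 0.5; FadeFromBlack then uses tmp = 512 on its
+         * 0..1024 scale, and FadeToBlack uses (1 - 0.5) * 1024 = 512 as well. */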
+
+        switch( pFx->VideoEffectType )
+        {
+            case M4VSS3GPP_kVideoEffectType_FadeFromBlack:
+                /**
+                * Compute where we are in the effect (scale is 0->1024). */
+                tmp = (M4OSA_Int32)(PercentageDone * 1024);
+
+                /**
+                * Apply the darkening effect */
+                err =
+                    M4VFL_modifyLumaWithScale((M4ViComImagePlane *)pPlaneTempIn,
+                    (M4ViComImagePlane *)pPlaneTempOut, tmp, M4OSA_NULL);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intApplyVideoEffect:\
+                        M4VFL_modifyLumaWithScale returns error 0x%x,\
+                        returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR",
+                        err);
+                    return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
+                }
+                break;
+
+            case M4VSS3GPP_kVideoEffectType_CurtainOpening:
+                /**
+                * Compute where we are in the effect (scale is 0->height).
+                * It is done with floats because tmp x height can be very large
+                (with long clips).*/
+                curtainParams.nb_black_lines =
+                    (M4OSA_UInt16)(( 1.0 - PercentageDone)
+                    * pPlaneTempIn[0].u_height);
+                /**
+                * The curtain is hanged on the ceiling */
+                * The curtain hangs from the ceiling */
+
+                /**
+                * Apply the curtain effect */
+                err = M4VFL_applyCurtain((M4ViComImagePlane *)pPlaneTempIn,
+                    (M4ViComImagePlane *)pPlaneTempOut, &curtainParams,
+                    M4OSA_NULL);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intApplyVideoEffect: M4VFL_applyCurtain returns error 0x%x,\
+                        returning M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR",
+                        err);
+                    return M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR;
+                }
+                break;
+
+            case M4VSS3GPP_kVideoEffectType_FadeToBlack:
+                /**
+                * Compute where we are in the effect (scale is 0->1024) */
+                tmp = (M4OSA_Int32)(( 1.0 - PercentageDone) * 1024);
+
+                /**
+                * Apply the darkening effect */
+                err =
+                    M4VFL_modifyLumaWithScale((M4ViComImagePlane *)pPlaneTempIn,
+                    (M4ViComImagePlane *)pPlaneTempOut, tmp, M4OSA_NULL);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intApplyVideoEffect:\
+                        M4VFL_modifyLumaWithScale returns error 0x%x,\
+                        returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR",
+                        err);
+                    return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
+                }
+                break;
+
+            case M4VSS3GPP_kVideoEffectType_CurtainClosing:
+                /**
+                * Compute where we are in the effect (scale is 0->height) */
+                curtainParams.nb_black_lines =
+                    (M4OSA_UInt16)(PercentageDone * pPlaneTempIn[0].u_height);
+
+                /**
+                * The curtain is hanged on the ceiling */
+                * The curtain hangs from the ceiling */
+
+                /**
+                * Apply the curtain effect */
+                err = M4VFL_applyCurtain((M4ViComImagePlane *)pPlaneTempIn,
+                    (M4ViComImagePlane *)pPlaneTempOut, &curtainParams,
+                    M4OSA_NULL);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intApplyVideoEffect: M4VFL_applyCurtain returns error 0x%x,\
+                        returning M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR",
+                        err);
+                    return M4VSS3GPP_ERR_CURTAIN_FILTER_ERROR;
+                }
+                break;
+
+            default:
+                if( pFx->VideoEffectType
+                    >= M4VSS3GPP_kVideoEffectType_External )
+                {
+                    M4OSA_UInt32 Cts = 0;
+                    M4OSA_Int32 nextEffectTime;
+
+                    /**
+                    * Compute where we are in the effect (scale is 0->1000) */
+                    tmp = (M4OSA_Int32)(PercentageDone * 1000);
+
+                    /**
+                    * Set the progress info provided to the external function */
+                    extProgress.uiProgress = (M4OSA_UInt32)tmp;
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts;
+                    extProgress.uiClipTime = extProgress.uiOutputTime - pClip->iVoffset;
+                    extProgress.bIsLast = M4OSA_FALSE;
+                    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                    nextEffectTime = (M4OSA_Int32)(pC->ewc.dInputVidCts \
+                        + pC->dOutputFrameDuration);
+                    if(nextEffectTime >= (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration))
+                    {
+                        extProgress.bIsLast = M4OSA_TRUE;
+                    }
+
+                    err = pFx->ExtVideoEffectFct(pFx->pExtVideoEffectFctCtxt,
+                        pPlaneTempIn, pPlaneTempOut, &extProgress,
+                        pFx->VideoEffectType
+                        - M4VSS3GPP_kVideoEffectType_External);
+
+                    if( M4NO_ERROR != err )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_intApplyVideoEffect: \
+                            External video effect function returns 0x%x!",
+                            err);
+                        return err;
+                    }
+                    break;
+                }
+                else
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intApplyVideoEffect: unknown effect type (0x%x),\
+                        returning M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE",
+                        pFx->VideoEffectType);
+                    return M4VSS3GPP_ERR_INVALID_VIDEO_EFFECT_TYPE;
+                }
+        }
+        /**
+        * Swap pPlaneTempIn and pPlaneTempOut for the next effect, so the last effect writes into pPlaneOut */
+        if (((i % 2 == 0) && (NumActiveEffects  % 2 == 0))
+            || ((i % 2 != 0) && (NumActiveEffects % 2 != 0)))
+        {
+            pPlaneTempIn = pC->yuv4;
+            pPlaneTempOut = pPlaneOut;
+        }
+        else
+        {
+            pPlaneTempIn = pPlaneOut;
+            pPlaneTempOut = pC->yuv4;
+        }
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intApplyVideoEffect: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
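+
+/* The effect chain above ping-pongs between the scratch plane pC->yuv4 and
+ * pPlaneOut so that the last effect always writes into pPlaneOut. A
+ * stripped-down sketch of the same scheme (illustrative only; applyOneEffect
+ * is a made-up placeholder):
+ *
+ *     in  = pPlaneIn;
+ *     out = (nbEffects % 2 == 0) ? scratch : pPlaneOut;
+ *     for (i = 0; i < nbEffects; i++) {
+ *         applyOneEffect(i, in, out);
+ *         in  = out;                                    // feeds the next effect
+ *         out = (out == scratch) ? pPlaneOut : scratch; // alternate destination
+ *     }
+ *     // after the loop, the final image is in pPlaneOut
+ */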
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intVideoTransition()
+ * @brief    Apply video transition effect pC1+pC2->pPlaneOut
+ * @param   pC                (IN/OUT) Internal edit context
+ * @param    pOutputPlanes    (IN/OUT) Output raw YUV420 image
+ * @return    M4NO_ERROR:                        No error
+ ******************************************************************************
+ */
+static M4OSA_ERR
+M4VSS3GPP_intVideoTransition( M4VSS3GPP_InternalEditContext *pC,
+                             M4VIFI_ImagePlane *pPlaneOut )
+{
+    M4OSA_ERR err;
+    M4OSA_Int32 iProgress;
+    M4VSS3GPP_ExternalProgress extProgress;
+    M4VIFI_ImagePlane *pPlane;
+    M4OSA_Int32 i;
+    const M4OSA_Int32 iDur = (M4OSA_Int32)pC->
+        pTransitionList[pC->uiCurrentClip].uiTransitionDuration;
+
+    /**
+    * Compute how far from the end cut we are, in clip-base time.
+    * It is done with integers because the offset and begin cut have been rounded already. */
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    iProgress = (M4OSA_Int32)((M4OSA_Double)pC->pC1->iEndTime) - pC->ewc.dInputVidCts +
+        ((M4OSA_Double)pC->pC1->iVoffset);
+    /**
+    * We must remove the duration of one frame, else we would almost never reach the end
+    * (It's a classic fence-post issue). */
+    iProgress -= (M4OSA_Int32)pC->dOutputFrameDuration;
+
+    if( iProgress < 0 ) /**< Sanity checks */
+    {
+        iProgress = 0;
+    }
+
+    /**
+    * Compute where we are in the transition, on a base 1000 */
+    iProgress = ( ( iDur - iProgress) * 1000) / iDur;
+
+    /**
+    * Sanity checks */
+    if( iProgress < 0 )
+    {
+        iProgress = 0;
+    }
+    else if( iProgress > 1000 )
+    {
+        iProgress = 1000;
+    }
+
+    switch( pC->pTransitionList[pC->uiCurrentClip].TransitionBehaviour )
+    {
+        case M4VSS3GPP_TransitionBehaviour_SpeedUp:
+            iProgress = ( iProgress * iProgress) / 1000;
+            break;
+
+        case M4VSS3GPP_TransitionBehaviour_Linear:
+            /*do nothing*/
+            break;
+
+        case M4VSS3GPP_TransitionBehaviour_SpeedDown:
+            iProgress = (M4OSA_Int32)(sqrt(iProgress * 1000));
+            break;
+
+        case M4VSS3GPP_TransitionBehaviour_SlowMiddle:
+            if( iProgress < 500 )
+            {
+                iProgress = (M4OSA_Int32)(sqrt(iProgress * 500));
+            }
+            else
+            {
+                iProgress =
+                    (M4OSA_Int32)(( ( ( iProgress - 500) * (iProgress - 500))
+                    / 500) + 500);
+            }
+            break;
+
+        case M4VSS3GPP_TransitionBehaviour_FastMiddle:
+            if( iProgress < 500 )
+            {
+                iProgress = (M4OSA_Int32)(( iProgress * iProgress) / 500);
+            }
+            else
+            {
+                iProgress = (M4OSA_Int32)(sqrt(( iProgress - 500) * 500) + 500);
+            }
+            break;
+
+        default:
+            /*do nothing*/
+            break;
+    }
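+    /* Worked example (illustrative values only): with iEndTime = 10000 ms,
+     * iVoffset = 0, iDur = 2000 ms, dInputVidCts = 9000 ms and an output frame
+     * duration of 33 ms, the distance to the end cut is 1000 - 33 = 967 ms,
+     * which maps to iProgress = (2000 - 967) * 1000 / 2000 = 516, i.e. roughly
+     * halfway through the transition. SpeedUp then shapes it to
+     * 516 * 516 / 1000 = 266, SpeedDown to sqrt(516 * 1000) ~ 718. */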
+
+    switch( pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType )
+    {
+        case M4VSS3GPP_kVideoTransitionType_CrossFade:
+            /**
+            * Apply the transition effect */
+            err = M4VIFI_ImageBlendingonYUV420(M4OSA_NULL,
+                (M4ViComImagePlane *)pC->yuv1,
+                (M4ViComImagePlane *)pC->yuv2,
+                (M4ViComImagePlane *)pPlaneOut, iProgress);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVideoTransition:\
+                     M4VIFI_ImageBlendingonYUV420 returns error 0x%x,\
+                    returning M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR",
+                    err);
+                return M4VSS3GPP_ERR_TRANSITION_FILTER_ERROR;
+            }
+            break;
+
+        case M4VSS3GPP_kVideoTransitionType_None:
+            /**
+            * This is a simple, non-optimized version of the None transition...
+            * We copy the YUV frame */
+            if( iProgress < 500 ) /**< first half of transition */
+            {
+                pPlane = pC->yuv1;
+            }
+            else /**< second half of transition */
+            {
+                pPlane = pC->yuv2;
+            }
+            /**
+            * Copy the input YUV frames */
+            i = 3;
+
+            while( i-- > 0 )
+            {
+                M4OSA_memcpy((M4OSA_MemAddr8)pPlaneOut[i].pac_data,
+                 (M4OSA_MemAddr8)pPlane[i].pac_data,
+                    pPlaneOut[i].u_stride * pPlaneOut[i].u_height);
+            }
+            break;
+
+        default:
+            if( pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType
+                >= M4VSS3GPP_kVideoTransitionType_External )
+            {
+                /**
+                * Set the progress info provided to the external function */
+                extProgress.uiProgress = (M4OSA_UInt32)iProgress;
+                // Decorrelate input and output encoding timestamp to handle encoder prefetch
+                extProgress.uiOutputTime = (M4OSA_UInt32)pC->ewc.dInputVidCts;
+                extProgress.uiClipTime = extProgress.uiOutputTime - pC->pC1->iVoffset;
+
+                err = pC->pTransitionList[pC->
+                    uiCurrentClip].ExtVideoTransitionFct(
+                    pC->pTransitionList[pC->
+                    uiCurrentClip].pExtVideoTransitionFctCtxt,
+                    pC->yuv1, pC->yuv2, pPlaneOut, &extProgress,
+                    pC->pTransitionList[pC->
+                    uiCurrentClip].VideoTransitionType
+                    - M4VSS3GPP_kVideoTransitionType_External);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intVideoTransition:\
+                        External video transition function returns 0x%x!",
+                        err);
+                    return err;
+                }
+                break;
+            }
+            else
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intVideoTransition: unknown transition type (0x%x),\
+                    returning M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE",
+                    pC->pTransitionList[pC->uiCurrentClip].VideoTransitionType);
+                return M4VSS3GPP_ERR_INVALID_VIDEO_TRANSITION_TYPE;
+            }
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intVideoTransition: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intUpdateTimeInfo()
+ * @brief    Update the bit stream time info from the system time, so that players
+ *          relying on the bit stream time info stay consistent
+ * @note    H263 uses an absolute time counter, unlike MPEG4 which uses Group Of VOPs
+ *          (GOV, see the standard)
+ * @param   pC                    (IN/OUT) returns time updated video AU,
+ *                                the offset between system and video time (MPEG4 only)
+ *                                and the state of the current clip (MPEG4 only)
+ * @return    nothing
+ ******************************************************************************
+ */
+static M4OSA_Void
+M4VSS3GPP_intUpdateTimeInfo( M4VSS3GPP_InternalEditContext *pC,
+                            M4SYS_AccessUnit *pAU )
+{
+    M4OSA_UInt8 uiTmp;
+    M4OSA_UInt32 uiCts = 0;
+    M4OSA_MemAddr8 pTmp;
+    M4OSA_UInt32 uiAdd;
+    M4OSA_UInt32 uiCurrGov;
+    M4OSA_Int8 iDiff;
+
+    M4VSS3GPP_ClipContext *pClipCtxt = pC->pC1;
+    M4OSA_Int32 *pOffset = &(pC->ewc.iMpeg4GovOffset);
+
+    /**
+    * Set H263 time counter from system time */
+    if( M4SYS_kH263 == pAU->stream->streamType )
+    {
+        uiTmp = (M4OSA_UInt8)((M4OSA_UInt32)( ( pAU->CTS * 30) / 1001 + 0.5)
+            % M4VSS3GPP_EDIT_H263_MODULO_TIME);
+        M4VSS3GPP_intSetH263TimeCounter((M4OSA_MemAddr8)(pAU->dataAddress),
+            uiTmp);
+    }
+    /*
+    * Set MPEG4 GOV time counter regarding video and system time */
+    else if( M4SYS_kMPEG_4 == pAU->stream->streamType )
+    {
+        /*
+        * If the AU starts with a GOV header.
+        * Beware of little/big endian: read four 8-bit blocks instead of one 32-bit block */
+        M4OSA_UInt8 *temp8 = (M4OSA_UInt8 *)(pAU->dataAddress);
+        M4OSA_UInt32 temp32 = 0;
+
+        temp32 = ( 0x000000ff & (M4OSA_UInt32)(*temp8))
+            + (0x0000ff00 & ((M4OSA_UInt32)(*(temp8 + 1))) << 8)
+            + (0x00ff0000 & ((M4OSA_UInt32)(*(temp8 + 2))) << 16)
+            + (0xff000000 & ((M4OSA_UInt32)(*(temp8 + 3))) << 24);
+
+        M4OSA_TRACE3_2("RC: Temp32: 0x%x, dataAddress: 0x%x\n", temp32,
+            *(pAU->dataAddress));
+
+        if( M4VSS3GPP_EDIT_GOV_HEADER == temp32 )
+        {
+            pTmp =
+                (M4OSA_MemAddr8)(pAU->dataAddress
+                + 1); /**< Jump to the time code (just after the 32 bits header) */
+            uiAdd = (M4OSA_UInt32)(pAU->CTS)+( *pOffset);
+
+            switch( pClipCtxt->bMpeg4GovState )
+            {
+                case M4OSA_FALSE: /*< INIT */
+                    {
+                        /* video time = ceil (system time + offset) */
+                        uiCts = ( uiAdd + 999) / 1000;
+
+                        /* offset update */
+                        ( *pOffset) += (( uiCts * 1000) - uiAdd);
+
+                        /* Save values */
+                        pClipCtxt->uiMpeg4PrevGovValueSet = uiCts;
+
+                        /* State to 'first' */
+                        pClipCtxt->bMpeg4GovState = M4OSA_TRUE;
+                    }
+                    break;
+
+                case M4OSA_TRUE: /*< UPDATE */
+                    {
+                        /* Get current Gov value */
+                        M4VSS3GPP_intGetMPEG4Gov(pTmp, &uiCurrGov);
+
+                        /* video time = floor or ceil (system time + offset) */
+                        uiCts = (uiAdd / 1000);
+                        iDiff = (M4OSA_Int8)(uiCurrGov
+                            - pClipCtxt->uiMpeg4PrevGovValueGet - uiCts
+                            + pClipCtxt->uiMpeg4PrevGovValueSet);
+
+                        /* ceiling */
+                        if( iDiff > 0 )
+                        {
+                            uiCts += (M4OSA_UInt32)(iDiff);
+
+                            /* offset update */
+                            ( *pOffset) += (( uiCts * 1000) - uiAdd);
+                        }
+
+                        /* Save values */
+                        pClipCtxt->uiMpeg4PrevGovValueGet = uiCurrGov;
+                        pClipCtxt->uiMpeg4PrevGovValueSet = uiCts;
+                    }
+                    break;
+            }
+
+            M4VSS3GPP_intSetMPEG4Gov(pTmp, uiCts);
+        }
+    }
+    return;
+}
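+
+/* Worked example of the GOV handling above (INIT state): with pAU->CTS = 2500 ms and
+   iMpeg4GovOffset = 0, uiAdd = 2500, so uiCts = (2500 + 999) / 1000 = 3 s and the offset
+   becomes +500 ms, i.e. the GOV is rounded up to the next full second and later AUs are
+   shifted accordingly. In the UPDATE state the ceiling is only applied (iDiff > 0) when
+   the GOV increment read from the stream exceeds the reconstructed increment.
+   For H263, ((CTS * 30) / 1001 + 0.5) turns the millisecond CTS into a frame index at
+   29.97 fps before the modulo is applied. */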
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intCheckVideoEffects()
+ * @brief    Check which video effect must be applied at the current time
+ ******************************************************************************
+ */
+static M4OSA_Void
+M4VSS3GPP_intCheckVideoEffects( M4VSS3GPP_InternalEditContext *pC,
+                               M4OSA_UInt8 uiClipNumber )
+{
+    M4OSA_UInt8 uiClipIndex;
+    M4OSA_UInt8 uiFxIndex, i;
+    M4VSS3GPP_ClipContext *pClip;
+    M4VSS3GPP_EffectSettings *pFx;
+    M4OSA_Int32 Off, BC, EC;
+    // Decorrelate input and output encoding timestamp to handle encoder prefetch
+    M4OSA_Int32 t = (M4OSA_Int32)pC->ewc.dInputVidCts;
+
+    uiClipIndex = pC->uiCurrentClip;
+    pClip = pC->pC1;
+    /**
+    * Shortcuts for code readability */
+    Off = pClip->iVoffset;
+    BC = pClip->iActualVideoBeginCut;
+    EC = pClip->iEndTime;
+
+    i = 0;
+
+    for ( uiFxIndex = 0; uiFxIndex < pC->nbEffects; uiFxIndex++ )
+    {
+        /** Shortcut: reverse order because of the priority between effects (EndEffect always clean) */
+        pFx = &(pC->pEffectsList[pC->nbEffects - 1 - uiFxIndex]);
+
+        if( M4VSS3GPP_kVideoEffectType_None != pFx->VideoEffectType )
+        {
+            /**
+            * Check if there is actually a video effect */
+
+             if(uiClipNumber ==1)
+             {
+                if ((t >= (M4OSA_Int32)(pFx->uiStartTime)) &&                  /**< Are we after the start time of the effect? */
+                    (t <  (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) ) /**< Are we into the effect duration? */
+                    {
+                /**
+                 * Set the active effect(s) */
+                    pC->pActiveEffectsList[i] = pC->nbEffects-1-uiFxIndex;
+
+                /**
+                 * Update counter of active effects */
+                    i++;
+
+                /**
+                 * The third effect has the highest priority, then the second one, then the first one.
+                 * Hence, as soon as we find an active effect, we can get out of this loop */
+
+                }
+            }
+            else
+            {
+                if ((t + pC->pTransitionList[uiClipIndex].uiTransitionDuration >=
+                   (M4OSA_Int32)(pFx->uiStartTime)) && (t + pC->pTransitionList[uiClipIndex].uiTransitionDuration
+                    <  (M4OSA_Int32)(pFx->uiStartTime + pFx->uiDuration)) ) /**< Are we into the effect duration? */
+                 {
+                /**
+                 * Set the active effect(s) */
+                    pC->pActiveEffectsList1[i] = pC->nbEffects-1-uiFxIndex;
+
+                /**
+                 * Update counter of active effects */
+                    i++;
+
+                /**
+                 * The third effect has the highest priority, then the second one, then the first one.
+                 * Hence, as soon as we find an active effect, we can get out of this loop */
+                }
+
+
+            }
+
+        }
+    }
+
+    if(1==uiClipNumber)
+    {
+    /**
+     * Save number of active effects */
+        pC->nbActiveEffects = i;
+    }
+    else
+    {
+        pC->nbActiveEffects1 = i;
+    }
+
+    /**
+    * Convert the absolute time into clip-related time */
+    t -= Off;
+
+    /**
+    * Check if we are on the begin cut (for clip1 only) */
+    if( ( 0 != BC) && (t == BC) && (1 == uiClipNumber) )
+    {
+        pC->bClip1AtBeginCut = M4OSA_TRUE;
+    }
+    else
+    {
+        pC->bClip1AtBeginCut = M4OSA_FALSE;
+    }
+
+    return;
+}
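+
+/* Note on the activation test above: an effect is considered active when the output time
+   (t for clip 1, t + transition duration for clip 2) falls inside
+   [uiStartTime, uiStartTime + uiDuration). For example, an effect starting at 4000 ms
+   with a 2000 ms duration is active for output times in [4000, 6000). */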
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder()
+ * @brief    Creates the video encoder
+ * @note
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intCreateVideoEncoder( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err;
+    M4ENCODER_AdvancedParams EncParams;
+
+    /**
+    * Simulate a writer interface with our specific function */
+    pC->ewc.OurWriterDataInterface.pProcessAU =
+        M4VSS3GPP_intProcessAU; /**< This function is VSS 3GPP specific,
+                                but it follows the writer interface */
+    pC->ewc.OurWriterDataInterface.pStartAU =
+        M4VSS3GPP_intStartAU; /**< This function is VSS 3GPP specific,
+                              but it follows the writer interface */
+    pC->ewc.OurWriterDataInterface.pWriterContext =
+        (M4WRITER_Context)
+        pC; /**< We give the internal context as writer context */
+
+    /**
+    * Get the encoder interface, if not already done */
+    if( M4OSA_NULL == pC->ShellAPI.pVideoEncoderGlobalFcts )
+    {
+        err = M4VSS3GPP_setCurrentVideoEncoder(&pC->ShellAPI,
+            pC->ewc.VideoStreamType);
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreateVideoEncoder: setCurrentEncoder returns 0x%x",
+            err);
+        M4ERR_CHECK_RETURN(err);
+    }
+
+    /**
+    * Set encoder shell parameters according to VSS settings */
+
+    /* Common parameters */
+    EncParams.InputFormat = M4ENCODER_kIYUV420;
+    EncParams.FrameWidth = pC->ewc.uiVideoWidth;
+    EncParams.FrameHeight = pC->ewc.uiVideoHeight;
+    EncParams.uiTimeScale = pC->ewc.uiVideoTimeScale;
+
+    if( pC->bIsMMS == M4OSA_FALSE )
+    {
+        /* No strict bitrate regulation in the video editor */
+        /* Because of the effects and transitions we should allow more flexibility */
+        /* It also prevents dropping important frames (which would hurt scheduling and
+        cause block effects) */
+        EncParams.bInternalRegulation = M4OSA_FALSE;
+        // Variable framerate is not supported by StageFright encoders
+        EncParams.FrameRate = M4ENCODER_k30_FPS;
+    }
+    else
+    {
+        /* In case of MMS mode, we need to enable bitrate regulation to be sure */
+        /* to reach the targeted output file size */
+        EncParams.bInternalRegulation = M4OSA_TRUE;
+        EncParams.FrameRate = pC->MMSvideoFramerate;
+    }
+
+    /**
+    * Other encoder settings (defaults) */
+    EncParams.uiHorizontalSearchRange = 0;     /* use default */
+    EncParams.uiVerticalSearchRange = 0;       /* use default */
+    EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
+    EncParams.uiIVopPeriod = 0;                /* use default */
+    EncParams.uiMotionEstimationTools = 0;     /* M4V_MOTION_EST_TOOLS_ALL */
+    EncParams.bAcPrediction = M4OSA_TRUE;      /* use AC prediction */
+    EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
+    EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */
+
+    switch ( pC->ewc.VideoStreamType )
+    {
+        case M4SYS_kH263:
+
+            EncParams.Format = M4ENCODER_kH263;
+
+            EncParams.uiStartingQuantizerValue = 10;
+            EncParams.uiRateFactor = 1; /* default */
+
+            EncParams.bErrorResilience = M4OSA_FALSE;
+            EncParams.bDataPartitioning = M4OSA_FALSE;
+            break;
+
+        case M4SYS_kMPEG_4:
+
+            EncParams.Format = M4ENCODER_kMPEG4;
+
+            EncParams.uiStartingQuantizerValue = 8;
+            EncParams.uiRateFactor = (M4OSA_UInt8)(( pC->dOutputFrameDuration
+                * pC->ewc.uiVideoTimeScale) / 1000.0 + 0.5);
+
+            if( EncParams.uiRateFactor == 0 )
+                EncParams.uiRateFactor = 1; /* default */
+
+            if( M4OSA_FALSE == pC->ewc.bVideoDataPartitioning )
+            {
+                EncParams.bErrorResilience = M4OSA_FALSE;
+                EncParams.bDataPartitioning = M4OSA_FALSE;
+            }
+            else
+            {
+                EncParams.bErrorResilience = M4OSA_TRUE;
+                EncParams.bDataPartitioning = M4OSA_TRUE;
+            }
+            break;
+
+        case M4SYS_kH264:
+            M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: M4SYS_H264");
+
+            EncParams.Format = M4ENCODER_kH264;
+
+            EncParams.uiStartingQuantizerValue = 10;
+            EncParams.uiRateFactor = 1; /* default */
+
+            EncParams.bErrorResilience = M4OSA_FALSE;
+            EncParams.bDataPartitioning = M4OSA_FALSE;
+            //EncParams.FrameRate = M4VIDEOEDITING_k5_FPS;
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreateVideoEncoder: Unknown videoStreamType 0x%x",
+                pC->ewc.VideoStreamType);
+            return M4VSS3GPP_ERR_EDITING_UNSUPPORTED_VIDEO_FORMAT;
+    }
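+
+    /* Note: for MPEG4, uiRateFactor is the output frame duration expressed in timescale
+    ticks, e.g. a 33 ms frame duration with a timescale of 1000 gives
+    (33 * 1000) / 1000.0 + 0.5 -> 33. */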
+
+    /* In case of EMP we overwrite certain parameters */
+    if( M4OSA_TRUE == pC->ewc.bActivateEmp )
+    {
+        EncParams.uiHorizontalSearchRange = 15;    /* set value */
+        EncParams.uiVerticalSearchRange = 15;      /* set value */
+        EncParams.bErrorResilience = M4OSA_FALSE;  /* no error resilience */
+        EncParams.uiIVopPeriod = 15; /* one I frame every 15 frames */
+        EncParams.uiMotionEstimationTools = 1; /* M4V_MOTION_EST_TOOLS_NO_4MV */
+        EncParams.bAcPrediction = M4OSA_FALSE;     /* no AC prediction */
+        EncParams.uiStartingQuantizerValue = 10;   /* initial QP = 10 */
+        EncParams.bDataPartitioning = M4OSA_FALSE; /* no data partitioning */
+    }
+
+    if( pC->bIsMMS == M4OSA_FALSE )
+    {
+        /* Compute max bitrate depending on input files bitrates and transitions */
+        if( pC->Vstate == M4VSS3GPP_kEditVideoState_TRANSITION )
+        {
+            /* Max of the two blended files */
+            if( pC->pC1->pSettings->ClipProperties.uiVideoBitrate
+                > pC->pC2->pSettings->ClipProperties.uiVideoBitrate )
+                EncParams.Bitrate =
+                pC->pC1->pSettings->ClipProperties.uiVideoBitrate;
+            else
+                EncParams.Bitrate =
+                pC->pC2->pSettings->ClipProperties.uiVideoBitrate;
+        }
+        else
+        {
+            /* Same as input file */
+            EncParams.Bitrate =
+                pC->pC1->pSettings->ClipProperties.uiVideoBitrate;
+        }
+    }
+    else
+    {
+        EncParams.Bitrate = pC->uiMMSVideoBitrate; /* RC */
+        EncParams.uiTimeScale = 0; /* We let the encoder choose the timescale */
+    }
+
+    M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctInit");
+    /**
+    * Init the video encoder (advanced settings version of the encoder Open function) */
+    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctInit(&pC->ewc.pEncContext,
+        &pC->ewc.OurWriterDataInterface, M4VSS3GPP_intVPP, pC,
+        pC->ShellAPI.pCurrentVideoEncoderExternalAPI,
+        pC->ShellAPI.pCurrentVideoEncoderUserData);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctInit returns 0x%x",
+            err);
+        return err;
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
+    M4OSA_TRACE1_0("M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctOpen");
+
+    err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctOpen(pC->ewc.pEncContext,
+        &pC->ewc.WriterVideoAU, &EncParams);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1(
+            "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctOpen returns 0x%x",
+            err);
+        return err;
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
+    M4OSA_TRACE1_0(
+        "M4VSS3GPP_intCreateVideoEncoder: calling encoder pFctStart");
+
+    if( M4OSA_NULL != pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart )
+    {
+        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStart(
+            pC->ewc.pEncContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intCreateVideoEncoder: pVideoEncoderGlobalFcts->pFctStart returns 0x%x",
+                err);
+            return err;
+        }
+    }
+
+    pC->ewc.encoderState = M4VSS3GPP_kEncoderRunning;
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intCreateVideoEncoder: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder()
+ * @brief    Destroy the video encoder
+ * @note
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_intDestroyVideoEncoder( M4VSS3GPP_InternalEditContext *pC )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    if( M4OSA_NULL != pC->ewc.pEncContext )
+    {
+        if( M4VSS3GPP_kEncoderRunning == pC->ewc.encoderState )
+        {
+            if( pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop != M4OSA_NULL )
+            {
+                err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctStop(
+                    pC->ewc.pEncContext);
+
+                if( M4NO_ERROR != err )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4VSS3GPP_intDestroyVideoEncoder:\
+                        pVideoEncoderGlobalFcts->pFctStop returns 0x%x",
+                        err);
+                    /* Well... how the heck do you handle a failed cleanup? */
+                }
+            }
+
+            pC->ewc.encoderState = M4VSS3GPP_kEncoderStopped;
+        }
+
+        /* Has the encoder actually been opened? Don't close it if that's not the case. */
+        if( M4VSS3GPP_kEncoderStopped == pC->ewc.encoderState )
+        {
+            err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctClose(
+                pC->ewc.pEncContext);
+
+            if( M4NO_ERROR != err )
+            {
+                M4OSA_TRACE1_1(
+                    "M4VSS3GPP_intDestroyVideoEncoder:\
+                    pVideoEncoderGlobalFcts->pFctClose returns 0x%x",
+                    err);
+                /* Well... how the heck do you handle a failed cleanup? */
+            }
+
+            pC->ewc.encoderState = M4VSS3GPP_kEncoderClosed;
+        }
+
+        err = pC->ShellAPI.pVideoEncoderGlobalFcts->pFctCleanup(
+            pC->ewc.pEncContext);
+
+        if( M4NO_ERROR != err )
+        {
+            M4OSA_TRACE1_1(
+                "M4VSS3GPP_intDestroyVideoEncoder:\
+                pVideoEncoderGlobalFcts->pFctCleanup returns 0x%x!",
+                err);
+            /**< We do not return the error here because we still have stuff to free */
+        }
+
+        pC->ewc.encoderState = M4VSS3GPP_kNoEncoder;
+        /**
+        * Reset variable */
+        pC->ewc.pEncContext = M4OSA_NULL;
+    }
+
+    M4OSA_TRACE3_1("M4VSS3GPP_intDestroyVideoEncoder: returning 0x%x", err);
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intSetH263TimeCounter()
+ * @brief    Modify the time counter of the given H263 video AU
+ * @note
+ * @param    pAuDataBuffer    (IN/OUT) H263 Video AU to modify
+ * @param    uiCts            (IN)     New time counter value
+ * @return    nothing
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intSetH263TimeCounter( M4OSA_MemAddr8 pAuDataBuffer,
+                                                  M4OSA_UInt8 uiCts )
+{
+    /*
+    *  The H263 time counter is 8 bits located on the "x" below:
+    *
+    *   |--------|--------|--------|--------|
+    *    ???????? ???????? ??????xx xxxxxx??
+    */
+
+    /**
+    * Write the 2 bits on the third byte */
+    pAuDataBuffer[2] = ( pAuDataBuffer[2] & 0xFC) | (( uiCts >> 6) & 0x3);
+
+    /**
+    * Write the 6 bits on the fourth byte */
+    pAuDataBuffer[3] = ( ( uiCts << 2) & 0xFC) | (pAuDataBuffer[3] & 0x3);
+
+    return;
+}
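+
+/* Worked example: uiCts = 90 (0b01011010). The two upper bits (0b01) go into the two
+   least significant bits of byte 2, and the six lower bits (0b011010) go into the six
+   most significant bits of byte 3; the surrounding bitstream bits are left untouched. */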
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intSetMPEG4Gov()
+ * @brief    Modify the time info from Group Of VOP video AU
+ * @note
+ * @param    pAuDataBuffer    (IN/OUT) MPEG4 Video AU to modify
+ * @param    uiCtsSec         (IN)     New GOV time info, in seconds
+ * @return    nothing
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intSetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
+                                           M4OSA_UInt32 uiCtsSec )
+{
+    /*
+    *  The MPEG-4 time code length is 18 bits:
+    *
+    *     hh     mm    marker    ss
+    *    xxxxx|xxx xxx     1    xxxx xx ??????
+    *   |----- ---|---     -    ----|-- ------|
+    */
+    M4OSA_UInt8 uiHh;
+    M4OSA_UInt8 uiMm;
+    M4OSA_UInt8 uiSs;
+    M4OSA_UInt8 uiTmp;
+
+    /**
+    * Write the last 2 bits of ss */
+    uiSs = (M4OSA_UInt8)(uiCtsSec % 60); /**< modulo part */
+    pAuDataBuffer[2] = (( ( uiSs & 0x03) << 6) | (pAuDataBuffer[2] & 0x3F));
+
+    if( uiCtsSec < 60 )
+    {
+        /**
+        * Write the marker bit (0x10) and the 4 upper bits of ss (mm is zero here) */
+        pAuDataBuffer[1] = (( 0x10) | (uiSs >> 2));
+
+        /**
+        * Write the 5 bits of hh and 3 of mm (out of 6) */
+        pAuDataBuffer[0] = 0;
+    }
+    else
+    {
+        /**
+        * Write the last 3 bits of mm, the marker bit (0x10) and the 4 upper bits of ss */
+        uiTmp = (M4OSA_UInt8)(uiCtsSec / 60); /**< integer part */
+        uiMm = (M4OSA_UInt8)(uiTmp % 60);
+        pAuDataBuffer[1] = (( uiMm << 5) | (0x10) | (uiSs >> 2));
+
+        if( uiTmp < 60 )
+        {
+            /**
+            * Write the 5 bits of hh and 3 of mm (out of 6) */
+            pAuDataBuffer[0] = ((uiMm >> 3));
+        }
+        else
+        {
+            /**
+            * Write the 5 bits of hh and 3 of mm (out of 6) */
+            uiHh = (M4OSA_UInt8)(uiTmp / 60);
+            pAuDataBuffer[0] = (( uiHh << 3) | (uiMm >> 3));
+        }
+    }
+    return;
+}
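+
+/* Worked example: uiCtsSec = 3725 s (1 h 02 min 05 s) gives uiSs = 5, uiMm = 2, uiHh = 1,
+   so byte 0 = (1 << 3) | (2 >> 3) = 0x08, byte 1 = (2 << 5) | 0x10 | (5 >> 2) = 0x51,
+   and the two most significant bits of byte 2 carry (5 & 0x03) = 0b01. */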
+
+/**
+ ******************************************************************************
+ * M4OSA_Void M4VSS3GPP_intGetMPEG4Gov()
+ * @brief    Get the time info from Group Of VOP video AU
+ * @note
+ * @param    pAuDataBuffer    (IN)     MPEG4 Video AU to read
+ * @param    pCtsSec          (OUT)    Current GOV time info, in seconds
+ * @return    nothing
+ ******************************************************************************
+ */
+static M4OSA_Void M4VSS3GPP_intGetMPEG4Gov( M4OSA_MemAddr8 pAuDataBuffer,
+                                           M4OSA_UInt32 *pCtsSec )
+{
+    /*
+    *  The MPEG-4 time code length is 18 bits:
+    *
+    *     hh     mm    marker    ss
+    *    xxxxx|xxx xxx     1    xxxx xx ??????
+    *   |----- ---|---     -    ----|-- ------|
+    */
+    M4OSA_UInt8 uiHh;
+    M4OSA_UInt8 uiMm;
+    M4OSA_UInt8 uiSs;
+    M4OSA_UInt8 uiTmp;
+    M4OSA_UInt32 uiCtsSec;
+
+    /**
+    * Read ss */
+    uiSs = (( pAuDataBuffer[2] & 0xC0) >> 6);
+    uiTmp = (( pAuDataBuffer[1] & 0x0F) << 2);
+    uiCtsSec = uiSs + uiTmp;
+
+    /**
+    * Read mm */
+    uiMm = (( pAuDataBuffer[1] & 0xE0) >> 5);
+    uiTmp = (( pAuDataBuffer[0] & 0x07) << 3);
+    uiMm = uiMm + uiTmp;
+    uiCtsSec = ( uiMm * 60) + uiCtsSec;
+
+    /**
+    * Read hh */
+    uiHh = (( pAuDataBuffer[0] & 0xF8) >> 3);
+
+    if( uiHh )
+    {
+        uiCtsSec = ( uiHh * 3600) + uiCtsSec;
+    }
+
+    /*
+    * in sec */
+    *pCtsSec = uiCtsSec;
+
+    return;
+}
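+
+/* This is the inverse of M4VSS3GPP_intSetMPEG4Gov above: with the bytes of the previous
+   example (0x08, 0x51, ss bits 0b01), the code reads ss = 1 + (1 << 2) = 5, mm = 2 + 0 = 2,
+   hh = 1, and returns 1 * 3600 + 2 * 60 + 5 = 3725 s. */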
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_intAllocateYUV420()
+ * @brief    Allocate the three YUV 4:2:0 planes
+ * @note
+ * @param    pPlanes    (IN/OUT) valid pointer to 3 M4VIFI_ImagePlane structures
+ * @param    uiWidth    (IN)     Image width
+ * @param    uiHeight   (IN)     Image height
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_ALLOC:   Memory allocation has failed
+ ******************************************************************************
+ */
+static M4OSA_ERR M4VSS3GPP_intAllocateYUV420( M4VIFI_ImagePlane *pPlanes,
+                                             M4OSA_UInt32 uiWidth, M4OSA_UInt32 uiHeight )
+{
+
+    pPlanes[0].u_width = uiWidth;
+    pPlanes[0].u_height = uiHeight;
+    pPlanes[0].u_stride = uiWidth;
+    pPlanes[0].u_topleft = 0;
+    pPlanes[0].pac_data = (M4VIFI_UInt8 *)M4OSA_malloc(pPlanes[0].u_stride
+        * pPlanes[0].u_height, M4VSS3GPP, (M4OSA_Char *)"pPlanes[0].pac_data");
+
+    if( M4OSA_NULL == pPlanes[0].pac_data )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[0].pac_data,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    pPlanes[1].u_width = pPlanes[0].u_width >> 1;
+    pPlanes[1].u_height = pPlanes[0].u_height >> 1;
+    pPlanes[1].u_stride = pPlanes[1].u_width;
+    pPlanes[1].u_topleft = 0;
+    pPlanes[1].pac_data = (M4VIFI_UInt8 *)M4OSA_malloc(pPlanes[1].u_stride
+        * pPlanes[1].u_height, M4VSS3GPP,(M4OSA_Char *) "pPlanes[1].pac_data");
+
+    if( M4OSA_NULL == pPlanes[1].pac_data )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[1].pac_data,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    pPlanes[2].u_width = pPlanes[1].u_width;
+    pPlanes[2].u_height = pPlanes[1].u_height;
+    pPlanes[2].u_stride = pPlanes[2].u_width;
+    pPlanes[2].u_topleft = 0;
+    pPlanes[2].pac_data = (M4VIFI_UInt8 *)M4OSA_malloc(pPlanes[2].u_stride
+        * pPlanes[2].u_height, M4VSS3GPP, (M4OSA_Char *)"pPlanes[2].pac_data");
+
+    if( M4OSA_NULL == pPlanes[2].pac_data )
+    {
+        M4OSA_TRACE1_0(
+            "M4VSS3GPP_intAllocateYUV420: unable to allocate pPlanes[2].pac_data,\
+            returning M4ERR_ALLOC");
+        return M4ERR_ALLOC;
+    }
+
+    /**
+    *    Return */
+    M4OSA_TRACE3_0("M4VSS3GPP_intAllocateYUV420: returning M4NO_ERROR");
+    return M4NO_ERROR;
+}
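+
+/* Note: assuming even dimensions, the three planes add up to the usual YUV 4:2:0 footprint
+   of uiWidth * uiHeight * 3 / 2 bytes (one full-resolution luma plane plus two
+   quarter-resolution chroma planes). */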
diff --git a/libvideoeditor/vss/src/M4VSS3GPP_MediaAndCodecSubscription.c b/libvideoeditor/vss/src/M4VSS3GPP_MediaAndCodecSubscription.c
new file mode 100755
index 0000000..f7226a3
--- /dev/null
+++ b/libvideoeditor/vss/src/M4VSS3GPP_MediaAndCodecSubscription.c
@@ -0,0 +1,457 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ************************************************************************
+ * @file    M4VSS3GPP_MediaAndCodecSubscription.c
+ * @brief    Media readers and codecs subscription
+ * @note    This file implements the subscription of supported media
+ *            readers and decoders for the VSS. Potential support can
+ *            be activated or deactivated
+ *            using compilation flags set in the project settings.
+ *************************************************************************
+ */
+
+#include "NXPSW_CompilerSwitches.h"
+
+
+#include "M4OSA_Debug.h"
+#include "M4VSS3GPP_InternalTypes.h"                /**< Include for VSS specific types */
+#include "M4VSS3GPP_InternalFunctions.h"            /**< Registration module */
+
+/* _______________________ */
+/*|                       |*/
+/*|  reader subscription  |*/
+/*|_______________________|*/
+
+/* Reader registration : at least one reader must be defined */
+#ifndef M4VSS_SUPPORT_READER_3GP
+#ifndef M4VSS_SUPPORT_READER_AMR
+#ifndef M4VSS_SUPPORT_READER_MP3
+#ifndef M4VSS_SUPPORT_READER_PCM
+#ifndef M4VSS_SUPPORT_AUDEC_NULL
+#error "no reader registered"
+#endif /* M4VSS_SUPPORT_AUDEC_NULL */
+#endif /* M4VSS_SUPPORT_READER_PCM */
+#endif /* M4VSS_SUPPORT_READER_MP3 */
+#endif /* M4VSS_SUPPORT_READER_AMR */
+#endif /* M4VSS_SUPPORT_READER_3GP */
+
+/* There must be at least one MPEG4 decoder */
+#if !defined(M4VSS_SUPPORT_VIDEC_3GP) && !defined(M4VSS_ENABLE_EXTERNAL_DECODERS)
+#error "Wait, what?"
+/* "Hey, this is the VSS3GPP speaking. Pray tell, how the heck do you expect me to be able to do
+any editing without a built-in video decoder, nor the possibility to receive an external one?!
+Seriously, I'd love to know." */
+#endif
+
+/* Include files for each reader to subscribe */
+#ifdef M4VSS_SUPPORT_READER_3GP
+#include "VideoEditor3gpReader.h"
+#endif
+#ifdef M4VSS_SUPPORT_READER_AMR
+#include "M4READER_Amr.h"
+#endif
+#ifdef M4VSS_SUPPORT_READER_MP3
+#include "VideoEditorMp3Reader.h"
+#endif
+#ifdef M4VSS_SUPPORT_READER_PCM
+#include "M4READER_Pcm.h"
+#endif
+
+
+/* ______________________________ */
+/*|                              |*/
+/*|  audio decoder subscription  |*/
+/*|______________________________|*/
+
+#include "VideoEditorAudioDecoder.h"
+#include "VideoEditorVideoDecoder.h"
+#ifdef M4VSS_SUPPORT_AUDEC_NULL
+#include "M4AD_Null.h"
+#endif
+
+/* _______________________ */
+/*|                       |*/
+/*|  writer subscription  |*/
+/*|_______________________|*/
+
+/* Writer registration : at least one writer must be defined */
+//#ifndef M4VSS_SUPPORT_WRITER_AMR
+#ifndef M4VSS_SUPPORT_WRITER_3GPP
+#error "no writer registered"
+#endif /* M4VSS_SUPPORT_WRITER_3GPP */
+//#endif /* M4VSS_SUPPORT_WRITER_AMR */
+
+/* Include files for each writer to subscribe */
+//#ifdef M4VSS_SUPPORT_WRITER_AMR
+/*extern M4OSA_ERR M4WRITER_AMR_getInterfaces( M4WRITER_OutputFileType* Type,
+M4WRITER_GlobalInterface** SrcGlobalInterface,
+M4WRITER_DataInterface** SrcDataInterface);*/
+//#endif
+#ifdef M4VSS_SUPPORT_WRITER_3GPP
+extern M4OSA_ERR M4WRITER_3GP_getInterfaces( M4WRITER_OutputFileType* Type,
+                                            M4WRITER_GlobalInterface** SrcGlobalInterface,
+                                            M4WRITER_DataInterface** SrcDataInterface);
+#endif
+
+/* ______________________________ */
+/*|                              |*/
+/*|  video encoder subscription  |*/
+/*|______________________________|*/
+#include "VideoEditorAudioEncoder.h"
+#include "VideoEditorVideoEncoder.h"
+
+
+/* ______________________________ */
+/*|                              |*/
+/*|  audio encoder subscription  |*/
+/*|______________________________|*/
+
+
+#define M4ERR_CHECK_NULL_RETURN_VALUE(retval, pointer) if ((pointer) == M4OSA_NULL)\
+    return ((M4OSA_ERR)(retval));
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_subscribeMediaAndCodec()
+ * @brief    This function registers the reader, decoders, writers and encoders
+ *          in the VSS.
+ * @note
+ * @param    pContext:    (IN) Execution context.
+ * @return    M4NO_ERROR: there is no error
+ * @return    M4ERR_PARAMETER    pContext is NULL
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_subscribeMediaAndCodec(M4VSS3GPP_MediaAndCodecCtxt *pContext)
+{
+    M4OSA_ERR                   err = M4NO_ERROR;
+
+    M4READER_MediaType          readerMediaType;
+    M4READER_GlobalInterface*   pReaderGlobalInterface;
+    M4READER_DataInterface*     pReaderDataInterface;
+
+    M4WRITER_OutputFileType     writerMediaType;
+    M4WRITER_GlobalInterface*   pWriterGlobalInterface;
+    M4WRITER_DataInterface*     pWriterDataInterface;
+
+    M4AD_Type                   audioDecoderType;
+    M4ENCODER_AudioFormat       audioCodecType;
+    M4ENCODER_AudioGlobalInterface* pAudioCodecInterface;
+    M4AD_Interface*             pAudioDecoderInterface;
+
+    M4DECODER_VideoType         videoDecoderType;
+    M4ENCODER_Format            videoCodecType;
+    M4ENCODER_GlobalInterface*  pVideoCodecInterface;
+    M4DECODER_VideoInterface*   pVideoDecoderInterface;
+
+    M4ERR_CHECK_NULL_RETURN_VALUE(M4ERR_PARAMETER, pContext);
+
+    /* _______________________ */
+    /*|                       |*/
+    /*|  reader subscription  |*/
+    /*|_______________________|*/
+
+    /* --- 3GP --- */
+
+#ifdef M4VSS_SUPPORT_READER_3GP
+    err = VideoEditor3gpReader_getInterface( &readerMediaType, &pReaderGlobalInterface,
+         &pReaderDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4READER_3GP interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
+        pReaderDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register 3GP reader");
+#endif /* M4VSS_SUPPORT_READER_3GP */
+
+    /* --- AMR --- */
+
+#ifdef M4VSS_SUPPORT_READER_AMR
+    err = M4READER_AMR_getInterfaces( &readerMediaType, &pReaderGlobalInterface,
+        &pReaderDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4READER_AMR interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
+        pReaderDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register AMR reader");
+#endif /* M4VSS_SUPPORT_READER_AMR */
+
+    /* --- MP3 --- */
+
+#ifdef M4VSS_SUPPORT_READER_MP3
+    err = VideoEditorMp3Reader_getInterface( &readerMediaType, &pReaderGlobalInterface,
+         &pReaderDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4READER_MP3 interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
+        pReaderDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register MP3 reader");
+#endif /* M4VSS_SUPPORT_READER_MP3 */
+
+    /* --- PCM --- */
+
+#ifdef M4VSS_SUPPORT_READER_PCM
+    err = M4READER_PCM_getInterfaces( &readerMediaType, &pReaderGlobalInterface,
+        &pReaderDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4READER_PCM interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerReader( pContext, readerMediaType, pReaderGlobalInterface,
+        pReaderDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register PCM reader");
+#endif /* M4VSS_SUPPORT_READER_PCM */
+
+    /* ______________________________ */
+    /*|                              |*/
+    /*|  video decoder subscription  |*/
+    /*|______________________________|*/
+
+    /* --- MPEG4 & H263 --- */
+
+#ifdef M4VSS_SUPPORT_VIDEC_3GP
+    err = VideoEditorVideoDecoder_getInterface_MPEG4(&videoDecoderType, (M4OSA_Void *)&pVideoDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4DECODER_MPEG4 interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerVideoDecoder( pContext, videoDecoderType, pVideoDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register MPEG4 decoder");
+#endif /* M4VSS_SUPPORT_VIDEC_3GP */
+
+#ifdef M4VSS_SUPPORT_VIDEO_AVC
+    err = VideoEditorVideoDecoder_getInterface_H264(&videoDecoderType, (M4OSA_Void *)&pVideoDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4DECODER_H264 interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerVideoDecoder( pContext, videoDecoderType, pVideoDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register H264 decoder");
+#endif /* M4VSS_SUPPORT_VIDEO_AVC */
+
+    /* ______________________________ */
+    /*|                              |*/
+    /*|  audio decoder subscription  |*/
+    /*|______________________________|*/
+
+    /* --- AMRNB --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_AMRNB
+    err = VideoEditorAudioDecoder_getInterface_AMRNB(&audioDecoderType, &pAudioDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4 AMRNB interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register AMRNB decoder");
+#endif /* M4VSS_SUPPORT_AUDEC_AMRNB */
+
+    /* --- AAC --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_AAC
+    err = VideoEditorAudioDecoder_getInterface_AAC(&audioDecoderType, &pAudioDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4 AAC interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register AAC decoder");
+#endif /* M4VSS_SUPPORT_AUDEC_AAC */
+
+    /* --- MP3 --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_MP3
+    err = VideoEditorAudioDecoder_getInterface_MP3(&audioDecoderType, &pAudioDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4 MP3 interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register MP3 decoder");
+#endif  /* M4VSS_SUPPORT_AUDEC_MP3 */
+
+
+    /* --- NULL --- */
+
+#ifdef M4VSS_SUPPORT_AUDEC_NULL
+    err = M4AD_NULL_getInterface( &audioDecoderType, &pAudioDecoderInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4AD NULL Decoder interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioDecoder( pContext, audioDecoderType, pAudioDecoderInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register EVRC decoder");
+#endif  /* M4VSS_SUPPORT_AUDEC_NULL */
+
+    /* _______________________ */
+    /*|                       |*/
+    /*|  writer subscription  |*/
+    /*|_______________________|*/
+
+
+    /* --- 3GPP --- */
+
+#ifdef M4VSS_SUPPORT_WRITER_3GPP
+    /* retrieves the 3GPP writer media type and pointer to functions*/
+    err = M4WRITER_3GP_getInterfaces( &writerMediaType, &pWriterGlobalInterface,
+        &pWriterDataInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4WRITER_3GP interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerWriter( pContext, writerMediaType, pWriterGlobalInterface,
+        pWriterDataInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register 3GPP writer");
+#endif /* M4VSS_SUPPORT_WRITER_3GPP */
+
+    /* ______________________________ */
+    /*|                              |*/
+    /*|  video encoder subscription  |*/
+    /*|______________________________|*/
+
+    /* --- MPEG4 --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+    /* retrieves the MPEG4 encoder type and pointer to functions*/
+    err = VideoEditorVideoEncoder_getInterface_MPEG4(&videoCodecType, &pVideoCodecInterface,
+         M4ENCODER_OPEN_ADVANCED);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4MP4E_MPEG4 interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register video MPEG4 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
+
+    /* --- H263 --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_MPEG4
+    /* retrieves the H263 encoder type and pointer to functions*/
+    err = VideoEditorVideoEncoder_getInterface_H263(&videoCodecType, &pVideoCodecInterface,
+         M4ENCODER_OPEN_ADVANCED);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4MP4E_H263 interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register video H263 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_MPEG4 */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AVC
+    /* retrieves the H264 encoder type and pointer to functions*/
+    err = VideoEditorVideoEncoder_getInterface_H264(&videoCodecType, &pVideoCodecInterface,
+         M4ENCODER_OPEN_ADVANCED);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4VSS3GPP_subscribeMediaAndCodec: M4H264E interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerVideoEncoder( pContext, videoCodecType, pVideoCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register video H264 encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AVC */
+
+    /* ______________________________ */
+    /*|                              |*/
+    /*|  audio encoder subscription  |*/
+    /*|______________________________|*/
+
+    /* --- AMR --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AMR
+    /* retrieves the AMR encoder type and pointer to functions*/
+    err = VideoEditorAudioEncoder_getInterface_AMRNB(&audioCodecType, &pAudioCodecInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4AMR interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioEncoder( pContext, audioCodecType, pAudioCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register audio AMR encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AMR */
+
+    /* --- AAC --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_AAC
+    /* retrieves the AAC encoder type and pointer to functions*/
+    err = VideoEditorAudioEncoder_getInterface_AAC(&audioCodecType, &pAudioCodecInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4AAC interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioEncoder( pContext, audioCodecType, pAudioCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register audio AAC encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_AAC */
+
+    /* --- EVRC --- */
+
+#ifdef M4VSS_SUPPORT_ENCODER_EVRC
+    /* retrieves the EVRC encoder type and pointer to functions*/
+    err = M4EVRC_getInterfaces( &audioCodecType, &pAudioCodecInterface);
+    if (M4NO_ERROR != err)
+    {
+        M4OSA_TRACE1_0("M4EVRC interface allocation error");
+        return err;
+    }
+    err = M4VSS3GPP_registerAudioEncoder( pContext, audioCodecType, pAudioCodecInterface);
+    M4OSA_DEBUG_IF1((err != M4NO_ERROR), err,
+        "M4VSS3GPP_subscribeMediaAndCodec: can't register audio EVRC encoder");
+#endif /* M4VSS_SUPPORT_ENCODER_EVRC */
+
+#ifdef M4VSS_SUPPORT_OMX_CODECS
+    pContext->bAllowFreeingOMXCodecInterface = M4OSA_TRUE;   /* when NXP SW codecs are registered,
+                                                               then allow unregistration*/
+#endif
+
+
+    return err;
+}
+
diff --git a/libvideoeditor/vss/src/M4xVSS_API.c b/libvideoeditor/vss/src/M4xVSS_API.c
new file mode 100755
index 0000000..33c28b0
--- /dev/null
+++ b/libvideoeditor/vss/src/M4xVSS_API.c
@@ -0,0 +1,7004 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4xVSS_API.c
+ * @brief    API of eXtended Video Studio Service (Video Studio 2.1)
+ * @note
+ ******************************************************************************
+ */
+
+/**
+ * OSAL main types and errors ***/
+#include "M4OSA_Types.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_FileReader.h"
+#include "M4OSA_FileWriter.h"
+#include "M4OSA_FileExtra.h"
+#include "M4OSA_CoreID.h"
+#include "M4OSA_CharStar.h"
+// StageFright encoders require resolutions that are multiples of 16
+#include "M4ENCODER_common.h"
+
+
+/**
+ * VSS 3GPP API definition */
+#include "M4VSS3GPP_ErrorCodes.h"
+
+/*************************
+Begin of xVSS API
+ **************************/
+
+#include "M4xVSS_API.h"
+#include "M4xVSS_Internal.h"
+
+/* RC: to delete unnecessary temp files on the fly */
+#include "M4VSS3GPP_InternalTypes.h"
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_Init(M4OSA_Context* pContext, M4xVSS_InitParams* pParams)
+ * @brief        This function initializes the xVSS
+ * @note        Initializes the xVSS edit operation (allocates an execution context).
+ *
+ * @param    pContext            (OUT) Pointer on the xVSS edit context to allocate
+ * @param    pParams                (IN) Parameters mandatory for xVSS
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4xVSS_Init( M4OSA_Context *pContext, M4xVSS_InitParams *pParams )
+{
+    M4xVSS_Context *xVSS_context;
+    M4OSA_UInt32 length = 0, i;
+
+    if( pParams == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("Parameter structure for M4xVSS_Init function is NULL");
+        return M4ERR_PARAMETER;
+    }
+
+    if( pParams->pFileReadPtr == M4OSA_NULL
+        || pParams->pFileWritePtr == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0(
+            "pFileReadPtr or pFileWritePtr in M4xVSS_InitParams structure is NULL");
+        return M4ERR_PARAMETER;
+    }
+
+    xVSS_context = (M4xVSS_Context *)M4OSA_malloc(sizeof(M4xVSS_Context), M4VS,
+        (M4OSA_Char *)"Context of the xVSS layer");
+
+    if( xVSS_context == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+        return M4ERR_ALLOC;
+    }
+
+    /* Initialize file read/write functions pointers */
+    xVSS_context->pFileReadPtr = pParams->pFileReadPtr;
+    xVSS_context->pFileWritePtr = pParams->pFileWritePtr;
+
+    /*UTF Conversion support: copy conversion functions pointers and allocate the temporary
+     buffer*/
+    if( pParams->pConvFromUTF8Fct != M4OSA_NULL )
+    {
+        if( pParams->pConvToUTF8Fct != M4OSA_NULL )
+        {
+            xVSS_context->UTFConversionContext.pConvFromUTF8Fct =
+                pParams->pConvFromUTF8Fct;
+            xVSS_context->UTFConversionContext.pConvToUTF8Fct =
+                pParams->pConvToUTF8Fct;
+            xVSS_context->UTFConversionContext.m_TempOutConversionSize =
+                UTF_CONVERSION_BUFFER_SIZE;
+            xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
+                (M4OSA_Void *)M4OSA_malloc(UTF_CONVERSION_BUFFER_SIZE
+                * sizeof(M4OSA_UInt8),
+                M4VA, (M4OSA_Char *)"M4xVSS_Init: UTF conversion buffer");
+
+            if( M4OSA_NULL
+                == xVSS_context->UTFConversionContext.pTempOutConversionBuffer )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+                /* pTempPath is not allocated yet, so only the context is freed */
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context);
+                xVSS_context = M4OSA_NULL;
+                return M4ERR_ALLOC;
+            }
+        }
+        else
+        {
+            M4OSA_TRACE1_0("M4xVSS_Init: one UTF conversion pointer is null and the other\
+                           is not null");
+            /* pTempPath is not allocated yet, so only the context is freed */
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context);
+            xVSS_context = M4OSA_NULL;
+            return M4ERR_PARAMETER;
+        }
+    }
+    else
+    {
+        xVSS_context->UTFConversionContext.pConvFromUTF8Fct = M4OSA_NULL;
+        xVSS_context->UTFConversionContext.pConvToUTF8Fct = M4OSA_NULL;
+        xVSS_context->UTFConversionContext.m_TempOutConversionSize = 0;
+        xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
+            M4OSA_NULL;
+    }
+
+    if( pParams->pTempPath != M4OSA_NULL )
+    {
+        /*No need to convert into UTF8 as all input of xVSS are in UTF8
+        (the conversion customer format into UTF8
+        is done in VA/VAL)*/
+        xVSS_context->pTempPath =
+            (M4OSA_Void *)M4OSA_malloc(M4OSA_chrLength(pParams->pTempPath) + 1,
+            M4VS, (M4OSA_Char *)"xVSS Path for temporary files");
+
+        if( xVSS_context->pTempPath == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(xVSS_context->pTempPath, pParams->pTempPath,
+            M4OSA_chrLength(pParams->pTempPath) + 1);
+        /* TODO: Check that no previous xVSS temporary files are present ? */
+    }
+    else
+    {
+        M4OSA_TRACE1_0("Path for temporary files is NULL");
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context);
+        xVSS_context = M4OSA_NULL;
+        return M4ERR_PARAMETER;
+    }
+
+    xVSS_context->pSettings =
+        (M4VSS3GPP_EditSettings *)M4OSA_malloc(sizeof(M4VSS3GPP_EditSettings),
+        M4VS, (M4OSA_Char *)"Copy of VSS structure");
+
+    if( xVSS_context->pSettings == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pTempPath);
+        xVSS_context->pTempPath = M4OSA_NULL;
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context);
+        xVSS_context = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+
+    /* Initialize pointers in pSettings */
+    xVSS_context->pSettings->pClipList = M4OSA_NULL;
+    xVSS_context->pSettings->pTransitionList = M4OSA_NULL;
+    xVSS_context->pSettings->Effects = M4OSA_NULL; /* RC */
+    xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
+
+    /* This is used to know if the user has added or removed some media */
+    xVSS_context->previousClipNumber = 0;
+
+    /* "State machine" */
+    xVSS_context->editingStep = 0;
+    xVSS_context->analyseStep = 0;
+
+    xVSS_context->pcmPreviewFile = M4OSA_NULL;
+
+    /* Initialize Pto3GPP and MCS lists */
+    xVSS_context->pMCSparamsList = M4OSA_NULL;
+    xVSS_context->pPTo3GPPparamsList = M4OSA_NULL;
+    xVSS_context->pPTo3GPPcurrentParams = M4OSA_NULL;
+    xVSS_context->pMCScurrentParams = M4OSA_NULL;
+
+    xVSS_context->tempFileIndex = 0;
+
+    xVSS_context->targetedBitrate = 0;
+
+    xVSS_context->targetedTimescale = 0;
+
+    xVSS_context->pAudioMixContext = M4OSA_NULL;
+    xVSS_context->pAudioMixSettings = M4OSA_NULL;
+
+    /* FB: initialize to avoid a crash if an error occurs during editing */
+    xVSS_context->pCurrentEditSettings = M4OSA_NULL;
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    for ( i = 0; i < M4VD_kVideoType_NB; i++ )
+    {
+        xVSS_context->registeredExternalDecs[i].pDecoderInterface = M4OSA_NULL;
+        xVSS_context->registeredExternalDecs[i].pUserData = M4OSA_NULL;
+        xVSS_context->registeredExternalDecs[i].registered = M4OSA_FALSE;
+    }
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    for ( i = 0; i < M4VE_kEncoderType_NB; i++ )
+    {
+        xVSS_context->registeredExternalEncs[i].pEncoderInterface = M4OSA_NULL;
+        xVSS_context->registeredExternalEncs[i].pUserData = M4OSA_NULL;
+        xVSS_context->registeredExternalEncs[i].registered = M4OSA_FALSE;
+    }
+
+    /* Initialize the state once all initializations have succeeded */
+    xVSS_context->m_state = M4xVSS_kStateInitialized;
+
+    /* initialize MCS context*/
+    xVSS_context->pMCS_Ctxt = M4OSA_NULL;
+
+    *pContext = xVSS_context;
+
+    return M4NO_ERROR;
+}
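+
+/* Minimal caller sketch (illustrative only; it shows just the fields referenced above,
+   and the OSAL file reader/writer function tables are assumed to be filled elsewhere):
+
+       M4OSA_Context      xVSSContext = M4OSA_NULL;
+       M4xVSS_InitParams  params;
+
+       params.pFileReadPtr     = &osalFileReadFcts;   // hypothetical OSAL table
+       params.pFileWritePtr    = &osalFileWriteFcts;  // hypothetical OSAL table
+       params.pTempPath        = (M4OSA_Void *)"/sdcard/tmp/";
+       params.pConvFromUTF8Fct = M4OSA_NULL;          // no UTF conversion in this sketch
+       params.pConvToUTF8Fct   = M4OSA_NULL;
+
+       err = M4xVSS_Init(&xVSSContext, &params);
+*/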
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_ReduceTranscode
+ * @brief        This function changes the given editing structure in order to
+ *                minimize the transcoding time.
+ * @note        The xVSS analyses this structure, and if needed, changes the
+ *                output parameters (Video codec, video size, audio codec,
+ *                audio nb of channels) to minimize the transcoding time.
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @param    pSettings            (IN) Edition settings (allocated by the user)
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_ReduceTranscode( M4OSA_Context pContext,
+                                 M4VSS3GPP_EditSettings *pSettings )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4VIDEOEDITING_ClipProperties fileProperties;
+    M4OSA_UInt8 i, j;
+    M4OSA_Bool bAudioTransition = M4OSA_FALSE;
+    M4OSA_Bool bIsBGMReplace = M4OSA_FALSE;
+    M4OSA_Bool bFound;
+    M4OSA_UInt32 videoConfig[9] =
+    {
+        0, 0, 0, 0, 0, 0, 0, 0, 0
+    };
+    /** Index <-> Video config **/
+    /* 0:        H263  SQCIF        */
+    /* 1:        H263  QCIF        */
+    /* 2:        H263  CIF        */
+    /* 3:        MPEG4 SQCIF        */
+    /* 4:        MPEG4 QQVGA        */
+    /* 5:        MPEG4 QCIF        */
+    /* 6:        MPEG4 QVGA        */
+    /* 7:        MPEG4 CIF        */
+    /* 8:        MPEG4 VGA        */
+    /****************************/
+    M4OSA_UInt32 audioConfig[3] =
+    {
+        0, 0, 0
+    };
+    /** Index <-> Audio config **/
+    /* 0:    AMR                    */
+    /* 1:    AAC    16kHz mono        */
+    /* 2:    AAC 16kHz stereo    */
+    /****************************/
+
+    /* Check state */
+    if( xVSS_context->m_state != M4xVSS_kStateInitialized \
+        && xVSS_context->m_state != M4xVSS_kStateOpened )
+    {
+        M4OSA_TRACE1_1(
+            "Bad state when calling M4xVSS_ReduceTranscode function! State is %d",
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    /* Check number of clips */
+    if( pSettings->uiClipNumber == 0 )
+    {
+        M4OSA_TRACE1_0("The number of input clip must be greater than 0 !");
+        return M4ERR_PARAMETER;
+    }
+
+    /* Check if there is a background music, and if its audio will replace input clip audio */
+    if( pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+    {
+        if( pSettings->xVSS.pBGMtrack->uiAddVolume == 100 )
+        {
+            bIsBGMReplace = M4OSA_TRUE;
+        }
+    }
+
+    /* Parse all clips and count the occurrences of each combination */
+    for ( i = 0; i < pSettings->uiClipNumber; i++ )
+    {
+        /* We ignore JPG input files as they are always transcoded */
+        if( pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_3GPP )
+        {
+            /**
+            * UTF conversion: convert into the customer format*/
+            M4OSA_Void *pDecodedPath = pSettings->pClipList[i]->pFile;
+            M4OSA_UInt32 ConvertedSize = 0;
+
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                    (M4OSA_Void *)pSettings->pClipList[i]->pFile,
+                    (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer,
+                    &ConvertedSize);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1("M4xVSS_ReduceTranscode:\
+                                   M4xVSS_internalConvertFromUTF8 returns err: 0x%x", err);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+            /**
+            * End of the UTF conversion, now use the converted path */
+            err = M4xVSS_internalGetProperties(xVSS_context, pDecodedPath,
+                &fileProperties);
+
+            //err = M4xVSS_internalGetProperties(xVSS_context, pSettings->pClipList[i]->pFile,
+            //     &fileProperties);
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_sendCommand: M4xVSS_internalGetProperties returned 0x%x",
+                    err);
+                /* TODO: Translate error code of MCS to an xVSS error code ? */
+                return err;
+            }
+
+            /* Check best video settings */
+            if( fileProperties.uiVideoWidth == 128
+                && fileProperties.uiVideoHeight == 96 )
+            {
+                if( fileProperties.VideoStreamType == M4VIDEOEDITING_kH263 )
+                {
+                    videoConfig[0] += fileProperties.uiClipVideoDuration;
+                }
+                else if( ( fileProperties.VideoStreamType
+                    == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[3] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 160
+                && fileProperties.uiVideoHeight == 120 )
+            {
+                if( ( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[4] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 176
+                && fileProperties.uiVideoHeight == 144 )
+            {
+                if( fileProperties.VideoStreamType == M4VIDEOEDITING_kH263 )
+                {
+                    videoConfig[1] += fileProperties.uiClipVideoDuration;
+                }
+                else if( ( fileProperties.VideoStreamType
+                    == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[5] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 320
+                && fileProperties.uiVideoHeight == 240 )
+            {
+                if( ( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[6] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 352
+                && fileProperties.uiVideoHeight == 288 )
+            {
+                if( fileProperties.VideoStreamType == M4VIDEOEDITING_kH263 )
+                {
+                    videoConfig[2] += fileProperties.uiClipVideoDuration;
+                }
+                else if( ( fileProperties.VideoStreamType
+                    == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[7] += fileProperties.uiClipVideoDuration;
+                }
+            }
+            else if( fileProperties.uiVideoWidth == 640
+                && fileProperties.uiVideoHeight == 480 )
+            {
+                if( ( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4) \
+                    || (fileProperties.VideoStreamType == M4VIDEOEDITING_kH264) )
+                {
+                    videoConfig[8] += fileProperties.uiClipVideoDuration;
+                }
+            }
+
+            /* If there is a BGM track that replaces the existing audio track, we do not
+            care about the audio track as it will be replaced */
+            /* If not, we try to minimize audio re-encoding */
+            if( bIsBGMReplace == M4OSA_FALSE )
+            {
+                if( fileProperties.AudioStreamType == M4VIDEOEDITING_kAAC )
+                {
+                    if( fileProperties.uiSamplingFrequency == 16000 && \
+                        fileProperties.uiNbChannels == 1 )
+                    {
+                        audioConfig[1] += fileProperties.uiClipAudioDuration;
+                    }
+                    else if( fileProperties.uiSamplingFrequency == 16000 && \
+                        fileProperties.uiNbChannels == 2 )
+                    {
+                        audioConfig[2] += fileProperties.uiClipAudioDuration;
+                    }
+                }
+                else if( fileProperties.AudioStreamType
+                    == M4VIDEOEDITING_kAMR_NB )
+                {
+                    audioConfig[0] += fileProperties.uiClipAudioDuration;
+                }
+            }
+        }
+    }
+
+    /* Find the best output video format (the most frequently occurring combination) */
+    j = 0;
+    bFound = M4OSA_FALSE;
+
+    for ( i = 0; i < 9; i++ )
+    {
+        if( videoConfig[i] >= videoConfig[j] )
+        {
+            j = i;
+            bFound = M4OSA_TRUE;
+        }
+    }
+
+    if( bFound )
+    {
+        switch( j )
+        {
+            case 0:
+                pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kSQCIF;
+                break;
+
+            case 1:
+                pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQCIF;
+                break;
+
+            case 2:
+                pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kCIF;
+                break;
+
+            case 3:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kSQCIF;
+                break;
+
+            case 4:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQQVGA;
+                break;
+
+            case 5:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQCIF;
+                break;
+
+            case 6:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kQVGA;
+                break;
+
+            case 7:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kCIF;
+                break;
+
+            case 8:
+                pSettings->xVSS.outputVideoFormat =
+                    (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+                    ? M4VIDEOEDITING_kMPEG4 : M4VIDEOEDITING_kH264;
+                pSettings->xVSS.outputVideoSize = M4VIDEOEDITING_kVGA;
+                break;
+        }
+    }
+
+    /* Find the best output audio format (the most frequently occurring combination) */
+    j = 0;
+    bFound = M4OSA_FALSE;
+
+    for ( i = 0; i < 3; i++ )
+    {
+        if( audioConfig[i] >= audioConfig[j] )
+        {
+            j = i;
+            bFound = M4OSA_TRUE;
+        }
+    }
+
+    if( bFound )
+    {
+        switch( j )
+        {
+            case 0:
+                pSettings->xVSS.outputAudioFormat = M4VIDEOEDITING_kAMR_NB;
+                pSettings->xVSS.bAudioMono = M4OSA_TRUE;
+                break;
+
+            case 1:
+                pSettings->xVSS.outputAudioFormat = M4VIDEOEDITING_kAAC;
+                pSettings->xVSS.bAudioMono = M4OSA_TRUE;
+                break;
+
+            case 2:
+                pSettings->xVSS.outputAudioFormat = M4VIDEOEDITING_kAAC;
+                pSettings->xVSS.bAudioMono = M4OSA_FALSE;
+                break;
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_SendCommand(M4OSA_Context pContext,
+ *                                         M4VSS3GPP_EditSettings* pSettings)
+ * @brief        This function gives an editing structure to the xVSS
+ * @note        The xVSS analyses this structure and prepares the edit.
+ *                This function must be called after M4xVSS_Init, after
+ *                M4xVSS_CloseCommand, or after M4xVSS_PreviewStop.
+ *                After this function, the user must call M4xVSS_Step until
+ *                it returns a code other than M4NO_ERROR.
+ *
+ * @param    pContext            (IN) Pointer to the xVSS edit context
+ * @param    pSettings            (IN) Edition settings (allocated by the user)
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
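+/* Illustrative call sequence (sketch only, not compiled here): after a successful
+ * M4xVSS_Init, the application hands its edit settings to the xVSS and then drives the
+ * analysis by calling M4xVSS_Step until it stops returning M4NO_ERROR (the exact
+ * M4xVSS_Step signature is assumed from the xVSS API header):
+ *
+ *     err = M4xVSS_SendCommand(pContext, pSettings);
+ *     while( M4NO_ERROR == err )
+ *     {
+ *         err = M4xVSS_Step(pContext, &uiProgress);  // uiProgress: caller-owned variable
+ *     }
+ */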
+M4OSA_ERR M4xVSS_SendCommand( M4OSA_Context pContext,
+                             M4VSS3GPP_EditSettings *pSettings )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_UInt8 i, j;
+    M4OSA_UInt8 nbOfSameClip = 0;
+    M4OSA_ERR err;
+    M4OSA_Bool isNewBGM = M4OSA_TRUE;
+    M4xVSS_Pto3GPP_params *pPto3GPP_last = M4OSA_NULL;
+    M4xVSS_MCS_params *pMCS_last = M4OSA_NULL;
+    M4OSA_UInt32 width, height, samplingFreq;
+    M4OSA_Bool bIsTranscoding = M4OSA_FALSE;
+    M4OSA_Int32 totalDuration;
+    M4OSA_UInt32 outputSamplingFrequency = 0;
+    M4OSA_UInt32 length = 0;
+    M4OSA_Int8 masterClip = -1;
+
+    i = 0;
+    /* Check state */
+    if( xVSS_context->m_state != M4xVSS_kStateInitialized \
+        && xVSS_context->m_state != M4xVSS_kStateOpened )
+    {
+        M4OSA_TRACE1_1(
+            "Bad state when calling M4xVSS_SendCommand function! State is %d",
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    /* Reset the state to initialized so that the cleanup function can be called in case of error */
+    xVSS_context->m_state = M4xVSS_kStateInitialized;
+
+    /* Check if a previous sendCommand has been called */
+    if( xVSS_context->previousClipNumber != 0 )
+    {
+        M4OSA_UInt32 pCmpResult = 0;
+
+        /* Compare BGM input */
+        if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL \
+            && pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+        {
+            M4OSA_chrCompare(xVSS_context->pSettings->xVSS.pBGMtrack->pFile,
+                pSettings->xVSS.pBGMtrack->pFile, (M4OSA_Int32 *) &pCmpResult);
+
+            if( pCmpResult == 0 )
+            {
+                /* Check if audio output parameters have changed */
+                if( xVSS_context->pSettings->xVSS.outputAudioFormat ==
+                    pSettings->xVSS.outputAudioFormat
+                    && xVSS_context->pSettings->xVSS.bAudioMono
+                    == pSettings->xVSS.bAudioMono )
+                {
+                    /* It means that BGM is the same as before, so, no need to redecode it */
+                    M4OSA_TRACE2_0(
+                        "BGM is the same as before, nothing to decode");
+                    isNewBGM = M4OSA_FALSE;
+                }
+                else
+                {
+                    /* We need to free the PCM preview file path in the internal context */
+                    if( xVSS_context->pcmPreviewFile != M4OSA_NULL )
+                    {
+                        M4OSA_free(
+                            (M4OSA_MemAddr32)xVSS_context->pcmPreviewFile);
+                        xVSS_context->pcmPreviewFile = M4OSA_NULL;
+                    }
+                }
+            }
+            else
+            {
+                /* We need to free the PCM preview file path in the internal context */
+                if( xVSS_context->pcmPreviewFile != M4OSA_NULL )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pcmPreviewFile);
+                    xVSS_context->pcmPreviewFile = M4OSA_NULL;
+                }
+            }
+        }
+
+        /* Check if output settings have changed */
+        if( xVSS_context->pSettings->xVSS.outputVideoSize
+            != pSettings->xVSS.outputVideoSize
+            || xVSS_context->pSettings->xVSS.outputVideoFormat
+            != pSettings->xVSS.outputVideoFormat
+            || xVSS_context->pSettings->xVSS.outputAudioFormat
+            != pSettings->xVSS.outputAudioFormat
+            || xVSS_context->pSettings->xVSS.bAudioMono
+            != pSettings->xVSS.bAudioMono
+            || xVSS_context->pSettings->xVSS.outputAudioSamplFreq
+            != pSettings->xVSS.outputAudioSamplFreq )
+        {
+            /* If it is the case, we can't reuse already transcoded/converted files */
+            /* so, we delete these files and remove them from the chained list */
+            if( xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+            {
+                M4xVSS_Pto3GPP_params *pParams =
+                    xVSS_context->pPTo3GPPparamsList;
+                M4xVSS_Pto3GPP_params *pParams_sauv;
+
+                while( pParams != M4OSA_NULL )
+                {
+                    if( pParams->pFileIn != M4OSA_NULL )
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams->pFileIn);
+                        pParams->pFileIn = M4OSA_NULL;
+                    }
+
+                    if( pParams->pFileOut != M4OSA_NULL )
+                    {
+                        /* Delete temporary file */
+                        M4OSA_fileExtraDelete(pParams->pFileOut);
+                        M4OSA_free((M4OSA_MemAddr32)pParams->pFileOut);
+                        pParams->pFileOut = M4OSA_NULL;
+                    }
+
+                    if( pParams->pFileTemp != M4OSA_NULL )
+                    {
+                        /* Delete temporary file */
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+                        M4OSA_fileExtraDelete(pParams->pFileTemp);
+                        M4OSA_free((M4OSA_MemAddr32)pParams->pFileTemp);
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+                        pParams->pFileTemp = M4OSA_NULL;
+                    }
+                    pParams_sauv = pParams;
+                    pParams = pParams->pNext;
+                    M4OSA_free((M4OSA_MemAddr32)pParams_sauv);
+                    pParams_sauv = M4OSA_NULL;
+                }
+                xVSS_context->pPTo3GPPparamsList = M4OSA_NULL;
+            }
+
+            if( xVSS_context->pMCSparamsList != M4OSA_NULL )
+            {
+                M4xVSS_MCS_params *pParams = xVSS_context->pMCSparamsList;
+                M4xVSS_MCS_params *pParams_sauv;
+                M4xVSS_MCS_params *pParams_bgm = M4OSA_NULL;
+
+                while( pParams != M4OSA_NULL )
+                {
+                    /* Here, we do not delete BGM */
+                    if( pParams->isBGM != M4OSA_TRUE )
+                    {
+                        if( pParams->pFileIn != M4OSA_NULL )
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)pParams->pFileIn);
+                            pParams->pFileIn = M4OSA_NULL;
+                        }
+
+                        if( pParams->pFileOut != M4OSA_NULL )
+                        {
+                            /* Delete temporary file */
+                            M4OSA_fileExtraDelete(pParams->pFileOut);
+                            M4OSA_free((M4OSA_MemAddr32)pParams->pFileOut);
+                            pParams->pFileOut = M4OSA_NULL;
+                        }
+
+                        if( pParams->pFileTemp != M4OSA_NULL )
+                        {
+                            /* Delete temporary file */
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+                            M4OSA_fileExtraDelete(pParams->pFileTemp);
+                            M4OSA_free((M4OSA_MemAddr32)pParams->pFileTemp);
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+                            pParams->pFileTemp = M4OSA_NULL;
+                        }
+                        pParams_sauv = pParams;
+                        pParams = pParams->pNext;
+                        M4OSA_free((M4OSA_MemAddr32)pParams_sauv);
+                        pParams_sauv = M4OSA_NULL;
+                    }
+                    else
+                    {
+                        pParams_bgm = pParams;
+                        pParams = pParams->pNext;
+                        /*PR P4ME00003182 initialize this pointer because the following params
+                        element will be deallocated*/
+                        if( pParams != M4OSA_NULL
+                            && pParams->isBGM != M4OSA_TRUE )
+                        {
+                            pParams_bgm->pNext = M4OSA_NULL;
+                        }
+                    }
+                }
+                xVSS_context->pMCSparamsList = pParams_bgm;
+            }
+            /* Maybe need to implement framerate changing */
+            //xVSS_context->pSettings->videoFrameRate;
+        }
+
+        /* Free the previous xVSS_context->pSettings structure */
+        M4xVSS_freeSettings(xVSS_context->pSettings);
+
+        /* Free the output file path */
+        if( xVSS_context->pSettings->pOutputFile != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings->pOutputFile);
+            xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+        }
+        xVSS_context->pSettings->uiOutputPathSize = 0;
+        xVSS_context->pOutputFile = M4OSA_NULL;
+    }
+
+    /**********************************
+    Clip registration
+    **********************************/
+
+    /* Copy settings from user given structure to our "local" structure */
+    xVSS_context->pSettings->xVSS.outputVideoFormat =
+        pSettings->xVSS.outputVideoFormat;
+    xVSS_context->pSettings->xVSS.outputVideoSize =
+        pSettings->xVSS.outputVideoSize;
+    xVSS_context->pSettings->xVSS.outputAudioFormat =
+        pSettings->xVSS.outputAudioFormat;
+    xVSS_context->pSettings->xVSS.bAudioMono = pSettings->xVSS.bAudioMono;
+    xVSS_context->pSettings->xVSS.outputAudioSamplFreq =
+        pSettings->xVSS.outputAudioSamplFreq;
+    /*xVSS_context->pSettings->pOutputFile = pSettings->pOutputFile;*/
+    /*FB: VAL CR P4ME00003076
+    The output video and audio bitrate are given by the user in the edition settings structure*/
+    xVSS_context->pSettings->xVSS.outputVideoBitrate =
+        pSettings->xVSS.outputVideoBitrate;
+    xVSS_context->pSettings->xVSS.outputAudioBitrate =
+        pSettings->xVSS.outputAudioBitrate;
+    xVSS_context->pSettings->PTVolLevel = pSettings->PTVolLevel;
+
+    /*FB: bug fix if the output path is given in M4xVSS_sendCommand*/
+
+    if( pSettings->pOutputFile != M4OSA_NULL
+        && pSettings->uiOutputPathSize > 0 )
+    {
+        M4OSA_Void *pDecodedPath = pSettings->pOutputFile;
+        /*As all inputs of the xVSS are in UTF8, convert the output file path into the
+        customer format*/
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)pSettings->pOutputFile,
+                (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1("M4xVSS_SendCommand:\
+                               M4xVSS_internalConvertFromUTF8 returns err: 0x%x", err);
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            pSettings->uiOutputPathSize = length;
+        }
+
+        xVSS_context->pSettings->pOutputFile = (M4OSA_Void *)M4OSA_malloc \
+            (pSettings->uiOutputPathSize + 1, M4VS,
+            (M4OSA_Char *)"output file path");
+
+        if( xVSS_context->pSettings->pOutputFile == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->pOutputFile,
+            (M4OSA_MemAddr8)pDecodedPath, pSettings->uiOutputPathSize + 1);
+        xVSS_context->pSettings->uiOutputPathSize = pSettings->uiOutputPathSize;
+        xVSS_context->pOutputFile = xVSS_context->pSettings->pOutputFile;
+    }
+    else
+    {
+        xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+        xVSS_context->pSettings->uiOutputPathSize = 0;
+        xVSS_context->pOutputFile = M4OSA_NULL;
+    }
+    xVSS_context->pSettings->pTemporaryFile = pSettings->pTemporaryFile;
+    xVSS_context->pSettings->uiClipNumber = pSettings->uiClipNumber;
+    xVSS_context->pSettings->videoFrameRate = pSettings->videoFrameRate;
+    xVSS_context->pSettings->uiMasterClip =
+        0; /* With VSS 2.0, this new param is mandatory */
+    xVSS_context->pSettings->xVSS.pTextRenderingFct =
+        pSettings->xVSS.pTextRenderingFct; /* CR text handling */
+    xVSS_context->pSettings->xVSS.outputFileSize =
+        pSettings->xVSS.outputFileSize;
+
+    if( pSettings->xVSS.outputFileSize != 0 \
+        && pSettings->xVSS.outputAudioFormat != M4VIDEOEDITING_kAMR_NB )
+    {
+        M4OSA_TRACE1_0("M4xVSS_SendCommand: Impossible to limit file\
+                       size with other audio output than AAC");
+        return M4ERR_PARAMETER;
+    }
+    xVSS_context->nbStepTotal = 0;
+    xVSS_context->currentStep = 0;
+
+    if( xVSS_context->pSettings->xVSS.outputVideoFormat != M4VIDEOEDITING_kMPEG4
+        && xVSS_context->pSettings->xVSS.outputVideoFormat
+        != M4VIDEOEDITING_kH263
+        && xVSS_context->pSettings->xVSS.outputVideoFormat
+        != M4VIDEOEDITING_kMPEG4_EMP
+        && xVSS_context->pSettings->xVSS.outputVideoFormat
+        != M4VIDEOEDITING_kH264 )
+    {
+        xVSS_context->pSettings->xVSS.outputVideoFormat =
+            M4VIDEOEDITING_kNoneVideo;
+    }
+
+    /* Get output width/height */
+    switch( xVSS_context->pSettings->xVSS.outputVideoSize )
+    {
+        case M4VIDEOEDITING_kSQCIF:
+            width = 128;
+            height = 96;
+            break;
+
+        case M4VIDEOEDITING_kQQVGA:
+            width = 160;
+            height = 120;
+            break;
+
+        case M4VIDEOEDITING_kQCIF:
+            width = 176;
+            height = 144;
+            break;
+
+        case M4VIDEOEDITING_kQVGA:
+            width = 320;
+            height = 240;
+            break;
+
+        case M4VIDEOEDITING_kCIF:
+            width = 352;
+            height = 288;
+            break;
+
+        case M4VIDEOEDITING_kVGA:
+            width = 640;
+            height = 480;
+            break;
+            /* +PR LV5807 */
+        case M4VIDEOEDITING_kWVGA:
+            width = 800;
+            height = 480;
+            break;
+
+        case M4VIDEOEDITING_kNTSC:
+            width = 720;
+            height = 480;
+            break;
+            /* -PR LV5807 */
+            /* +CR Google */
+        case M4VIDEOEDITING_k640_360:
+            width = 640;
+            height = 360;
+            break;
+
+        case M4VIDEOEDITING_k854_480:
+            // StageFright encoders require dimensions that are multiples of 16
+            width = M4ENCODER_854_480_Width;
+            height = 480;
+            break;
+
+        case M4VIDEOEDITING_kHD1280:
+            width = 1280;
+            height = 720;
+            break;
+
+        case M4VIDEOEDITING_kHD1080:
+            // StageFright encoders require dimensions that are multiples of 16
+            width = M4ENCODER_HD1080_Width;
+            height = 720;
+            break;
+
+        case M4VIDEOEDITING_kHD960:
+            width = 960;
+            height = 720;
+            break;
+
+            /* -CR Google */
+        default: /* If output video size is not given, we take QCIF size */
+            width = 176;
+            height = 144;
+            xVSS_context->pSettings->xVSS.outputVideoSize =
+                M4VIDEOEDITING_kQCIF;
+            break;
+    }
+
+    /* Get output Sampling frequency */
+    switch( xVSS_context->pSettings->xVSS.outputAudioSamplFreq )
+    {
+        case M4VIDEOEDITING_k8000_ASF:
+            samplingFreq = 8000;
+            break;
+
+        case M4VIDEOEDITING_k16000_ASF:
+            samplingFreq = 16000;
+            break;
+
+        case M4VIDEOEDITING_k22050_ASF:
+            samplingFreq = 22050;
+            break;
+
+        case M4VIDEOEDITING_k24000_ASF:
+            samplingFreq = 24000;
+            break;
+
+        case M4VIDEOEDITING_k32000_ASF:
+            samplingFreq = 32000;
+            break;
+
+        case M4VIDEOEDITING_k44100_ASF:
+            samplingFreq = 44100;
+            break;
+
+        case M4VIDEOEDITING_k48000_ASF:
+            samplingFreq = 48000;
+            break;
+
+        case M4VIDEOEDITING_kDefault_ASF:
+        default:
+            if( xVSS_context->pSettings->xVSS.outputAudioFormat
+                == M4VIDEOEDITING_kAMR_NB )
+            {
+                samplingFreq = 8000;
+            }
+            else if( xVSS_context->pSettings->xVSS.outputAudioFormat
+                == M4VIDEOEDITING_kAAC )
+            {
+                samplingFreq = 16000;
+            }
+            else
+            {
+                samplingFreq = 0;
+            }
+            break;
+    }
+
+    /* Allocate clip/transitions if clip number is not null ... */
+    if( 0 < xVSS_context->pSettings->uiClipNumber )
+    {
+        if( xVSS_context->pSettings->pClipList != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pSettings->pClipList));
+            xVSS_context->pSettings->pClipList = M4OSA_NULL;
+        }
+
+        if( xVSS_context->pSettings->pTransitionList != M4OSA_NULL )
+        {
+            M4OSA_free(
+                (M4OSA_MemAddr32)(xVSS_context->pSettings->pTransitionList));
+            xVSS_context->pSettings->pTransitionList = M4OSA_NULL;
+        }
+
+        xVSS_context->pSettings->pClipList =
+            (M4VSS3GPP_ClipSettings ** )M4OSA_malloc \
+            (sizeof(M4VSS3GPP_ClipSettings *)*xVSS_context->pSettings->uiClipNumber,
+            M4VS, (M4OSA_Char *)"xVSS, copy of pClipList");
+
+        if( xVSS_context->pSettings->pClipList == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_ALLOC;
+        }
+        /* Set clip list to NULL */
+        M4OSA_memset((M4OSA_MemAddr8)xVSS_context->pSettings->pClipList,
+            sizeof(M4VSS3GPP_ClipSettings *)
+            *xVSS_context->pSettings->uiClipNumber, 0);
+
+        if( xVSS_context->pSettings->uiClipNumber > 1 )
+        {
+            xVSS_context->pSettings->pTransitionList =
+                (M4VSS3GPP_TransitionSettings ** ) \
+                M4OSA_malloc(sizeof(M4VSS3GPP_TransitionSettings *)                \
+                *(xVSS_context->pSettings->uiClipNumber - 1), M4VS, (M4OSA_Char *) \
+                "xVSS, copy of pTransitionList");
+
+            if( xVSS_context->pSettings->pTransitionList == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            /* Set transition list to NULL */
+            M4OSA_memset(
+                (M4OSA_MemAddr8)xVSS_context->pSettings->pTransitionList,
+                sizeof(M4VSS3GPP_TransitionSettings *)
+                *(xVSS_context->pSettings->uiClipNumber - 1), 0);
+        }
+        else
+        {
+            xVSS_context->pSettings->pTransitionList = M4OSA_NULL;
+        }
+    }
+    /* else, there is a problem in the input settings structure */
+    else
+    {
+        M4OSA_TRACE1_0("No clip in this settings list !!");
+        /*FB: to avoid leaks when there is an error in the send command*/
+        /* Free Send command */
+        M4xVSS_freeCommand(xVSS_context);
+        /**/
+        return M4ERR_PARAMETER;
+    }
+
+    /* RC Allocate effects settings */
+    xVSS_context->pSettings->nbEffects = pSettings->nbEffects;
+
+    if( 0 < xVSS_context->pSettings->nbEffects )
+    {
+        xVSS_context->pSettings->Effects =
+            (M4VSS3GPP_EffectSettings *)M4OSA_malloc \
+            (xVSS_context->pSettings->nbEffects * sizeof(M4VSS3GPP_EffectSettings),
+            M4VS, (M4OSA_Char *)"effects settings");
+
+        if( xVSS_context->pSettings->Effects == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_ALLOC;
+        }
+        /*FB bug fix 19.03.2008: these pointers were not initialized -> crash when freed*/
+        for ( i = 0; i < xVSS_context->pSettings->nbEffects; i++ )
+        {
+            xVSS_context->pSettings->Effects[i].xVSS.pFramingFilePath =
+                M4OSA_NULL;
+            xVSS_context->pSettings->Effects[i].xVSS.pFramingBuffer =
+                M4OSA_NULL;
+            xVSS_context->pSettings->Effects[i].xVSS.pTextBuffer = M4OSA_NULL;
+        }
+        /**/
+    }
+
+    if( xVSS_context->targetedTimescale == 0 )
+    {
+        M4OSA_UInt32 pTargetedTimeScale = 0;
+
+        err = M4xVSS_internalGetTargetedTimeScale(xVSS_context, pSettings,
+            &pTargetedTimeScale);
+
+        if( M4NO_ERROR != err || pTargetedTimeScale == 0 )
+        {
+            M4OSA_TRACE1_1("M4xVSS_SendCommand: M4xVSS_internalGetTargetedTimeScale\
+                           returned 0x%x", err);
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return err;
+        }
+        xVSS_context->targetedTimescale = pTargetedTimeScale;
+    }
+
+    /* Initialize total duration variable */
+    totalDuration = 0;
+
+    /* Parsing list of clips given by application, and prepare analyzing */
+    for ( i = 0; i < xVSS_context->pSettings->uiClipNumber; i++ )
+    {
+        /* Allocate current clip */
+        xVSS_context->pSettings->pClipList[i] =
+            (M4VSS3GPP_ClipSettings *)M4OSA_malloc \
+            (sizeof(M4VSS3GPP_ClipSettings), M4VS, (M4OSA_Char *)"clip settings");
+
+        if( xVSS_context->pSettings->pClipList[i] == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_ALLOC;
+        }
+
+        /* Copy clip settings from given structure to our xVSS_context structure */
+        err =
+            M4xVSS_DuplicateClipSettings(xVSS_context->pSettings->pClipList[i],
+            pSettings->pClipList[i], M4OSA_TRUE);
+
+        if( err != M4NO_ERROR )
+        {
+            M4OSA_TRACE1_1(
+                "M4xVSS_SendCommand: M4xVSS_DuplicateClipSettings return error 0x%x",
+                err);
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return err;
+        }
+
+        /* Because there is one transition fewer than the number of clips */
+        if( i < xVSS_context->pSettings->uiClipNumber - 1 )
+        {
+            xVSS_context->pSettings->pTransitionList[i] =
+                (M4VSS3GPP_TransitionSettings
+                *)M4OSA_malloc(sizeof(M4VSS3GPP_TransitionSettings),
+                M4VS, (M4OSA_Char *)"transition settings");
+
+            if( xVSS_context->pSettings->pTransitionList[i] == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+
+            M4OSA_memcpy(
+                (M4OSA_MemAddr8)xVSS_context->pSettings->pTransitionList[i],
+                (M4OSA_MemAddr8)pSettings->pTransitionList[i],
+                sizeof(M4VSS3GPP_TransitionSettings));
+            /* Initialize external effect context to NULL, to know if input jpg has already been
+            decoded or not */
+            xVSS_context->pSettings->pTransitionList[i]->
+                pExtVideoTransitionFctCtxt = M4OSA_NULL;
+
+            switch( xVSS_context->pSettings->
+                pTransitionList[i]->VideoTransitionType )
+            {
+                    /* If transition type is alpha magic, we need to decode input file */
+                case M4xVSS_kVideoTransitionType_AlphaMagic:
+                    /* Allocate our alpha magic settings structure to have a copy of the
+                    provided one */
+                    xVSS_context->pSettings->pTransitionList[i]->      \
+                     xVSS.transitionSpecific.pAlphaMagicSettings =
+                        (M4xVSS_AlphaMagicSettings *)M4OSA_malloc \
+                        (sizeof(M4xVSS_AlphaMagicSettings), M4VS,
+                        (M4OSA_Char *)"Input Alpha magic settings structure");
+
+                    if( xVSS_context->pSettings->pTransitionList[i]-> \
+                        xVSS.transitionSpecific.pAlphaMagicSettings == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0(
+                            "Allocation error in M4xVSS_SendCommand");
+                        /*FB: to avoid leaks when there is an error in the send command*/
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        /**/
+                        return M4ERR_ALLOC;
+                    }
+                    /* Copy data from the provided alpha magic settings structure to our
+                    structure */
+                    M4OSA_memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->
+                        pTransitionList[i]-> \
+                        xVSS.transitionSpecific.pAlphaMagicSettings,
+                        (M4OSA_MemAddr8)pSettings->pTransitionList[i]-> \
+                        xVSS.transitionSpecific.pAlphaMagicSettings,
+                        sizeof(M4xVSS_AlphaMagicSettings));
+
+                    /* Allocate our alpha magic input filename */
+                    xVSS_context->pSettings->pTransitionList[i]-> \
+                        xVSS.transitionSpecific.pAlphaMagicSettings->
+                        pAlphaFilePath = M4OSA_malloc(
+                        (M4OSA_chrLength(pSettings->pTransitionList[i]-> \
+                        xVSS.transitionSpecific.pAlphaMagicSettings->pAlphaFilePath)
+                        + 1), M4VS, (M4OSA_Char *)"Alpha magic file path");
+
+                    if( xVSS_context->pSettings->pTransitionList[i]-> \
+                        xVSS.transitionSpecific.pAlphaMagicSettings->pAlphaFilePath
+                        == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0(
+                            "Allocation error in M4xVSS_SendCommand");
+                        /*FB: to avoid leaks when there is an error in the send command*/
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        /**/
+                        return M4ERR_ALLOC;
+                    }
+                    /* Copy the provided alpha magic filename into our structure */
+                    M4OSA_chrNCopy(
+                        xVSS_context->pSettings->pTransitionList[i]->xVSS.
+                        transitionSpecific.pAlphaMagicSettings->
+                        pAlphaFilePath,
+                        pSettings->pTransitionList[i]->xVSS.
+                        transitionSpecific.pAlphaMagicSettings->
+                        pAlphaFilePath, M4OSA_chrLength(
+                        pSettings->pTransitionList[i]->xVSS.
+                        transitionSpecific.pAlphaMagicSettings->
+                        pAlphaFilePath) + 1);
+
+                    /* Parse all transitions to know whether the input JPEG has already been decoded */
+                    for ( j = 0; j < i; j++ )
+                    {
+                        if( xVSS_context->pSettings->
+                            pTransitionList[j]->VideoTransitionType
+                            == M4xVSS_kVideoTransitionType_AlphaMagic )
+                        {
+                            M4OSA_UInt32 pCmpResult = 0;
+                            M4OSA_chrCompare(xVSS_context->pSettings->
+                                pTransitionList[i]->xVSS.
+                                transitionSpecific.pAlphaMagicSettings->
+                                pAlphaFilePath, xVSS_context->pSettings->
+                                pTransitionList[j]->xVSS.
+                                transitionSpecific.
+                                pAlphaMagicSettings->pAlphaFilePath,
+                                (M4OSA_Int32 *) &pCmpResult);
+
+                            if( pCmpResult == 0 )
+                            {
+                                M4xVSS_internal_AlphaMagicSettings
+                                    *alphaSettings;
+
+                                alphaSettings =
+                                    (M4xVSS_internal_AlphaMagicSettings *)M4OSA_malloc(
+                                    sizeof(M4xVSS_internal_AlphaMagicSettings), M4VS,
+                                    (M4OSA_Char *)"Alpha magic settings structure 1");
+
+                                if( alphaSettings == M4OSA_NULL )
+                                {
+                                    M4OSA_TRACE1_0(
+                                        "Allocation error in M4xVSS_SendCommand");
+                                    /*FB: to avoid leaks when there is an error in the send
+                                     command*/
+                                    /* Free Send command */
+                                    M4xVSS_freeCommand(xVSS_context);
+                                    /**/
+                                    return M4ERR_ALLOC;
+                                }
+                                alphaSettings->pPlane =
+                                    ((M4xVSS_internal_AlphaMagicSettings *)(
+                                    xVSS_context->pSettings->
+                                    pTransitionList[j]->
+                                    pExtVideoTransitionFctCtxt))->
+                                    pPlane;
+
+                                if( xVSS_context->pSettings->
+                                    pTransitionList[i]->xVSS.transitionSpecific.
+                                    pAlphaMagicSettings->blendingPercent > 0
+                                    && xVSS_context->pSettings->
+                                    pTransitionList[i]->xVSS.
+                                    transitionSpecific.
+                                    pAlphaMagicSettings->blendingPercent
+                                    <= 100 )
+                                {
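+                                    /* Map blendingPercent (0..100] onto a threshold in
+                                    (0..127], i.e. half of the 8-bit alpha range */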
+                                    alphaSettings->blendingthreshold =
+                                        ( xVSS_context->pSettings->
+                                        pTransitionList[i]->xVSS.
+                                        transitionSpecific.
+                                        pAlphaMagicSettings->
+                                        blendingPercent) * 255 / 200;
+                                }
+                                else
+                                {
+                                    alphaSettings->blendingthreshold = 0;
+                                }
+                                alphaSettings->isreverse =
+                                    xVSS_context->pSettings->
+                                    pTransitionList[i]->xVSS.
+                                    transitionSpecific.
+                                    pAlphaMagicSettings->isreverse;
+                                /* It means that the input jpg file for alpha magic has already
+                                 been decoded -> no need to decode it again */
+                                if( alphaSettings->blendingthreshold == 0 )
+                                {
+                                    xVSS_context->pSettings->
+                                        pTransitionList[i]->
+                                        ExtVideoTransitionFct =
+                                        M4xVSS_AlphaMagic;
+                                }
+                                else
+                                {
+                                    xVSS_context->pSettings->
+                                        pTransitionList[i]->
+                                        ExtVideoTransitionFct =
+                                        M4xVSS_AlphaMagicBlending;
+                                }
+                                xVSS_context->pSettings->pTransitionList[i]->
+                                    pExtVideoTransitionFctCtxt = alphaSettings;
+                                break;
+                            }
+                        }
+                    }
+
+                    /* If the jpg has not been decoded yet ... */
+                    if( xVSS_context->pSettings->
+                        pTransitionList[i]->pExtVideoTransitionFctCtxt
+                        == M4OSA_NULL )
+                    {
+                        M4VIFI_ImagePlane *outputPlane;
+                        M4xVSS_internal_AlphaMagicSettings *alphaSettings;
+                        /*UTF conversion support*/
+                        M4OSA_Void *pDecodedPath = M4OSA_NULL;
+
+                        /*To support ARGB8888 : get the width and height */
+                        M4OSA_UInt32 width_ARGB888 =
+                            xVSS_context->pSettings->pTransitionList[i]->xVSS.
+                            transitionSpecific.pAlphaMagicSettings->width;
+                        M4OSA_UInt32 height_ARGB888 =
+                            xVSS_context->pSettings->pTransitionList[i]->xVSS.
+                            transitionSpecific.pAlphaMagicSettings->height;
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: Alpha magic transition width is %d",
+                            width_ARGB888);
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: Alpha magic transition height is %d",
+                            height_ARGB888);
+                        /* Allocate output plane */
+                        outputPlane = (M4VIFI_ImagePlane *)M4OSA_malloc(
+                            3 * sizeof(M4VIFI_ImagePlane), M4VS,
+                            (M4OSA_Char *)"Output plane for Alpha magic transition");
+
+                        if( outputPlane == M4OSA_NULL )
+                        {
+                            M4OSA_TRACE1_0(
+                                "Allocation error in M4xVSS_SendCommand");
+                            /*FB: to avoid leaks when there is an error in the send command*/
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            /**/
+                            return M4ERR_ALLOC;
+                        }
+
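+                        /* Lay out the three YUV420 planes in one contiguous buffer:
+                        Y is width x height, U and V are (width/2) x (height/2) each,
+                        hence the (width * height * 3) >> 1 allocation below */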
+                        outputPlane[0].u_width = width;
+                        outputPlane[0].u_height = height;
+                        outputPlane[0].u_topleft = 0;
+                        outputPlane[0].u_stride = width;
+                        outputPlane[0].pac_data = (M4VIFI_UInt8 *)M4OSA_malloc(
+                            ( width * height * 3) >> 1, M4VS,
+                            (M4OSA_Char *)"Alloc for the Alpha magic pac_data output YUV");
+
+                        if( outputPlane[0].pac_data == M4OSA_NULL )
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)outputPlane);
+                            outputPlane = M4OSA_NULL;
+                            M4OSA_TRACE1_0(
+                                "Allocation error in M4xVSS_SendCommand");
+                            /*FB: to avoid leaks when there is an error in the send command*/
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            /**/
+                            return M4ERR_ALLOC;
+                        }
+                        outputPlane[1].u_width = width >> 1;
+                        outputPlane[1].u_height = height >> 1;
+                        outputPlane[1].u_topleft = 0;
+                        outputPlane[1].u_stride = width >> 1;
+                        outputPlane[1].pac_data = outputPlane[0].pac_data
+                            + outputPlane[0].u_width * outputPlane[0].u_height;
+                        outputPlane[2].u_width = width >> 1;
+                        outputPlane[2].u_height = height >> 1;
+                        outputPlane[2].u_topleft = 0;
+                        outputPlane[2].u_stride = width >> 1;
+                        outputPlane[2].pac_data = outputPlane[1].pac_data
+                            + outputPlane[1].u_width * outputPlane[1].u_height;
+
+                        pDecodedPath =
+                            xVSS_context->pSettings->pTransitionList[i]->xVSS.
+                            transitionSpecific.pAlphaMagicSettings->
+                            pAlphaFilePath;
+                        /**
+                        * UTF conversion: convert into the customer format, before being used*/
+                        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                            != M4OSA_NULL && xVSS_context->
+                            UTFConversionContext.
+                            pTempOutConversionBuffer != M4OSA_NULL )
+                        {
+                            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                                (M4OSA_Void *)xVSS_context->pSettings->
+                                pTransitionList[i]->xVSS.
+                                transitionSpecific.
+                                pAlphaMagicSettings->pAlphaFilePath,
+                                (M4OSA_Void *)xVSS_context->
+                                UTFConversionContext.
+                                pTempOutConversionBuffer, &length);
+
+                            if( err != M4NO_ERROR )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
+                                    err);
+                                /* Free Send command */
+                                M4xVSS_freeCommand(xVSS_context);
+                                return err;
+                            }
+                            pDecodedPath =
+                                xVSS_context->UTFConversionContext.
+                                pTempOutConversionBuffer;
+                        }
+                        /**
+                        End of the conversion, use the decoded path*/
+                        /* To support ARGB8888: convert and resize from ARGB8888 to YUV420 */
+
+                        err = M4xVSS_internalConvertAndResizeARGB8888toYUV420(
+                            pDecodedPath,
+                            xVSS_context->pFileReadPtr, outputPlane,
+                            width_ARGB888, height_ARGB888);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_free(
+                                (M4OSA_MemAddr32)outputPlane[0].pac_data);
+                            outputPlane[0].pac_data = M4OSA_NULL;
+                            M4OSA_free((M4OSA_MemAddr32)outputPlane);
+                            outputPlane = M4OSA_NULL;
+                            M4xVSS_freeCommand(xVSS_context);
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_SendCommand: error when decoding alpha magic JPEG: 0x%x",
+                                err);
+                            return err;
+                        }
+
+                        /* Allocate alpha settings structure */
+                        alphaSettings =
+                            (M4xVSS_internal_AlphaMagicSettings *)M4OSA_malloc(
+                            sizeof(M4xVSS_internal_AlphaMagicSettings),
+                            M4VS, (M4OSA_Char
+                            *)"Alpha magic settings structure 2");
+
+                        if( alphaSettings == M4OSA_NULL )
+                        {
+                            M4OSA_TRACE1_0(
+                                "Allocation error in M4xVSS_SendCommand");
+                            /*FB: to avoid leaks when there is an error in the send command*/
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            /**/
+                            return M4ERR_ALLOC;
+                        }
+                        alphaSettings->pPlane = outputPlane;
+
+                        if( xVSS_context->pSettings->pTransitionList[i]->xVSS.
+                            transitionSpecific.pAlphaMagicSettings->
+                            blendingPercent > 0 && xVSS_context->pSettings->
+                            pTransitionList[i]->xVSS.
+                            transitionSpecific.pAlphaMagicSettings->
+                            blendingPercent <= 100 )
+                        {
+                            alphaSettings->blendingthreshold =
+                                ( xVSS_context->pSettings->
+                                pTransitionList[i]->xVSS.
+                                transitionSpecific.pAlphaMagicSettings->
+                                blendingPercent) * 255 / 200;
+                        }
+                        else
+                        {
+                            alphaSettings->blendingthreshold = 0;
+                        }
+                        alphaSettings->isreverse =
+                            xVSS_context->pSettings->pTransitionList[i]->xVSS.
+                            transitionSpecific.pAlphaMagicSettings->
+                            isreverse;
+
+                        if( alphaSettings->blendingthreshold == 0 )
+                        {
+                            xVSS_context->pSettings->pTransitionList[i]->
+                                ExtVideoTransitionFct = M4xVSS_AlphaMagic;
+                        }
+                        else
+                        {
+                            xVSS_context->pSettings->pTransitionList[i]->
+                                ExtVideoTransitionFct =
+                                M4xVSS_AlphaMagicBlending;
+                        }
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            pExtVideoTransitionFctCtxt = alphaSettings;
+                    }
+
+                    break;
+
+                case M4xVSS_kVideoTransitionType_SlideTransition:
+                    {
+                        M4xVSS_internal_SlideTransitionSettings *slideSettings;
+                        slideSettings =
+                            (M4xVSS_internal_SlideTransitionSettings *)M4OSA_malloc(
+                            sizeof(M4xVSS_internal_SlideTransitionSettings),
+                            M4VS, (M4OSA_Char
+                            *)"Internal slide transition settings");
+
+                        if( M4OSA_NULL == slideSettings )
+                        {
+                            M4OSA_TRACE1_0(
+                                "Allocation error in M4xVSS_SendCommand");
+                            /*FB: to avoid leaks when there is an error in the send command*/
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            /**/
+                            return M4ERR_ALLOC;
+                        }
+                        /* Just copy the lone parameter from the input settings to the internal
+                         context. */
+
+                        slideSettings->direction =
+                            pSettings->pTransitionList[i]->xVSS.transitionSpecific.
+                            pSlideTransitionSettings->direction;
+
+                        /* No need to keep our copy of the settings. */
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            xVSS.transitionSpecific.pSlideTransitionSettings =
+                            M4OSA_NULL;
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            ExtVideoTransitionFct = &M4xVSS_SlideTransition;
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            pExtVideoTransitionFctCtxt = slideSettings;
+                    }
+                    break;
+
+                case M4xVSS_kVideoTransitionType_FadeBlack:
+                    {
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            ExtVideoTransitionFct = &M4xVSS_FadeBlackTransition;
+                    }
+                    break;
+
+                case M4xVSS_kVideoTransitionType_External:
+                    {
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            ExtVideoTransitionFct =
+                            pSettings->pTransitionList[i]->ExtVideoTransitionFct;
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            pExtVideoTransitionFctCtxt =
+                            pSettings->pTransitionList[i]->
+                            pExtVideoTransitionFctCtxt;
+                        xVSS_context->pSettings->pTransitionList[i]->
+                            VideoTransitionType =
+                            M4VSS3GPP_kVideoTransitionType_External;
+                    }
+                    break;
+
+                default:
+                    break;
+                } // switch
+
+            /* Subtract the transition duration from the total duration */
+            totalDuration -= xVSS_context->pSettings->
+                pTransitionList[i]->uiTransitionDuration;
+        }
+
+        /************************
+        JPG input file type case
+        *************************/
+#if 0
+
+        if( xVSS_context->pSettings->pClipList[i]->FileType
+            == M4VIDEOEDITING_kFileType_JPG )
+        {
+            M4OSA_Char out_img[64];
+            M4OSA_Char out_img_tmp[64];
+            M4xVSS_Pto3GPP_params *pParams;
+            M4OSA_Context pJPEGFileIn;
+            /*UTF conversion support*/
+            M4OSA_Void *pDecodedPath = pSettings->pClipList[i]->pFile;
+
+            /* Parse Pto3GPP params chained list to know if input file has already been
+            converted */
+            if( xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+            {
+                M4OSA_UInt32 pCmpResult = 0;
+
+                pParams = xVSS_context->pPTo3GPPparamsList;
+                /* We parse all Pto3gpp Param chained list */
+                while( pParams != M4OSA_NULL )
+                {
+                    M4OSA_chrCompare(pSettings->pClipList[i]->pFile,
+                        pParams->pFileIn, (M4OSA_Int32 *) &pCmpResult);
+
+                    if( pCmpResult == 0
+                        && (pSettings->pClipList[i]->uiEndCutTime
+                        == pParams->duration
+                        || pSettings->pClipList[i]->xVSS.uiDuration
+                        == pParams->duration)
+                        && pSettings->pClipList[i]->xVSS.MediaRendering
+                        == pParams->MediaRendering )
+                    {
+                        /* Replace JPG filename with existing 3GP filename */
+                        goto replaceJPG_3GP;
+                    }
+                    /* We need to update this variable, in case some pictures have been added
+                     between two */
+                    /* calls to M4xVSS_sendCommand */
+                    pPto3GPP_last = pParams;
+                    pParams = pParams->pNext;
+                }
+            }
+
+            /* Construct output temporary 3GP filename */
+            err = M4OSA_chrSPrintf(out_img, 63, (M4OSA_Char *)"%simg%d.3gp",
+                xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return err;
+            }
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+            err = M4OSA_chrSPrintf(out_img_tmp, 63, "%simg%d.tmp",
+                xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return err;
+            }
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+            xVSS_context->tempFileIndex++;
+
+            /* Allocate last element Pto3GPP params structure */
+            pParams = (M4xVSS_Pto3GPP_params
+                *)M4OSA_malloc(sizeof(M4xVSS_Pto3GPP_params),
+                M4VS, (M4OSA_Char *)"Element of Pto3GPP Params");
+
+            if( pParams == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0(
+                    "M4xVSS_sendCommand: Problem when allocating one element Pto3GPP Params");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+
+            /* Initializes pfilexxx members of pParams to be able to free them correctly */
+            pParams->pFileIn = M4OSA_NULL;
+            pParams->pFileOut = M4OSA_NULL;
+            pParams->pFileTemp = M4OSA_NULL;
+            pParams->pNext = M4OSA_NULL;
+            pParams->MediaRendering = M4xVSS_kResizing;
+
+            if( xVSS_context->pPTo3GPPparamsList
+                == M4OSA_NULL ) /* Means it is the first element of the list */
+            {
+                /* Initialize the xVSS context with the first element of the list */
+                xVSS_context->pPTo3GPPparamsList = pParams;
+
+                /* Save this element in case of other file to convert */
+                pPto3GPP_last = pParams;
+            }
+            else
+            {
+                /* Update next pointer of the previous last element of the chain */
+                pPto3GPP_last->pNext = pParams;
+
+                /* Update save of last element of the chain */
+                pPto3GPP_last = pParams;
+            }
+
+            /* Fill the last M4xVSS_Pto3GPP_params element */
+            pParams->duration =
+                xVSS_context->pSettings->pClipList[i]->uiEndCutTime;
+            /* If duration is filled, let's use it instead of EndCutTime */
+            if( xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration != 0 )
+            {
+                pParams->duration =
+                    xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration;
+            }
+
+            pParams->InputFileType = M4VIDEOEDITING_kFileType_JPG;
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
+            length = M4OSA_chrLength(pDecodedPath);
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
+                    *)xVSS_context->pSettings->pClipList[i]->pFile,
+                    (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer,
+                    &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
+                        err);
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+
+            /**
+            * End of the UTF conversion, use the converted file path*/
+            pParams->pFileIn = (M4OSA_Void *)M4OSA_malloc(length + 1, M4VS,
+                (M4OSA_Char *)"Pto3GPP Params: file in");
+
+            if( pParams->pFileIn == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            M4OSA_memcpy(pParams->pFileIn, pDecodedPath,
+                (length + 1)); /* Copy input file path */
+
+            /* Check that JPG file is present on the FS (P4ME00002974) by just opening and
+            closing it */
+            err =
+                xVSS_context->pFileReadPtr->openRead(&pJPEGFileIn, pDecodedPath,
+                M4OSA_kFileRead);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_2("Can't open input jpg file %s, error: 0x%x\n",
+                    pDecodedPath, err);
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                return err;
+            }
+            err = xVSS_context->pFileReadPtr->closeRead(pJPEGFileIn);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_2("Can't close input jpg file %s, error: 0x%x\n",
+                    pDecodedPath, err);
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                return err;
+            }
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            pDecodedPath = out_img;
+            length = M4OSA_chrLength(pDecodedPath);
+
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                    (M4OSA_Void *)out_img, (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer, &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
+                        err);
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+
+            /**
+            * End of the UTF conversion, use the converted file path*/
+            pParams->pFileOut = (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                (M4OSA_Char *)"Pto3GPP Params: file out");
+
+            if( pParams->pFileOut == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            M4OSA_memcpy(pParams->pFileOut, pDecodedPath,
+                (length + 1)); /* Copy output file path */
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+
+            pDecodedPath = out_img_tmp;
+            length = M4OSA_chrLength(pDecodedPath);
+
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                    (M4OSA_Void *)out_img_tmp, (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer, &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                        err);
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+
+            /**
+            * End of the UTF conversion, use the converted file path*/
+            pParams->pFileTemp = (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                (M4OSA_Char *)"Pto3GPP Params: file temp");
+
+            if( pParams->pFileTemp == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            M4OSA_memcpy(pParams->pFileTemp, pDecodedPath,
+                (length + 1)); /* Copy temporary file path */
+
+#endif                         /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+            /* Fill PanAndZoom settings if needed */
+
+            if( M4OSA_TRUE
+                == xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom )
+            {
+                pParams->isPanZoom =
+                    xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom;
+                /* Check that the Pan & Zoom parameters are correct */
+                if( xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa > 100
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa
+                    <= 0 || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftXa > 100
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftXa < 0
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftYa > 100
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftYa < 0
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb
+                    > 100
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb
+                    <= 0 || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftXb > 100
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftXb < 0
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftYb > 100
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    PanZoomTopleftYb < 0 )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return M4ERR_PARAMETER;
+                }
+
+                pParams->PanZoomXa =
+                    xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa;
+                pParams->PanZoomTopleftXa =
+                    xVSS_context->pSettings->
+                    pClipList[i]->xVSS.PanZoomTopleftXa;
+                pParams->PanZoomTopleftYa =
+                    xVSS_context->pSettings->
+                    pClipList[i]->xVSS.PanZoomTopleftYa;
+                pParams->PanZoomXb =
+                    xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb;
+                pParams->PanZoomTopleftXb =
+                    xVSS_context->pSettings->
+                    pClipList[i]->xVSS.PanZoomTopleftXb;
+                pParams->PanZoomTopleftYb =
+                    xVSS_context->pSettings->
+                    pClipList[i]->xVSS.PanZoomTopleftYb;
+            }
+            else
+            {
+                pParams->isPanZoom = M4OSA_FALSE;
+            }
+            /*+ PR No: blrnxpsw#223*/
+            /* Initialize the video frame rate, as it may not have been initialized */
+            /* A related change was made in M4xVSS_Internal.c (line 1518,
+            M4xVSS_internalStartConvertPictureTo3gp) */
+            switch( xVSS_context->pSettings->videoFrameRate )
+            {
+                case M4VIDEOEDITING_k30_FPS:
+                    pParams->framerate = 33;
+                    break;
+
+                case M4VIDEOEDITING_k25_FPS:
+                    pParams->framerate = 40;
+                    break;
+
+                case M4VIDEOEDITING_k20_FPS:
+                    pParams->framerate = 50;
+                    break;
+
+                case M4VIDEOEDITING_k15_FPS:
+                    pParams->framerate = 66;
+                    break;
+
+                case M4VIDEOEDITING_k12_5_FPS:
+                    pParams->framerate = 80;
+                    break;
+
+                case M4VIDEOEDITING_k10_FPS:
+                    pParams->framerate = 100;
+                    break;
+
+                case M4VIDEOEDITING_k7_5_FPS:
+                    pParams->framerate = 133;
+                    break;
+
+                case M4VIDEOEDITING_k5_FPS:
+                    pParams->framerate = 200;
+                    break;
+
+                default:
+                    /*Making Default Frame Rate @ 15 FPS*/
+                    pParams->framerate = 66;
+                    break;
+            }
+            /*-PR No: blrnxpsw#223*/
+            if( xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering
+                == M4xVSS_kCropping
+                || xVSS_context->pSettings->pClipList[i]->xVSS.
+                MediaRendering == M4xVSS_kBlackBorders
+                || xVSS_context->pSettings->pClipList[i]->xVSS.
+                MediaRendering == M4xVSS_kResizing )
+            {
+                pParams->MediaRendering =
+                    xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering;
+            }
+
+            pParams->pNext = M4OSA_NULL;
+            pParams->isCreated = M4OSA_FALSE;
+            xVSS_context->nbStepTotal++;
+
+replaceJPG_3GP:
+            /* Update total duration */
+            totalDuration += pParams->duration;
+
+            /* Replace the JPG file with the 3gp file in the VSS structure */
+            xVSS_context->pSettings->pClipList[i]->FileType =
+                M4VIDEOEDITING_kFileType_3GPP;
+
+            if( xVSS_context->pSettings->pClipList[i]->pFile != M4OSA_NULL )
+            {
+                M4OSA_free(xVSS_context->pSettings->pClipList[i]->pFile);
+                xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_NULL;
+            }
+
+            /**
+            * UTF conversion: convert into UTF8, before being used*/
+            pDecodedPath = pParams->pFileOut;
+
+            if( xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
+                && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertToUTF8(xVSS_context,
+                    (M4OSA_Void *)pParams->pFileOut,
+                    (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer,
+                    &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err: 0x%x",
+                        err);
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+            else
+            {
+                length = M4OSA_chrLength(pDecodedPath);
+            }
+            /**
+            * End of the UTF conversion, use the converted file path*/
+            xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_malloc((length
+                + 1), M4VS, (M4OSA_Char *)"xVSS file path of jpg to 3gp");
+
+            if( xVSS_context->pSettings->pClipList[i]->pFile == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            M4OSA_memcpy(xVSS_context->pSettings->pClipList[i]->pFile,
+                pDecodedPath, (length + 1));
+            /*FB: add file path size because of UTF16 conversion*/
+            xVSS_context->pSettings->pClipList[i]->filePathSize = length+1;
+        }
+#endif
+
+        if( xVSS_context->pSettings->pClipList[i]->FileType
+            == M4VIDEOEDITING_kFileType_ARGB8888 )
+        {
+            M4OSA_Char out_img[64];
+            M4OSA_Char out_img_tmp[64];
+            M4xVSS_Pto3GPP_params *pParams = M4OSA_NULL;
+            M4OSA_Context pARGBFileIn;
+            /*UTF conversion support*/
+            M4OSA_Void *pDecodedPath = pSettings->pClipList[i]->pFile;
+
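+            /* This ARGB8888 still picture is converted by the Pto3GPP module into a
+            temporary 3GP clip, which then replaces the picture in the clip list below. */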
+            /* Parse Pto3GPP params chained list to know if input file has already been
+            converted */
+            if( xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+            {
+                M4OSA_UInt32 pCmpResult = 0;
+
+                pParams = xVSS_context->pPTo3GPPparamsList;
+                /* We parse all Pto3gpp Param chained list */
+                while( pParams != M4OSA_NULL )
+                {
+                    M4OSA_chrCompare(pSettings->pClipList[i]->pFile,
+                        pParams->pFileIn, (M4OSA_Int32 *)&pCmpResult);
+
+                    if( pCmpResult == 0
+                        && (pSettings->pClipList[i]->uiEndCutTime
+                        == pParams->duration
+                        || pSettings->pClipList[i]->xVSS.uiDuration
+                        == pParams->duration)
+                        && pSettings->pClipList[i]->xVSS.MediaRendering
+                        == pParams->MediaRendering )
+                    {
+                        /* Replace ARGB filename with existing 3GP filename */
+                        goto replaceARGB_3GP;
+                    }
+                    /* We need to update this variable, in case some pictures have been
+                     added between two */
+                    /* calls to M4xVSS_sendCommand */
+                    pPto3GPP_last = pParams;
+                    pParams = pParams->pNext;
+                }
+            }
+
+            /* Construct output temporary 3GP filename */
+            err = M4OSA_chrSPrintf(out_img, 63, (M4OSA_Char *)"%simg%d.3gp",
+                xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return err;
+            }
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+            err = M4OSA_chrSPrintf(out_img_tmp, 63, "%simg%d.tmp",
+                xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return err;
+            }
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+            xVSS_context->tempFileIndex++;
+
+            /* Allocate last element Pto3GPP params structure */
+            pParams = (M4xVSS_Pto3GPP_params
+                *)M4OSA_malloc(sizeof(M4xVSS_Pto3GPP_params),
+                M4VS, (M4OSA_Char *)"Element of Pto3GPP Params");
+
+            if( pParams == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0(
+                    "M4xVSS_sendCommand: Problem when allocating one element Pto3GPP Params");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+
+            /* Initializes pfilexxx members of pParams to be able to free them correctly */
+            pParams->pFileIn = M4OSA_NULL;
+            pParams->pFileOut = M4OSA_NULL;
+            pParams->pFileTemp = M4OSA_NULL;
+            pParams->pNext = M4OSA_NULL;
+            pParams->MediaRendering = M4xVSS_kResizing;
+
+            /* To support ARGB8888: get the width and height */
+            pParams->height = pSettings->pClipList[
+                i]->ClipProperties.uiStillPicHeight; //ARGB_Height;
+                pParams->width = pSettings->pClipList[
+                    i]->ClipProperties.uiStillPicWidth; //ARGB_Width;
+                    M4OSA_TRACE1_1("CLIP M4xVSS_SendCommand  is %d", pParams->height);
+                    M4OSA_TRACE1_1("CLIP M4xVSS_SendCommand  is %d", pParams->height);
+
+                    if( xVSS_context->pPTo3GPPparamsList
+                        == M4OSA_NULL ) /* Means it is the first element of the list */
+                    {
+                        /* Initialize the xVSS context with the first element of the list */
+                        xVSS_context->pPTo3GPPparamsList = pParams;
+
+                        /* Save this element in case of other file to convert */
+                        pPto3GPP_last = pParams;
+                    }
+                    else
+                    {
+                        /* Update next pointer of the previous last element of the chain */
+                        pPto3GPP_last->pNext = pParams;
+
+                        /* Update save of last element of the chain */
+                        pPto3GPP_last = pParams;
+                    }
+
+                    /* Fill the last M4xVSS_Pto3GPP_params element */
+                    pParams->duration =
+                        xVSS_context->pSettings->pClipList[i]->uiEndCutTime;
+                    /* If duration is filled, let's use it instead of EndCutTime */
+                    if( xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration != 0 )
+                    {
+                        pParams->duration =
+                            xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration;
+                    }
+
+                    pParams->InputFileType = M4VIDEOEDITING_kFileType_ARGB8888;
+
+                    /**
+                    * UTF conversion: convert into the customer format, before being used*/
+                    pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
+                    length = M4OSA_chrLength(pDecodedPath);
+
+                    /**
+                    * UTF conversion: convert into the customer format, before being used*/
+                    if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                        != M4OSA_NULL && xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer
+                        != M4OSA_NULL )
+                    {
+                        err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
+                            *)xVSS_context->pSettings->pClipList[i]->pFile,
+                            (M4OSA_Void *)xVSS_context->
+                            UTFConversionContext.pTempOutConversionBuffer,
+                            &length);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
+                                err);
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            return err;
+                        }
+                        pDecodedPath =
+                            xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+                    }
+
+                    /**
+                    * End of the UTF conversion, use the converted file path*/
+                    pParams->pFileIn = (M4OSA_Void *)M4OSA_malloc(length + 1, M4VS,
+                        (M4OSA_Char *)"Pto3GPP Params: file in");
+
+                    if( pParams->pFileIn == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                        /*FB: to avoid leaks when there is an error in the send command*/
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        /**/
+                        return M4ERR_ALLOC;
+                    }
+                    M4OSA_memcpy(pParams->pFileIn, pDecodedPath,
+                        (length + 1)); /* Copy input file path */
+
+                    /* Check that the ARGB file is present on the FS (P4ME00002974) by just
+                     opening and closing it */
+                    err =
+                        xVSS_context->pFileReadPtr->openRead(&pARGBFileIn, pDecodedPath,
+                        M4OSA_kFileRead);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_2("Can't open input jpg file %s, error: 0x%x\n",
+                            pDecodedPath, err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                    err = xVSS_context->pFileReadPtr->closeRead(pARGBFileIn);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_2("Can't close input jpg file %s, error: 0x%x\n",
+                            pDecodedPath, err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+
+                    /**
+                    * UTF conversion: convert into the customer format, before being used*/
+                    pDecodedPath = out_img;
+                    length = M4OSA_chrLength(pDecodedPath);
+
+                    if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                        != M4OSA_NULL && xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer
+                        != M4OSA_NULL )
+                    {
+                        err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                            (M4OSA_Void *)out_img, (M4OSA_Void *)xVSS_context->
+                            UTFConversionContext.pTempOutConversionBuffer, &length);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_SendCommand: pConvFromUTF8Fct returns err: 0x%x",
+                                err);
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            return err;
+                        }
+                        pDecodedPath =
+                            xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+                    }
+
+                    /**
+                    * End of the UTF conversion, use the converted file path*/
+                    pParams->pFileOut = (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                        (M4OSA_Char *)"Pto3GPP Params: file out");
+
+                    if( pParams->pFileOut == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                        /*FB: to avoid leaks when there is an error in the send command*/
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        /**/
+                        return M4ERR_ALLOC;
+                    }
+                    M4OSA_memcpy(pParams->pFileOut, pDecodedPath,
+                        (length + 1)); /* Copy output file path */
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+                    /**
+                    * UTF conversion: convert into the customer format, before being used*/
+
+                    pDecodedPath = out_img_tmp;
+                    length = M4OSA_chrLength(pDecodedPath);
+
+                    if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                        != M4OSA_NULL && xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer
+                        != M4OSA_NULL )
+                    {
+                        err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                            (M4OSA_Void *)out_img_tmp, (M4OSA_Void *)xVSS_context->
+                            UTFConversionContext.pTempOutConversionBuffer, &length);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8\
+                                 returns err: 0x%x",
+                                err);
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            return err;
+                        }
+                        pDecodedPath =
+                            xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+                    }
+
+                    /**
+                    * End of the UTF conversion, use the converted file path*/
+                    pParams->pFileTemp = (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                        (M4OSA_Char *)"Pto3GPP Params: file temp");
+
+                    if( pParams->pFileTemp == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                        /*FB: to avoid leaks when there is an error in the send command*/
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        /**/
+                        return M4ERR_ALLOC;
+                    }
+                    M4OSA_memcpy(pParams->pFileTemp, pDecodedPath,
+                        (length + 1)); /* Copy temporary file path */
+
+#endif                         /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+                    /* Fill PanAndZoom settings if needed */
+
+                    if( M4OSA_TRUE
+                        == xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom )
+                    {
+                        pParams->isPanZoom =
+                            xVSS_context->pSettings->pClipList[i]->xVSS.isPanZoom;
+                        /* Check that the Pan & Zoom parameters are correct */
+                        if( xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa > 100
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa
+                            <= 0 || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftXa > 100
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftXa < 0
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftYa > 100
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftYa < 0
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb
+                            > 100
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb
+                            <= 0 || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftXb > 100
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftXb < 0
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftYb > 100
+                            || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            PanZoomTopleftYb < 0 )
+                        {
+                            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                            M4xVSS_freeCommand(xVSS_context);
+                            return M4ERR_PARAMETER;
+                        }
+
+                        pParams->PanZoomXa =
+                            xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXa;
+                        pParams->PanZoomTopleftXa =
+                            xVSS_context->pSettings->
+                            pClipList[i]->xVSS.PanZoomTopleftXa;
+                        pParams->PanZoomTopleftYa =
+                            xVSS_context->pSettings->
+                            pClipList[i]->xVSS.PanZoomTopleftYa;
+                        pParams->PanZoomXb =
+                            xVSS_context->pSettings->pClipList[i]->xVSS.PanZoomXb;
+                        pParams->PanZoomTopleftXb =
+                            xVSS_context->pSettings->
+                            pClipList[i]->xVSS.PanZoomTopleftXb;
+                        pParams->PanZoomTopleftYb =
+                            xVSS_context->pSettings->
+                            pClipList[i]->xVSS.PanZoomTopleftYb;
+                    }
+                    else
+                    {
+                        pParams->isPanZoom = M4OSA_FALSE;
+                    }
+                    /*+ PR No: blrnxpsw#223*/
+                    /* Initialize the video frame rate, as it may not have been initialized */
+                    /* A related change was made in M4xVSS_Internal.c (line 1518,
+                    M4xVSS_internalStartConvertPictureTo3gp) */
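+                    /* The framerate field is filled with the frame duration in ms
+                    (1000 / fps, rounded down) rather than the frame rate itself. */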
+                    switch( xVSS_context->pSettings->videoFrameRate )
+                    {
+                        case M4VIDEOEDITING_k30_FPS:
+                            pParams->framerate = 33;
+                            break;
+
+                        case M4VIDEOEDITING_k25_FPS:
+                            pParams->framerate = 40;
+                            break;
+
+                        case M4VIDEOEDITING_k20_FPS:
+                            pParams->framerate = 50;
+                            break;
+
+                        case M4VIDEOEDITING_k15_FPS:
+                            pParams->framerate = 66;
+                            break;
+
+                        case M4VIDEOEDITING_k12_5_FPS:
+                            pParams->framerate = 80;
+                            break;
+
+                        case M4VIDEOEDITING_k10_FPS:
+                            pParams->framerate = 100;
+                            break;
+
+                        case M4VIDEOEDITING_k7_5_FPS:
+                            pParams->framerate = 133;
+                            break;
+
+                        case M4VIDEOEDITING_k5_FPS:
+                            pParams->framerate = 200;
+                            break;
+
+                        default:
+                            /*Making Default Frame Rate @ 15 FPS*/
+                            pParams->framerate = 66;
+                            break;
+                    }
+                    /*-PR No: blrnxpsw#223*/
+                    if( xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering
+                        == M4xVSS_kCropping
+                        || xVSS_context->pSettings->pClipList[i]->xVSS.
+                        MediaRendering == M4xVSS_kBlackBorders
+                        || xVSS_context->pSettings->pClipList[i]->xVSS.
+                        MediaRendering == M4xVSS_kResizing )
+                    {
+                        pParams->MediaRendering =
+                            xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering;
+                    }
+
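+                    /* Finalize the new list element and count this picture-to-3GP
+                    conversion as one more processing step. */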
+                    pParams->pNext = M4OSA_NULL;
+                    pParams->isCreated = M4OSA_FALSE;
+                    xVSS_context->nbStepTotal++;
+
+replaceARGB_3GP:
+                    /* Update total duration */
+                    totalDuration += pParams->duration;
+
+                    /* Replace the ARGB file with the 3gp file in the VSS structure */
+                    xVSS_context->pSettings->pClipList[i]->FileType =
+                        M4VIDEOEDITING_kFileType_3GPP;
+
+                    if( xVSS_context->pSettings->pClipList[i]->pFile != M4OSA_NULL )
+                    {
+                        M4OSA_free(xVSS_context->pSettings->pClipList[i]->pFile);
+                        xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_NULL;
+                    }
+
+                    /**
+                    * UTF conversion: convert into UTF8, before being used*/
+                    pDecodedPath = pParams->pFileOut;
+
+                    if( xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
+                        && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+                        != M4OSA_NULL )
+                    {
+                        err = M4xVSS_internalConvertToUTF8(xVSS_context,
+                            (M4OSA_Void *)pParams->pFileOut,
+                            (M4OSA_Void *)xVSS_context->
+                            UTFConversionContext.pTempOutConversionBuffer,
+                            &length);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err: \
+                                0x%x",err);
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            return err;
+                        }
+                        pDecodedPath =
+                            xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+                    }
+                    else
+                    {
+                        length = M4OSA_chrLength(pDecodedPath);
+                    }
+                    /**
+                    * End of the UTF conversion, use the converted file path*/
+                    xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_malloc((length
+                        + 1), M4VS, (M4OSA_Char *)"xVSS file path of ARGB to 3gp");
+
+                    if( xVSS_context->pSettings->pClipList[i]->pFile == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                        /*FB: to avoid leaks when there is an error in the send command*/
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        /**/
+                        return M4ERR_ALLOC;
+                    }
+                    M4OSA_memcpy(xVSS_context->pSettings->pClipList[i]->pFile,
+                        pDecodedPath, (length + 1));
+                    /*FB: add file path size because of UTF16 conversion*/
+                    xVSS_context->pSettings->pClipList[i]->filePathSize = length+1;
+        }
+        /************************
+        3GP input file type case
+        *************************/
+        else if( xVSS_context->pSettings->pClipList[i]->FileType
+            == M4VIDEOEDITING_kFileType_3GPP
+            || xVSS_context->pSettings->pClipList[i]->FileType
+            == M4VIDEOEDITING_kFileType_MP4 )
+        {
+            /*UTF conversion support*/
+            M4OSA_Void *pDecodedPath = M4OSA_NULL;
+
+            /* Need to call MCS in case 3GP video/audio types are not compatible
+            (H263/MPEG4 or AMRNB/AAC) */
+            /* => Need to fill MCS_Params structure with the right parameters ! */
+            /* Need also to parse MCS params struct to check if file has already been transcoded */
+
+            M4VIDEOEDITING_ClipProperties fileProperties;
+            M4xVSS_MCS_params *pParams;
+            M4OSA_Bool audioIsDifferent = M4OSA_FALSE;
+            M4OSA_Bool videoIsDifferent = M4OSA_FALSE;
+            M4OSA_Bool bAudioMono;
+#ifdef TIMESCALE_BUG
+
+            M4OSA_Bool timescaleDifferent = M4OSA_FALSE;
+
+#endif
+
+            /* Initialize file properties structure */
+
+            M4OSA_memset((M4OSA_MemAddr8) &fileProperties,
+                sizeof(M4VIDEOEDITING_ClipProperties), 0);
+
+            //fileProperties.AudioStreamType = M4VIDEOEDITING_kNoneAudio;
+
+            /* Guard against badly initialized percentage cut times */
+            if( xVSS_context->pSettings->pClipList[i]->xVSS.uiEndCutPercent
+                            > 100 || xVSS_context->pSettings->pClipList[i]->xVSS.
+                            uiBeginCutPercent > 100 )
+            {
+                /* These percentage cut times have probably not been initialized */
+                /* Let's not use them by setting them to 0 */
+                xVSS_context->pSettings->pClipList[i]->xVSS.uiEndCutPercent = 0;
+                xVSS_context->pSettings->pClipList[i]->xVSS.uiBeginCutPercent =
+                    0;
+            }
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
+
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
+                    *)xVSS_context->pSettings->pClipList[i]->pFile,
+                    (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer,
+                    &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                        err);
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+            /**
+            * End of the UTF conversion, use the converted file path*/
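+            /* Retrieve the clip properties (codecs, resolution, duration) that drive
+            the transcoding decisions below. */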
+            err = M4xVSS_internalGetProperties(xVSS_context, pDecodedPath,
+                &fileProperties);
+
+            if( err != M4NO_ERROR )
+            {
+                M4xVSS_freeCommand(xVSS_context);
+                M4OSA_TRACE1_1(
+                    "M4xVSS_sendCommand: M4xVSS_internalGetProperties returned 0x%x",
+                    err);
+                /* TODO: Translate error code of MCS to an xVSS error code */
+                return err;
+            }
+
+            /* Parse MCS params chained list to know if input file has already been converted */
+            if( xVSS_context->pMCSparamsList != M4OSA_NULL )
+            {
+                M4OSA_UInt32 pCmpResult = 0;
+
+                pParams = xVSS_context->pMCSparamsList;
+                /* We parse all MCS Param chained list */
+                while( pParams != M4OSA_NULL )
+                {
+
+                    /**
+                    * UTF conversion: convert into UTF8, before being used*/
+                    pDecodedPath = pParams->pFileIn;
+
+                    if( xVSS_context->UTFConversionContext.pConvToUTF8Fct
+                        != M4OSA_NULL && xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer
+                        != M4OSA_NULL )
+                    {
+                        err = M4xVSS_internalConvertToUTF8(xVSS_context,
+                            (M4OSA_Void *)pParams->pFileIn,
+                            (M4OSA_Void *)xVSS_context->
+                            UTFConversionContext.
+                            pTempOutConversionBuffer, &length);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err:\
+                                 0x%x", err);
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            return err;
+                        }
+                        pDecodedPath = xVSS_context->
+                            UTFConversionContext.pTempOutConversionBuffer;
+                    }
+
+                    /**
+                    * End of the UTF conversion, use the converted file path*/
+                    M4OSA_chrCompare(pSettings->pClipList[i]->pFile,
+                        pDecodedPath, (M4OSA_Int32 *) &pCmpResult);
+
+                    /* If input filenames are the same, and if this is not a BGM, we can reuse
+                    the transcoded file */
+                    if( pCmpResult == 0 && pParams->isBGM == M4OSA_FALSE
+                        && pParams->BeginCutTime
+                        == pSettings->pClipList[i]->uiBeginCutTime
+                        && (pParams->EndCutTime
+                        == pSettings->pClipList[i]->uiEndCutTime
+                        || pParams->EndCutTime
+                        == pSettings->pClipList[i]->uiBeginCutTime
+                        + pSettings->pClipList[i]->xVSS.uiDuration)
+                        && pSettings->pClipList[i]->xVSS.MediaRendering
+                        == pParams->MediaRendering )
+                    {
+                        if( pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+                        {
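+                            /* With a BGM track, the cached transcode can be reused when
+                            the BGM fully replaces the audio (volume 100) or the audio
+                            configuration already matches the requested output (or the
+                            clip has no audio at all). */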
+                            if( pSettings->xVSS.pBGMtrack->uiAddVolume == 100
+                                || (pParams->OutputAudioFormat
+                                == M4VIDEOEDITING_kNullAudio
+                                && fileProperties.AudioStreamType
+                                == pSettings->xVSS.outputAudioFormat)
+                                || pParams->OutputAudioFormat
+                                == pSettings->xVSS.outputAudioFormat
+                                || fileProperties.AudioStreamType
+                                == M4VIDEOEDITING_kNoneAudio )
+                            {
+                                /* Replace 3GP filename with transcoded 3GP filename */
+                                goto replace3GP_3GP;
+                            }
+                        }
+                        else if( ( pParams->OutputAudioFormat
+                            == M4VIDEOEDITING_kNullAudio
+                            && fileProperties.AudioStreamType
+                            == pSettings->xVSS.outputAudioFormat)
+                            || pParams->OutputAudioFormat
+                            == pSettings->xVSS.outputAudioFormat
+                            || fileProperties.AudioStreamType
+                            == M4VIDEOEDITING_kNoneAudio )
+                        {
+                            /* Replace 3GP filename with transcoded 3GP filename */
+                            goto replace3GP_3GP;
+                        }
+                    }
+
+                    /* We need to update this variable, in case some 3GP files have been added
+                    between two */
+                    /* calls to M4xVSS_sendCommand */
+                    pMCS_last = pParams;
+                    pParams = pParams->pNext;
+                }
+            }
+
+            /* If we have percentage information let's use it... */
+            if( xVSS_context->pSettings->pClipList[i]->xVSS.uiEndCutPercent != 0
+                || xVSS_context->pSettings->pClipList[i]->xVSS.uiBeginCutPercent
+                != 0 )
+            {
+                /* If the percentage information is not correct and the duration field is not filled */
+                if( ( xVSS_context->pSettings->pClipList[i]->xVSS.
+                    uiEndCutPercent
+                    <= xVSS_context->pSettings->pClipList[i]->xVSS.
+                    uiBeginCutPercent)
+                    && xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration
+                    == 0 )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4xVSS_sendCommand: Bad percentage for begin and end cut time !");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return M4ERR_PARAMETER;
+                }
+
+                /* We transform the percentage into absolute time */
+                xVSS_context->pSettings->pClipList[i]->uiBeginCutTime
+                    = (M4OSA_UInt32)(
+                    xVSS_context->pSettings->pClipList[i]->xVSS.
+                    uiBeginCutPercent
+                    * fileProperties.uiClipDuration / 100);
+                xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+                    = (M4OSA_UInt32)(
+                    xVSS_context->pSettings->pClipList[i]->xVSS.
+                    uiEndCutPercent
+                    * fileProperties.uiClipDuration / 100);
+            }
+            /* ...Otherwise, we use absolute time. */
+            else
+            {
+                /* If endCutTime == 0, the whole file is taken. Set it to the file
+                duration, for an accurate preview. */
+                if( xVSS_context->pSettings->pClipList[i]->uiEndCutTime == 0
+                    || xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+                    > fileProperties.uiClipDuration )
+                {
+                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime =
+                        fileProperties.uiClipDuration;
+                }
+            }
+
+            /* If the duration field is filled, it takes priority over EndCutTime,
+             so let's use it */
+            if( xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration != 0 )
+            {
+                xVSS_context->pSettings->pClipList[i]->uiEndCutTime =
+                    xVSS_context->pSettings->pClipList[i]->uiBeginCutTime
+                    +xVSS_context->pSettings->pClipList[i]->xVSS.uiDuration;
+
+                if( xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+                    > fileProperties.uiClipDuration )
+                {
+                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime =
+                        fileProperties.uiClipDuration;
+                }
+            }
+
+            /* If output video format is not set, we take video format of the first 3GP video */
+            if( xVSS_context->pSettings->xVSS.outputVideoFormat
+                == M4VIDEOEDITING_kNoneVideo )
+            {
+                //xVSS_context->pSettings->xVSS.outputVideoFormat = fileProperties.VideoStreamType;
+                //M4OSA_TRACE2_1("Output video format is not set, set it to current clip: %d",
+                // xVSS_context->pSettings->xVSS.outputVideoFormat);
+                M4OSA_TRACE1_0(
+                    "Output video format is not set, a parameter error is returned.");
+                M4xVSS_freeCommand(xVSS_context);
+                return M4ERR_PARAMETER;
+            }
+
+            if( xVSS_context->pSettings->xVSS.outputAudioFormat
+                == M4VIDEOEDITING_kNoneAudio )
+            {
+                //xVSS_context->pSettings->xVSS.outputAudioFormat = fileProperties.AudioStreamType;
+                M4OSA_TRACE2_1(
+                    "Output audio format is not set -> remove audio track of clip: %d",
+                    i);
+            }
+
+#ifdef TIMESCALE_BUG
+            /* Check timescale */
+
+            if( fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4 //&&
+                /* !!!!!!!!!!!! Add condition to update timescale !!!!!!!!!!!!!!!!!!!!!!!!! */ )
+            {
+                timescaleDifferent = M4OSA_TRUE;
+            }
+
+#endif
+            /* If the output video format/size is not the same as the provided video's,
+            let's transcode it */
+
+            if( fileProperties.VideoStreamType
+                != xVSS_context->pSettings->xVSS.outputVideoFormat
+                || fileProperties.uiVideoWidth != width
+                || fileProperties.uiVideoHeight != height
+                || (fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4
+                && fileProperties.uiVideoTimeScale
+                != xVSS_context->targetedTimescale) )
+            {
+                videoIsDifferent = M4OSA_TRUE;
+            }
+            /* Temporary solution for fixing an issue in the H.264 compressed domain */
+            videoIsDifferent = M4OSA_TRUE;
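+            /* Note: this unconditional assignment overrides the checks above, so every
+            clip that reaches this point is sent through the MCS */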
+
+            if( fileProperties.uiNbChannels == 1 )
+            {
+                bAudioMono = M4OSA_TRUE;
+            }
+            else
+            {
+                bAudioMono = M4OSA_FALSE;
+            }
+
+            if( fileProperties.AudioStreamType
+                != xVSS_context->pSettings->xVSS.outputAudioFormat
+                || (fileProperties.AudioStreamType == M4VIDEOEDITING_kAAC
+                && (fileProperties.uiSamplingFrequency != samplingFreq
+                || bAudioMono
+                != xVSS_context->pSettings->xVSS.bAudioMono)) )
+            {
+                audioIsDifferent = M4OSA_TRUE;
+                /* If we want to replace audio, there is no need to transcode audio */
+                if( pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+                {
+                    /* Temporary fix: PT volume not heard in the second clip */
+                    if( /*(pSettings->xVSS.pBGMtrack->uiAddVolume == 100
+                        && xVSS_context->pSettings->xVSS.outputFileSize == 0)
+                        ||*/
+                        fileProperties.AudioStreamType
+                        == M4VIDEOEDITING_kNoneAudio ) /* 11/12/2008 CR 3283 VAL: for the MMS
+                        use case we need to transcode everything except media without audio */
+                    {
+                        audioIsDifferent = M4OSA_FALSE;
+                    }
+                }
+                else if( fileProperties.AudioStreamType
+                    == M4VIDEOEDITING_kNoneAudio )
+                {
+                    audioIsDifferent = M4OSA_FALSE;
+                }
+            }
+
+            if( videoIsDifferent == M4OSA_TRUE || audioIsDifferent == M4OSA_TRUE
+#ifdef TIMESCALE_BUG
+
+                || timescaleDifferent == M4OSA_TRUE
+
+#endif
+
+                )
+            {
+                M4OSA_Char out_3gp[64];
+                M4OSA_Char out_3gp_tmp[64];
+
+                /* Construct output temporary 3GP filename */
+                err = M4OSA_chrSPrintf(out_3gp, 63, (M4OSA_Char *)"%svid%d.3gp",
+                    xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+                    return err;
+                }
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+
+                err = M4OSA_chrSPrintf(out_3gp_tmp, 63, "%svid%d.tmp",
+                    xVSS_context->pTempPath, xVSS_context->tempFileIndex);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1("Error in M4OSA_chrSPrintf: 0x%x", err);
+                    return err;
+                }
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+                xVSS_context->tempFileIndex++;
+
+                pParams =
+                    (M4xVSS_MCS_params *)M4OSA_malloc(sizeof(M4xVSS_MCS_params),
+                    M4VS, (M4OSA_Char *)"Element of MCS Params (for 3GP)");
+
+                if( pParams == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4xVSS_sendCommand: Problem when allocating one element MCS Params");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                pParams->MediaRendering = M4xVSS_kResizing;
+
+                if( xVSS_context->pMCSparamsList
+                    == M4OSA_NULL ) /* Means it is the first element of the list */
+                {
+                    /* Initialize the xVSS context with the first element of the list */
+                    xVSS_context->pMCSparamsList = pParams;
+                }
+                else
+                {
+                    /* Update next pointer of the previous last element of the chain */
+                    pMCS_last->pNext = pParams;
+                }
+
+                /* Save this element in case of other file to convert */
+                pMCS_last = pParams;
+
+                /* Fill the last M4xVSS_MCS_params element */
+                pParams->InputFileType = M4VIDEOEDITING_kFileType_3GPP;
+                pParams->OutputFileType = M4VIDEOEDITING_kFileType_3GPP;
+
+#ifdef TIMESCALE_BUG
+                /* Check if timescale only needs to be modified */
+
+                if( timescaleDifferent == M4OSA_TRUE
+                    && videoIsDifferent == M4OSA_FALSE )
+                {
+                    pParams->OutputVideoTimescale = 30;
+                    pParams->OutputVideoFormat = M4VIDEOEDITING_kNullVideo;
+                    pParams->OutputVideoFrameRate =
+                        M4VIDEOEDITING_k15_FPS; /* Must be set, otherwise,
+                                                    MCS returns an error ... */
+                }
+                else
+                {
+                    pParams->OutputVideoTimescale = 0;
+                }
+
+#endif
+
+                pParams->OutputVideoTimescale = xVSS_context->targetedTimescale;
+
+                /* We do not need to reencode video if its parameters do not differ */
+                /* from output settings parameters */
+                if( videoIsDifferent == M4OSA_TRUE )
+                {
+                    pParams->OutputVideoFormat =
+                        xVSS_context->pSettings->xVSS.outputVideoFormat;
+                    pParams->OutputVideoFrameRate =
+                        xVSS_context->pSettings->videoFrameRate;
+                    pParams->OutputVideoFrameSize =
+                        xVSS_context->pSettings->xVSS.outputVideoSize;
+
+                    /*FB: VAL CR P4ME00003076
+                    The output video bitrate is now directly given by the user in the edition
+                    settings structure. If the bitrate given by the user is irrelevant
+                    (outside the MCS minimum and maximum video bitrates),
+                    the output video bitrate is hardcoded according to the output video size*/
+                    if( xVSS_context->pSettings->xVSS.outputVideoBitrate
+                        >= M4VIDEOEDITING_k16_KBPS
+                        && xVSS_context->pSettings->xVSS.outputVideoBitrate
+                        <= M4VIDEOEDITING_k8_MBPS ) /*+ New Encoder bitrates */
+                    {
+                        pParams->OutputVideoBitrate =
+                            xVSS_context->pSettings->xVSS.outputVideoBitrate;
+                    }
+                    else
+                    {
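+                        /* Hardcoded fallback bitrates by output size: SQCIF 48 kbps,
+                        QQVGA 64 kbps, QCIF 128 kbps, QVGA/CIF 384 kbps, VGA 512 kbps */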
+                        switch( xVSS_context->pSettings->xVSS.outputVideoSize )
+                        {
+                            case M4VIDEOEDITING_kSQCIF:
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k48_KBPS;
+                                break;
+
+                            case M4VIDEOEDITING_kQQVGA:
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k64_KBPS;
+                                break;
+
+                            case M4VIDEOEDITING_kQCIF:
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k128_KBPS;
+                                break;
+
+                            case M4VIDEOEDITING_kQVGA:
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k384_KBPS;
+                                break;
+
+                            case M4VIDEOEDITING_kCIF:
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k384_KBPS;
+                                break;
+
+                            case M4VIDEOEDITING_kVGA:
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k512_KBPS;
+                                break;
+
+                            default: /* Should not happen !! */
+                                pParams->OutputVideoBitrate =
+                                    M4VIDEOEDITING_k64_KBPS;
+                                break;
+                        }
+                    }
+                }
+                else
+                {
+                    pParams->OutputVideoFormat = M4VIDEOEDITING_kNullVideo;
+                    pParams->OutputVideoFrameRate =
+                        M4VIDEOEDITING_k15_FPS; /* Must be set, otherwise, MCS returns an error */
+                }
+
+                if( audioIsDifferent == M4OSA_TRUE )
+                {
+                    pParams->OutputAudioFormat =
+                        xVSS_context->pSettings->xVSS.outputAudioFormat;
+
+                    switch( xVSS_context->pSettings->xVSS.outputAudioFormat )
+                    {
+                        case M4VIDEOEDITING_kNoneAudio:
+                            break;
+
+                        case M4VIDEOEDITING_kAMR_NB:
+                            pParams->OutputAudioBitrate =
+                                M4VIDEOEDITING_k12_2_KBPS;
+                            pParams->bAudioMono = M4OSA_TRUE;
+                            pParams->OutputAudioSamplingFrequency =
+                                M4VIDEOEDITING_kDefault_ASF;
+                            break;
+
+                        case M4VIDEOEDITING_kAAC:
+                            {
+                                /*FB: VAL CR P4ME00003076
+                                The output audio bitrate in the AAC case is now directly given
+                                by the user in the edition settings structure.
+                                If the bitrate given by the user is irrelevant or undefined
+                                (outside the MCS minimum and maximum audio bitrates),
+                                the output audio bitrate is hard coded according to the output
+                                audio sampling frequency*/
+
+                                /*Check if the audio bitrate is correctly defined*/
+
+                                /*Mono:
+                                MCS values for AAC mono are min 16 kbps and max 192 kbps*/
+                                if( xVSS_context->pSettings->xVSS.outputAudioBitrate
+                                    >= M4VIDEOEDITING_k16_KBPS
+                                    && xVSS_context->pSettings->
+                                    xVSS.outputAudioBitrate
+                                    <= M4VIDEOEDITING_k192_KBPS
+                                    && xVSS_context->pSettings->xVSS.bAudioMono
+                                    == M4OSA_TRUE )
+                                {
+                                    pParams->OutputAudioBitrate =
+                                        xVSS_context->pSettings->
+                                        xVSS.outputAudioBitrate;
+                                }
+                                /*Stereo:
+                                MCS values for AAC stereo are min 32 kbps and max 192 kbps*/
+                                else if( xVSS_context->pSettings->
+                                    xVSS.outputAudioBitrate
+                                    >= M4VIDEOEDITING_k32_KBPS
+                                    && xVSS_context->pSettings->
+                                    xVSS.outputAudioBitrate
+                                    <= M4VIDEOEDITING_k192_KBPS
+                                    && xVSS_context->pSettings->xVSS.bAudioMono
+                                    == M4OSA_FALSE )
+                                {
+                                    pParams->OutputAudioBitrate =
+                                        xVSS_context->pSettings->
+                                        xVSS.outputAudioBitrate;
+                                }
+
+                                /*The audio bitrate is hard coded according to the output audio
+                                 sampling frequency*/
+                                else
+                                {
+                                    switch( xVSS_context->pSettings->
+                                        xVSS.outputAudioSamplFreq )
+                                    {
+                                        case M4VIDEOEDITING_k16000_ASF:
+                                            pParams->OutputAudioBitrate =
+                                                M4VIDEOEDITING_k24_KBPS;
+                                            break;
+
+                                        case M4VIDEOEDITING_k22050_ASF:
+                                        case M4VIDEOEDITING_k24000_ASF:
+                                            pParams->OutputAudioBitrate =
+                                                M4VIDEOEDITING_k32_KBPS;
+                                            break;
+
+                                        case M4VIDEOEDITING_k32000_ASF:
+                                            pParams->OutputAudioBitrate =
+                                                M4VIDEOEDITING_k48_KBPS;
+                                            break;
+
+                                        case M4VIDEOEDITING_k44100_ASF:
+                                        case M4VIDEOEDITING_k48000_ASF:
+                                            pParams->OutputAudioBitrate =
+                                                M4VIDEOEDITING_k64_KBPS;
+                                            break;
+
+                                        default:
+                                            pParams->OutputAudioBitrate =
+                                                M4VIDEOEDITING_k64_KBPS;
+                                            break;
+                                    }
+
+                                    if( xVSS_context->pSettings->xVSS.bAudioMono
+                                        == M4OSA_FALSE )
+                                    {
+                                        /* Output bitrate has to be doubled for stereo */
+                                        pParams->OutputAudioBitrate +=
+                                            pParams->OutputAudioBitrate;
+                                    }
+                                }
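+                                /* For example, a 44100 Hz stereo output with an
+                                out-of-range requested bitrate falls back to 64 kbps,
+                                doubled to 128 kbps for stereo */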
+
+                                pParams->bAudioMono =
+                                    xVSS_context->pSettings->xVSS.bAudioMono;
+
+                                if( xVSS_context->pSettings->
+                                    xVSS.outputAudioSamplFreq
+                                    == M4VIDEOEDITING_k8000_ASF )
+                                {
+                                    /* Prevent disallowed sampling frequencies */
+                                    pParams->OutputAudioSamplingFrequency =
+                                        M4VIDEOEDITING_kDefault_ASF;
+                                }
+                                else
+                                {
+                                    pParams->OutputAudioSamplingFrequency =
+                                        xVSS_context->pSettings->
+                                        xVSS.outputAudioSamplFreq;
+                                }
+                                break;
+                            }
+
+                        default: /* Should not happen !! */
+                            pParams->OutputAudioFormat = M4VIDEOEDITING_kAMR_NB;
+                            pParams->OutputAudioBitrate =
+                                M4VIDEOEDITING_k12_2_KBPS;
+                            pParams->bAudioMono = M4OSA_TRUE;
+                            pParams->OutputAudioSamplingFrequency =
+                                M4VIDEOEDITING_kDefault_ASF;
+                            break;
+                    }
+                }
+                else
+                {
+                    pParams->OutputAudioFormat = M4VIDEOEDITING_kNullAudio;
+                }
+
+                /**
+                * UTF conversion: convert into the customer format, before being used*/
+                pDecodedPath = xVSS_context->pSettings->pClipList[i]->pFile;
+                length = M4OSA_chrLength(pDecodedPath);
+
+                if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                    != M4OSA_NULL && xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer
+                    != M4OSA_NULL )
+                {
+                    err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                        (M4OSA_Void *)xVSS_context->pSettings->
+                        pClipList[i]->pFile,
+                        (M4OSA_Void *)xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer,
+                        &length);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                            err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                    pDecodedPath = xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer;
+                }
+
+                /**
+                * End of the UTF conversion, use the converted file path*/
+                pParams->pFileIn =
+                    (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                    (M4OSA_Char *)"MCS 3GP Params: file in");
+
+                if( pParams->pFileIn == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                M4OSA_memcpy(pParams->pFileIn, pDecodedPath,
+                    (length + 1)); /* Copy input file path */
+
+                /**
+                * UTF conversion: convert into the customer format, before being used*/
+                pDecodedPath = out_3gp;
+                length = M4OSA_chrLength(pDecodedPath);
+
+                if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                    != M4OSA_NULL && xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer
+                    != M4OSA_NULL )
+                {
+                    err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                        (M4OSA_Void *)out_3gp, (M4OSA_Void *)xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer,
+                        &length);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                            err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                    pDecodedPath = xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer;
+                }
+
+                /**
+                * End of the UTF conversion, use the converted file path*/
+                pParams->pFileOut =
+                    (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                    (M4OSA_Char *)"MCS 3GP Params: file out");
+
+                if( pParams->pFileOut == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                M4OSA_memcpy(pParams->pFileOut, pDecodedPath,
+                    (length + 1)); /* Copy output file path */
+
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+                /**
+                * UTF conversion: convert into the customer format, before being used*/
+
+                pDecodedPath = out_3gp_tmp;
+                length = M4OSA_chrLength(pDecodedPath);
+
+                if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                    != M4OSA_NULL && xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer
+                    != M4OSA_NULL )
+                {
+                    err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                        (M4OSA_Void *)out_3gp_tmp,
+                        (M4OSA_Void *)xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer,
+                        &length);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                            err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                    pDecodedPath = xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer;
+                }
+
+                /**
+                * End of the UTF conversion, use the converted file path*/
+                pParams->pFileTemp =
+                    (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+                    (M4OSA_Char *)"MCS 3GP Params: file temp");
+
+                if( pParams->pFileTemp == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                M4OSA_memcpy(pParams->pFileTemp, pDecodedPath,
+                    (length + 1)); /* Copy temporary file path */
+
+#else
+
+                pParams->pFileTemp = M4OSA_NULL;
+
+#endif /*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+
+                /*FB 2008/10/20 keep media aspect ratio, add media rendering parameter*/
+
+                if( xVSS_context->pSettings->pClipList[i]->xVSS.MediaRendering
+                    == M4xVSS_kCropping
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    MediaRendering == M4xVSS_kBlackBorders
+                    || xVSS_context->pSettings->pClipList[i]->xVSS.
+                    MediaRendering == M4xVSS_kResizing )
+                {
+                    pParams->MediaRendering =
+                        xVSS_context->pSettings->pClipList[i]->xVSS.
+                        MediaRendering;
+                }
+
+                /*FB: transcoding per parts*/
+                pParams->BeginCutTime =
+                    xVSS_context->pSettings->pClipList[i]->uiBeginCutTime;
+                pParams->EndCutTime =
+                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime;
+
+                pParams->pNext = M4OSA_NULL;
+                pParams->isBGM = M4OSA_FALSE;
+                pParams->isCreated = M4OSA_FALSE;
+                xVSS_context->nbStepTotal++;
+                bIsTranscoding = M4OSA_TRUE;
+
+replace3GP_3GP:
+                /* Update total duration */
+                totalDuration +=
+                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+                    - xVSS_context->pSettings->pClipList[i]->uiBeginCutTime;
+
+                /* The cuts are done in the MCS, so reset beginCutTime and endCutTime
+                here to keep the entire transcoded video */
+                xVSS_context->pSettings->pClipList[i]->uiBeginCutTime = 0;
+                xVSS_context->pSettings->pClipList[i]->uiEndCutTime = 0;
+
+                /* Replacing in VSS structure the original 3GP file by the transcoded 3GP file */
+                xVSS_context->pSettings->pClipList[i]->FileType =
+                    M4VIDEOEDITING_kFileType_3GPP;
+
+                if( xVSS_context->pSettings->pClipList[i]->pFile != M4OSA_NULL )
+                {
+                    M4OSA_free(xVSS_context->pSettings->pClipList[i]->pFile);
+                    xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_NULL;
+                }
+
+                /**
+                * UTF conversion: convert into the customer format, before being used*/
+                pDecodedPath = pParams->pFileOut;
+
+                if( xVSS_context->UTFConversionContext.pConvToUTF8Fct
+                    != M4OSA_NULL && xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer
+                    != M4OSA_NULL )
+                {
+                    err = M4xVSS_internalConvertToUTF8(xVSS_context,
+                        (M4OSA_Void *)pParams->pFileOut,
+                        (M4OSA_Void *)xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer,
+                        &length);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: M4xVSS_internalConvertToUTF8 returns err: 0x%x",
+                            err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                    pDecodedPath = xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer;
+                }
+                else
+                {
+                    length = M4OSA_chrLength(pDecodedPath);
+                }
+                /**
+                * End of the UTF conversion, use the converted file path*/
+                xVSS_context->pSettings->pClipList[i]->pFile = M4OSA_malloc(
+                    (length + 1),
+                    M4VS, (M4OSA_Char *)"xVSS file path of 3gp to 3gp");
+
+                if( xVSS_context->pSettings->pClipList[i]->pFile == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                M4OSA_memcpy(xVSS_context->pSettings->pClipList[i]->pFile,
+                    pDecodedPath, (length + 1));
+                /*FB: add file path size because of UTF 16 conversion*/
+                xVSS_context->pSettings->pClipList[i]->filePathSize = length+1;
+
+                /* We define master clip as first 3GP input clip */
+                /*if(xVSS_context->pSettings->uiMasterClip == 0 && fileProperties.
+                AudioStreamType != M4VIDEOEDITING_kNoneAudio)
+                {
+                xVSS_context->pSettings->uiMasterClip = i;
+                }*/
+            }
+            else
+            {
+                /* Update total duration */
+                totalDuration +=
+                    xVSS_context->pSettings->pClipList[i]->uiEndCutTime
+                    - xVSS_context->pSettings->pClipList[i]->uiBeginCutTime;
+            }
+            /* We define the master clip as the first input clip that has an audio track */
+            if( masterClip == -1
+                && fileProperties.AudioStreamType != M4VIDEOEDITING_kNoneAudio )
+            {
+                masterClip = i;
+                xVSS_context->pSettings->uiMasterClip = i;
+            }
+#if 0 /* Changed to be able to mix with video only files */
+
+            if( xVSS_context->pSettings->uiMasterClip == 0
+                && fileProperties.AudioStreamType != M4VIDEOEDITING_kNoneAudio )
+            {
+                xVSS_context->pSettings->uiMasterClip = i;
+            }
+
+#endif
+
+        }
+        /**************************
+        Other input file type case
+        ***************************/
+        else
+        {
+            M4OSA_TRACE1_0("Bad file type as input clip");
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_PARAMETER;
+        }
+    }
+
+    /*********************************************************
+    * Parse all effects to make some adjustment for framing, *
+    * text and to transform relative time into absolute time *
+    **********************************************************/
+    for ( j = 0; j < xVSS_context->pSettings->nbEffects; j++ )
+    {
+        /* Copy effect to "local" structure */
+        M4OSA_memcpy((M4OSA_MemAddr8) &(xVSS_context->pSettings->Effects[j]),
+            (M4OSA_MemAddr8) &(pSettings->Effects[j]),
+            sizeof(M4VSS3GPP_EffectSettings));
+
+        /* Prevent bad initialization of the effect percentage times */
+        if( xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent > 100
+            || xVSS_context->pSettings->Effects[j].xVSS.uiStartPercent > 100 )
+        {
+            /* These percentage times have probably not been initialized */
+            /* Do not use them: set them to 0 */
+            xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent = 0;
+            xVSS_context->pSettings->Effects[j].xVSS.uiStartPercent = 0;
+        }
+
+        /* If we have percentage information let's use it... Otherwise, we use absolute time. */
+        if( xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent != 0 )
+        {
+            xVSS_context->pSettings->
+                Effects[j].uiStartTime = (M4OSA_UInt32)(totalDuration
+                * xVSS_context->pSettings->Effects[j].xVSS.uiStartPercent
+                / 100);
+            /* The percentage of effect duration is based on the duration of the clip -
+            start time */
+            xVSS_context->pSettings->
+                Effects[j].uiDuration = (M4OSA_UInt32)(totalDuration
+                * xVSS_context->pSettings->Effects[j].xVSS.uiDurationPercent
+                / 100);
+        }
+
+        /* If there is a framing effect, we need to allocate framing effect structure */
+        if( xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Framing )
+        {
+#ifdef DECODE_GIF_ON_SAVING
+
+            M4xVSS_FramingContext *framingCtx;
+            /*UTF conversion support*/
+            M4OSA_Void *pDecodedPath = M4OSA_NULL;
+
+#else
+
+            M4xVSS_FramingStruct *framingCtx;
+
+#endif /*DECODE_GIF_ON_SAVING*/
+
+            M4OSA_Char *pExt2 = M4OSA_NULL;
+            M4VIFI_ImagePlane *pPlane =
+                xVSS_context->pSettings->Effects[j].xVSS.pFramingBuffer;
+            M4OSA_Int32 result1, result2;
+
+            /* Copy framing file path */
+            if( pSettings->Effects[j].xVSS.pFramingFilePath != M4OSA_NULL )
+            {
+                xVSS_context->pSettings->
+                    Effects[j].xVSS.pFramingFilePath = M4OSA_malloc(
+                    M4OSA_chrLength(pSettings->Effects[j].xVSS.pFramingFilePath)
+                    + 1, M4VS, (M4OSA_Char *)"Local Framing file path");
+
+                if( xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath
+                    == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                M4OSA_memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->
+                    Effects[j].xVSS.pFramingFilePath,
+                    (M4OSA_MemAddr8)pSettings->
+                    Effects[j].xVSS.pFramingFilePath, M4OSA_chrLength(
+                    pSettings->Effects[j].xVSS.pFramingFilePath) + 1);
+
+                pExt2 =
+                    xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
+            }
+
+#ifdef DECODE_GIF_ON_SAVING
+
+            framingCtx = (M4xVSS_FramingContext
+                *)M4OSA_malloc(sizeof(M4xVSS_FramingContext),
+                M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+            if( framingCtx == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            framingCtx->aFramingCtx = M4OSA_NULL;
+            framingCtx->aFramingCtx_last = M4OSA_NULL;
+            framingCtx->pSPSContext = M4OSA_NULL;
+            framingCtx->outputVideoSize =
+                xVSS_context->pSettings->xVSS.outputVideoSize;
+            framingCtx->topleft_x =
+                xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+            framingCtx->topleft_y =
+                xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+            framingCtx->bEffectResize =
+                xVSS_context->pSettings->Effects[j].xVSS.bResize;
+            framingCtx->pEffectFilePath =
+                xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
+            framingCtx->pFileReadPtr = xVSS_context->pFileReadPtr;
+            framingCtx->pFileWritePtr = xVSS_context->pFileWritePtr;
+            framingCtx->effectDuration =
+                xVSS_context->pSettings->Effects[j].uiDuration;
+            framingCtx->b_IsFileGif = M4OSA_FALSE;
+            framingCtx->alphaBlendingStruct = M4OSA_NULL;
+            framingCtx->b_animated = M4OSA_FALSE;
+
+            /* The output ratio for the effect is stored in the uiFiftiesOutFrameRate parameter
+            of the extended xVSS effects structure */
+            if( xVSS_context->pSettings->Effects[j].xVSS.uiFiftiesOutFrameRate
+                != 0 )
+            {
+                framingCtx->frameDurationRatio =
+                    (M4OSA_Float)(( xVSS_context->pSettings->
+                    Effects[j].xVSS.uiFiftiesOutFrameRate) / 1000.0);
+            }
+            else
+            {
+                framingCtx->frameDurationRatio = 1.0;
+            }
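+            /* For example, uiFiftiesOutFrameRate = 12500 gives frameDurationRatio = 12.5,
+            while the default value 0 keeps the ratio at 1.0 */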
+
+            /*Alpha blending*/
+            /*Check if the alpha blending parameters are correct*/
+            if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime < 0
+                || pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 100 )
+            {
+                pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime = 0;
+            }
+
+            if( pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime < 0
+                || pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime > 100 )
+            {
+                pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime = 0;
+            }
+
+            if( pSettings->Effects[j].xVSS.uialphaBlendingEnd < 0
+                || pSettings->Effects[j].xVSS.uialphaBlendingEnd > 100 )
+            {
+                pSettings->Effects[j].xVSS.uialphaBlendingEnd = 100;
+            }
+
+            if( pSettings->Effects[j].xVSS.uialphaBlendingMiddle < 0
+                || pSettings->Effects[j].xVSS.uialphaBlendingMiddle > 100 )
+            {
+                pSettings->Effects[j].xVSS.uialphaBlendingMiddle = 100;
+            }
+
+            if( pSettings->Effects[j].xVSS.uialphaBlendingStart < 0
+                || pSettings->Effects[j].xVSS.uialphaBlendingStart > 100 )
+            {
+                pSettings->Effects[j].xVSS.uialphaBlendingStart = 100;
+            }
+
+            if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 0
+                || pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime > 0 )
+            {
+                /*Allocate the alpha blending structure*/
+                framingCtx->alphaBlendingStruct =
+                    (M4xVSS_internalEffectsAlphaBlending *)M4OSA_malloc(
+                    sizeof(M4xVSS_internalEffectsAlphaBlending),
+                    M4VS, (M4OSA_Char *)"alpha blending structure");
+
+                if( framingCtx->alphaBlendingStruct == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return M4ERR_ALLOC;
+                }
+                /*Fill the alpha blending structure*/
+                framingCtx->alphaBlendingStruct->m_fadeInTime =
+                    pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime;
+                framingCtx->alphaBlendingStruct->m_fadeOutTime =
+                    pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime;
+                framingCtx->alphaBlendingStruct->m_end =
+                    pSettings->Effects[j].xVSS.uialphaBlendingEnd;
+                framingCtx->alphaBlendingStruct->m_middle =
+                    pSettings->Effects[j].xVSS.uialphaBlendingMiddle;
+                framingCtx->alphaBlendingStruct->m_start =
+                    pSettings->Effects[j].xVSS.uialphaBlendingStart;
+
+                if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime
+                    + pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
+                        > 100 )
+                {
+                    framingCtx->alphaBlendingStruct->m_fadeOutTime =
+                        100 - framingCtx->alphaBlendingStruct->m_fadeInTime;
+                }
+            }
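+            /* For example, fade-in = 70 and fade-out = 60 exceed 100 together, so the
+            fade-out is reduced to 100 - 70 = 30 */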
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            pDecodedPath =
+                xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
+            length = M4OSA_chrLength(pDecodedPath);
+
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                    (M4OSA_Void *)xVSS_context->pSettings->
+                    Effects[j].xVSS.pFramingFilePath,
+                    (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer,
+                    &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                        err);
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+                pDecodedPath =
+                    xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+
+            /**
+            * End of the UTF conversion, use the converted file path*/
+            framingCtx->pEffectFilePath = M4OSA_malloc(length + 1, M4VS,
+                (M4OSA_Char *)"Local Framing file path");
+
+            if( framingCtx->pEffectFilePath == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+            M4OSA_memcpy((M4OSA_MemAddr8)framingCtx->pEffectFilePath,
+                (M4OSA_MemAddr8)pDecodedPath, length + 1);
+
+            /* Save framing structure associated with corresponding effect */
+            xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                framingCtx;
+
+#else
+
+            framingCtx = (M4xVSS_FramingStruct
+                *)M4OSA_malloc(sizeof(M4xVSS_FramingStruct),
+                M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+            if( framingCtx == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_ALLOC;
+            }
+
+            framingCtx->topleft_x =
+                xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+            framingCtx->topleft_y =
+                xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+
+            /* BugFix 1.2.0: Leak when decoding error */
+            framingCtx->FramingRgb = M4OSA_NULL;
+            framingCtx->FramingYuv = M4OSA_NULL;
+            framingCtx->pNext = framingCtx;
+            /* Save framing structure associated with corresponding effect */
+            xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                framingCtx;
+
+#endif /*DECODE_GIF_ON_SAVING*/
+
+            if( pExt2 != M4OSA_NULL )
+            {
+                /* Decode the image associated to the effect, and fill framing structure */
+                pExt2 += (M4OSA_chrLength(pExt2) - 4);
+
+                M4OSA_chrCompare(pExt2,(M4OSA_Char *)".rgb", &result1);
+                M4OSA_chrCompare(pExt2,(M4OSA_Char *)".RGB", &result2);
+
+                if( 0 == result1 || 0 == result2 )
+                {
+#ifdef DECODE_GIF_ON_SAVING
+
+                    framingCtx->aFramingCtx =
+                        (M4xVSS_FramingStruct
+                        *)M4OSA_malloc(sizeof(M4xVSS_FramingStruct),
+                        M4VS,
+                        (M4OSA_Char
+                        *)
+                        "M4xVSS_internalDecodeGIF: Context of the framing effect");
+
+                    if( framingCtx->aFramingCtx == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0(
+                            "Allocation error in M4xVSS_SendCommand");
+                        /* TODO: Translate error code of SPS to an xVSS error code */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return M4ERR_ALLOC;
+                    }
+                    framingCtx->aFramingCtx->pCurrent =
+                        M4OSA_NULL; /* Only used by the first element of the chain */
+                    framingCtx->aFramingCtx->previousClipTime = -1;
+                    framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
+                    framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
+                    framingCtx->aFramingCtx->topleft_x =
+                        xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+                    framingCtx->aFramingCtx->topleft_y =
+                        xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+                    /*To support ARGB8888 : get the width and height */
+
+                    framingCtx->aFramingCtx->width =
+                        xVSS_context->pSettings->Effects[j].xVSS.width;
+                    framingCtx->aFramingCtx->height =
+                        xVSS_context->pSettings->Effects[j].xVSS.height;
+                    M4OSA_TRACE1_1("FRAMING BEFORE M4xVSS_SendCommand  %d",
+                        framingCtx->aFramingCtx->width);
+                    M4OSA_TRACE1_1("FRAMING BEFORE M4xVSS_SendCommand  %d",
+                        framingCtx->aFramingCtx->height);
+
+#endif
+
+                    err = M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(
+                        xVSS_context,
+                        &(xVSS_context->pSettings->Effects[j]),
+                        framingCtx->aFramingCtx,xVSS_context->pSettings->xVSS.outputVideoSize);
+                    M4OSA_TRACE1_1("FRAMING AFTER M4xVSS_SendCommand  %d",
+                        framingCtx->aFramingCtx->width);
+                    M4OSA_TRACE1_1("FRAMING AFTER M4xVSS_SendCommand  %d",
+                        framingCtx->aFramingCtx->height);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: M4xVSS_internalConvertARGB888toYUV420_FrammingEffect returned 0x%x",
+                            err);
+                        /* TODO: Translate error code of SPS to an xVSS error code */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                }
+                else
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SendCommand: Unsupported still picture format 0x%x",
+                        err);
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_PARAMETER;
+                }
+            }
+            else if( pPlane != M4OSA_NULL )
+            {
+#ifdef DECODE_GIF_ON_SAVING
+
+                framingCtx->aFramingCtx = (M4xVSS_FramingStruct
+                    *)M4OSA_malloc(sizeof(M4xVSS_FramingStruct),
+                    M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+                if( framingCtx->aFramingCtx == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+
+                framingCtx->aFramingCtx->topleft_x =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+                framingCtx->aFramingCtx->topleft_y =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+
+                /* BugFix 1.2.0: Leak when decoding error */
+                framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
+                framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
+                framingCtx->aFramingCtx->pNext = framingCtx->aFramingCtx;
+                framingCtx->aFramingCtx->pCurrent = framingCtx->aFramingCtx;
+                framingCtx->aFramingCtx->duration = 0;
+                framingCtx->aFramingCtx->previousClipTime = -1;
+                framingCtx->aFramingCtx->FramingRgb =
+                    xVSS_context->pSettings->Effects[j].xVSS.pFramingBuffer;
+                /* Force input RGB buffer to even size to avoid errors in YUV conversion */
+                framingCtx->aFramingCtx->FramingRgb->u_width =
+                    framingCtx->aFramingCtx->FramingRgb->u_width & ~1;
+                framingCtx->aFramingCtx->FramingRgb->u_height =
+                    framingCtx->aFramingCtx->FramingRgb->u_height & ~1;
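+                /* For example, a 175x121 plane is clipped to 174x120 so that the
+                YUV420 chroma planes get even dimensions */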
+                /* Input RGB plane is provided, let's convert it to YUV420, and update framing
+                structure  */
+                err = M4xVSS_internalConvertRGBtoYUV(framingCtx->aFramingCtx);
+
+#else
+
+                framingCtx->FramingRgb =
+                    xVSS_context->pSettings->Effects[j].xVSS.pFramingBuffer;
+                /* Force input RGB buffer to even size to avoid errors in YUV conversion */
+                framingCtx->FramingRgb.u_width =
+                    framingCtx->FramingRgb.u_width & ~1;
+                framingCtx->FramingRgb.u_height =
+                    framingCtx->FramingRgb.u_height & ~1;
+                /* Input RGB plane is provided, let's convert it to YUV420, and update framing
+                 structure  */
+                err = M4xVSS_internalConvertRGBtoYUV(framingCtx);
+
+#endif
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_sendCommand: error when converting RGB to YUV: 0x%x",
+                        err);
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return err;
+                }
+            }
+            else
+            {
+                M4OSA_TRACE1_0(
+                    "M4xVSS_sendCommand: No input image/plane provided for framing effect.");
+                /*FB: to avoid leaks when there is an error in the send command*/
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                /**/
+                return M4ERR_PARAMETER;
+            }
+        }
+        /* CR: Add text handling with external text interface */
+        /* If effect type is text, we call external text function to get RGB 565 buffer */
+        if( xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Text )
+        {
+            /* Call the font engine function pointer to get RGB565 buffer */
+            /* We transform text effect into framing effect from buffer */
+            if( xVSS_context->pSettings->xVSS.pTextRenderingFct != M4OSA_NULL )
+            {
+                /*FB: add UTF conversion for text buffer*/
+                M4OSA_Void *pDecodedPath = M4OSA_NULL;
+#ifdef DECODE_GIF_ON_SAVING
+
+                M4xVSS_FramingContext *framingCtx;
+
+#else
+
+                M4xVSS_FramingStruct *framingCtx;
+
+#endif /*DECODE_GIF_ON_SAVING*/
+
+#ifdef DECODE_GIF_ON_SAVING
+
+                framingCtx = (M4xVSS_FramingContext
+                    *)M4OSA_malloc(sizeof(M4xVSS_FramingContext),
+                    M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+                if( framingCtx == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+                framingCtx->aFramingCtx = M4OSA_NULL;
+                framingCtx->aFramingCtx_last = M4OSA_NULL;
+                framingCtx->pSPSContext = M4OSA_NULL;
+                framingCtx->outputVideoSize =
+                    xVSS_context->pSettings->xVSS.outputVideoSize;
+                framingCtx->topleft_x =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+                framingCtx->topleft_y =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+                framingCtx->bEffectResize =
+                    xVSS_context->pSettings->Effects[j].xVSS.bResize;
+                framingCtx->pEffectFilePath =
+                    xVSS_context->pSettings->Effects[j].xVSS.pFramingFilePath;
+                framingCtx->pFileReadPtr = xVSS_context->pFileReadPtr;
+                framingCtx->pFileWritePtr = xVSS_context->pFileWritePtr;
+                framingCtx->effectDuration =
+                    xVSS_context->pSettings->Effects[j].uiDuration;
+                framingCtx->b_IsFileGif = M4OSA_FALSE;
+                framingCtx->b_animated = M4OSA_FALSE;
+                framingCtx->alphaBlendingStruct = M4OSA_NULL;
+
+                /* Save framing structure associated with corresponding effect */
+                xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                    framingCtx;
+
+                framingCtx->aFramingCtx = (M4xVSS_FramingStruct
+                    *)M4OSA_malloc(sizeof(M4xVSS_FramingStruct),
+                    M4VS, (M4OSA_Char *)"Context of the framing effect");
+
+                if( framingCtx->aFramingCtx == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+
+                framingCtx->aFramingCtx->topleft_x =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+                framingCtx->aFramingCtx->topleft_y =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+
+                /* BugFix 1.2.0: Leak when decoding error */
+                framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
+                framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
+                framingCtx->aFramingCtx->pNext = framingCtx->aFramingCtx;
+                framingCtx->aFramingCtx->pCurrent = framingCtx->aFramingCtx;
+                framingCtx->aFramingCtx->duration = 0;
+                framingCtx->aFramingCtx->previousClipTime = -1;
+
+                /*Alpha blending*/
+                /*Check if the alpha blending parameters are correct*/
+                if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime < 0
+                    || pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime
+                > 100 )
+                {
+                    pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime = 0;
+                }
+
+                if( pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime < 0
+                    || pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
+                > 100 )
+                {
+                    pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime = 0;
+                }
+
+                if( pSettings->Effects[j].xVSS.uialphaBlendingEnd < 0
+                    || pSettings->Effects[j].xVSS.uialphaBlendingEnd > 100 )
+                {
+                    pSettings->Effects[j].xVSS.uialphaBlendingEnd = 100;
+                }
+
+                if( pSettings->Effects[j].xVSS.uialphaBlendingMiddle < 0
+                    || pSettings->Effects[j].xVSS.uialphaBlendingMiddle > 100 )
+                {
+                    pSettings->Effects[j].xVSS.uialphaBlendingMiddle = 100;
+                }
+
+                if( pSettings->Effects[j].xVSS.uialphaBlendingStart < 0
+                    || pSettings->Effects[j].xVSS.uialphaBlendingStart > 100 )
+                {
+                    pSettings->Effects[j].xVSS.uialphaBlendingStart = 100;
+                }
+
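+                /* Allocate and fill the alpha blending structure only when a fade-in or a
+                fade-out has been requested */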
+                if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime > 0
+                    || pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
+                    > 0 )
+                {
+                    /*Allocate the alpha blending structure*/
+                    framingCtx->alphaBlendingStruct =
+                        (M4xVSS_internalEffectsAlphaBlending *)M4OSA_malloc(
+                        sizeof(M4xVSS_internalEffectsAlphaBlending),
+                        M4VS, (M4OSA_Char *)"alpha blending structure");
+
+                    if( framingCtx->alphaBlendingStruct == M4OSA_NULL )
+                    {
+                        M4OSA_TRACE1_0(
+                            "Allocation error in M4xVSS_SendCommand");
+                        M4xVSS_freeCommand(xVSS_context);
+                        return M4ERR_ALLOC;
+                    }
+                    /*Fill the alpha blending structure*/
+                    framingCtx->alphaBlendingStruct->m_fadeInTime =
+                        pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime;
+                    framingCtx->alphaBlendingStruct->m_fadeOutTime =
+                        pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime;
+                    framingCtx->alphaBlendingStruct->m_end =
+                        pSettings->Effects[j].xVSS.uialphaBlendingEnd;
+                    framingCtx->alphaBlendingStruct->m_middle =
+                        pSettings->Effects[j].xVSS.uialphaBlendingMiddle;
+                    framingCtx->alphaBlendingStruct->m_start =
+                        pSettings->Effects[j].xVSS.uialphaBlendingStart;
+
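+                    /* The fade-in and fade-out periods cannot overlap: clamp the fade-out
+                    so that their sum does not exceed 100 */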
+                    if( pSettings->Effects[j].xVSS.uialphaBlendingFadeInTime
+                        + pSettings->Effects[j].xVSS.uialphaBlendingFadeOutTime
+                            > 100 )
+                    {
+                        framingCtx->alphaBlendingStruct->m_fadeOutTime =
+                            100 - framingCtx->alphaBlendingStruct->m_fadeInTime;
+                    }
+                }
+#else
+
+                framingCtx = (M4xVSS_FramingStruct
+                    *)M4OSA_malloc(sizeof(M4xVSS_FramingStruct),
+                    M4VS, (M4OSA_Char
+                    *)"Context of the framing effect (for text)");
+
+                if( framingCtx == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+
+                framingCtx->topleft_x =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_x;
+                framingCtx->topleft_y =
+                    xVSS_context->pSettings->Effects[j].xVSS.topleft_y;
+                framingCtx->FramingRgb = M4OSA_NULL;
+
+                /* BugFix 1.2.0: Leak when decoding error */
+                framingCtx->FramingYuv = M4OSA_NULL;
+                framingCtx->pNext = framingCtx;
+
+#endif
+                /* Save framing structure associated with corresponding effect */
+
+                xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                    framingCtx;
+
+                /* FB: changes for Video Artist: memcopy pTextBuffer so that it can be changed
+                after a complete analysis*/
+                if( pSettings->Effects[j].xVSS.pTextBuffer == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("M4xVSS_SendCommand: pTextBuffer is null");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return M4ERR_PARAMETER;
+                }
+
+                /*Convert text buffer into customer format before being used*/
+                /**
+                * UTF conversion: convert into the customer format, before being used*/
+                pDecodedPath = pSettings->Effects[j].xVSS.pTextBuffer;
+                xVSS_context->pSettings->Effects[j].xVSS.textBufferSize =
+                    pSettings->Effects[j].xVSS.textBufferSize;
+
+                if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                    != M4OSA_NULL && xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer
+                    != M4OSA_NULL )
+                {
+                    err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                        (M4OSA_Void *)pSettings->
+                        Effects[j].xVSS.pTextBuffer,
+                        (M4OSA_Void *)xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer,
+                        &length);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                            err);
+                        /* Free Send command */
+                        M4xVSS_freeCommand(xVSS_context);
+                        return err;
+                    }
+                    pDecodedPath = xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer;
+                    xVSS_context->pSettings->Effects[j].xVSS.textBufferSize =
+                        length;
+                }
+                /**
+                * End of the UTF conversion, use the converted file path*/
+
+                xVSS_context->pSettings->
+                    Effects[j].xVSS.pTextBuffer = M4OSA_malloc(
+                    xVSS_context->pSettings->Effects[j].xVSS.textBufferSize + 1,
+                    M4VS, (M4OSA_Char *)"Local text buffer effect");
+
+                //xVSS_context->pSettings->Effects[j].xVSS.pTextBuffer =
+                // M4OSA_malloc(M4OSA_chrLength(pSettings->Effects[j].xVSS.pTextBuffer)+1,
+                // M4VS, "Local text buffer effect");
+                if( xVSS_context->pSettings->Effects[j].xVSS.pTextBuffer
+                    == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+
+                if( pSettings->Effects[j].xVSS.pTextBuffer != M4OSA_NULL )
+                {
+                    //M4OSA_memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->Effects[j]
+                    //.xVSS.pTextBuffer, (M4OSA_MemAddr8)pSettings->Effects[j].xVSS.pTextBuffer,
+                    // M4OSA_chrLength(pSettings->Effects[j].xVSS.pTextBuffer)+1);
+                    M4OSA_memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->
+                        Effects[j].xVSS.pTextBuffer,
+                        (M4OSA_MemAddr8)pDecodedPath, xVSS_context->pSettings->
+                        Effects[j].xVSS.textBufferSize + 1);
+                }
+
+                /*Allocate the text RGB buffer*/
+                framingCtx->aFramingCtx->FramingRgb =
+                    (M4VIFI_ImagePlane *)M4OSA_malloc(sizeof(M4VIFI_ImagePlane),
+                    M4VS,
+                    (M4OSA_Char *)"RGB structure for the text effect");
+
+                if( framingCtx->aFramingCtx->FramingRgb == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+
+                if( xVSS_context->pSettings->Effects[j].xVSS.uiTextBufferWidth
+                    == 0 || xVSS_context->pSettings->
+                    Effects[j].xVSS.uiTextBufferHeight == 0 )
+                {
+                    M4OSA_TRACE1_0(
+                        "M4xVSS_SendCommand: text plane width and height are not defined");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_PARAMETER;
+                }
+                /* Allocate input RGB text buffer and force it to even size to avoid errors in
+                 YUV conversion */
+                framingCtx->aFramingCtx->FramingRgb->u_width =
+                    xVSS_context->pSettings->
+                    Effects[j].xVSS.uiTextBufferWidth & ~1;
+                framingCtx->aFramingCtx->FramingRgb->u_height =
+                    xVSS_context->pSettings->
+                    Effects[j].xVSS.uiTextBufferHeight & ~1;
+                framingCtx->aFramingCtx->FramingRgb->u_stride =
+                    2 * framingCtx->aFramingCtx->FramingRgb->u_width;
+                framingCtx->aFramingCtx->FramingRgb->u_topleft = 0;
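+                /* pac_data holds the raw text pixels: u_height * u_stride bytes,
+                i.e. 2 bytes per pixel */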
+                framingCtx->aFramingCtx->FramingRgb->pac_data =
+                    (M4VIFI_UInt8 *)M4OSA_malloc(
+                    framingCtx->aFramingCtx->FramingRgb->u_height
+                    * framingCtx->aFramingCtx->FramingRgb->u_stride,
+                    M4VS, (M4OSA_Char *)"Text RGB plane->pac_data");
+
+                if( framingCtx->aFramingCtx->FramingRgb->pac_data
+                    == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                    /*FB: to avoid leaks when there is an error in the send command*/
+                    /* Free Send command */
+                    M4xVSS_freeCommand(xVSS_context);
+                    /**/
+                    return M4ERR_ALLOC;
+                }
+
+#ifdef DECODE_GIF_ON_SAVING
+                /**/
+                /* Call text rendering function */
+
+                err = xVSS_context->pSettings->xVSS.pTextRenderingFct(
+                    xVSS_context->pSettings->Effects[j].xVSS.pRenderingData,
+                    xVSS_context->pSettings->
+                    Effects[j].xVSS.pTextBuffer,
+                    xVSS_context->pSettings->
+                    Effects[j].xVSS.textBufferSize,
+                    &(framingCtx->aFramingCtx->FramingRgb));
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_0("Text rendering external function failed\n");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+
+                /* Check that RGB buffer is set */
+                if( framingCtx->aFramingCtx->FramingRgb == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0(
+                        "Text rendering function did not set RGB buffer correctly !");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return M4ERR_PARAMETER;
+                }
+
+                /* Convert RGB plane to YUV420 and update framing structure */
+                err = M4xVSS_internalConvertRGBtoYUV(framingCtx->aFramingCtx);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_sendCommand: error when converting RGB to YUV: 0x%x",
+                        err);
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+
+#else
+                /**/
+                /* Call text rendering function */
+
+                err = xVSS_context->pSettings->xVSS.pTextRenderingFct(
+                    xVSS_context->pSettings->Effects[j].xVSS.pRenderingData,
+                    xVSS_context->pSettings->
+                    Effects[j].xVSS.pTextBuffer,
+                    xVSS_context->pSettings->
+                    Effects[j].xVSS.textBufferSize,
+                    &(framingCtx->FramingRgb));
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_0("Text rendering external function failed\n");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+
+                /* Check that RGB buffer is set */
+                if( framingCtx->FramingRgb == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0(
+                        "Text rendering function did not set RGB buffer correctly !");
+                    M4xVSS_freeCommand(xVSS_context);
+                    return M4ERR_PARAMETER;
+                }
+
+                /* Convert RGB plane to YUV420 and update framing structure */
+                err = M4xVSS_internalConvertRGBtoYUV(framingCtx);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_sendCommand: error when converting RGB to YUV: 0x%x",
+                        err);
+                    M4xVSS_freeCommand(xVSS_context);
+                    return err;
+                }
+
+#endif /*DECODE_GIF_ON_SAVING*/
+
+                /* Change internally effect type from "text" to framing */
+
+                xVSS_context->pSettings->Effects[j].VideoEffectType =
+                    M4xVSS_kVideoEffectType_Framing;
+                xVSS_context->pSettings->Effects[j].xVSS.bResize = M4OSA_FALSE;
+            }
+            else
+            {
+                M4OSA_TRACE1_0(
+                    "M4xVSS_sendCommand: No text rendering function set !!");
+                M4xVSS_freeCommand(xVSS_context);
+                return M4ERR_PARAMETER;
+            }
+        }
+
+        /* Allocate the structure to store the data needed by the Fifties effect */
+        else if( xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Fifties )
+        {
+            M4xVSS_FiftiesStruct *fiftiesCtx;
+
+            /* Check the expected frame rate for the fifties effect (must be above 0) */
+            if( 0 == xVSS_context->pSettings->
+                Effects[j].xVSS.uiFiftiesOutFrameRate )
+            {
+                M4OSA_TRACE1_0(
+                    "The frame rate for the fifties effect must be greater than 0 !");
+                M4xVSS_freeCommand(xVSS_context);
+                return M4ERR_PARAMETER;
+            }
+
+            fiftiesCtx = (M4xVSS_FiftiesStruct
+                *)M4OSA_malloc(sizeof(M4xVSS_FiftiesStruct),
+                M4VS, (M4OSA_Char *)"Context of the fifties effect");
+
+            if( fiftiesCtx == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                return M4ERR_ALLOC;
+            }
+
+            fiftiesCtx->previousClipTime = -1;
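+            /* Duration of one "fifties" frame in ms, derived from the requested output
+            frame rate */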
+            fiftiesCtx->fiftiesEffectDuration = 1000 / xVSS_context->pSettings->
+                Effects[j].xVSS.uiFiftiesOutFrameRate;
+            fiftiesCtx->shiftRandomValue = 0;
+            fiftiesCtx->stripeRandomValue = 0;
+
+            /* Save the structure associated with corresponding effect */
+            xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                fiftiesCtx;
+        }
+
+        /* Allocate the structure to store the data needed by the Color effect */
+        else if( xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_ColorRGB16
+            || xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_BlackAndWhite
+            || xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Pink
+            || xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Green
+            || xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Sepia
+            || xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Negative
+            || xVSS_context->pSettings->Effects[j].VideoEffectType
+            == M4xVSS_kVideoEffectType_Gradient )
+        {
+            M4xVSS_ColorStruct *ColorCtx;
+
+            ColorCtx =
+                (M4xVSS_ColorStruct *)M4OSA_malloc(sizeof(M4xVSS_ColorStruct),
+                M4VS, (M4OSA_Char *)"Context of the color effect");
+
+            if( ColorCtx == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                return M4ERR_ALLOC;
+            }
+
+            ColorCtx->colorEffectType =
+                xVSS_context->pSettings->Effects[j].VideoEffectType;
+
+            if( xVSS_context->pSettings->Effects[j].VideoEffectType
+                == M4xVSS_kVideoEffectType_ColorRGB16
+                || xVSS_context->pSettings->Effects[j].VideoEffectType
+                == M4xVSS_kVideoEffectType_Gradient )
+            {
+                ColorCtx->rgb16ColorData =
+                    xVSS_context->pSettings->Effects[j].xVSS.uiRgb16InputColor;
+            }
+            else
+            {
+                ColorCtx->rgb16ColorData = 0;
+            }
+
+            /* Save the structure associated with corresponding effect */
+            xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                ColorCtx;
+        }
+    }
+
+    /**********************************
+    Background music registering
+    **********************************/
+    if( pSettings->xVSS.pBGMtrack != M4OSA_NULL && isNewBGM == M4OSA_TRUE )
+    {
+#ifdef PREVIEW_ENABLED
+
+        M4xVSS_MCS_params *pParams;
+        M4OSA_Char *out_pcm;
+        /*UTF conversion support*/
+        M4OSA_Void *pDecodedPath = M4OSA_NULL;
+
+#endif
+
+        /* We save the output file pointer, because we will need it when saving the audio
+         mixed file (last save step) */
+
+        xVSS_context->pOutputFile = xVSS_context->pSettings->pOutputFile;
+        xVSS_context->pTemporaryFile = xVSS_context->pSettings->pTemporaryFile;
+
+        /* If a previous BGM has already been registered, delete it */
+        /* Here can be implemented test to know if the same BGM is registered */
+        if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+        {
+            if( xVSS_context->pSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL )
+            {
+                M4OSA_free(
+                    (M4OSA_MemAddr32)xVSS_context->pSettings->xVSS.pBGMtrack->
+                    pFile);
+                xVSS_context->pSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+            }
+            M4OSA_free(
+                (M4OSA_MemAddr32)xVSS_context->pSettings->xVSS.pBGMtrack);
+            xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
+        }
+
+        /* Allocate BGM */
+        xVSS_context->pSettings->xVSS.pBGMtrack =
+            (M4xVSS_BGMSettings *)M4OSA_malloc(sizeof(M4xVSS_BGMSettings), M4VS,
+            (M4OSA_Char *)"xVSS_context->pSettings->xVSS.pBGMtrack");
+
+        if( xVSS_context->pSettings->xVSS.pBGMtrack == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            return M4ERR_ALLOC;
+        }
+
+        /* Copy input structure to our structure */
+        M4OSA_memcpy((M4OSA_MemAddr8)xVSS_context->pSettings->xVSS.pBGMtrack,
+            (M4OSA_MemAddr8)pSettings->xVSS.pBGMtrack,
+            sizeof(M4xVSS_BGMSettings));
+        /* Allocate file name, and copy file name buffer to our structure */
+        xVSS_context->pSettings->xVSS.pBGMtrack->pFile =
+            M4OSA_malloc((M4OSA_chrLength(pSettings->xVSS.pBGMtrack->pFile)
+            + 1), M4VS, (M4OSA_Char *)"xVSS BGM file path");
+
+        if( xVSS_context->pSettings->xVSS.pBGMtrack->pFile == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(xVSS_context->pSettings->xVSS.pBGMtrack->pFile,
+            pSettings->xVSS.pBGMtrack->pFile,
+            M4OSA_chrLength(pSettings->xVSS.pBGMtrack->pFile) + 1);
+
+#ifdef PREVIEW_ENABLED
+        /* Decode BGM track to pcm output file */
+
+        pParams =
+            (M4xVSS_MCS_params *)M4OSA_malloc(sizeof(M4xVSS_MCS_params), M4VS,
+            (M4OSA_Char *)"Element of MCS Params (for BGM)");
+
+        if( pParams == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0(
+                "M4xVSS_sendCommand: Problem when allocating one element MCS Params");
+            return M4ERR_ALLOC;
+        }
+
+        /* Initialize the pointers in case of problem (PR 2273) */
+        pParams->pFileIn = M4OSA_NULL;
+        pParams->pFileOut = M4OSA_NULL;
+        pParams->pFileTemp = M4OSA_NULL;
+        pParams->pNext = M4OSA_NULL;
+        pParams->BeginCutTime = 0;
+        pParams->EndCutTime = 0;
+
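+        /* Insert the new BGM element into the MCS params chained list; any previously
+        registered BGM element is removed first */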
+        if( xVSS_context->pMCSparamsList
+            == M4OSA_NULL ) /* Means it is the first element of the list */
+        {
+            /* Initialize the xVSS context with the first element of the list */
+            xVSS_context->pMCSparamsList = pParams;
+
+#if 0 /* Not necessary, BGM is the last element of transcoding */
+            /* Save this element in case of other file to convert (can't happen, BGM ...) */
+
+            pMCS_last = pParams;
+
+#endif
+
+        }
+        else
+        {
+            M4xVSS_MCS_params *pParams_temp = xVSS_context->pMCSparamsList;
+            M4xVSS_MCS_params *pParams_prev = M4OSA_NULL;
+
+            /* Parse MCS params chained list to find and delete BGM element */
+            while( pParams_temp != M4OSA_NULL )
+            {
+                if( pParams_temp->isBGM == M4OSA_TRUE )
+                {
+                    /* Remove this element */
+                    if( pParams_temp->pFileIn != M4OSA_NULL )
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp->pFileIn);
+                        pParams_temp->pFileIn = M4OSA_NULL;
+                    }
+
+                    if( pParams_temp->pFileOut != M4OSA_NULL )
+                    {
+                        /* Remove PCM temporary file */
+                        M4OSA_fileExtraDelete(pParams_temp->pFileOut);
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp->pFileOut);
+                        pParams_temp->pFileOut = M4OSA_NULL;
+                    }
+                    /* Chain previous element with next element = remove BGM chained
+                         list element */
+                    if( pParams_prev != M4OSA_NULL )
+                    {
+                        pParams_prev->pNext = pParams_temp->pNext;
+                    }
+                    /* If current pointer is the first of the chained list and next pointer of
+                    the chained list is NULL */
+                    /* it means that there was only one element in the list */
+                    /* => we set the context variable to NULL so that the first chained list
+                     element can be reassigned */
+                    if( pParams_temp == xVSS_context->pMCSparamsList
+                        && pParams_temp->pNext == M4OSA_NULL )
+                    {
+                        xVSS_context->pMCSparamsList = M4OSA_NULL;
+                    }
+                    /* In that case, the BGM pointer is the first one, but there are other
+                     elements after it */
+                    /* So, we need to change first chained list element */
+                    else if( pParams_temp->pNext != M4OSA_NULL
+                        && pParams_prev == M4OSA_NULL )
+                    {
+                        xVSS_context->pMCSparamsList = pParams_temp->pNext;
+                    }
+
+                    if( pParams_temp->pNext != M4OSA_NULL )
+                    {
+                        pParams_prev = pParams_temp->pNext;
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp);
+                        pParams_temp = M4OSA_NULL;
+                        pParams_temp = pParams_prev;
+                    }
+                    else
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp);
+                        pParams_temp = M4OSA_NULL;
+                    }
+                }
+                else
+                {
+                    pParams_prev = pParams_temp;
+                    pParams_temp = pParams_temp->pNext;
+                }
+            }
+            /* We need to initialize the last element of the chained list to be able to add
+             the new BGM element */
+            pMCS_last = pParams_prev;
+
+            if( xVSS_context->pMCSparamsList == M4OSA_NULL )
+            {
+                /* In that case, it means that there was only one element in the chained list */
+                /* So, we need to save the new params*/
+                xVSS_context->pMCSparamsList = pParams;
+            }
+            else
+            {
+                /* Update next pointer of the previous last element of the chain */
+                pMCS_last->pNext = pParams;
+            }
+
+#if 0 /* Not necessary, BGM is the last element of transcoding */
+            /* Update save of last element of the chain (not necessary, BGM ...) */
+
+            pMCS_last = pParams;
+
+#endif
+
+        }
+
+        /* Fill the last M4xVSS_MCS_params element */
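+        /* The BGM track is transcoded to a raw PCM file, with no video output */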
+        pParams->InputFileType =
+            xVSS_context->pSettings->xVSS.pBGMtrack->FileType;
+        pParams->OutputFileType = M4VIDEOEDITING_kFileType_PCM;
+        pParams->OutputVideoFormat = M4VIDEOEDITING_kNoneVideo;
+        pParams->OutputVideoFrameSize = M4VIDEOEDITING_kQCIF;
+        pParams->OutputVideoFrameRate = M4VIDEOEDITING_k15_FPS;
+
+        if( xVSS_context->pSettings->xVSS.outputAudioFormat
+            == M4VIDEOEDITING_kAAC )
+        {
+            pParams->OutputAudioFormat = M4VIDEOEDITING_kAAC;
+            pParams->OutputAudioSamplingFrequency = M4VIDEOEDITING_kDefault_ASF;
+
+            /*FB: VAL CR P4ME00003076
+            The output audio bitrate in the AAC case is now directly given by the user*/
+            /*Check if the audio bitrate is correctly defined*/
+            /*Mono
+            MCS values for AAC Mono are min: 16kbps and max: 192 kbps*/
+            if( xVSS_context->pSettings->xVSS.outputAudioBitrate
+                >= M4VIDEOEDITING_k16_KBPS
+                && xVSS_context->pSettings->xVSS.outputAudioBitrate
+                <= M4VIDEOEDITING_k192_KBPS
+                && xVSS_context->pSettings->xVSS.bAudioMono == M4OSA_TRUE )
+            {
+                pParams->OutputAudioBitrate =
+                    xVSS_context->pSettings->xVSS.outputAudioBitrate;
+            }
+            /*Stereo
+            MCS values for AAC Stereo are min: 32kbps and max: 192 kbps*/
+            else if( xVSS_context->pSettings->xVSS.outputAudioBitrate
+                >= M4VIDEOEDITING_k32_KBPS
+                && xVSS_context->pSettings->xVSS.outputAudioBitrate
+                <= M4VIDEOEDITING_k192_KBPS
+                && xVSS_context->pSettings->xVSS.bAudioMono == M4OSA_FALSE )
+            {
+                pParams->OutputAudioBitrate =
+                    xVSS_context->pSettings->xVSS.outputAudioBitrate;
+            }
+            else
+            {
+                pParams->OutputAudioBitrate = M4VIDEOEDITING_k32_KBPS;
+            }
+            pParams->bAudioMono = xVSS_context->pSettings->xVSS.bAudioMono;
+        }
+        else
+        {
+            pParams->OutputAudioFormat = M4VIDEOEDITING_kAMR_NB;
+            pParams->OutputAudioSamplingFrequency = M4VIDEOEDITING_kDefault_ASF;
+            pParams->OutputAudioBitrate = M4VIDEOEDITING_k12_2_KBPS;
+            pParams->bAudioMono = M4OSA_TRUE;
+        }
+        pParams->OutputVideoBitrate = M4VIDEOEDITING_kUndefinedBitrate;
+
+        /* Prepare output filename */
+        /* 21 bytes are enough for "preview_16000_2.pcm" plus the terminating '\0' */
+        out_pcm =
+            (M4OSA_Char *)M4OSA_malloc(M4OSA_chrLength(xVSS_context->pTempPath)
+            + 21, M4VS, (M4OSA_Char *)"Temp char* for pcmPreviewFile");
+
+        if( out_pcm == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+            return M4ERR_ALLOC;
+        }
+
+        /* Copy temporary path to final preview path string */
+        M4OSA_chrNCopy(out_pcm, xVSS_context->pTempPath,
+            M4OSA_chrLength(xVSS_context->pTempPath) + 1);
+
+        /* Depending on the output sampling frequency and number of channels, we construct
+        the preview output filename */
+        if( xVSS_context->pSettings->xVSS.outputAudioFormat
+            == M4VIDEOEDITING_kAAC )
+        {
+            /* Construct output temporary PCM filename */
+            if( xVSS_context->pSettings->xVSS.bAudioMono == M4OSA_TRUE )
+            {
+                M4OSA_chrNCat(out_pcm, (M4OSA_Char *)"preview_16000_1.pcm\0",
+                    20);
+            }
+            else
+            {
+                M4OSA_chrNCat(out_pcm, (M4OSA_Char *)"preview_16000_2.pcm\0",
+                    20);
+            }
+        }
+        else if( xVSS_context->pSettings->xVSS.outputAudioFormat
+            == M4VIDEOEDITING_kAMR_NB )
+        {
+            /* Construct output temporary PCM filename */
+            M4OSA_chrNCat(out_pcm, (M4OSA_Char *)"preview_08000_1.pcm\0", 20);
+        }
+        else
+        {
+            if( out_pcm != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)out_pcm);
+                out_pcm = M4OSA_NULL;
+            }
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Bad audio output format \n");
+            return M4ERR_PARAMETER;
+        }
+
+        xVSS_context->pcmPreviewFile = out_pcm;
+
+        /**
+        * UTF conversion: convert into the customer format, before being used*/
+        pDecodedPath = out_pcm;
+        length = M4OSA_chrLength(pDecodedPath);
+
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)out_pcm, (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                    err);
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+        }
+
+        /**
+        * End of the UTF conversion, use the converted file path*/
+        xVSS_context->pcmPreviewFile =
+            (M4OSA_Void *)M4OSA_malloc(length + 1, M4VS,
+            (M4OSA_Char *)"pcmPreviewFile");
+
+        if( xVSS_context->pcmPreviewFile == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            M4OSA_free((M4OSA_MemAddr32)out_pcm);
+            out_pcm = M4OSA_NULL;
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(xVSS_context->pcmPreviewFile, pDecodedPath, length + 1);
+
+        /* Free temporary output filename */
+        if( out_pcm != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)out_pcm);
+            out_pcm = M4OSA_NULL;
+        }
+
+        pParams->pFileOut = M4OSA_malloc((length + 1), M4VS,
+            (M4OSA_Char *)"MCS BGM Params: file out");
+
+        if( pParams->pFileOut == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            return M4ERR_ALLOC;
+        }
+        pParams->pFileTemp = M4OSA_NULL;
+
+        M4OSA_memcpy(pParams->pFileOut, xVSS_context->pcmPreviewFile,
+            (length + 1)); /* Copy output file path */
+
+#if 0
+
+        xVSS_context->pcmPreviewFile =
+            (M4OSA_Char *)M4OSA_malloc(M4OSA_chrLength(out_pcm) + 1, M4VS,
+            "pcmPreviewFile");
+
+        if( xVSS_context->pcmPreviewFile == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_Init");
+            M4OSA_free((M4OSA_MemAddr32)out_pcm);
+            out_pcm = M4OSA_NULL;
+            /*FB: to avoid leaks when there is an error in the send command*/
+            /* Free Send command */
+            M4xVSS_freeCommand(xVSS_context);
+            /**/
+            return M4ERR_ALLOC;
+        }
+        M4OSA_chrNCopy(xVSS_context->pcmPreviewFile, out_pcm,
+            M4OSA_chrLength(out_pcm) + 1);
+
+        /* Free temporary output filename */
+        if( out_pcm != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)out_pcm);
+            out_pcm = M4OSA_NULL;
+        }
+
+#endif
+
+        /**
+        * UTF conversion: convert into the customer format, before being used*/
+
+        pDecodedPath = xVSS_context->pSettings->xVSS.pBGMtrack->pFile;
+        length = M4OSA_chrLength(pDecodedPath);
+
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)xVSS_context->pSettings->xVSS.pBGMtrack->
+                pFile, (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_SendCommand: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                    err);
+                /* Free Send command */
+                M4xVSS_freeCommand(xVSS_context);
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+        }
+
+        /**
+        * End of the UTF conversion, use the converted file path*/
+        pParams->pFileIn = (M4OSA_Void *)M4OSA_malloc((length + 1), M4VS,
+            (M4OSA_Char *)"MCS BGM Params: file in");
+
+        if( pParams->pFileIn == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SendCommand");
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(pParams->pFileIn, pDecodedPath,
+            (length + 1)); /* Copy input file path */
+
+        pParams->isBGM = M4OSA_TRUE;
+        pParams->isCreated = M4OSA_FALSE;
+        xVSS_context->nbStepTotal++;
+        bIsTranscoding = M4OSA_TRUE;
+#endif /* PREVIEW_ENABLED */
+
+    }
+    else if( pSettings->xVSS.pBGMtrack != M4OSA_NULL
+        && isNewBGM == M4OSA_FALSE )
+    {
+#ifdef PREVIEW_ENABLED
+        /* BGM is the same as previously registered, no need to re-decode audio */
+        /* Need to update MCS params chained list, to signal M4xVSS_step function to skip
+        BGM decoding */
+
+        M4xVSS_MCS_params *pParams_temp = xVSS_context->pMCSparamsList;
+        M4xVSS_MCS_params *pParams_prev = M4OSA_NULL;
+
+#endif /* PREVIEW_ENABLED */
+        /* We save the output file pointer, because we will need it when saving the audio
+         mixed file (last save step) */
+
+        xVSS_context->pOutputFile = xVSS_context->pSettings->pOutputFile;
+        xVSS_context->pTemporaryFile = xVSS_context->pSettings->pTemporaryFile;
+
+        /* Re-write BGM settings in case they have changed between two sendCommand */
+        xVSS_context->pSettings->xVSS.pBGMtrack->uiAddCts =
+            pSettings->xVSS.pBGMtrack->uiAddCts;
+        xVSS_context->pSettings->xVSS.pBGMtrack->uiAddVolume =
+            pSettings->xVSS.pBGMtrack->uiAddVolume;
+        xVSS_context->pSettings->xVSS.pBGMtrack->uiBeginLoop =
+            pSettings->xVSS.pBGMtrack->uiBeginLoop;
+        xVSS_context->pSettings->xVSS.pBGMtrack->uiEndLoop =
+            pSettings->xVSS.pBGMtrack->uiEndLoop;
+
+#ifdef PREVIEW_ENABLED
+        /* Parse MCS params chained list to find and delete BGM element */
+
+        while( pParams_temp != M4OSA_NULL )
+        {
+            if( pParams_temp->isBGM == M4OSA_TRUE )
+            {
+                pParams_temp->isCreated = M4OSA_TRUE;
+                break;
+            }
+            pParams_prev = pParams_temp;
+            pParams_temp = pParams_temp->pNext;
+        }
+
+#endif /* PREVIEW_ENABLED */
+
+        M4OSA_TRACE2_0("M4xVSS_SendCommand has been recalled, BGM is the same");
+    }
+    else
+    {
+        M4OSA_TRACE1_0("No BGM in this xVSS command");
+
+        if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+        {
+#ifdef PREVIEW_ENABLED
+            /* Need to remove the previous BGM element from the MCS params chained list */
+
+            M4xVSS_MCS_params *pParams_temp = xVSS_context->pMCSparamsList;
+            M4xVSS_MCS_params *pParams_prev = M4OSA_NULL;
+
+            /* Parse MCS params chained list to find and delete BGM element */
+            while( pParams_temp != M4OSA_NULL )
+            {
+                if( pParams_temp->isBGM == M4OSA_TRUE )
+                {
+                    /* Remove this element */
+                    if( pParams_temp->pFileIn != M4OSA_NULL )
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp->pFileIn);
+                        pParams_temp->pFileIn = M4OSA_NULL;
+                    }
+
+                    if( pParams_temp->pFileOut != M4OSA_NULL )
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp->pFileOut);
+                        pParams_temp->pFileOut = M4OSA_NULL;
+                    }
+                    /* Chain previous element with next element */
+                    if( pParams_prev != M4OSA_NULL )
+                    {
+                        pParams_prev->pNext = pParams_temp->pNext;
+                    }
+                    /* If current pointer is the first of the chained list and next pointer
+                     of the chained list is NULL */
+                    /* it means that there was only one element in the list */
+                    /* => we put the context variable to NULL */
+                    if( pParams_temp == xVSS_context->pMCSparamsList
+                        && pParams_temp->pNext == M4OSA_NULL )
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp);
+                        xVSS_context->pMCSparamsList = M4OSA_NULL;
+                    }
+                    /* In that case, the BGM pointer is the first one, but there are other
+                     elements after it */
+                    /* So, we need to change first chained list element */
+                    else if( pParams_temp->pNext != M4OSA_NULL )
+                    {
+                        xVSS_context->pMCSparamsList = pParams_temp->pNext;
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp);
+                        pParams_temp = M4OSA_NULL;
+                    }
+                    /* In all other cases, nothing else to do except freeing the chained
+                    list element */
+                    else
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pParams_temp);
+                        pParams_temp = M4OSA_NULL;
+                    }
+                    break;
+                }
+                pParams_prev = pParams_temp;
+                pParams_temp = pParams_temp->pNext;
+            }
+
+#endif /* PREVIEW_ENABLED */
+            /* Here, we free all BGM components and set xVSS_context->pSettings->
+            xVSS.pBGMtrack to NULL */
+
+            if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+            {
+                if( xVSS_context->pSettings->xVSS.pBGMtrack->pFile
+                    != M4OSA_NULL )
+                {
+                    M4OSA_free(xVSS_context->pSettings->xVSS.pBGMtrack->pFile);
+                    xVSS_context->pSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+                }
+                M4OSA_free(
+                    (M4OSA_MemAddr32)xVSS_context->pSettings->xVSS.pBGMtrack);
+                xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
+            }
+        }
+    }
+
+    /* Default behaviour, if no audio/video output format is set, we put H263/AMR by default */
+#if 0
+
+    if( xVSS_context->pSettings->xVSS.outputVideoFormat
+        == M4VIDEOEDITING_kNoneVideo )
+    {
+        xVSS_context->pSettings->xVSS.outputVideoFormat = M4VIDEOEDITING_kH263;
+    }
+
+    if( xVSS_context->pSettings->xVSS.outputAudioFormat
+        == M4VIDEOEDITING_kNoneAudio )
+    {
+        xVSS_context->pSettings->xVSS.outputAudioFormat =
+            M4VIDEOEDITING_kAMR_NB;
+    }
+
+#endif
+    /* Changed to be able to mix with video-only files -> in case no master clip is found
+    (i.e. only JPG input or video-only input) */
+    /* and if there is a BGM, we force the added volume to 100 (i.e. replace audio) */
+
+    if( masterClip == -1
+        && xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+    {
+        /* In that case, it means that no input clip provides an audio track.
+        Therefore, if audio mixing is requested, it will fail. Thus, we force replace audio. */
+        xVSS_context->pSettings->xVSS.pBGMtrack->uiAddVolume = 100;
+    }
+
+    /* Save clip number to know if a M4xVSS_sendCommand has already been called */
+    xVSS_context->previousClipNumber = xVSS_context->pSettings->uiClipNumber;
+
+    /* Change state */
+    xVSS_context->m_state = M4xVSS_kStateAnalyzing;
+
+    /* In case of MMS use case, we compute here the max video bitrate */
+    /* In case of too low bitrate, a specific warning is returned */
+    if( xVSS_context->pSettings->xVSS.outputFileSize != 0 && totalDuration > 0 )
+    {
+        M4OSA_UInt32 targetedBitrate = 0;
+        M4VIDEOEDITING_ClipProperties fileProperties;
+        M4OSA_Double ratio;
+
+        if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+        {
+            if( xVSS_context->pSettings->xVSS.pBGMtrack->uiAddVolume
+                == 100 ) /* We are in "replace audio" mode, need to check the file type */
+            {
+                if( xVSS_context->pSettings->xVSS.pBGMtrack->FileType
+                    == M4VIDEOEDITING_kFileType_3GPP )
+                {
+                    M4OSA_Void *pDecodedPath;
+                    /**
+                    * UTF conversion: convert into the customer format, before being used*/
+                    pDecodedPath =
+                        xVSS_context->pSettings->xVSS.pBGMtrack->pFile;
+                    length = M4OSA_chrLength(pDecodedPath);
+
+                    if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                        != M4OSA_NULL && xVSS_context->
+                        UTFConversionContext.pTempOutConversionBuffer
+                        != M4OSA_NULL )
+                    {
+                        err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                            (M4OSA_Void *)xVSS_context->pSettings->
+                            xVSS.pBGMtrack->pFile,
+                            (M4OSA_Void *)xVSS_context->
+                            UTFConversionContext.
+                            pTempOutConversionBuffer, &length);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_SendCommand: \
+                                M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                                err);
+                            /* Free Send command */
+                            M4xVSS_freeCommand(xVSS_context);
+                            return err;
+                        }
+                        pDecodedPath = xVSS_context->
+                            UTFConversionContext.pTempOutConversionBuffer;
+                    }
+
+                    /**
+                    * End of the UTF conversion, use the converted file path*/
+                    err =
+                        M4xVSS_internalGetProperties(xVSS_context, pDecodedPath,
+                        &fileProperties);
+
+                    /* Get the properties of the BGM track */
+                    /*err = M4xVSS_internalGetProperties(xVSS_context, xVSS_context->pSettings->
+                    xVSS.pBGMtrack->pFile, &fileProperties);*/
+                    if( err != M4NO_ERROR )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_sendCommand: M4xVSS_internalGetProperties returned an error:\
+                             0x%x", err);
+                        return err;
+                    }
+
+                    if( fileProperties.AudioStreamType
+                        != M4VIDEOEDITING_kAMR_NB )
+                    {
+                        M4OSA_TRACE1_0(
+                            "M4xVSS_sendCommand: Impossible to use MMS mode with BGM != AMR-NB");
+                        return M4ERR_PARAMETER;
+                    }
+                }
+                else if( xVSS_context->pSettings->xVSS.pBGMtrack->FileType
+                    != M4VIDEOEDITING_kFileType_AMR
+                    && xVSS_context->pSettings->xVSS.pBGMtrack->FileType
+                    != M4VIDEOEDITING_kFileType_MP3 )
+                {
+                    M4OSA_TRACE1_0("M4xVSS_sendCommand: Bad input BGM file");
+                    return M4ERR_PARAMETER;
+                }
+            }
+        }
+
+        /* Compute the targeted bitrate, keeping a margin for the moov atom (factor 0.84) */
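+        /* outputFileSize is in bytes (x8 -> bits) and totalDuration in ms (/1000 -> s),
+        so the result is expressed in bits per second */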
+        if( totalDuration > 1000 )
+        {
+            targetedBitrate =
+                (M4OSA_UInt32)(( xVSS_context->pSettings->xVSS.outputFileSize
+                * 8 * 0.84) / (totalDuration / 1000));
+        }
+        else
+        {
+            targetedBitrate = 0;
+        }
+
+        /* Remove audio bitrate */
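+        /* 12200 bps corresponds to the AMR-NB 12.2 kbps audio bitrate used in the MMS case */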
+        if( targetedBitrate >= 12200 )
+        {
+            targetedBitrate -= 12200; /* Only AMR is supported in MMS case */
+        }
+        else
+        {
+            targetedBitrate = 0;
+        }
+
+        /* Compute an indicator of "complexity" depending on nb of sequences and total duration */
+        /* The highest is the number of sequences, the more there are some I frames */
+        /* In that case, it is necessary to reduce the target bitrate */
+        ratio =
+            (M4OSA_Double)((M4OSA_Double)(xVSS_context->pSettings->uiClipNumber
+            * 100000) / (M4OSA_Double)(totalDuration));
+        M4OSA_TRACE2_3(
+            "Ratio clip_nb/duration = %f\nTargeted bitrate = %d\nTotal duration: %d",
+            (M4OSA_Double)((M4OSA_Double)(xVSS_context->pSettings->uiClipNumber
+            * 100000) / (M4OSA_Double)(totalDuration)),
+            targetedBitrate, totalDuration);
+
+        if( ratio > 50 && ratio <= 75 )
+        {
+            /* It means that there is a potential risk of having a higher file size
+            than specified */
+            targetedBitrate -= (M4OSA_UInt32)(targetedBitrate * 0.1);
+            M4OSA_TRACE2_2(
+                "New bitrate1 !!\nRatio clip_nb/duration = %f\nTargeted bitrate = %d",
+                ratio, targetedBitrate);
+        }
+        else if( ratio > 75 )
+        {
+            targetedBitrate -= (M4OSA_UInt32)(targetedBitrate * 0.15);
+            M4OSA_TRACE2_2(
+                "New bitrate2 !!\nRatio clip_nb/duration = %f\nTargeted bitrate = %d",
+                ratio, targetedBitrate);
+        }
+
+        /*CR 3283 MMS use case for VAL:
+        Decrease the output file size to keep a margin of 5%
+        The writer will stop when the targeted output file size is reached*/
+        xVSS_context->pSettings->xVSS.outputFileSize -=
+            (M4OSA_UInt32)(xVSS_context->pSettings->xVSS.outputFileSize * 0.05);
+
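+        /* Enforce a minimum video bitrate per output frame size; if the computed target is
+        below it, return a warning that the requested output file size will be exceeded */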
+        switch( xVSS_context->pSettings->xVSS.outputVideoSize )
+        {
+            case M4VIDEOEDITING_kSQCIF:
+                if( targetedBitrate < 32000 )
+                {
+                    xVSS_context->targetedBitrate = 32000;
+                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+                }
+                break;
+
+            case M4VIDEOEDITING_kQQVGA:
+                if( targetedBitrate < 32000 )              /*48000)*/
+                {
+                    xVSS_context->targetedBitrate = 32000; /*48000;*/
+                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+                }
+                break;
+
+            case M4VIDEOEDITING_kQCIF:
+                if( targetedBitrate < 48000 )              /*64000)*/
+                {
+                    xVSS_context->targetedBitrate = 48000; /*64000;*/
+                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+                }
+                break;
+
+            case M4VIDEOEDITING_kQVGA:
+                if( targetedBitrate < 64000 )              /*128000)*/
+                {
+                    xVSS_context->targetedBitrate = 64000; /*128000;*/
+                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+                }
+                break;
+
+            case M4VIDEOEDITING_kCIF:
+                if( targetedBitrate < 128000 )
+                {
+                    xVSS_context->targetedBitrate = 128000;
+                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+                }
+                break;
+
+            case M4VIDEOEDITING_kVGA:
+                if( targetedBitrate < 192000 )
+                {
+                    xVSS_context->targetedBitrate = 192000;
+                    return M4VSS3GPP_WAR_OUTPUTFILESIZE_EXCEED;
+                }
+                break;
+
+            default:
+                /* Cannot happen */
+                M4OSA_TRACE1_0(
+                    "M4xVSS_sendCommand: Error in output fileSize !");
+                return M4ERR_PARAMETER;
+                break;
+        }
+        xVSS_context->targetedBitrate = (M4OSA_UInt32)targetedBitrate;
+    }
+
+    if( bIsTranscoding )
+    {
+        return M4VSS3GPP_WAR_TRANSCODING_NECESSARY;
+    }
+    else
+    {
+        return M4NO_ERROR;
+    }
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_SaveStart(M4OSA_Context pContext, M4OSA_Void* pFilePath,
+ *                                         M4OSA_UInt32 filePathSize)
+ * @brief        This function prepares the save
+ * @note        The xVSS creates the final edited 3GP file
+ *                This function must be called once M4xVSS_Step has returned
+ *                M4VSS3GPP_WAR_ANALYZING_DONE
+ *                After this function, the user must call M4xVSS_Step until
+ *                it returns an error other than M4NO_ERROR.
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @param    pFilePath            (IN) Output file path, if the user wants to provide a
+ *                                different one, else can be NULL (allocated by the user)
+ * @param    filePathSize        (IN) Length of the output file path, in bytes, without
+ *                                the terminating '\0'
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_SaveStart( M4OSA_Context pContext, M4OSA_Void *pFilePath,
+                           M4OSA_UInt32 filePathSize )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err;
+
+    /*Add for UTF conversion: copy the pSettings structure into a new pCurrentEditSettings*/
+    M4VSS3GPP_EditSettings *pEditSavingSettings = M4OSA_NULL;
+    M4OSA_UInt8 i, j;
+    M4OSA_UInt32 offset = 0;
+    M4OSA_UInt8 nbEffects = 0;
+    /*only for UTF conversion support*/
+    M4OSA_Void *pDecodedPath = M4OSA_NULL;
+    M4OSA_UInt32 length = 0;
+    /**/
+
+    /* Check state */
+    if( xVSS_context->m_state != M4xVSS_kStateOpened )
+    {
+        M4OSA_TRACE1_1(
+            "Bad state when calling M4xVSS_SaveStart function! State is %d",
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    /* RC: to temporarily handle changing of the output filepath */
+    /* TO BE CHANGED CLEANLY WITH A MALLOC/MEMCPY !!!! */
+    if( pFilePath != M4OSA_NULL )
+    {
+        if( xVSS_context->pSettings->pOutputFile != M4OSA_NULL )
+        {
+            /*it means that pOutputFile has been allocated in M4xVSS_sendCommand()*/
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings->pOutputFile);
+            xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+            xVSS_context->pSettings->uiOutputPathSize = 0;
+        }
+
+        pDecodedPath = pFilePath;
+        /*As all inputs of the xVSS are in UTF8, convert the output file path into the customer
+         format*/
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)pFilePath, (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                    err);
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            filePathSize = length;
+        }
+
+        xVSS_context->pOutputFile =
+            (M4OSA_Void *)M4OSA_malloc(filePathSize + 1, M4VS,
+            (M4OSA_Char *)"M4xVSS_SaveStart: output file");
+
+        if( xVSS_context->pOutputFile == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(xVSS_context->pOutputFile, pDecodedPath, filePathSize + 1);
+        xVSS_context->pOutputFile[filePathSize] = '\0';
+        xVSS_context->pSettings->pOutputFile = xVSS_context->pOutputFile;
+        xVSS_context->pSettings->uiOutputPathSize = filePathSize;
+    }
+
+    /**
+    ***/
+
+    /*FB: Add for UTF conversion: copy the pSettings structure into a new pCurrentEditSettings*/
+    /*It is the same principle as in the PreviewStart()*/
+    pEditSavingSettings =
+        (M4VSS3GPP_EditSettings *)M4OSA_malloc(sizeof(M4VSS3GPP_EditSettings),
+        M4VS, (M4OSA_Char *)"Saving, copy of VSS structure");
+
+    if( pEditSavingSettings == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+        if( xVSS_context->pOutputFile != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+            xVSS_context->pOutputFile = M4OSA_NULL;
+        }
+        return M4ERR_ALLOC;
+    }
+
+    /* Copy settings from input structure */
+    M4OSA_memcpy((M4OSA_MemAddr8) &(pEditSavingSettings->xVSS),
+        (M4OSA_MemAddr8) &(xVSS_context->pSettings->xVSS),
+        sizeof(M4xVSS_EditSettings));
+
+    /* Initialize pEditSavingSettings structure */
+    pEditSavingSettings->xVSS.pBGMtrack = M4OSA_NULL;
+
+    pEditSavingSettings->videoFrameRate =
+        xVSS_context->pSettings->videoFrameRate;
+    pEditSavingSettings->uiClipNumber = xVSS_context->pSettings->uiClipNumber;
+    pEditSavingSettings->uiMasterClip =
+        xVSS_context->pSettings->uiMasterClip; /* VSS2.0 mandatory parameter */
+
+    /* Allocate savingSettings.pClipList/pTransitions structure */
+    pEditSavingSettings->pClipList = (M4VSS3GPP_ClipSettings *
+        * )M4OSA_malloc(sizeof(M4VSS3GPP_ClipSettings *)
+        *pEditSavingSettings->uiClipNumber,
+        M4VS, (M4OSA_Char *)"xVSS, saving , copy of pClipList");
+
+    if( pEditSavingSettings->pClipList == M4OSA_NULL )
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+        if( xVSS_context->pOutputFile != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+            xVSS_context->pOutputFile = M4OSA_NULL;
+        }
+        return M4ERR_ALLOC;
+    }
+
+    if( pEditSavingSettings->uiClipNumber > 1 )
+    {
+        pEditSavingSettings->pTransitionList = (M4VSS3GPP_TransitionSettings *
+            * )M4OSA_malloc(sizeof(M4VSS3GPP_TransitionSettings *)
+            *(pEditSavingSettings->uiClipNumber - 1),
+            M4VS, (M4OSA_Char *)"xVSS, saving, copy of pTransitionList");
+
+        if( pEditSavingSettings->pTransitionList == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+    }
+    else
+    {
+        pEditSavingSettings->pTransitionList = M4OSA_NULL;
+    }
+
+    for ( i = 0; i < pEditSavingSettings->uiClipNumber; i++ )
+    {
+        pEditSavingSettings->pClipList[i] = (M4VSS3GPP_ClipSettings
+            *)M4OSA_malloc(sizeof(M4VSS3GPP_ClipSettings),
+            M4VS, (M4OSA_Char *)"saving clip settings");
+
+        if( pEditSavingSettings->pClipList[i] == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+
+        if( i < pEditSavingSettings->uiClipNumber
+            - 1 ) /* Because there is one transition fewer than the number of clips */
+        {
+            pEditSavingSettings->pTransitionList[i] =
+                (M4VSS3GPP_TransitionSettings
+                *)M4OSA_malloc(sizeof(M4VSS3GPP_TransitionSettings),
+                M4VS, (M4OSA_Char *)"saving transition settings");
+
+            if( pEditSavingSettings->pTransitionList[i] == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+                if( xVSS_context->pOutputFile != M4OSA_NULL )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                    xVSS_context->pOutputFile = M4OSA_NULL;
+                }
+                return M4ERR_ALLOC;
+            }
+        }
+    }
+
+    for ( i = 0; i < xVSS_context->pSettings->uiClipNumber; i++ )
+    {
+        // Add MP4 file support
+
+        if( ( xVSS_context->pSettings->pClipList[i]->FileType
+            == M4VIDEOEDITING_kFileType_3GPP)
+            || (xVSS_context->pSettings->pClipList[i]->FileType
+            == M4VIDEOEDITING_kFileType_MP4) )
+
+        {
+            /* Copy data from given structure to our saving structure */
+            M4xVSS_DuplicateClipSettings(pEditSavingSettings->pClipList[i],
+                xVSS_context->pSettings->pClipList[i],
+                M4OSA_FALSE /* remove effects */);
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            pDecodedPath = pEditSavingSettings->pClipList[i]->pFile;
+            length = M4OSA_chrLength(pDecodedPath);
+
+            if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct
+                != M4OSA_NULL && xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer
+                != M4OSA_NULL )
+            {
+                err =
+                    M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void
+                    *)pEditSavingSettings->pClipList[i]->pFile,
+                    (M4OSA_Void *)xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer,
+                    &length);
+
+                if( err != M4NO_ERROR )
+                {
+                    M4OSA_TRACE1_1(
+                        "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                        err);
+
+                    if( xVSS_context->pOutputFile != M4OSA_NULL )
+                    {
+                        M4OSA_free(
+                            (M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                        xVSS_context->pOutputFile = M4OSA_NULL;
+                    }
+                    return err;
+                }
+                pDecodedPath = xVSS_context->
+                    UTFConversionContext.pTempOutConversionBuffer;
+
+                /**
+                * End of the UTF conversion, use the converted file path*/
+                M4OSA_free((M4OSA_MemAddr32)
+                    pEditSavingSettings->pClipList[i]->pFile);
+                pEditSavingSettings->pClipList[i]->pFile = (M4OSA_Void
+                    *)M4OSA_malloc((length + 1),
+                    M4VS, (M4OSA_Char *)"saving transition settings");
+
+                if( pEditSavingSettings->pClipList[i]->pFile == M4OSA_NULL )
+                {
+                    M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+                    if( xVSS_context->pOutputFile != M4OSA_NULL )
+                    {
+                        M4OSA_free(
+                            (M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                        xVSS_context->pOutputFile = M4OSA_NULL;
+                    }
+                    return M4ERR_ALLOC;
+                }
+                M4OSA_memcpy(pEditSavingSettings->pClipList[i]->pFile,
+                    pDecodedPath, length + 1);
+            }
+            /*FB: add file path size because of UTF 16 conversion*/
+            pEditSavingSettings->pClipList[i]->filePathSize = length+1;
+
+            if( i
+                < xVSS_context->pSettings->uiClipNumber
+                - 1 ) /* Because there is one transition fewer than the number of clips */
+            {
+                M4OSA_memcpy(
+                    (M4OSA_MemAddr8)pEditSavingSettings->pTransitionList[i],
+                    (M4OSA_MemAddr8)xVSS_context->pSettings->
+                    pTransitionList[i],
+                    sizeof(M4VSS3GPP_TransitionSettings));
+            }
+        }
+        else
+        {
+            M4OSA_TRACE1_0(
+                "M4xVSS_SaveStart: Error when parsing xVSS_context->pSettings->pClipList[i]:\
+                 Bad file type");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_PARAMETER;
+        }
+    }
+
+    /* Count the number of video effects, used to know how much memory needs to be allocated */
+    /* FB 2008/10/15: removed : not compatible with M4VSS3GPP_kVideoEffectType_None
+    for(j=0;j<xVSS_context->pSettings->nbEffects;j++)
+    {
+    if(xVSS_context->pSettings->Effects[j].VideoEffectType != M4VSS3GPP_kVideoEffectType_None)
+    {
+    nbEffects++;
+    }
+    }*/
+    nbEffects = xVSS_context->pSettings->nbEffects;
+
+    /* Allocate effects saving structure with correct number of effects */
+    if( nbEffects != 0 )
+    {
+        pEditSavingSettings->Effects =
+            (M4VSS3GPP_EffectSettings *)M4OSA_malloc(nbEffects
+            * sizeof(M4VSS3GPP_EffectSettings), M4VS, (M4OSA_Char
+            *)"Saving settings, effects table of structure settings");
+
+        if( pEditSavingSettings->Effects == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+
+        /* Just copy the effect structures to the saving structure, as effect times are now */
+        /* relative to the output clip time */
+        M4OSA_memcpy((M4OSA_MemAddr8)pEditSavingSettings->Effects,
+            (M4OSA_MemAddr8)xVSS_context->pSettings->Effects,
+            nbEffects * sizeof(M4VSS3GPP_EffectSettings));
+    }
+    else
+    {
+        pEditSavingSettings->Effects = M4OSA_NULL;
+        pEditSavingSettings->nbEffects = 0;
+    }
+    pEditSavingSettings->nbEffects = nbEffects;
+
+    if( pFilePath != M4OSA_NULL )
+    {
+        pEditSavingSettings->pOutputFile = pFilePath;
+    }
+
+    /* Save pointer of saving video editor to use in step function */
+    xVSS_context->pCurrentEditSettings = pEditSavingSettings;
+
+    /* Change output file name to temporary output file name, because final file will be
+     generated by audio mixer */
+    if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+    {
+
+        M4OSA_Char out_3gp[64];
+        M4OSA_Char out_3gp_tmp[64];
+
+        /**/
+        pEditSavingSettings->xVSS.pBGMtrack =
+            (M4xVSS_BGMSettings *)M4OSA_malloc(sizeof(M4xVSS_BGMSettings), M4VS,
+            (M4OSA_Char *)"Saving settings, BGM track structure");
+
+        if( pEditSavingSettings->xVSS.pBGMtrack == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+
+        /* Copy the BGM settings structure into the saving structure */
+        M4OSA_memcpy((M4OSA_MemAddr8)pEditSavingSettings->xVSS.pBGMtrack,
+            (M4OSA_MemAddr8)xVSS_context->pSettings->xVSS.pBGMtrack,
+            sizeof(M4xVSS_BGMSettings));
+
+        /* Allocate file name, and copy file name buffer to our structure */
+        pEditSavingSettings->xVSS.pBGMtrack->pFile = M4OSA_malloc(
+            (M4OSA_chrLength(xVSS_context->pSettings->xVSS.pBGMtrack->pFile)
+            + 1),
+            M4VS, (M4OSA_Char *)"Saving struct xVSS BGM file path");
+
+        if( pEditSavingSettings->xVSS.pBGMtrack->pFile == M4OSA_NULL )
+        {
+            M4xVSS_freeCommand(xVSS_context);
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(pEditSavingSettings->xVSS.pBGMtrack->pFile,
+            xVSS_context->pSettings->xVSS.pBGMtrack->pFile,
+            M4OSA_chrLength(xVSS_context->pSettings->xVSS.pBGMtrack->pFile)
+            + 1);
+
+        /*Copy BGM track file path*/
+
+        /**
+        * UTF conversion*/
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)pEditSavingSettings->xVSS.pBGMtrack->pFile,
+                (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                    err);
+
+                if( xVSS_context->pOutputFile != M4OSA_NULL )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                    xVSS_context->pOutputFile = M4OSA_NULL;
+                }
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+
+            M4OSA_free(
+                (M4OSA_MemAddr32)pEditSavingSettings->xVSS.pBGMtrack->pFile);
+            pEditSavingSettings->xVSS.pBGMtrack->pFile =
+                (M4OSA_Void *)M4OSA_malloc(length + 1, M4VS, (M4OSA_Char
+                *)"M4xVSS_SaveStart: Temp filename in case of BGM");
+
+            if( pEditSavingSettings->xVSS.pBGMtrack->pFile == M4OSA_NULL )
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+                if( xVSS_context->pOutputFile != M4OSA_NULL )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                    xVSS_context->pOutputFile = M4OSA_NULL;
+                }
+                return M4ERR_ALLOC;
+            }
+            M4OSA_memcpy(pEditSavingSettings->xVSS.pBGMtrack->pFile,
+                pDecodedPath, length + 1);
+        }
+
+        /**/
+
+        M4OSA_chrNCopy(out_3gp, xVSS_context->pTempPath, 64);
+        M4OSA_chrNCopy(out_3gp_tmp, xVSS_context->pTempPath, 64);
+
+        /* Construct output temporary 3GP filename */
+        M4OSA_chrNCat(out_3gp, (M4OSA_Char *)"savetemp.3gp\0", 13);
+        M4OSA_chrNCat(out_3gp_tmp, (M4OSA_Char *)"savetemp.tmp\0", 13);
+
+        /**
+        * UTF conversion: convert into the customer format, before being used*/
+        pDecodedPath = out_3gp;
+        length = M4OSA_chrLength(pDecodedPath);
+
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)out_3gp, (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                    err);
+
+                if( xVSS_context->pOutputFile != M4OSA_NULL )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                    xVSS_context->pOutputFile = M4OSA_NULL;
+                }
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+        }
+
+        /**
+        * End of the UTF conversion, use the converted file path*/
+        xVSS_context->pCurrentEditSettings->pOutputFile =
+            (M4OSA_Void *)M4OSA_malloc(length + 1, M4VS,
+            (M4OSA_Char *)"M4xVSS_SaveStart: Temp filename in case of BGM");
+
+        if( xVSS_context->pCurrentEditSettings->pOutputFile == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(xVSS_context->pCurrentEditSettings->pOutputFile,
+            pDecodedPath, length + 1);
+        xVSS_context->pCurrentEditSettings->uiOutputPathSize = length + 1;
+
+        /**
+        * UTF conversion: convert into the customer format, before being used*/
+        pDecodedPath = out_3gp_tmp;
+        length = M4OSA_chrLength(pDecodedPath);
+
+        if( xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            != M4OSA_NULL )
+        {
+            err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                (M4OSA_Void *)out_3gp_tmp, (M4OSA_Void *)xVSS_context->
+                UTFConversionContext.pTempOutConversionBuffer, &length);
+
+            if( err != M4NO_ERROR )
+            {
+                M4OSA_TRACE1_1(
+                    "M4xVSS_SaveStart: M4xVSS_internalConvertFromUTF8 returns err: 0x%x",
+                    err);
+
+                if( xVSS_context->pOutputFile != M4OSA_NULL )
+                {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                    xVSS_context->pOutputFile = M4OSA_NULL;
+                }
+                return err;
+            }
+            pDecodedPath =
+                xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+        }
+
+        /**
+        * End of the UTF conversion, use the converted file path*/
+        xVSS_context->pCurrentEditSettings->pTemporaryFile =
+            (M4OSA_Void *)M4OSA_malloc(length + 1, M4VS,
+            (M4OSA_Char *)"M4xVSS_SaveStart: Temporary file");
+
+        if( xVSS_context->pCurrentEditSettings->pTemporaryFile == M4OSA_NULL )
+        {
+            M4OSA_TRACE1_0("Allocation error in M4xVSS_SaveStart");
+
+            if( xVSS_context->pOutputFile != M4OSA_NULL )
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            return M4ERR_ALLOC;
+        }
+        M4OSA_memcpy(xVSS_context->pCurrentEditSettings->pTemporaryFile,
+            pDecodedPath, length + 1);
+
+        /* Set the number of steps for progress monitoring to 2, because audio mixing is needed */
+        xVSS_context->nbStepTotal = 2;
+    }
+    else
+    {
+        xVSS_context->pCurrentEditSettings->pOutputFile =
+            xVSS_context->pOutputFile;
+        xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
+
+        /* Set the number of steps for progress monitoring to 1, because no audio mixing is needed */
+        xVSS_context->nbStepTotal = 1;
+    }
+
+    /**
+    ***/
+
+    err = M4xVSS_internalGenerateEditedFile(xVSS_context);
+
+    if( err != M4NO_ERROR )
+    {
+        M4OSA_TRACE1_1(
+            "M4xVSS_SaveStart: M4xVSS_internalGenerateEditedFile returned an error: 0x%x",
+            err);
+
+        /**/
+        if( xVSS_context->pCurrentEditSettings->pOutputFile != M4OSA_NULL
+            && xVSS_context->pSettings->xVSS.pBGMtrack == M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCurrentEditSettings->
+                pOutputFile);
+            xVSS_context->pCurrentEditSettings->pOutputFile = M4OSA_NULL;
+            xVSS_context->pOutputFile = M4OSA_NULL;
+        }
+
+        if( xVSS_context->pCurrentEditSettings->pTemporaryFile != M4OSA_NULL
+            && xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCurrentEditSettings->
+                pTemporaryFile);
+            xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
+        }
+
+        if( xVSS_context->pOutputFile != M4OSA_NULL )
+        {
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+            xVSS_context->pOutputFile = M4OSA_NULL;
+        }
+        /* TODO: Translate error code of VSS to an xVSS error code */
+        return err;
+    }
+
+    /* Reinitialize current step number for progression monitoring */
+    xVSS_context->currentStep = 0;
+
+    /* Change xVSS state */
+    xVSS_context->m_state = M4xVSS_kStateSaving;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_SaveStop(M4OSA_Context pContext)
+ * @brief        This function deallocates saving resources and changes the xVSS
+ *                internal state.
+ * @note        This function must be called once M4xVSS_Step has returned
+ *                M4VSS3GPP_WAR_SAVING_DONE
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_SaveStop( M4OSA_Context pContext )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check state */
+    if( xVSS_context->m_state != M4xVSS_kStateSaving )
+    {
+        M4OSA_TRACE1_1(
+            "Bad state when calling M4xVSS_SaveStop function! State is %d",
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    /* Free saving structures */
+    M4xVSS_internalFreeSaving(xVSS_context);
+
+    if( xVSS_context->pOutputFile != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+        xVSS_context->pOutputFile = M4OSA_NULL;
+    }
+
+    /* Change xVSS state */
+    xVSS_context->m_state = M4xVSS_kStateSaved;
+
+    return M4NO_ERROR;
+}
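+
+/* Illustrative usage sketch, kept out of the build with #if 0: a minimal saving loop
+built from the public save API (M4xVSS_SaveStart / M4xVSS_Step / M4xVSS_SaveStop).
+The M4xVSS_SaveStart() argument list is assumed from its definition earlier in this
+file; a real application would also call M4xVSS_CloseCommand and M4xVSS_CleanUp. */
+#if 0
+static M4OSA_ERR exampleSaveFlow( M4OSA_Context pCtxt, M4OSA_Void *pOutPath,
+                                  M4OSA_UInt32 outPathSize )
+{
+    M4OSA_ERR err;
+    M4OSA_UInt8 progress = 0;
+
+    err = M4xVSS_SaveStart(pCtxt, pOutPath, outPathSize);
+
+    if( err != M4NO_ERROR )
+    {
+        return err;
+    }
+
+    /* M4xVSS_Step returns M4NO_ERROR while there is still work to do, and
+    M4VSS3GPP_WAR_SAVING_DONE once the edited (and possibly audio mixed) file is ready */
+    do
+    {
+        err = M4xVSS_Step(pCtxt, &progress);
+    } while( err == M4NO_ERROR );
+
+    if( err != M4VSS3GPP_WAR_SAVING_DONE )
+    {
+        return err;
+    }
+
+    /* Free saving resources and move the context to the Saved state */
+    return M4xVSS_SaveStop(pCtxt);
+}
+#endif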
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_Step(M4OSA_Context pContext, M4OSA_UInt8 *pProgress)
+ * @brief        This function executes different tasks, depending on the xVSS
+ *                internal state.
+ * @note        This function:
+ *                    - analyses editing structure if called after M4xVSS_SendCommand
+ *                    - generates preview file if called after M4xVSS_PreviewStart
+ *                    - generates final edited file if called after M4xVSS_SaveStart
+ *
+ * @param    pContext                        (IN) Pointer on the xVSS edit context
+ * @param    pProgress                        (IN/OUT) Pointer on an integer giving a
+ *                                            progress indication (between 0-100)
+ * @return    M4NO_ERROR:                        No error, the user must call M4xVSS_Step again
+ * @return    M4ERR_PARAMETER:                At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:                    This function cannot be called at this time
+ * @return    M4VSS3GPP_WAR_PREVIEW_READY:    Preview file is generated
+ * @return    M4VSS3GPP_WAR_SAVING_DONE:        Final edited file is generated
+ * @return    M4VSS3GPP_WAR_ANALYZING_DONE:    Analyse is done
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_Step( M4OSA_Context pContext, M4OSA_UInt8 *pProgress )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4VSS3GPP_EditContext pVssCtxt = xVSS_context->pCurrentEditContext;
+    M4VSS3GPP_AudioMixingContext pAudioMixingCtxt =
+        xVSS_context->pAudioMixContext;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt8 uiProgress = 0;
+
+    switch( xVSS_context->m_state )
+    {
+        case M4xVSS_kStateSaving:
+        //case M4xVSS_kStateGeneratingPreview:
+            {
+                if( xVSS_context->editingStep
+                    == M4xVSS_kMicroStateEditing ) /* VSS -> creating effects, transitions ... */
+                {
+                    /* RC: to delete unnecessary temp files on the fly */
+                    M4VSS3GPP_InternalEditContext *pVSSContext =
+                        (M4VSS3GPP_InternalEditContext *)pVssCtxt;
+
+                    err = M4VSS3GPP_editStep(pVssCtxt, &uiProgress);
+
+                    if( ( err != M4NO_ERROR) && (err != M4VSS3GPP_WAR_EDITING_DONE)
+                        && (err != M4VSS3GPP_WAR_SWITCH_CLIP) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_Step: M4VSS3GPP_editStep returned 0x%x\n", err);
+                        M4VSS3GPP_editCleanUp(pVssCtxt);
+                        /* TODO ? : Translate error code of VSS to an xVSS error code ? */
+                        xVSS_context->pCurrentEditContext = M4OSA_NULL;
+                        return err;
+                    }
+
+                    /* RC: to delete unnecessary temp files on the fly */
+                    if( err == M4VSS3GPP_WAR_SWITCH_CLIP )
+                    {
+#ifndef DO_NOT_REMOVE_TEMP_FILES
+                        /* It means we can delete the temporary file */
+                        /* First step: check that the temp file is not used somewhere else later */
+
+                        M4OSA_UInt32 i;
+                        M4OSA_Int32 cmpResult = -1;
+
+                        for ( i = pVSSContext->uiCurrentClip;
+                            i < pVSSContext->uiClipNumber; i++ )
+                        {
+                            if( pVSSContext->pClipList[pVSSContext->uiCurrentClip
+                                - 1].filePathSize
+                                == pVSSContext->pClipList[i].filePathSize )
+                            {
+                                cmpResult = M4OSA_memcmp(pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].pFile, pVSSContext->pClipList[i].pFile,
+                                    pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].filePathSize);
+
+                                if( cmpResult == 0 )
+                                {
+                                    /* It means we found a corresponding file, we do not delete
+                                    this temporary file */
+                                    break;
+                                }
+                            }
+                        }
+
+                        if( cmpResult != 0 )
+                        {
+                            M4OSA_UInt32 ConvertedSize = 0;
+                            M4OSA_Char *toto;
+                            M4OSA_Char *pTmpStr;
+
+                            /* Convert result in UTF8 to check if we can delete it or not */
+                            if( xVSS_context->UTFConversionContext.pConvToUTF8Fct
+                                != M4OSA_NULL && xVSS_context->
+                                UTFConversionContext.
+                                pTempOutConversionBuffer != M4OSA_NULL )
+                            {
+                                M4xVSS_internalConvertToUTF8(xVSS_context,
+                                    (M4OSA_Void *)pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].pFile, (M4OSA_Void *)xVSS_context->
+                                    UTFConversionContext.
+                                    pTempOutConversionBuffer, &ConvertedSize);
+                                err = M4OSA_chrFindPattern(xVSS_context->
+                                    UTFConversionContext.
+                                    pTempOutConversionBuffer,
+                                    xVSS_context->pTempPath, &toto);
+                                pTmpStr =
+                                    xVSS_context->UTFConversionContext.
+                                    pTempOutConversionBuffer;
+                            }
+                            else
+                            {
+                                err = M4OSA_chrFindPattern(pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].pFile, xVSS_context->pTempPath, &toto);
+                                pTmpStr = pVSSContext->
+                                    pClipList[pVSSContext->uiCurrentClip
+                                    - 1].pFile;
+                            }
+
+                            if( err == M4NO_ERROR )
+                            {
+                                /* As temporary files can be imgXXX.3gp or vidXXX.3gp */
+                                pTmpStr +=
+                                    (M4OSA_chrLength(pTmpStr)
+                                    - 10); /* Because the temporary file names are
+                                    10 characters long */
+                                err = M4OSA_chrFindPattern(pTmpStr,
+                                    (M4OSA_Char *)"img", &toto);
+
+                                if( err != M4NO_ERROR )
+                                {
+                                    err = M4OSA_chrFindPattern(pTmpStr,
+                                        (M4OSA_Char *)"vid", &toto);
+                                }
+
+                                if( err
+                                    == M4NO_ERROR ) /* It means the file is a temporary file, we
+                                    can delete it */
+                                {
+                                    M4OSA_fileExtraDelete(pVSSContext->
+                                        pClipList[pVSSContext->uiCurrentClip
+                                        - 1].pFile);
+                                }
+                            }
+                        }
+
+#endif /* DO_NOT_REMOVE_TEMP_FILES*/
+                        /* */
+
+                        err = M4NO_ERROR;
+                    }
+
+                    if( err == M4VSS3GPP_WAR_EDITING_DONE )
+                    {
+                        xVSS_context->currentStep++;
+                        /* P4ME00003276: When a step is complete, increment currentStep and reset
+                        uiProgress, otherwise the reported progress would be wrong */
+                        uiProgress = 0;
+                        err = M4xVSS_internalCloseEditedFile(xVSS_context);
+                        /* Fix for  blrnxpsw#234---> */
+                        if( err != M4NO_ERROR )
+                        {
+                            if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                            {
+                                err = M4xVSSERR_NO_MORE_SPACE;
+                            }
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_internalCloseEditedFile returned an error: 0x%x",
+                                err);
+                            return err;
+                        }
+                        /*<---- Fix for  blrnxpsw#234 */
+                        if( xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack
+                            != M4OSA_NULL )
+                        {
+                            xVSS_context->editingStep =
+                                M4xVSS_kMicroStateAudioMixing;
+                            /* Open Audio mixing component */
+                            err = M4xVSS_internalGenerateAudioMixFile(xVSS_context);
+
+                            if( err != M4NO_ERROR )
+                            {
+                                M4OSA_TRACE1_1(
+                                    "M4xVSS_internalGenerateAudioMixFile returned an error: 0x%x",
+                                    err);
+                                /* TODO ? : Translate error code of VSS to an xVSS error code */
+                                return err;
+                            }
+                            err = M4NO_ERROR;
+                            goto end_step;
+                        }
+                        else
+                        {
+
+                            err = M4VSS3GPP_WAR_SAVING_DONE;
+                            goto end_step;
+
+                        }
+                    }
+                }
+                else if( xVSS_context->editingStep
+                    == M4xVSS_kMicroStateAudioMixing ) /* Audio mixing: mix/replace audio track
+                    with given BGM */
+                {
+                    err = M4VSS3GPP_audioMixingStep(pAudioMixingCtxt, &uiProgress);
+
+                    if( ( err != M4NO_ERROR)
+                        && (err != M4VSS3GPP_WAR_END_OF_AUDIO_MIXING) )
+                    {
+                        M4OSA_TRACE1_1(
+                            "M4VSS3GPP_audioMixingMain: M4VSS3GPP_audioMixingStep returned 0x%x\n",
+                            err);
+                        /* TODO ? : Translate error code of VSS to an xVSS error code */
+                        return err;
+                    }
+
+                    if( err == M4VSS3GPP_WAR_END_OF_AUDIO_MIXING )
+                    {
+                        xVSS_context->currentStep++;
+                        /* P4ME00003276: When a step is complete, increment currentStep and reset
+                        uiProgress, otherwise the reported progress would be wrong */
+                        uiProgress = 0;
+                        err = M4xVSS_internalCloseAudioMixedFile(xVSS_context);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1(
+                                "M4xVSS_internalCloseAudioMixedFile returned an error: 0x%x",
+                                err);
+                            /* TODO ? : Translate error code of VSS to an xVSS error code */
+                            return err;
+                        }
+
+                        err = M4VSS3GPP_WAR_SAVING_DONE;
+                        goto end_step;
+
+                    }
+                }
+                else
+                {
+                    M4OSA_TRACE1_0("Bad state in step function !");
+                    return M4ERR_STATE;
+                }
+            }
+            break;
+
+        case M4xVSS_kStateAnalyzing:
+            {
+                if( xVSS_context->analyseStep
+                    == M4xVSS_kMicroStateAnalysePto3GPP ) /* Pto3GPP, analysing input parameters */
+                {
+                    if( xVSS_context->pPTo3GPPcurrentParams == M4OSA_NULL
+                        && xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+                    {
+                        xVSS_context->pPTo3GPPcurrentParams =
+                            xVSS_context->
+                            pPTo3GPPparamsList; /* Current Pto3GPP Parameter is the first element
+                            of the list */
+                    }
+                    else if( xVSS_context->pPTo3GPPcurrentParams != M4OSA_NULL
+                        && xVSS_context->pPTo3GPPparamsList != M4OSA_NULL )
+                    {
+                        xVSS_context->pPTo3GPPcurrentParams =
+                            xVSS_context->pPTo3GPPcurrentParams->
+                            pNext; /* Current Pto3GPP Parameter is the next element of the list */
+
+                        if( xVSS_context->pPTo3GPPcurrentParams
+                            == M4OSA_NULL ) /* It means there is no next image to convert */
+                        {
+                            /* We step to MCS phase */
+                            xVSS_context->analyseStep =
+                                M4xVSS_kMicroStateAnalyzeMCS;
+                            err = M4NO_ERROR;
+                            goto end_step;
+                        }
+                    }
+                    else if( xVSS_context->pPTo3GPPparamsList == M4OSA_NULL )
+                    {
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateAnalyzeMCS; /* Change Analyzing micro state to
+                             MCS phase */
+                        err = M4NO_ERROR;
+                        goto end_step;
+                    }
+
+                    /* Check if this file has to be converted or not */
+                    /* If not, we just return M4NO_ERROR, and go to next file */
+                    if( xVSS_context->pPTo3GPPcurrentParams->isCreated
+                        == M4OSA_FALSE )
+                    {
+                        /* Opening Pto3GPP */
+                        err = M4xVSS_internalStartConvertPictureTo3gp(xVSS_context);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_Step: M4xVSS_internalStartConvertPictureTo3gp \
+                            returned error: 0x%x",
+                                err)
+                                /* TODO ? : Translate error code of VSS to an xVSS error code */
+                                return err;
+                        }
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateConvertPto3GPP;
+                    }
+                }
+                else if( xVSS_context->analyseStep
+                    == M4xVSS_kMicroStateConvertPto3GPP ) /* Pto3GPP, converting */
+                {
+                    err = M4PTO3GPP_Step(xVSS_context->pM4PTO3GPP_Ctxt);
+
+                    if( ( err != M4NO_ERROR) && (err
+                        != ((M4OSA_UInt32)M4PTO3GPP_WAR_END_OF_PROCESSING)) )
+                    {
+                        /* TO BE CHECKED NO LEAKS  !!!!! */
+                        M4OSA_TRACE1_1(
+                            "M4xVSS_Step: M4PTO3GPP_Step returned 0x%x\n", err);
+                        /* TODO ? : Translate error code of VSS to an xVSS error code */
+                        return err;
+                    }
+                    else if( err
+                        == ((M4OSA_UInt32)M4PTO3GPP_WAR_END_OF_PROCESSING) )
+                    {
+                        xVSS_context->currentStep++;
+                        /* P4ME00003276: When a step is complete, increment currentStep and reset
+                        uiProgress, otherwise the reported progress would be wrong */
+                        uiProgress = 0;
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateAnalysePto3GPP; /* We go back to analyze parameters
+                            to see if there is a next file to convert */
+                        /* RC !!!!!!!! */
+                        xVSS_context->pPTo3GPPcurrentParams->isCreated =
+                            M4OSA_TRUE; /* To avoid reconverting it if another SendCommand is
+                            called */
+                        err = M4xVSS_internalStopConvertPictureTo3gp(xVSS_context);
+                        /*SS:blrnxpsw#  234 */
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_Step:\
+                                           M4xVSS_internalStopConvertPictureTo3gp returned 0x%x",
+                                            err);
+                            /* TODO ? : Translate error code of VSS to an xVSS error code */
+                            return err;
+                        }
+                    }
+                }
+                else if( xVSS_context->analyseStep
+                    ==
+                    M4xVSS_kMicroStateAnalyzeMCS ) /* MCS: analyzing input parameters */
+                {
+                    if( xVSS_context->pMCScurrentParams == M4OSA_NULL \
+                        && xVSS_context->pMCSparamsList != M4OSA_NULL )
+                    {
+                        xVSS_context->pMCScurrentParams = xVSS_context->
+                            pMCSparamsList; /* Current MCS Parameter is the first
+                                            element of the list */
+                    }
+                    else if( xVSS_context->pMCScurrentParams != M4OSA_NULL \
+                        && xVSS_context->pMCSparamsList != M4OSA_NULL )
+                    {
+                        xVSS_context->pMCScurrentParams =
+                            xVSS_context->pMCScurrentParams->
+                            pNext; /* Current MCS Parameter
+                                   is the next element of the list */
+
+                        if( xVSS_context->pMCScurrentParams == M4OSA_NULL )
+                            /* It means there is no next image to convert */
+                        {
+                            xVSS_context->analyseStep =
+                                M4xVSS_kMicroStateAnalysePto3GPP; /* Reinit Analyzing micro state */
+                            xVSS_context->m_state =
+                                M4xVSS_kStateOpened; /* Change xVSS state */
+                            err = M4VSS3GPP_WAR_ANALYZING_DONE;
+                            goto end_step; /* End of Analysis */
+                        }
+                    }
+                    else if( xVSS_context->pMCSparamsList == M4OSA_NULL )
+                    {
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateAnalysePto3GPP; /* Reinit Analyzing micro state */
+                        xVSS_context->m_state =
+                            M4xVSS_kStateOpened; /* Change xVSS state */
+                        err = M4VSS3GPP_WAR_ANALYZING_DONE;
+                        goto end_step;                        /* End of Analysis */
+                    }
+
+                    /* Check if this file has to be transcoded or not */
+                    /* If not, we just return M4NO_ERROR, and go to next file */
+                    if( xVSS_context->pMCScurrentParams->isCreated == M4OSA_FALSE )
+                    {
+                        /* Opening MCS */
+                        err = M4xVSS_internalStartTranscoding(xVSS_context);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_Step: M4xVSS_internalStartTranscoding returned\
+                                 error: 0x%x", err)
+                                           /* TODO ? : Translate error code of MCS to an xVSS error
+                                           code ? */
+                                           return err;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_Step: M4xVSS_internalStartTranscoding returned\
+                                success; MCS context: 0x%x",
+                                 xVSS_context->pMCS_Ctxt)xVSS_context->analyseStep =
+                                       M4xVSS_kMicroStateTranscodeMCS;
+                    }
+                }
+                else if( xVSS_context->analyseStep
+                    == M4xVSS_kMicroStateTranscodeMCS )
+                    /* MCS: transcoding file */
+                {
+                    err = M4MCS_step(xVSS_context->pMCS_Ctxt, &uiProgress);
+                    /*SS:blrnxpsw#  234 */
+                    if( err == ((M4OSA_UInt32)M4MCS_ERR_NOMORE_SPACE) )
+                    {
+                        err = M4xVSSERR_NO_MORE_SPACE;
+                    }
+
+                    if( ( err != M4NO_ERROR)
+                        && (err != M4MCS_WAR_TRANSCODING_DONE) )
+                    {
+                        /* TO BE CHECKED NO LEAKS  !!!!! */
+                        M4OSA_TRACE1_1("M4xVSS_Step: M4MCS_step returned 0x%x\n",
+                            err);
+                        /* TODO ? : Translate error code of MCS to an xVSS error code ? */
+                        return err;
+                    }
+                    else if( err == M4MCS_WAR_TRANSCODING_DONE )
+                    {
+                        xVSS_context->currentStep++;
+                        /* P4ME00003276: When a step is complete, increment currentStep and reset
+                        uiProgress, otherwise the reported progress would be wrong */
+                        uiProgress = 0;
+                        xVSS_context->analyseStep =
+                            M4xVSS_kMicroStateAnalyzeMCS; /* We go back to
+                                                          analyze parameters to see if there is
+                                                           a next file to transcode */
+                        /* RC !!!!!!!!!*/
+                        xVSS_context->pMCScurrentParams->isCreated =
+                            M4OSA_TRUE; /* To avoid
+                                        reconverting it if another SendCommand is called */
+                        err = M4xVSS_internalStopTranscoding(xVSS_context);
+
+                        if( err != M4NO_ERROR )
+                        {
+                            M4OSA_TRACE1_1("M4xVSS_Step:\
+                                           M4xVSS_internalStopTranscoding returned 0x%x", err);
+                            /* TODO ? : Translate error code of MCS to an xVSS error code ? */
+                            return err;
+                        }
+                    }
+                }
+                else
+                {
+                    M4OSA_TRACE1_0("Bad micro state in analyzing state")
+                        return M4ERR_STATE;
+                }
+            }
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "Bad state when calling M4xVSS_Step function! State is %d",
+                xVSS_context->m_state);
+            return M4ERR_STATE;
+    }
+
+end_step:
+    /* Compute progression */
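+    /* Each completed step contributes 100/nbStepTotal percent, and uiProgress reports the
+    progress of the current step. Illustrative example (values assumed): with a BGM track,
+    nbStepTotal is 2; if editing is done (currentStep = 1) and audio mixing reports
+    uiProgress = 40, the overall progress is (1 * 100) / 2 + 40 / 2 = 70 percent. */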
+    if( xVSS_context->nbStepTotal != 0 )
+    {
+        *pProgress = (M4OSA_UInt8)(( ( xVSS_context->currentStep * 100) \
+            / (xVSS_context->nbStepTotal))
+            + (uiProgress / (xVSS_context->nbStepTotal)));
+
+        if( *pProgress > 100 )
+        {
+            *pProgress = 100;
+        }
+    }
+    else
+    {
+        *pProgress = 100;
+    }
+
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_CloseCommand(M4OSA_Context pContext)
+ * @brief        This function deletes the current editing profile, deallocates
+ *                resources and changes the xVSS internal state.
+ * @note        After this function, the user can call a new M4xVSS_SendCommand
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_CloseCommand( M4OSA_Context pContext )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check state */
+    /* Depending on the state, different things have to be done */
+    switch( xVSS_context->m_state )
+    {
+        case M4xVSS_kStateOpened:
+            /* Just free any remaining saving structures */
+            err = M4xVSS_internalFreeSaving(xVSS_context);
+            break;
+
+        case M4xVSS_kStateSaving:
+            {
+                if( xVSS_context->editingStep == M4xVSS_kMicroStateEditing )
+                {
+                    err = M4xVSS_internalCloseEditedFile(xVSS_context);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        /* Fix for blrnxpsw#234---->*/
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_CloseCommand:\
+                                       M4xVSS_internalCloseEditedFile returned an error: 0x%x",
+                                        err);
+                        /* we are retaining error here and returning error  in the end of the
+                        function  as to aviod memory leak*/
+                        //return err;
+                    }
+                }
+                else if( xVSS_context->editingStep
+                    == M4xVSS_kMicroStateAudioMixing )
+                {
+                    err = M4xVSS_internalCloseAudioMixedFile(xVSS_context);
+
+                    if( err != M4NO_ERROR )
+                    {
+                        /* Fix for blrnxpsw#234---->*/
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_CloseCommand: \
+                                M4xVSS_internalCloseAudioMixedFile returned an error: 0x%x", err);
+                        /* we are retaining error here and returning error  in the end of
+                        the function  as to aviod memory leak*/
+                        //return err;
+                        /* <----Fix for blrnxpsw#234*/
+                    }
+                }
+                err = M4xVSS_internalFreeSaving(xVSS_context);
+                /* We free this pointer only if a BGM track is present, because in that case,
+                this pointer belongs to us */
+                if( xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL ) {
+                    /*if(M4OSA_NULL != xVSS_context->pSettings->pOutputFile)
+                    {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings->pOutputFile);
+                    xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+                    }*/
+                    /*if(M4OSA_NULL != xVSS_context->pSettings->pTemporaryFile)
+                    {
+                    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings->pTemporaryFile);
+                    xVSS_context->pSettings->pTemporaryFile = M4OSA_NULL;
+                    }*/
+                }
+            }
+            break;
+
+        case M4xVSS_kStateSaved:
+            break;
+
+        case M4xVSS_kStateAnalyzing:
+            {
+                if( xVSS_context->analyseStep == M4xVSS_kMicroStateConvertPto3GPP )
+                {
+                    /* Free Pto3GPP module */
+                    err = M4xVSS_internalStopConvertPictureTo3gp(xVSS_context);
+                    /* Fix for blrnxpsw#234---->*/
+                    if( err != M4NO_ERROR )
+                    {
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_Step: \
+                                       M4xVSS_internalStopConvertPictureTo3gp returned 0x%x", err);
+                        /* we are retaining error here and returning error  in the end of the
+                        function  as to aviod memory leak*/
+                        //return err;
+                    }
+                    /* <-----Fix for blrnxpsw#234>*/
+                }
+                else if( xVSS_context->analyseStep
+                    == M4xVSS_kMicroStateTranscodeMCS )
+                {
+                    /* Free MCS module */
+                    err = M4MCS_abort(xVSS_context->pMCS_Ctxt);
+                    /* Fix for blrnxpsw#234---->*/
+                    if( err != M4NO_ERROR )
+                    {
+                        if( err == ((M4OSA_UInt32)M4ERR_FILE_INVALID_POSITION) )
+                        {
+                            err = M4xVSSERR_NO_MORE_SPACE;
+                        }
+                        M4OSA_TRACE1_1("M4xVSS_Step: M4MCS_abort returned 0x%x",
+                            err);
+                        /* we are retaining error here and returning error  in the end of the
+                        function  as to aviod memory leak*/
+                        //return err;
+                    }
+                    /* <---Fix for blrnxpsw#234*/
+                }
+            }
+            break;
+
+        default:
+            M4OSA_TRACE1_1(
+                "Bad state when calling M4xVSS_CloseCommand function! State is %d",
+                xVSS_context->m_state);
+            return M4ERR_STATE;
+    }
+
+    /* Free Send command */
+    M4xVSS_freeCommand(xVSS_context);
+
+    xVSS_context->m_state = M4xVSS_kStateInitialized; /* Change xVSS state */
+
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_CleanUp(M4OSA_Context pContext)
+ * @brief        This function deletes all xVSS resources
+ * @note        This function must be called after M4xVSS_CloseCommand.
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_CleanUp( M4OSA_Context pContext )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_TRACE3_0("M4xVSS_CleanUp:entering");
+
+    /* Check state */
+    if( xVSS_context->m_state != M4xVSS_kStateInitialized )
+    {
+        M4OSA_TRACE1_1(
+            "Bad state when calling M4xVSS_CleanUp function! State is %d",
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    /**
+    * UTF conversion: free temporary buffer*/
+    if( xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+        != M4OSA_NULL )
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->
+            UTFConversionContext.pTempOutConversionBuffer);
+        xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
+            M4OSA_NULL;
+    }
+
+    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pTempPath);
+    xVSS_context->pTempPath = M4OSA_NULL;
+
+    M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings);
+    xVSS_context->pSettings = M4OSA_NULL;
+
+    M4OSA_free((M4OSA_MemAddr32)xVSS_context);
+    xVSS_context = M4OSA_NULL;
+    M4OSA_TRACE3_0("M4xVSS_CleanUp:leaving ");
+
+    return M4NO_ERROR;
+}
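+
+/* Illustrative teardown sketch, kept out of the build with #if 0: the call order implied
+by the M4xVSS_CloseCommand and M4xVSS_CleanUp headers above, once saving or analysis is
+finished. */
+#if 0
+static void exampleTeardown( M4OSA_Context pCtxt )
+{
+    /* Frees the current editing profile and returns the context to the Initialized state */
+    M4xVSS_CloseCommand(pCtxt);
+
+    /* Must be called after M4xVSS_CloseCommand; frees the whole xVSS context */
+    M4xVSS_CleanUp(pCtxt);
+}
+#endif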
+
+M4OSA_ERR M4xVSS_RegisterExternalVideoDecoder( M4OSA_Context pContext,
+                                              M4VD_VideoType decoderType,
+                                              M4VD_Interface *pDecoderInterface,
+                                              M4OSA_Void *pUserData )
+{
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    /* Here the situation is a bit special: we need to record the registrations that are made,
+    so that we can replay them for each clip we create. */
+
+    if( decoderType >= M4VD_kVideoType_NB )
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    xVSS_context->registeredExternalDecs[decoderType].pDecoderInterface =
+        pDecoderInterface;
+    xVSS_context->registeredExternalDecs[decoderType].pUserData = pUserData;
+    xVSS_context->registeredExternalDecs[decoderType].registered = M4OSA_TRUE;
+
+    /* Notice it overwrites any HW decoder that may already have been registered for this type;
+    this is normal.*/
+
+    return M4NO_ERROR;
+
+#else
+
+    return M4ERR_NOT_IMPLEMENTED;
+
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+}
+
+M4OSA_ERR M4xVSS_RegisterExternalVideoEncoder( M4OSA_Context pContext,
+                                              M4VE_EncoderType encoderType,
+                                              M4VE_Interface *pEncoderInterface,
+                                              M4OSA_Void *pUserData )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    /* Here the situation is a bit special: we need to record the registrations that are made,
+    so that we can replay them for each clip we create. */
+
+    if( encoderType >= M4VE_kEncoderType_NB )
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    xVSS_context->registeredExternalEncs[encoderType].pEncoderInterface =
+        pEncoderInterface;
+    xVSS_context->registeredExternalEncs[encoderType].pUserData = pUserData;
+    xVSS_context->registeredExternalEncs[encoderType].registered = M4OSA_TRUE;
+
+    /* Notice it overwrites any HW encoder that may already have been registered for this type;
+    this is normal.*/
+
+    return M4NO_ERROR;
+}
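+
+/* Illustrative sketch, kept out of the build with #if 0: recording external codec
+registrations so they can be replayed for each clip, as described above. The interface
+tables and codec types are assumed to be supplied by the integrator; user data is left
+unused here. */
+#if 0
+static M4OSA_ERR exampleRegisterExternalCodecs( M4OSA_Context pCtxt,
+                                                M4VD_VideoType decType,
+                                                M4VD_Interface *pDecIf,
+                                                M4VE_EncoderType encType,
+                                                M4VE_Interface *pEncIf )
+{
+    M4OSA_ERR err;
+
+    /* Only effective when M4VSS_ENABLE_EXTERNAL_DECODERS is defined */
+    err = M4xVSS_RegisterExternalVideoDecoder(pCtxt, decType, pDecIf, M4OSA_NULL);
+
+    if( err != M4NO_ERROR )
+    {
+        return err;
+    }
+
+    return M4xVSS_RegisterExternalVideoEncoder(pCtxt, encType, pEncIf, M4OSA_NULL);
+}
+#endif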
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_GetVersion(M4_VersionInfo *pVersion)
+ * @brief        This function gets the version of Video Studio 2.1
+ *
+ * @param    pVersion            (OUT) Pointer to the version info structure to fill
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_GetVersion( M4_VersionInfo *pVersion )
+{
+    /* Just used for a grep in code */
+    /* CHANGE_VERSION_HERE */
+    static const M4OSA_Char cVersion[26] = "NXPSW_VideoStudio21_1_3_0";
+
+    if( M4OSA_NULL == pVersion )
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    pVersion->m_major = M4_xVSS_MAJOR;
+    pVersion->m_minor = M4_xVSS_MINOR;
+    pVersion->m_revision = M4_xVSS_REVISION;
+    pVersion->m_structSize = sizeof(M4_VersionInfo);
+
+    return M4NO_ERROR;
+}
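+
+/* Illustrative usage sketch (kept out of the build): querying the xVSS version through the
+   M4xVSS_GetVersion() API defined above. The caller name is hypothetical. */
+#if 0
+static M4OSA_ERR sampleGetVersion(void)
+{
+    M4_VersionInfo version;
+    M4OSA_ERR err = M4xVSS_GetVersion(&version);
+    if( M4NO_ERROR != err )
+    {
+        return err;
+    }
+    /* version.m_major/m_minor/m_revision now hold M4_xVSS_MAJOR/MINOR/REVISION */
+    return M4NO_ERROR;
+}
+#endif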
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_CreateClipSettings()
+ * @brief    Allows filling a clip settings structure with default values
+ *
+ * @note    WARNING: pClipSettings->Effects[ ] will be allocated in this function.
+ *                   pClipSettings->pFile      will be allocated in this function.
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param   pFile               (IN) Clip file name
+ * @param   filePathSize        (IN) Size of the clip path (needed for the UTF16 conversion)
+ * @param    nbEffects           (IN) Nb of effect settings to allocate
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_CreateClipSettings( M4VSS3GPP_ClipSettings *pClipSettings,
+                                    M4OSA_Void *pFile, M4OSA_UInt32 filePathSize,
+                                     M4OSA_UInt8 nbEffects )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE3_1("M4xVSS_CreateClipSettings called with pClipSettings=0x%p",
+        pClipSettings);
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+        "M4xVSS_CreateClipSettings: pClipSettings is NULL");
+
+    /* Create inherited VSS3GPP stuff */
+    /*err = M4VSS3GPP_editCreateClipSettings(pClipSettings, pFile,nbEffects);*/
+    /*FB: add clip path size (needed for UTF 16 conversion)*/
+    err = M4VSS3GPP_editCreateClipSettings(pClipSettings, pFile, filePathSize,
+        nbEffects);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1("M4xVSS_CreateClipSettings :\
+                       ERROR in M4VSS3GPP_editCreateClipSettings = 0x%x", err);
+        return err;
+    }
+
+    /* Set the clip settings to default */
+    pClipSettings->xVSS.uiBeginCutPercent = 0;
+    pClipSettings->xVSS.uiEndCutPercent = 0;
+    pClipSettings->xVSS.uiDuration = 0;
+    pClipSettings->xVSS.isPanZoom = M4OSA_FALSE;
+    pClipSettings->xVSS.PanZoomTopleftXa = 0;
+    pClipSettings->xVSS.PanZoomTopleftYa = 0;
+    pClipSettings->xVSS.PanZoomTopleftXb = 0;
+    pClipSettings->xVSS.PanZoomTopleftYb = 0;
+    pClipSettings->xVSS.PanZoomXa = 0;
+    pClipSettings->xVSS.PanZoomXb = 0;
+
+    /**
+    * Return with no error */
+    M4OSA_TRACE3_0("M4xVSS_CreateClipSettings(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_DuplicateClipSettings()
+ * @brief    Duplicates a clip settings structure, performing allocations if required
+ *
+ * @param    pClipSettingsDest    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param    pClipSettingsOrig    (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @param   bCopyEffects        (IN) Flag to know if we have to duplicate effects
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_DuplicateClipSettings( M4VSS3GPP_ClipSettings
+                                       *pClipSettingsDest,
+                                       M4VSS3GPP_ClipSettings *pClipSettingsOrig,
+                                        M4OSA_Bool bCopyEffects )
+{
+    M4OSA_ERR err = M4NO_ERROR;
+
+    M4OSA_TRACE3_2(
+        "M4xVSS_DuplicateClipSettings called with dest=0x%p src=0x%p",
+        pClipSettingsDest, pClipSettingsOrig);
+
+    /* Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsDest), M4ERR_PARAMETER,
+        "M4xVSS_DuplicateClipSettings: pClipSettingsDest is NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettingsOrig), M4ERR_PARAMETER,
+        "M4xVSS_DuplicateClipSettings: pClipSettingsOrig is NULL");
+
+    /* Call inherited VSS3GPP duplication */
+    err = M4VSS3GPP_editDuplicateClipSettings(pClipSettingsDest,
+        pClipSettingsOrig, bCopyEffects);
+
+    if( M4NO_ERROR != err )
+    {
+        M4OSA_TRACE1_1("M4xVSS_CreateClipSettings :\
+                       ERROR in M4VSS3GPP_editDuplicateClipSettings = 0x%x", err);
+        return err;
+    }
+
+    /* Return with no error */
+    M4OSA_TRACE3_0("M4xVSS_DuplicateClipSettings(): returning M4NO_ERROR");
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_FreeClipSettings()
+ * @brief    Free the pointers allocated in the ClipSettings structure (pFile, Effects, ...).
+ *
+ * @param    pClipSettings        (IN) Pointer to a valid M4VSS3GPP_ClipSettings structure
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    pClipSettings is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_FreeClipSettings( M4VSS3GPP_ClipSettings *pClipSettings )
+{
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pClipSettings), M4ERR_PARAMETER,
+        "M4xVSS_FreeClipSettings: pClipSettings is NULL");
+
+    /* Free inherited VSS3GPP stuff */
+    M4VSS3GPP_editFreeClipSettings(pClipSettings);
+
+    return M4NO_ERROR;
+}
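+
+/* Illustrative sketch (kept out of the build; error handling shortened): typical
+   create/duplicate/free lifecycle of a M4VSS3GPP_ClipSettings structure using the helpers
+   above. The clip path 'pClipPath' and its size are assumed to be provided by the caller. */
+#if 0
+static M4OSA_ERR sampleClipSettingsLifecycle(M4OSA_Void *pClipPath, M4OSA_UInt32 pathSize)
+{
+    M4VSS3GPP_ClipSettings clip, clipCopy;
+    M4OSA_ERR err;
+
+    /* Fill 'clip' with default values; pFile and Effects[] are allocated inside */
+    err = M4xVSS_CreateClipSettings(&clip, pClipPath, pathSize, 0);
+    if( M4NO_ERROR != err ) return err;
+
+    /* Duplicate it, including effects */
+    err = M4xVSS_DuplicateClipSettings(&clipCopy, &clip, M4OSA_TRUE);
+    if( M4NO_ERROR != err )
+    {
+        M4xVSS_FreeClipSettings(&clip);
+        return err;
+    }
+
+    /* Release the pointers allocated inside both structures */
+    M4xVSS_FreeClipSettings(&clipCopy);
+    M4xVSS_FreeClipSettings(&clip);
+    return M4NO_ERROR;
+}
+#endif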
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_getMCSContext(M4OSA_Context pContext, M4OSA_Context* mcsContext)
+ * @brief        This function returns the MCS context within the xVSS internal context
+ * @note        This function must be called only after VSS state has moved to analyzing state or
+ * beyond
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @param    mcsContext        (OUT) Pointer to pointer of mcs context to return
+ * @return    M4NO_ERROR:        No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_getMCSContext( M4OSA_Context pContext,
+                               M4OSA_Context *mcsContext )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4xVSS_getMCSContext: pContext is NULL");
+
+    if( xVSS_context->m_state == M4xVSS_kStateInitialized )
+    {
+        M4OSA_TRACE1_1("M4xVSS_getMCSContext: Bad state! State is %d",\
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    *mcsContext = xVSS_context->pMCS_Ctxt;
+
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_getVSS3GPPContext(M4OSA_Context pContext,
+ *                                                   M4OSA_Context* vss3gppContext)
+ * @brief        This function returns the VSS3GPP context within the xVSS internal context
+ * @note        This function must be called only after VSS state has moved to Generating preview
+ *              or beyond
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @param    vss3gppContext        (OUT) Pointer to pointer of vss3gpp context to return
+ * @return    M4NO_ERROR:        No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_STATE:        This function cannot be called at this time
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_getVSS3GPPContext( M4OSA_Context pContext,
+                                   M4OSA_Context *vss3gppContext )
+{
+    M4xVSS_Context *xVSS_context = (M4xVSS_Context *)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /**
+    *    Check input parameter */
+    M4OSA_DEBUG_IF2((M4OSA_NULL == pContext), M4ERR_PARAMETER,
+        "M4xVSS_getVSS3GPPContext: pContext is NULL");
+
+    if( xVSS_context->m_state < M4xVSS_kStateSaving )
+    {
+        M4OSA_TRACE1_1("M4xVSS_getVSS3GPPContext: Bad state! State is %d",\
+            xVSS_context->m_state);
+        return M4ERR_STATE;
+    }
+
+    *vss3gppContext = xVSS_context->pCurrentEditContext;
+
+    return err;
+}
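+
+/* Illustrative sketch (kept out of the build): fetching the internal MCS and VSS3GPP contexts
+   with the two accessors above. An already-created xVSS context handle, here named pXvssCtxt,
+   is assumed; the state requirements are the ones enforced by the checks in the accessors. */
+#if 0
+static M4OSA_ERR sampleGetInternalContexts(M4OSA_Context pXvssCtxt)
+{
+    M4OSA_Context pMcsCtxt = M4OSA_NULL;
+    M4OSA_Context pVss3gppCtxt = M4OSA_NULL;
+    M4OSA_ERR err;
+
+    /* Valid once m_state is no longer M4xVSS_kStateInitialized */
+    err = M4xVSS_getMCSContext(pXvssCtxt, &pMcsCtxt);
+    if( M4NO_ERROR != err ) return err;
+
+    /* Valid once m_state has reached M4xVSS_kStateSaving or beyond */
+    err = M4xVSS_getVSS3GPPContext(pXvssCtxt, &pVss3gppCtxt);
+    return err;
+}
+#endif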
diff --git a/libvideoeditor/vss/src/M4xVSS_internal.c b/libvideoeditor/vss/src/M4xVSS_internal.c
new file mode 100755
index 0000000..62107aa
--- /dev/null
+++ b/libvideoeditor/vss/src/M4xVSS_internal.c
@@ -0,0 +1,5047 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    M4xVSS_internal.c
+ * @brief    Internal functions of extended Video Studio Service (Video Studio 2.1)
+ * @note
+ ******************************************************************************
+ */
+#include "M4OSA_Debug.h"
+#include "M4OSA_CharStar.h"
+#include "M4OSA_FileExtra.h"
+
+#include "NXPSW_CompilerSwitches.h"
+
+#include "M4VSS3GPP_API.h"
+#include "M4VSS3GPP_ErrorCodes.h"
+
+#include "M4xVSS_API.h"
+#include "M4xVSS_Internal.h"
+
+/*for rgb16 color effect*/
+#include "M4VIFI_Defines.h"
+#include "M4VIFI_Clip.h"
+
+/**
+ * component includes */
+#include "M4VFL_transition.h"            /**< video effects */
+
+/* Internal header file of VSS is included because of MMS use case */
+#include "M4VSS3GPP_InternalTypes.h"
+
+/*Exif header files to add image rendering support (cropping, black borders)*/
+#include "M4EXIFC_CommonAPI.h"
+// StageFright encoders require resolutions that are multiples of 16
+#include "M4ENCODER_common.h"
+
+#define TRANSPARENT_COLOR 0x7E0
+
+/* Prototype of M4VIFI_xVSS_RGB565toYUV420 function (avoid green effect of transparency color) */
+M4VIFI_UInt8 M4VIFI_xVSS_RGB565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+                                        M4VIFI_ImagePlane *pPlaneOut);
+
+
+/*special MCS function used only in VideoArtist and VideoStudio to open the media in the normal
+ mode. That way the media duration is accurate*/
+extern M4OSA_ERR M4MCS_open_normalMode(M4MCS_Context pContext, M4OSA_Void* pFileIn,
+                                         M4VIDEOEDITING_FileType InputFileType,
+                                         M4OSA_Void* pFileOut, M4OSA_Void* pTempFile);
+
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalStartTranscoding(M4OSA_Context pContext)
+ * @brief        This function initializes MCS (3GP transcoder) with the given
+ *                parameters
+ * @note        The transcoding parameters are given by the internal xVSS context.
+ *                This context contains a pointer on the current element of the
+ *                chained list of MCS parameters.
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalStartTranscoding(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+    M4MCS_Context mcs_context;
+    M4MCS_OutputParams Params;
+    M4MCS_EncodingParams Rates;
+    M4OSA_UInt32 i;
+
+    err = M4MCS_init(&mcs_context, xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_init: 0x%x", err);
+        return err;
+    }
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+    /* replay recorded external decoder registrations on the MCS */
+    for (i=0; i<M4VD_kVideoType_NB; i++)
+    {
+        if (xVSS_context->registeredExternalDecs[i].registered)
+        {
+            err = M4MCS_registerExternalVideoDecoder(mcs_context, i,
+                    xVSS_context->registeredExternalDecs[i].pDecoderInterface,
+                    xVSS_context->registeredExternalDecs[i].pUserData);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalStartTranscoding:\
+                     M4MCS_registerExternalVideoDecoder() returns 0x%x!", err);
+                M4MCS_abort(mcs_context);
+                return err;
+            }
+        }
+    }
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    /* replay recorded external encoder registrations on the MCS */
+    for (i=0; i<M4VE_kEncoderType_NB; i++)
+    {
+        if (xVSS_context->registeredExternalEncs[i].registered)
+        {
+            err = M4MCS_registerExternalVideoEncoder(mcs_context, i,
+                    xVSS_context->registeredExternalEncs[i].pEncoderInterface,
+                    xVSS_context->registeredExternalEncs[i].pUserData);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalStartTranscoding:\
+                     M4MCS_registerExternalVideoEncoder() returns 0x%x!", err);
+                M4MCS_abort(mcs_context);
+                return err;
+            }
+        }
+    }
+
+    err = M4MCS_open(mcs_context, xVSS_context->pMCScurrentParams->pFileIn,
+         xVSS_context->pMCScurrentParams->InputFileType,
+             xVSS_context->pMCScurrentParams->pFileOut,
+             xVSS_context->pMCScurrentParams->pFileTemp);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_open: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    /**
+     * Fill MCS parameters with the parameters contained in the current element of the
+       MCS parameters chained list */
+    Params.OutputFileType = xVSS_context->pMCScurrentParams->OutputFileType;
+    Params.OutputVideoFormat = xVSS_context->pMCScurrentParams->OutputVideoFormat;
+    Params.OutputVideoFrameSize = xVSS_context->pMCScurrentParams->OutputVideoFrameSize;
+    Params.OutputVideoFrameRate = xVSS_context->pMCScurrentParams->OutputVideoFrameRate;
+    Params.OutputAudioFormat = xVSS_context->pMCScurrentParams->OutputAudioFormat;
+    Params.OutputAudioSamplingFrequency =
+         xVSS_context->pMCScurrentParams->OutputAudioSamplingFrequency;
+    Params.bAudioMono = xVSS_context->pMCScurrentParams->bAudioMono;
+    Params.pOutputPCMfile = M4OSA_NULL;
+    /*FB 2008/10/20: add media rendering parameter to keep aspect ratio*/
+    switch(xVSS_context->pMCScurrentParams->MediaRendering)
+    {
+    case M4xVSS_kResizing:
+        Params.MediaRendering = M4MCS_kResizing;
+        break;
+    case M4xVSS_kCropping:
+        Params.MediaRendering = M4MCS_kCropping;
+        break;
+    case M4xVSS_kBlackBorders:
+        Params.MediaRendering = M4MCS_kBlackBorders;
+        break;
+    default:
+        break;
+    }
+    /**/
+#ifdef TIMESCALE_BUG
+    Params.OutputVideoTimescale = xVSS_context->pMCScurrentParams->OutputVideoTimescale;
+#endif
+    // new params after integrating MCS 2.0
+    // Set the number of audio effects; 0 for now.
+    Params.nbEffects = 0;
+
+    // Set the audio effect; null for now.
+    Params.pEffects = NULL;
+
+    // Keep the Exif data (do not discard it).
+    Params.bDiscardExif = M4OSA_FALSE;
+
+    // Do not adjust the picture orientation.
+    Params.bAdjustOrientation = M4OSA_FALSE;
+    // new params after integrating MCS 2.0
+
+    /**
+     * Set output parameters */
+    err = M4MCS_setOutputParams(mcs_context, &Params);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_setOutputParams: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    Rates.OutputVideoBitrate = xVSS_context->pMCScurrentParams->OutputVideoBitrate;
+    Rates.OutputAudioBitrate = xVSS_context->pMCScurrentParams->OutputAudioBitrate;
+    Rates.BeginCutTime = 0;
+    Rates.EndCutTime = 0;
+    Rates.OutputFileSize = 0;
+
+    /*FB: transcoding per parts*/
+    Rates.BeginCutTime = xVSS_context->pMCScurrentParams->BeginCutTime;
+    Rates.EndCutTime = xVSS_context->pMCScurrentParams->EndCutTime;
+    Rates.OutputVideoTimescale = xVSS_context->pMCScurrentParams->OutputVideoTimescale;
+
+    err = M4MCS_setEncodingParams(mcs_context, &Rates);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_setEncodingParams: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    err = M4MCS_checkParamsAndStart(mcs_context);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_checkParamsAndStart: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    /**
+     * Save MCS context to be able to call MCS step function in M4xVSS_step function */
+    xVSS_context->pMCS_Ctxt = mcs_context;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalStopTranscoding(M4OSA_Context pContext)
+ * @brief        This function cleans up MCS (3GP transcoder)
+ * @note
+ *
+ * @param    pContext            (IN) Pointer on the xVSS edit context
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL
+ * @return    M4ERR_ALLOC:        Memory allocation has failed
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalStopTranscoding(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+
+    err = M4MCS_close(xVSS_context->pMCS_Ctxt);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalStopTranscoding: Error in M4MCS_close: 0x%x", err);
+        M4MCS_abort(xVSS_context->pMCS_Ctxt);
+        return err;
+    }
+
+    /**
+     * Free this MCS instance */
+    err = M4MCS_cleanUp(xVSS_context->pMCS_Ctxt);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalStopTranscoding: Error in M4MCS_cleanUp: 0x%x", err);
+        return err;
+    }
+
+    xVSS_context->pMCS_Ctxt = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
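+
+/* Overview of the MCS usage pattern implemented by the two functions above (informal sketch;
+   the actual stepping is driven from M4xVSS_step through the saved pMCS_Ctxt):
+
+      M4MCS_init(&ctxt, pFileReadPtr, pFileWritePtr);
+      (optionally) M4MCS_registerExternalVideoDecoder / M4MCS_registerExternalVideoEncoder;
+      M4MCS_open(ctxt, pFileIn, InputFileType, pFileOut, pFileTemp);
+      M4MCS_setOutputParams(ctxt, &Params);
+      M4MCS_setEncodingParams(ctxt, &Rates);
+      M4MCS_checkParamsAndStart(ctxt);
+      ... MCS step calls until completion ...
+      M4MCS_close(ctxt);
+      M4MCS_cleanUp(ctxt);
+
+   Any failure before completion is handled with M4MCS_abort(ctxt). */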
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
+ *                                             M4OSA_FileReadPointer* pFileReadPtr,
+ *                                                M4VIFI_ImagePlane* pImagePlanes,
+ *                                                 M4OSA_UInt32 width,
+ *                                                M4OSA_UInt32 height);
+ * @brief    Converts and resizes an ARGB8888 image to YUV420
+ * @note
+ * @param    pFileIn            (IN) The Image input file
+ * @param    pFileReadPtr    (IN) Pointer on filesystem functions
+ * @param    pImagePlanes    (IN/OUT) Pointer on YUV420 output planes allocated by the user.
+ *                            The ARGB8888 image will be converted and resized to the output
+ *                            YUV420 plane size
+ * @param    width           (IN) Width of the ARGB8888 image
+ * @param    height          (IN) Height of the ARGB8888 image
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_ALLOC: memory error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4xVSS_internalConvertAndResizeARGB8888toYUV420(M4OSA_Void* pFileIn,
+                                                          M4OSA_FileReadPointer* pFileReadPtr,
+                                                          M4VIFI_ImagePlane* pImagePlanes,
+                                                          M4OSA_UInt32 width,M4OSA_UInt32 height)
+{
+    M4OSA_Context pARGBIn;
+    M4VIFI_ImagePlane rgbPlane1 ,rgbPlane2;
+    M4OSA_UInt32 frameSize_argb=(width * height * 4);
+    M4OSA_UInt32 frameSize = (width * height * 3); //Size of RGB888 data.
+    M4OSA_UInt32 i = 0,j= 0;
+    M4OSA_ERR err=M4NO_ERROR;
+
+
+    M4OSA_UInt8 *pTmpData = (M4OSA_UInt8*) M4OSA_malloc(frameSize_argb,
+         M4VS, (M4OSA_Char*)"Image argb data");
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Entering :");
+    if(pTmpData == M4OSA_NULL) {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :\
+            Failed to allocate memory for Image clip");
+        return M4ERR_ALLOC;
+    }
+
+    M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :width and height %d %d",
+        width ,height);
+    /* Open the input ARGB8888 file */
+    err = pFileReadPtr->openRead(&pARGBIn, pFileIn, M4OSA_kFileRead);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :\
+            Can't open input ARGB8888 file %s, error: 0x%x\n",pFileIn, err);
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        goto cleanup;
+    }
+
+    err = pFileReadPtr->readData(pARGBIn,(M4OSA_MemAddr8)pTmpData, &frameSize_argb);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Can't close ARGB8888\
+             file %s, error: 0x%x\n",pFileIn, err);
+        pFileReadPtr->closeRead(pARGBIn);
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        goto cleanup;
+    }
+
+    err = pFileReadPtr->closeRead(pARGBIn);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Can't close ARGB8888 \
+             file %s, error: 0x%x\n",pFileIn, err);
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        goto cleanup;
+    }
+
+    rgbPlane1.pac_data = (M4VIFI_UInt8*)M4OSA_malloc(frameSize, M4VS,
+         (M4OSA_Char*)"Image clip RGB888 data");
+    if(rgbPlane1.pac_data == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 \
+            Failed to allocate memory for Image clip");
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        return M4ERR_ALLOC;
+    }
+
+    rgbPlane1.u_height = height;
+    rgbPlane1.u_width = width;
+    rgbPlane1.u_stride = width*3;
+    rgbPlane1.u_topleft = 0;
+
+    /** Remove the alpha channel */
+    for (i=0, j = 0; i < frameSize_argb; i++) {
+        if ((i % 4) == 0) continue;
+        rgbPlane1.pac_data[j] = pTmpData[i];
+        j++;
+    }
+    M4OSA_free((M4OSA_MemAddr32)pTmpData);
+
+    /* To Check if resizing is required with color conversion */
+    if(width != pImagePlanes->u_width || height != pImagePlanes->u_height)
+    {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 Resizing :");
+        frameSize =  ( pImagePlanes->u_width * pImagePlanes->u_height * 3);
+        rgbPlane2.pac_data = (M4VIFI_UInt8*)M4OSA_malloc(frameSize, M4VS,
+             (M4OSA_Char*)"Image clip RGB888 data");
+        if(rgbPlane2.pac_data == M4OSA_NULL)
+        {
+            M4OSA_TRACE1_0("Failed to allocate memory for Image clip");
+            /* pTmpData was already freed above; release the RGB888 buffer instead */
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane1.pac_data);
+            return M4ERR_ALLOC;
+        }
+        rgbPlane2.u_height = pImagePlanes->u_height;
+        rgbPlane2.u_width = pImagePlanes->u_width;
+        rgbPlane2.u_stride = pImagePlanes->u_width*3;
+        rgbPlane2.u_topleft = 0;
+
+        /* Resizing RGB888 to RGB888 */
+        err = M4VIFI_ResizeBilinearRGB888toRGB888(M4OSA_NULL, &rgbPlane1, &rgbPlane2);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("error when converting from Resize RGB888 to RGB888: 0x%x\n", err);
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane2.pac_data);
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane1.pac_data);
+            return err;
+        }
+        /*Converting Resized RGB888 to YUV420 */
+        err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane2, pImagePlanes);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("error when converting from RGB888 to YUV: 0x%x\n", err);
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane2.pac_data);
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane1.pac_data);
+            return err;
+        }
+        M4OSA_free((M4OSA_MemAddr32)rgbPlane2.pac_data);
+        M4OSA_free((M4OSA_MemAddr32)rgbPlane1.pac_data);
+
+        M4OSA_TRACE1_0("RGB to YUV done");
+    }
+    else
+    {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 NO  Resizing :");
+        err = M4VIFI_RGB888toYUV420(M4OSA_NULL, &rgbPlane1, pImagePlanes);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("error when converting from RGB to YUV: 0x%x\n", err);
+        }
+        M4OSA_free((M4OSA_MemAddr32)rgbPlane1.pac_data);
+
+        M4OSA_TRACE1_0("RGB to YUV done");
+    }
+cleanup:
+    M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 leaving :");
+    return err;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_internalConvertARGB8888toYUV420(M4OSA_Void* pFileIn,
+ *                                             M4OSA_FileReadPointer* pFileReadPtr,
+ *                                             M4VIFI_ImagePlane** pImagePlanes,
+ *                                             M4OSA_UInt32 width,
+ *                                             M4OSA_UInt32 height);
+ * @brief    Converts an ARGB8888 image to YUV420
+ * @note
+ * @param    pFileIn            (IN) The Image input file
+ * @param    pFileReadPtr    (IN) Pointer on filesystem functions
+ * @param    pImagePlanes    (OUT) Address where the pointer to the YUV420 planes allocated by
+ *                            this function is returned
+ * @param    width           (IN) Width of the ARGB8888 image
+ * @param    height          (IN) Height of the ARGB8888 image
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_ALLOC: memory error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4xVSS_internalConvertARGB8888toYUV420(M4OSA_Void* pFileIn,
+                                                 M4OSA_FileReadPointer* pFileReadPtr,
+                                                 M4VIFI_ImagePlane** pImagePlanes,
+                                                 M4OSA_UInt32 width,M4OSA_UInt32 height)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4VIFI_ImagePlane *yuvPlane = M4OSA_NULL;
+
+    yuvPlane = (M4VIFI_ImagePlane*)M4OSA_malloc(3*sizeof(M4VIFI_ImagePlane),
+                M4VS, (M4OSA_Char*)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
+    if(yuvPlane == M4OSA_NULL) {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertAndResizeARGB8888toYUV420 :\
+            Failed to allocate memory for Image clip");
+        return M4ERR_ALLOC;
+    }
+    yuvPlane[0].u_height = height;
+    yuvPlane[0].u_width = width;
+    yuvPlane[0].u_stride = width;
+    yuvPlane[0].u_topleft = 0;
+    yuvPlane[0].pac_data = (M4VIFI_UInt8*)M4OSA_malloc(yuvPlane[0].u_height \
+        * yuvPlane[0].u_width * 1.5, M4VS, (M4OSA_Char*)"imageClip YUV data");
+    if(yuvPlane[0].pac_data == M4OSA_NULL) {
+        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB8888toYUV420 :\
+            Failed to allocate memory for the YUV planes");
+        M4OSA_free((M4OSA_MemAddr32)yuvPlane);
+        return M4ERR_ALLOC;
+    }
+
+    yuvPlane[1].u_height = yuvPlane[0].u_height >>1;
+    yuvPlane[1].u_width = yuvPlane[0].u_width >> 1;
+    yuvPlane[1].u_stride = yuvPlane[1].u_width;
+    yuvPlane[1].u_topleft = 0;
+    yuvPlane[1].pac_data = (M4VIFI_UInt8*)(yuvPlane[0].pac_data + yuvPlane[0].u_height \
+        * yuvPlane[0].u_width);
+
+    yuvPlane[2].u_height = yuvPlane[0].u_height >>1;
+    yuvPlane[2].u_width = yuvPlane[0].u_width >> 1;
+    yuvPlane[2].u_stride = yuvPlane[2].u_width;
+    yuvPlane[2].u_topleft = 0;
+    yuvPlane[2].pac_data = (M4VIFI_UInt8*)(yuvPlane[1].pac_data + yuvPlane[1].u_height \
+        * yuvPlane[1].u_width);
+    err = M4xVSS_internalConvertAndResizeARGB8888toYUV420( pFileIn,pFileReadPtr,
+                                                          yuvPlane, width, height);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalConvertAndResizeARGB8888toYUV420 return error: 0x%x\n", err);
+        M4OSA_free((M4OSA_MemAddr32)yuvPlane);
+        return err;
+    }
+
+    *pImagePlanes = yuvPlane;
+
+    M4OSA_TRACE1_0("M4xVSS_internalConvertARGB8888toYUV420 :Leaving");
+    return err;
+
+}
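+
+/* Memory layout produced by M4xVSS_internalConvertARGB8888toYUV420 (informal sketch): the
+   three planes share a single allocation of width*height*1.5 bytes, laid out Y then U then V:
+
+      Y : yuvPlane[0].pac_data                              size = width * height
+      U : yuvPlane[0].pac_data + width*height               size = (width/2) * (height/2)
+      V : yuvPlane[1].pac_data + (width/2)*(height/2)       size = (width/2) * (height/2)
+
+   Freeing the picture therefore means freeing yuvPlane[0].pac_data once, then the yuvPlane
+   array itself, as done in M4xVSS_PictureCallbackFct below. */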
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_PictureCallbackFct (M4OSA_Void* pPictureCtxt,
+ *                                        M4VIFI_ImagePlane* pImagePlanes,
+ *                                        M4OSA_Double* pPictureDuration);
+ * @brief    It feeds the PTO3GPP with YUV420 pictures.
+ * @note    This function is given to the PTO3GPP in the M4PTO3GPP_Params structure
+ * @param    pContext    (IN) The integrator own context
+ * @param    pImagePlanes(IN/OUT) Pointer to an array of three valid image planes
+ * @param    pPictureDuration(OUT) Duration of the returned picture
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4PTO3GPP_WAR_LAST_PICTURE: The returned image is the last one
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_PictureCallbackFct(M4OSA_Void* pPictureCtxt, M4VIFI_ImagePlane* pImagePlanes,
+                                     M4OSA_Double* pPictureDuration)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt8    last_frame_flag = 0;
+    M4xVSS_PictureCallbackCtxt* pC = (M4xVSS_PictureCallbackCtxt*) (pPictureCtxt);
+
+    /*Used for pan&zoom*/
+    M4OSA_UInt8 tempPanzoomXa = 0;
+    M4OSA_UInt8 tempPanzoomXb = 0;
+    M4AIR_Params Params;
+    /**/
+
+    /*Used for cropping and black borders*/
+    M4OSA_Context    pPictureContext = M4OSA_NULL;
+    M4OSA_FilePosition    pictureSize = 0 ;
+    M4OSA_UInt8*    pictureBuffer = M4OSA_NULL;
+    //M4EXIFC_Context pExifContext = M4OSA_NULL;
+    M4EXIFC_BasicTags pBasicTags;
+    M4VIFI_ImagePlane pImagePlanes1 = pImagePlanes[0];
+    M4VIFI_ImagePlane pImagePlanes2 = pImagePlanes[1];
+    M4VIFI_ImagePlane pImagePlanes3 = pImagePlanes[2];
+    /**/
+
+    /**
+     * Check input parameters */
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pPictureCtxt),        M4ERR_PARAMETER,
+         "M4xVSS_PictureCallbackFct: pPictureCtxt is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pImagePlanes),        M4ERR_PARAMETER,
+         "M4xVSS_PictureCallbackFct: pImagePlanes is M4OSA_NULL");
+    M4OSA_DEBUG_IF2((M4OSA_NULL==pPictureDuration), M4ERR_PARAMETER,
+         "M4xVSS_PictureCallbackFct: pPictureDuration is M4OSA_NULL");
+    M4OSA_TRACE1_0("M4xVSS_PictureCallbackFct :Entering");
+    /*PR P4ME00003181 In case the image number is 0, pan&zoom can not be used*/
+    if(M4OSA_TRUE == pC->m_pPto3GPPparams->isPanZoom && pC->m_NbImage == 0)
+    {
+        pC->m_pPto3GPPparams->isPanZoom = M4OSA_FALSE;
+    }
+
+    /*If no cropping/black borders or pan&zoom, just decode and resize the picture*/
+    if(pC->m_mediaRendering == M4xVSS_kResizing && M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
+    {
+        /**
+         * Convert and resize input ARGB8888 file to YUV420 */
+        /*To support ARGB8888 : */
+        M4OSA_TRACE1_2("M4xVSS_PictureCallbackFct 1: width and heght %d %d",
+            pC->m_pPto3GPPparams->width,pC->m_pPto3GPPparams->height);
+        err = M4xVSS_internalConvertAndResizeARGB8888toYUV420(pC->m_FileIn,
+             pC->m_pFileReadPtr, pImagePlanes,pC->m_pPto3GPPparams->width,
+                pC->m_pPto3GPPparams->height);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when decoding JPEG: 0x%x\n", err);
+            return err;
+        }
+    }
+    /*In case of cropping, black borders or pan&zoom, call the EXIF reader and the AIR*/
+    else
+    {
+        /**
+         * Computes ratios */
+        if(pC->m_pDecodedPlane == M4OSA_NULL)
+        {
+            /**
+             * Convert input ARGB8888 file to YUV420 */
+             M4OSA_TRACE1_2("M4xVSS_PictureCallbackFct 2: width and heght %d %d",
+                pC->m_pPto3GPPparams->width,pC->m_pPto3GPPparams->height);
+            err = M4xVSS_internalConvertARGB8888toYUV420(pC->m_FileIn, pC->m_pFileReadPtr,
+                &(pC->m_pDecodedPlane),pC->m_pPto3GPPparams->width,pC->m_pPto3GPPparams->height);
+            if(err != M4NO_ERROR)
+            {
+                M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when decoding JPEG: 0x%x\n", err);
+                if(pC->m_pDecodedPlane != M4OSA_NULL)
+                {
+                    /* YUV420 planar is returned but allocation is made only once
+                        (contiguous planes in memory) */
+                    if(pC->m_pDecodedPlane->pac_data != M4OSA_NULL)
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane->pac_data);
+                    }
+                    M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane);
+                    pC->m_pDecodedPlane = M4OSA_NULL;
+                }
+                return err;
+            }
+        }
+
+        /*Initialize AIR Params*/
+        Params.m_inputCoord.m_x = 0;
+        Params.m_inputCoord.m_y = 0;
+        Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height;
+        Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width;
+        Params.m_outputSize.m_width = pImagePlanes->u_width;
+        Params.m_outputSize.m_height = pImagePlanes->u_height;
+        Params.m_bOutputStripe = M4OSA_FALSE;
+        Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
+
+        /*Initialize Exif params structure*/
+        pBasicTags.orientation = M4COMMON_kOrientationUnknown;
+
+        /**
+        Pan&zoom params*/
+        if(M4OSA_TRUE == pC->m_pPto3GPPparams->isPanZoom)
+        {
+            /*Save ratio values, they can be reused if the new ratios are 0*/
+            tempPanzoomXa = (M4OSA_UInt8)pC->m_pPto3GPPparams->PanZoomXa;
+            tempPanzoomXb = (M4OSA_UInt8)pC->m_pPto3GPPparams->PanZoomXb;
+#if 0
+            /**
+             * Check size of output JPEG is compatible with pan & zoom parameters
+               First, check final (b) parameters */
+            if(pC->m_pPto3GPPparams->PanZoomXb + pC->m_pPto3GPPparams->PanZoomTopleftXb > 100 )
+            {
+                M4OSA_TRACE1_1("WARNING : Bad final Pan & Zoom settings !!!\
+                    New final Zoom ratio is: %d", (100 - pC->m_pPto3GPPparams->PanZoomTopleftXb));
+                /* We do not change the topleft parameter as it may correspond to a precise area
+                of the picture -> only the zoom ratio is modified */
+                pC->m_pPto3GPPparams->PanZoomXb = 100 - pC->m_pPto3GPPparams->PanZoomTopleftXb;
+            }
+
+            if(pC->m_pPto3GPPparams->PanZoomXb + pC->m_pPto3GPPparams->PanZoomTopleftYb > 100 )
+            {
+                M4OSA_TRACE1_1("WARNING : Bad final Pan & Zoom settings \
+                    !!! New final Zoom ratio is: %d",
+                    (100 - pC->m_pPto3GPPparams->PanZoomTopleftYb));
+                /* We do not change the topleft parameter as it may correspond to a
+                precise area of the picture -> only the zoom ratio is modified */
+                pC->m_pPto3GPPparams->PanZoomXb = 100 - pC->m_pPto3GPPparams->PanZoomTopleftYb;
+            }
+
+            /**
+             * Then, check initial (a) parameters */
+            if(pC->m_pPto3GPPparams->PanZoomXa + pC->m_pPto3GPPparams->PanZoomTopleftXa > 100 )
+            {
+                M4OSA_TRACE1_1("WARNING : Bad initial Pan & Zoom settings !!! \
+                    New initial Zoom ratio is: %d",(100 - pC->m_pPto3GPPparams->PanZoomTopleftXa));
+                /* We do not change the topleft parameter as it may correspond to a precise
+                area of the picture-> only the zoom ratio is modified */
+                pC->m_pPto3GPPparams->PanZoomXa = 100 - pC->m_pPto3GPPparams->PanZoomTopleftXa;
+            }
+
+            if(pC->m_pPto3GPPparams->PanZoomXa + pC->m_pPto3GPPparams->PanZoomTopleftYa > 100 )
+            {
+                M4OSA_TRACE1_1("WARNING : Bad initial Pan & Zoom settings !!! New initial\
+                     Zoom ratio is: %d", (100 - pC->m_pPto3GPPparams->PanZoomTopleftYa));
+                /* We do not change the topleft parameter as it may correspond to a precise
+                area of the picture-> only the zoom ratio is modified */
+                pC->m_pPto3GPPparams->PanZoomXa = 100 - pC->m_pPto3GPPparams->PanZoomTopleftYa;
+            }
+#endif
+            /*Check that the ratio is not 0*/
+            /*Check (a) parameters*/
+            if(pC->m_pPto3GPPparams->PanZoomXa == 0)
+            {
+                M4OSA_UInt8 maxRatio = 0;
+                if(pC->m_pPto3GPPparams->PanZoomTopleftXa >=
+                     pC->m_pPto3GPPparams->PanZoomTopleftYa)
+                {
+                    /*The ratio is 0, that means the area of the picture defined with (a)
+                    parameters is bigger than the image size*/
+                    if(pC->m_pPto3GPPparams->PanZoomTopleftXa + tempPanzoomXa > 100)
+                    {
+                        /*The oversize is maxRatio*/
+                        maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftXa + tempPanzoomXa - 100;
+                    }
+                }
+                else
+                {
+                    /*The ratio is 0, that means the area of the picture defined with (a)
+                     parameters is bigger than the image size*/
+                    if(pC->m_pPto3GPPparams->PanZoomTopleftYa + tempPanzoomXa > 100)
+                    {
+                        /*The oversize is maxRatio*/
+                        maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftYa + tempPanzoomXa - 100;
+                    }
+                }
+                /*Modify the (a) parameters:*/
+                if(pC->m_pPto3GPPparams->PanZoomTopleftXa >= maxRatio)
+                {
+                    /*The (a) topleft parameters can be moved to keep the same area size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftXa -= maxRatio;
+                }
+                else
+                {
+                    /*Move the (a) topleft parameter to 0 but the ratio will be also further
+                    modified to match the image size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftXa = 0;
+                }
+                if(pC->m_pPto3GPPparams->PanZoomTopleftYa >= maxRatio)
+                {
+                    /*The (a) topleft parameters can be moved to keep the same area size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftYa -= maxRatio;
+                }
+                else
+                {
+                    /*Move the (a) topleft parameter to 0 but the ratio will be also further
+                     modified to match the image size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftYa = 0;
+                }
+                /*The new ratio is the original one*/
+                pC->m_pPto3GPPparams->PanZoomXa = tempPanzoomXa;
+                if(pC->m_pPto3GPPparams->PanZoomXa + pC->m_pPto3GPPparams->PanZoomTopleftXa > 100)
+                {
+                    /*Change the ratio if the area of the picture defined with (a) parameters is
+                    bigger than the image size*/
+                    pC->m_pPto3GPPparams->PanZoomXa = 100 - pC->m_pPto3GPPparams->PanZoomTopleftXa;
+                }
+                if(pC->m_pPto3GPPparams->PanZoomXa + pC->m_pPto3GPPparams->PanZoomTopleftYa > 100)
+                {
+                    /*Change the ratio if the area of the picture defined with (a) parameters is
+                    bigger than the image size*/
+                    pC->m_pPto3GPPparams->PanZoomXa = 100 - pC->m_pPto3GPPparams->PanZoomTopleftYa;
+                }
+            }
+            /*Check (b) parameters*/
+            if(pC->m_pPto3GPPparams->PanZoomXb == 0)
+            {
+                M4OSA_UInt8 maxRatio = 0;
+                if(pC->m_pPto3GPPparams->PanZoomTopleftXb >=
+                     pC->m_pPto3GPPparams->PanZoomTopleftYb)
+                {
+                    /*The ratio is 0, that means the area of the picture defined with (b)
+                     parameters is bigger than the image size*/
+                    if(pC->m_pPto3GPPparams->PanZoomTopleftXb + tempPanzoomXb > 100)
+                    {
+                        /*The oversize is maxRatio*/
+                        maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftXb + tempPanzoomXb - 100;
+                    }
+                }
+                else
+                {
+                    /*The ratio is 0, that means the area of the picture defined with (b)
+                     parameters is bigger than the image size*/
+                    if(pC->m_pPto3GPPparams->PanZoomTopleftYb + tempPanzoomXb > 100)
+                    {
+                        /*The oversize is maxRatio*/
+                        maxRatio = pC->m_pPto3GPPparams->PanZoomTopleftYb + tempPanzoomXb - 100;
+                    }
+                }
+                /*Modify the (b) parameters:*/
+                if(pC->m_pPto3GPPparams->PanZoomTopleftXb >= maxRatio)
+                {
+                    /*The (b) topleft parameters can be moved to keep the same area size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftXb -= maxRatio;
+                }
+                else
+                {
+                    /*Move the (b) topleft parameter to 0 but the ratio will be also further
+                     modified to match the image size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftXb = 0;
+                }
+                if(pC->m_pPto3GPPparams->PanZoomTopleftYb >= maxRatio)
+                {
+                    /*The (b) topleft parameters can be moved to keep the same area size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftYb -= maxRatio;
+                }
+                else
+                {
+                    /*Move the (b) topleft parameter to 0 but the ratio will be also further
+                    modified to match the image size*/
+                    pC->m_pPto3GPPparams->PanZoomTopleftYb = 0;
+                }
+                /*The new ratio is the original one*/
+                pC->m_pPto3GPPparams->PanZoomXb = tempPanzoomXb;
+                if(pC->m_pPto3GPPparams->PanZoomXb + pC->m_pPto3GPPparams->PanZoomTopleftXb > 100)
+                {
+                    /*Change the ratio if the area of the picture defined with (b) parameters is
+                    bigger than the image size*/
+                    pC->m_pPto3GPPparams->PanZoomXb = 100 - pC->m_pPto3GPPparams->PanZoomTopleftXb;
+                }
+                if(pC->m_pPto3GPPparams->PanZoomXb + pC->m_pPto3GPPparams->PanZoomTopleftYb > 100)
+                {
+                    /*Change the ratio if the area of the picture defined with (b) parameters is
+                    bigger than the image size*/
+                    pC->m_pPto3GPPparams->PanZoomXb = 100 - pC->m_pPto3GPPparams->PanZoomTopleftYb;
+                }
+            }
+
+            /**
+             * Computes AIR parameters */
+/*        Params.m_inputCoord.m_x = (M4OSA_UInt32)(pC->m_pDecodedPlane->u_width *
+            (pC->m_pPto3GPPparams->PanZoomTopleftXa +
+            (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomTopleftXb \
+                - pC->m_pPto3GPPparams->PanZoomTopleftXa) *
+            pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
+        Params.m_inputCoord.m_y = (M4OSA_UInt32)(pC->m_pDecodedPlane->u_height *
+            (pC->m_pPto3GPPparams->PanZoomTopleftYa +
+            (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomTopleftYb\
+                 - pC->m_pPto3GPPparams->PanZoomTopleftYa) *
+            pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
+
+        Params.m_inputSize.m_width = (M4OSA_UInt32)(pC->m_pDecodedPlane->u_width *
+            (pC->m_pPto3GPPparams->PanZoomXa +
+            (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomXb - pC->m_pPto3GPPparams->PanZoomXa) *
+            pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
+
+        Params.m_inputSize.m_height =  (M4OSA_UInt32)(pC->m_pDecodedPlane->u_height *
+            (pC->m_pPto3GPPparams->PanZoomXa +
+            (M4OSA_Int16)((pC->m_pPto3GPPparams->PanZoomXb - pC->m_pPto3GPPparams->PanZoomXa) *
+            pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100;
+ */
+            Params.m_inputCoord.m_x = (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_width *
+                (pC->m_pPto3GPPparams->PanZoomTopleftXa +
+                (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomTopleftXb\
+                     - pC->m_pPto3GPPparams->PanZoomTopleftXa) *
+                pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100) + 0.5);
+            Params.m_inputCoord.m_y =
+                 (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_height *
+                (pC->m_pPto3GPPparams->PanZoomTopleftYa +
+                (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomTopleftYb\
+                     - pC->m_pPto3GPPparams->PanZoomTopleftYa) *
+                pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100) + 0.5);
+
+            Params.m_inputSize.m_width =
+                 (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_width *
+                (pC->m_pPto3GPPparams->PanZoomXa +
+                (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomXb\
+                     - pC->m_pPto3GPPparams->PanZoomXa) *
+                pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100) + 0.5);
+
+            Params.m_inputSize.m_height =
+                 (M4OSA_UInt32)((((M4OSA_Double)pC->m_pDecodedPlane->u_height *
+                (pC->m_pPto3GPPparams->PanZoomXa +
+                (M4OSA_Double)((M4OSA_Double)(pC->m_pPto3GPPparams->PanZoomXb \
+                    - pC->m_pPto3GPPparams->PanZoomXa) *
+                pC->m_ImageCounter) / (M4OSA_Double)pC->m_NbImage)) / 100) + 0.5);
+
+
+            if((Params.m_inputSize.m_width + Params.m_inputCoord.m_x)\
+                 > pC->m_pDecodedPlane->u_width)
+            {
+                Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width \
+                    - Params.m_inputCoord.m_x;
+            }
+
+            if((Params.m_inputSize.m_height + Params.m_inputCoord.m_y)\
+                 > pC->m_pDecodedPlane->u_height)
+            {
+                Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height\
+                     - Params.m_inputCoord.m_y;
+            }
+
+
+
+            Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
+            Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
+        }
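+
+        /* Informal numeric example of the interpolation above (assumed values: decoded width
+           of 640, PanZoomTopleftXa=10, PanZoomTopleftXb=30, 20 images, current image 10):
+           m_inputCoord.m_x = round(640 * (10 + (30 - 10) * 10 / 20) / 100) = 128.
+           The same linear interpolation between the (a) and (b) settings is applied to the
+           y coordinate and to the input window size, which is then clamped to the decoded
+           picture and forced to even dimensions. */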
+
+
+
+    /**
+        Picture rendering: Black borders*/
+
+        if(pC->m_mediaRendering == M4xVSS_kBlackBorders)
+        {
+            M4OSA_memset((M4OSA_MemAddr8)pImagePlanes[0].pac_data,
+                (pImagePlanes[0].u_height*pImagePlanes[0].u_stride),Y_PLANE_BORDER_VALUE);
+            M4OSA_memset((M4OSA_MemAddr8)pImagePlanes[1].pac_data,
+                (pImagePlanes[1].u_height*pImagePlanes[1].u_stride),U_PLANE_BORDER_VALUE);
+            M4OSA_memset((M4OSA_MemAddr8)pImagePlanes[2].pac_data,
+                (pImagePlanes[2].u_height*pImagePlanes[2].u_stride),V_PLANE_BORDER_VALUE);
+
+            /**
+            First without pan&zoom*/
+            if(M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
+            {
+                switch(pBasicTags.orientation)
+                {
+                default:
+                case M4COMMON_kOrientationUnknown:
+                    Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
+                case M4COMMON_kOrientationTopLeft:
+                case M4COMMON_kOrientationTopRight:
+                case M4COMMON_kOrientationBottomRight:
+                case M4COMMON_kOrientationBottomLeft:
+                    if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_height * pImagePlanes->u_width)\
+                         /pC->m_pDecodedPlane->u_width) <= pImagePlanes->u_height)
+                         //Params.m_inputSize.m_height < Params.m_inputSize.m_width)
+                    {
+                        /*it is height so black borders will be on the top and on the bottom side*/
+                        Params.m_outputSize.m_width = pImagePlanes->u_width;
+                        Params.m_outputSize.m_height =
+                             (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height \
+                                * pImagePlanes->u_width) /pC->m_pDecodedPlane->u_width);
+                        /*number of lines at the top*/
+                        pImagePlanes[0].u_topleft =
+                            (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
+                                -Params.m_outputSize.m_height)>>1))*pImagePlanes[0].u_stride;
+                        pImagePlanes[0].u_height = Params.m_outputSize.m_height;
+                        pImagePlanes[1].u_topleft =
+                             (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
+                                -(Params.m_outputSize.m_height>>1)))>>1)*pImagePlanes[1].u_stride;
+                        pImagePlanes[1].u_height = Params.m_outputSize.m_height>>1;
+                        pImagePlanes[2].u_topleft =
+                             (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
+                                -(Params.m_outputSize.m_height>>1)))>>1)*pImagePlanes[2].u_stride;
+                        pImagePlanes[2].u_height = Params.m_outputSize.m_height>>1;
+                    }
+                    else
+                    {
+                        /*it is width so black borders will be on the left and right side*/
+                        Params.m_outputSize.m_height = pImagePlanes->u_height;
+                        Params.m_outputSize.m_width =
+                             (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width \
+                                * pImagePlanes->u_height) /pC->m_pDecodedPlane->u_height);
+
+                        pImagePlanes[0].u_topleft =
+                            (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
+                                -Params.m_outputSize.m_width)>>1));
+                        pImagePlanes[0].u_width = Params.m_outputSize.m_width;
+                        pImagePlanes[1].u_topleft =
+                             (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
+                                -(Params.m_outputSize.m_width>>1)))>>1);
+                        pImagePlanes[1].u_width = Params.m_outputSize.m_width>>1;
+                        pImagePlanes[2].u_topleft =
+                             (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
+                                -(Params.m_outputSize.m_width>>1)))>>1);
+                        pImagePlanes[2].u_width = Params.m_outputSize.m_width>>1;
+                    }
+                    break;
+                case M4COMMON_kOrientationLeftTop:
+                case M4COMMON_kOrientationLeftBottom:
+                case M4COMMON_kOrientationRightTop:
+                case M4COMMON_kOrientationRightBottom:
+                        if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_width * pImagePlanes->u_width)\
+                             /pC->m_pDecodedPlane->u_height) < pImagePlanes->u_height)
+                             //Params.m_inputSize.m_height > Params.m_inputSize.m_width)
+                        {
+                            /*it is height so black borders will be on the top and on
+                             the bottom side*/
+                            Params.m_outputSize.m_height = pImagePlanes->u_width;
+                            Params.m_outputSize.m_width =
+                                 (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width \
+                                    * pImagePlanes->u_width) /pC->m_pDecodedPlane->u_height);
+                            /*number of lines at the top*/
+                            pImagePlanes[0].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
+                                    -Params.m_outputSize.m_width))>>1)*pImagePlanes[0].u_stride)+1;
+                            pImagePlanes[0].u_height = Params.m_outputSize.m_width;
+                            pImagePlanes[1].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
+                                    -(Params.m_outputSize.m_width>>1)))>>1)\
+                                        *pImagePlanes[1].u_stride)+1;
+                            pImagePlanes[1].u_height = Params.m_outputSize.m_width>>1;
+                            pImagePlanes[2].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
+                                    -(Params.m_outputSize.m_width>>1)))>>1)\
+                                        *pImagePlanes[2].u_stride)+1;
+                            pImagePlanes[2].u_height = Params.m_outputSize.m_width>>1;
+                        }
+                        else
+                        {
+                            /*it is width so black borders will be on the left and right side*/
+                            Params.m_outputSize.m_width = pImagePlanes->u_height;
+                            Params.m_outputSize.m_height =
+                                 (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height\
+                                     * pImagePlanes->u_height) /pC->m_pDecodedPlane->u_width);
+
+                            pImagePlanes[0].u_topleft =
+                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
+                                    -Params.m_outputSize.m_height))>>1))+1;
+                            pImagePlanes[0].u_width = Params.m_outputSize.m_height;
+                            pImagePlanes[1].u_topleft =
+                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
+                                    -(Params.m_outputSize.m_height>>1)))>>1))+1;
+                            pImagePlanes[1].u_width = Params.m_outputSize.m_height>>1;
+                            pImagePlanes[2].u_topleft =
+                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
+                                    -(Params.m_outputSize.m_height>>1)))>>1))+1;
+                            pImagePlanes[2].u_width = Params.m_outputSize.m_height>>1;
+                        }
+                    break;
+                }
+            }
+
+            /**
+            Secondly with pan&zoom*/
+            else
+            {
+                switch(pBasicTags.orientation)
+                {
+                default:
+                case M4COMMON_kOrientationUnknown:
+                    Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
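+                    /* no break: an unknown orientation falls through and is rendered as TopLeft */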
+                case M4COMMON_kOrientationTopLeft:
+                case M4COMMON_kOrientationTopRight:
+                case M4COMMON_kOrientationBottomRight:
+                case M4COMMON_kOrientationBottomLeft:
+                    /*NO ROTATION*/
+                    if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_height * pImagePlanes->u_width)\
+                         /pC->m_pDecodedPlane->u_width) <= pImagePlanes->u_height)
+                            //Params.m_inputSize.m_height < Params.m_inputSize.m_width)
+                    {
+                        /*Black borders will be on the top and bottom of the output video*/
+                        /*Maximum output height if the input image aspect ratio is kept and if
+                        the output width is the screen width*/
+                        M4OSA_UInt32 tempOutputSizeHeight =
+                            (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height\
+                                 * pImagePlanes->u_width) /pC->m_pDecodedPlane->u_width);
+                        M4OSA_UInt32 tempInputSizeHeightMax = 0;
+                        M4OSA_UInt32 tempFinalInputHeight = 0;
+                        /*The output width is the screen width*/
+                        Params.m_outputSize.m_width = pImagePlanes->u_width;
+                        tempOutputSizeHeight = (tempOutputSizeHeight>>1)<<1;
+
+                        /*Maximum input height according to the maximum output height
+                        (proportional to the maximum output height)*/
+                        tempInputSizeHeightMax = (pImagePlanes->u_height\
+                            *Params.m_inputSize.m_height)/tempOutputSizeHeight;
+                        tempInputSizeHeightMax = (tempInputSizeHeightMax>>1)<<1;
+
+                        /*Check if the maximum possible input height is contained into the
+                        input image height*/
+                        if(tempInputSizeHeightMax <= pC->m_pDecodedPlane->u_height)
+                        {
+                            /*The maximum possible input height is contained in the input
+                            image height,
+                            that means no black borders, the input pan zoom area will be extended
+                            so that the input AIR height will be the maximum possible*/
+                            if(((tempInputSizeHeightMax - Params.m_inputSize.m_height)>>1)\
+                                 <= Params.m_inputCoord.m_y
+                                && ((tempInputSizeHeightMax - Params.m_inputSize.m_height)>>1)\
+                                     <= pC->m_pDecodedPlane->u_height -(Params.m_inputCoord.m_y\
+                                         + Params.m_inputSize.m_height))
+                            {
+                                /*The input pan zoom area can be extended symmetrically on the
+                                top and bottom side*/
+                                Params.m_inputCoord.m_y -= ((tempInputSizeHeightMax \
+                                    - Params.m_inputSize.m_height)>>1);
+                            }
+                            else if(Params.m_inputCoord.m_y < pC->m_pDecodedPlane->u_height\
+                                -(Params.m_inputCoord.m_y + Params.m_inputSize.m_height))
+                            {
+                                /*There is not enough place above the input pan zoom area to
+                                extend it symmetrically,
+                                so extend it to the maximum on the top*/
+                                Params.m_inputCoord.m_y = 0;
+                            }
+                            else
+                            {
+                                /*There is not enough place below the input pan zoom area to
+                                extend it symmetrically,
+                                so extend it to the maximum on the bottom*/
+                                Params.m_inputCoord.m_y = pC->m_pDecodedPlane->u_height \
+                                    - tempInputSizeHeightMax;
+                            }
+                            /*The input height of the AIR is the maximum possible height*/
+                            Params.m_inputSize.m_height = tempInputSizeHeightMax;
+                        }
+                        else
+                        {
+                            /*The maximum possible input height is greater than the input
+                            image height,
+                            that means black borders are necessary to keep aspect ratio
+                            The input height of the AIR is all the input image height*/
+                            Params.m_outputSize.m_height =
+                                (tempOutputSizeHeight*pC->m_pDecodedPlane->u_height)\
+                                    /Params.m_inputSize.m_height;
+                            Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
+                            Params.m_inputCoord.m_y = 0;
+                            Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height;
+                            pImagePlanes[0].u_topleft =
+                                 (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
+                                    -Params.m_outputSize.m_height)>>1))*pImagePlanes[0].u_stride;
+                            pImagePlanes[0].u_height = Params.m_outputSize.m_height;
+                            pImagePlanes[1].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
+                                    -(Params.m_outputSize.m_height>>1)))>>1)\
+                                        *pImagePlanes[1].u_stride);
+                            pImagePlanes[1].u_height = Params.m_outputSize.m_height>>1;
+                            pImagePlanes[2].u_topleft =
+                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
+                                    -(Params.m_outputSize.m_height>>1)))>>1)\
+                                        *pImagePlanes[2].u_stride);
+                            pImagePlanes[2].u_height = Params.m_outputSize.m_height>>1;
+                        }
+                    }
+                    else
+                    {
+                        /*Black borders will be on the left and right side of the output video*/
+                        /*Maximum output width if the input image aspect ratio is kept and if the
+                         output height is the screen height*/
+                        M4OSA_UInt32 tempOutputSizeWidth =
+                             (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width \
+                                * pImagePlanes->u_height) /pC->m_pDecodedPlane->u_height);
+                        M4OSA_UInt32 tempInputSizeWidthMax = 0;
+                        M4OSA_UInt32 tempFinalInputWidth = 0;
+                        /*The output height is the screen height*/
+                        Params.m_outputSize.m_height = pImagePlanes->u_height;
+                        tempOutputSizeWidth = (tempOutputSizeWidth>>1)<<1;
+
+                        /*Maximum input width according to the maximum output width
+                        (proportional to the maximum output width)*/
+                        tempInputSizeWidthMax =
+                             (pImagePlanes->u_width*Params.m_inputSize.m_width)\
+                                /tempOutputSizeWidth;
+                        tempInputSizeWidthMax = (tempInputSizeWidthMax>>1)<<1;
+
+                        /*Check if the maximum possible input width is contained into the input
+                         image width*/
+                        if(tempInputSizeWidthMax <= pC->m_pDecodedPlane->u_width)
+                        {
+                            /*The maximum possible input width is contained in the input
+                            image width,
+                            that means no black borders, the input pan zoom area will be extended
+                            so that the input AIR width will be the maximum possible*/
+                            if(((tempInputSizeWidthMax - Params.m_inputSize.m_width)>>1) \
+                                <= Params.m_inputCoord.m_x
+                                && ((tempInputSizeWidthMax - Params.m_inputSize.m_width)>>1)\
+                                     <= pC->m_pDecodedPlane->u_width -(Params.m_inputCoord.m_x \
+                                        + Params.m_inputSize.m_width))
+                            {
+                                /*The input pan zoom area can be extended symmetrically on the
+                                     right and left side*/
+                                Params.m_inputCoord.m_x -= ((tempInputSizeWidthMax\
+                                     - Params.m_inputSize.m_width)>>1);
+                            }
+                            else if(Params.m_inputCoord.m_x < pC->m_pDecodedPlane->u_width\
+                                -(Params.m_inputCoord.m_x + Params.m_inputSize.m_width))
+                            {
+                                /*There is not enough place on the left of the input pan zoom
+                                    area to extend it symmetrically,
+                                so extend it to the maximum on the left*/
+                                Params.m_inputCoord.m_x = 0;
+                            }
+                            else
+                            {
+                                /*There is not enough place on the right of the input pan zoom
+                                    area to extend it symmetrically,
+                                so extend it to the maximum on the right*/
+                                Params.m_inputCoord.m_x = pC->m_pDecodedPlane->u_width \
+                                    - tempInputSizeWidthMax;
+                            }
+                            /*The input width of the AIR is the maximum possible width*/
+                            Params.m_inputSize.m_width = tempInputSizeWidthMax;
+                        }
+                        else
+                        {
+                            /*The maximum possible input width is greater than the input
+                            image width,
+                            that means black borders are necessary to keep aspect ratio
+                            The input width of the AIR is all the input image width*/
+                            Params.m_outputSize.m_width =\
+                                 (tempOutputSizeWidth*pC->m_pDecodedPlane->u_width)\
+                                    /Params.m_inputSize.m_width;
+                            Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
+                            Params.m_inputCoord.m_x = 0;
+                            Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width;
+                            pImagePlanes[0].u_topleft =
+                                 (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
+                                    -Params.m_outputSize.m_width)>>1));
+                            pImagePlanes[0].u_width = Params.m_outputSize.m_width;
+                            pImagePlanes[1].u_topleft =
+                                 (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
+                                    -(Params.m_outputSize.m_width>>1)))>>1);
+                            pImagePlanes[1].u_width = Params.m_outputSize.m_width>>1;
+                            pImagePlanes[2].u_topleft =
+                                 (M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
+                                    -(Params.m_outputSize.m_width>>1)))>>1);
+                            pImagePlanes[2].u_width = Params.m_outputSize.m_width>>1;
+                        }
+                    }
+                    break;
+                case M4COMMON_kOrientationLeftTop:
+                case M4COMMON_kOrientationLeftBottom:
+                case M4COMMON_kOrientationRightTop:
+                case M4COMMON_kOrientationRightBottom:
+                    /*ROTATION*/
+                    if((M4OSA_UInt32)((pC->m_pDecodedPlane->u_width * pImagePlanes->u_width)\
+                         /pC->m_pDecodedPlane->u_height) < pImagePlanes->u_height)
+                         //Params.m_inputSize.m_height > Params.m_inputSize.m_width)
+                    {
+                        /*Black borders will be on the left and right side of the output video*/
+                        /*Maximum output height if the input image aspect ratio is kept and if
+                        the output height is the screen width*/
+                        M4OSA_UInt32 tempOutputSizeHeight =
+                        (M4OSA_UInt32)((pC->m_pDecodedPlane->u_width * pImagePlanes->u_width)\
+                             /pC->m_pDecodedPlane->u_height);
+                        M4OSA_UInt32 tempInputSizeHeightMax = 0;
+                        M4OSA_UInt32 tempFinalInputHeight = 0;
+                        /*The output width is the screen height*/
+                        Params.m_outputSize.m_height = pImagePlanes->u_width;
+                        Params.m_outputSize.m_width= pImagePlanes->u_height;
+                        tempOutputSizeHeight = (tempOutputSizeHeight>>1)<<1;
+
+                        /*Maximum input height according to the maximum output height
+                             (proportional to the maximum output height)*/
+                        tempInputSizeHeightMax =
+                            (pImagePlanes->u_height*Params.m_inputSize.m_width)\
+                                /tempOutputSizeHeight;
+                        tempInputSizeHeightMax = (tempInputSizeHeightMax>>1)<<1;
+
+                        /*Check if the maximum possible input height is contained into the
+                             input image width (rotation included)*/
+                        if(tempInputSizeHeightMax <= pC->m_pDecodedPlane->u_width)
+                        {
+                            /*The maximum possible input height is contained in the input
+                            image width (rotation included),
+                            that means no black borders, the input pan zoom area will be extended
+                            so that the input AIR width will be the maximum possible*/
+                            if(((tempInputSizeHeightMax - Params.m_inputSize.m_width)>>1) \
+                                <= Params.m_inputCoord.m_x
+                                && ((tempInputSizeHeightMax - Params.m_inputSize.m_width)>>1)\
+                                     <= pC->m_pDecodedPlane->u_width -(Params.m_inputCoord.m_x \
+                                        + Params.m_inputSize.m_width))
+                            {
+                                /*The input pan zoom area can be extended symmetrically on the
+                                 right and left side*/
+                                Params.m_inputCoord.m_x -= ((tempInputSizeHeightMax \
+                                    - Params.m_inputSize.m_width)>>1);
+                            }
+                            else if(Params.m_inputCoord.m_x < pC->m_pDecodedPlane->u_width\
+                                -(Params.m_inputCoord.m_x + Params.m_inputSize.m_width))
+                            {
+                                /*There is not enough place on the left of the input pan
+                                zoom area to extend it symmetrically,
+                                so extend it to the maximum on the left*/
+                                Params.m_inputCoord.m_x = 0;
+                            }
+                            else
+                            {
+                                /*There is not enough place on the right of the input pan zoom
+                                 area to extend it symmetrically,
+                                so extend it to the maximum on the right*/
+                                Params.m_inputCoord.m_x =
+                                     pC->m_pDecodedPlane->u_width - tempInputSizeHeightMax;
+                            }
+                            /*The input width of the AIR is the maximum possible width*/
+                            Params.m_inputSize.m_width = tempInputSizeHeightMax;
+                        }
+                        else
+                        {
+                            /*The maximum possible input height is greater than the input
+                            image width (rotation included),
+                            that means black borders are necessary to keep aspect ratio
+                            The input width of the AIR is all the input image width*/
+                            Params.m_outputSize.m_width =
+                            (tempOutputSizeHeight*pC->m_pDecodedPlane->u_width)\
+                                /Params.m_inputSize.m_width;
+                            Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
+                            Params.m_inputCoord.m_x = 0;
+                            Params.m_inputSize.m_width = pC->m_pDecodedPlane->u_width;
+                            pImagePlanes[0].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_height\
+                                    -Params.m_outputSize.m_width))>>1)*pImagePlanes[0].u_stride)+1;
+                            pImagePlanes[0].u_height = Params.m_outputSize.m_width;
+                            pImagePlanes[1].u_topleft =
+                            ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_height\
+                                -(Params.m_outputSize.m_width>>1)))>>1)\
+                                    *pImagePlanes[1].u_stride)+1;
+                            pImagePlanes[1].u_height = Params.m_outputSize.m_width>>1;
+                            pImagePlanes[2].u_topleft =
+                            ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_height\
+                                -(Params.m_outputSize.m_width>>1)))>>1)\
+                                    *pImagePlanes[2].u_stride)+1;
+                            pImagePlanes[2].u_height = Params.m_outputSize.m_width>>1;
+                        }
+                    }
+                    else
+                    {
+                        /*Black borders will be on the top and bottom of the output video*/
+                        /*Maximum output width if the input image aspect ratio is kept and if
+                         the output width is the screen height*/
+                        M4OSA_UInt32 tempOutputSizeWidth =
+                        (M4OSA_UInt32)((pC->m_pDecodedPlane->u_height * pImagePlanes->u_height)\
+                             /pC->m_pDecodedPlane->u_width);
+                        M4OSA_UInt32 tempInputSizeWidthMax = 0;
+                        M4OSA_UInt32 tempFinalInputWidth = 0, tempFinalOutputWidth = 0;
+                        /*The output height is the screen width*/
+                        Params.m_outputSize.m_width = pImagePlanes->u_height;
+                        Params.m_outputSize.m_height= pImagePlanes->u_width;
+                        tempOutputSizeWidth = (tempOutputSizeWidth>>1)<<1;
+
+                        /*Maximum input width according to the maximum output width
+                         (proportional to the maximum output width)*/
+                        tempInputSizeWidthMax =
+                        (pImagePlanes->u_width*Params.m_inputSize.m_height)/tempOutputSizeWidth;
+                        tempInputSizeWidthMax = (tempInputSizeWidthMax>>1)<<1;
+
+                        /*Check if the maximum possible input width is contained into the input
+                         image height (rotation included)*/
+                        if(tempInputSizeWidthMax <= pC->m_pDecodedPlane->u_height)
+                        {
+                            /*The maximum possible input width is contained in the input
+                             image height (rotation included),
+                            that means no black borders, the input pan zoom area will be extended
+                            so that the input AIR height will be the maximum possible*/
+                            if(((tempInputSizeWidthMax - Params.m_inputSize.m_height)>>1) \
+                                <= Params.m_inputCoord.m_y
+                                && ((tempInputSizeWidthMax - Params.m_inputSize.m_height)>>1)\
+                                     <= pC->m_pDecodedPlane->u_height -(Params.m_inputCoord.m_y \
+                                        + Params.m_inputSize.m_height))
+                            {
+                                /*The input pan zoom area can be extended symmetrically on
+                                the right and left side*/
+                                Params.m_inputCoord.m_y -= ((tempInputSizeWidthMax \
+                                    - Params.m_inputSize.m_height)>>1);
+                            }
+                            else if(Params.m_inputCoord.m_y < pC->m_pDecodedPlane->u_height\
+                                -(Params.m_inputCoord.m_y + Params.m_inputSize.m_height))
+                            {
+                                /*There is not enough place on the top of the input pan zoom
+                                area to extend it symmetrically,
+                                so extend it to the maximum on the top*/
+                                Params.m_inputCoord.m_y = 0;
+                            }
+                            else
+                            {
+                                /*There is not enough place on the bottom of the input pan zoom
+                                 area to extend it symmetrically,
+                                so extend it to the maximum on the bottom*/
+                                Params.m_inputCoord.m_y = pC->m_pDecodedPlane->u_height\
+                                     - tempInputSizeWidthMax;
+                            }
+                            /*The input height of the AIR is the maximum possible height*/
+                            Params.m_inputSize.m_height = tempInputSizeWidthMax;
+                        }
+                        else
+                        {
+                            /*The maximum possible input width is greater than the input
+                             image height (rotation included),
+                            that means black borders are necessary to keep aspect ratio
+                            The input height of the AIR is all the input image height*/
+                            Params.m_outputSize.m_height =
+                                (tempOutputSizeWidth*pC->m_pDecodedPlane->u_height)\
+                                    /Params.m_inputSize.m_height;
+                            Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
+                            Params.m_inputCoord.m_y = 0;
+                            Params.m_inputSize.m_height = pC->m_pDecodedPlane->u_height;
+                            pImagePlanes[0].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[0].u_width\
+                                    -Params.m_outputSize.m_height))>>1))+1;
+                            pImagePlanes[0].u_width = Params.m_outputSize.m_height;
+                            pImagePlanes[1].u_topleft =
+                                ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[1].u_width\
+                                    -(Params.m_outputSize.m_height>>1)))>>1))+1;
+                            pImagePlanes[1].u_width = Params.m_outputSize.m_height>>1;
+                            pImagePlanes[2].u_topleft =
+                                 ((M4xVSS_ABS((M4OSA_Int32)(pImagePlanes[2].u_width\
+                                    -(Params.m_outputSize.m_height>>1)))>>1))+1;
+                            pImagePlanes[2].u_width = Params.m_outputSize.m_height>>1;
+                        }
+                    }
+                    break;
+                }
+            }
+
+            /*Width and height have to be even*/
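+            /* (x>>1)<<1 clears the least significant bit (e.g. 175 becomes 174): the YUV420
+             chroma planes are half-size, so every dimension must stay even */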
+            Params.m_outputSize.m_width = (Params.m_outputSize.m_width>>1)<<1;
+            Params.m_outputSize.m_height = (Params.m_outputSize.m_height>>1)<<1;
+            Params.m_inputSize.m_width = (Params.m_inputSize.m_width>>1)<<1;
+            Params.m_inputSize.m_height = (Params.m_inputSize.m_height>>1)<<1;
+            pImagePlanes[0].u_width = (pImagePlanes[0].u_width>>1)<<1;
+            pImagePlanes[1].u_width = (pImagePlanes[1].u_width>>1)<<1;
+            pImagePlanes[2].u_width = (pImagePlanes[2].u_width>>1)<<1;
+            pImagePlanes[0].u_height = (pImagePlanes[0].u_height>>1)<<1;
+            pImagePlanes[1].u_height = (pImagePlanes[1].u_height>>1)<<1;
+            pImagePlanes[2].u_height = (pImagePlanes[2].u_height>>1)<<1;
+
+            /*Check that values are coherent*/
+            if(Params.m_inputSize.m_height == Params.m_outputSize.m_height)
+            {
+                Params.m_inputSize.m_width = Params.m_outputSize.m_width;
+            }
+            else if(Params.m_inputSize.m_width == Params.m_outputSize.m_width)
+            {
+                Params.m_inputSize.m_height = Params.m_outputSize.m_height;
+            }
+        }
+
+        /**
+        Picture rendering: Resizing and Cropping*/
+        if(pC->m_mediaRendering != M4xVSS_kBlackBorders)
+        {
+            switch(pBasicTags.orientation)
+            {
+            default:
+            case M4COMMON_kOrientationUnknown:
+                Params.m_outputOrientation = M4COMMON_kOrientationTopLeft;
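+                /* no break: an unknown orientation falls through and is rendered as TopLeft */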
+            case M4COMMON_kOrientationTopLeft:
+            case M4COMMON_kOrientationTopRight:
+            case M4COMMON_kOrientationBottomRight:
+            case M4COMMON_kOrientationBottomLeft:
+                Params.m_outputSize.m_height = pImagePlanes->u_height;
+                Params.m_outputSize.m_width = pImagePlanes->u_width;
+                break;
+            case M4COMMON_kOrientationLeftTop:
+            case M4COMMON_kOrientationLeftBottom:
+            case M4COMMON_kOrientationRightTop:
+            case M4COMMON_kOrientationRightBottom:
+                Params.m_outputSize.m_height = pImagePlanes->u_width;
+                Params.m_outputSize.m_width = pImagePlanes->u_height;
+                break;
+            }
+        }
+
+        /**
+        Picture rendering: Cropping*/
+        if(pC->m_mediaRendering == M4xVSS_kCropping)
+        {
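+            /* Compare aspect ratios: if (outputH * inputW) / outputW < inputH the input is
+             taller than the output aspect ratio, so the height is cropped; otherwise the
+             width is cropped, so that the kept area matches the output aspect ratio */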
+            if((Params.m_outputSize.m_height * Params.m_inputSize.m_width)\
+                 /Params.m_outputSize.m_width<Params.m_inputSize.m_height)
+            {
+                M4OSA_UInt32 tempHeight = Params.m_inputSize.m_height;
+                /*height will be cropped*/
+                Params.m_inputSize.m_height =  (M4OSA_UInt32)((Params.m_outputSize.m_height \
+                    * Params.m_inputSize.m_width) /Params.m_outputSize.m_width);
+                Params.m_inputSize.m_height =  (Params.m_inputSize.m_height>>1)<<1;
+                if(M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
+                {
+                    Params.m_inputCoord.m_y = (M4OSA_Int32)((M4OSA_Int32)\
+                        ((pC->m_pDecodedPlane->u_height - Params.m_inputSize.m_height))>>1);
+                }
+                else
+                {
+                    Params.m_inputCoord.m_y += (M4OSA_Int32)((M4OSA_Int32)\
+                        ((tempHeight - Params.m_inputSize.m_height))>>1);
+                }
+            }
+            else
+            {
+                M4OSA_UInt32 tempWidth= Params.m_inputSize.m_width;
+                /*width will be cropped*/
+                Params.m_inputSize.m_width =  (M4OSA_UInt32)((Params.m_outputSize.m_width \
+                    * Params.m_inputSize.m_height) /Params.m_outputSize.m_height);
+                Params.m_inputSize.m_width =  (Params.m_inputSize.m_width>>1)<<1;
+                if(M4OSA_FALSE == pC->m_pPto3GPPparams->isPanZoom)
+                {
+                    Params.m_inputCoord.m_x = (M4OSA_Int32)((M4OSA_Int32)\
+                        ((pC->m_pDecodedPlane->u_width - Params.m_inputSize.m_width))>>1);
+                }
+                else
+                {
+                    Params.m_inputCoord.m_x += (M4OSA_Int32)\
+                        (((M4OSA_Int32)(tempWidth - Params.m_inputSize.m_width))>>1);
+                }
+            }
+        }
+
+
+
+        /**
+         * Call AIR functions */
+        if(M4OSA_NULL == pC->m_air_context)
+        {
+            err = M4AIR_create(&pC->m_air_context, M4AIR_kYUV420P);
+            if(err != M4NO_ERROR)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane[0].pac_data);
+                M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane);
+                pC->m_pDecodedPlane = M4OSA_NULL;
+                M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
+                     Error when initializing AIR: 0x%x", err);
+                return err;
+            }
+        }
+
+        err = M4AIR_configure(pC->m_air_context, &Params);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct:\
+                 Error when configuring AIR: 0x%x", err);
+            M4AIR_cleanUp(pC->m_air_context);
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane[0].pac_data);
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane);
+            pC->m_pDecodedPlane = M4OSA_NULL;
+            return err;
+        }
+
+        err = M4AIR_get(pC->m_air_context, pC->m_pDecodedPlane, pImagePlanes);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when getting AIR plane: 0x%x", err);
+            M4AIR_cleanUp(pC->m_air_context);
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane[0].pac_data);
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane);
+            pC->m_pDecodedPlane = M4OSA_NULL;
+            return err;
+        }
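+        /* Restore the plane descriptors from the local copies (pImagePlanes1/2/3, assumed to
+         have been saved before the rendering code above adjusted their geometry) */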
+        pImagePlanes[0] = pImagePlanes1;
+        pImagePlanes[1] = pImagePlanes2;
+        pImagePlanes[2] = pImagePlanes3;
+    }
+
+
+    /**
+     * Increment the image counter */
+    pC->m_ImageCounter++;
+
+    /**
+     * Check end of sequence */
+    last_frame_flag    = (pC->m_ImageCounter >= pC->m_NbImage);
+
+    /**
+     * Keep the picture duration */
+    *pPictureDuration = pC->m_timeDuration;
+
+    if (1 == last_frame_flag)
+    {
+        if(M4OSA_NULL != pC->m_air_context)
+        {
+            err = M4AIR_cleanUp(pC->m_air_context);
+            if(err != M4NO_ERROR)
+            {
+                M4OSA_TRACE1_1("M4xVSS_PictureCallbackFct: Error when cleaning AIR: 0x%x", err);
+                return err;
+            }
+        }
+        if(M4OSA_NULL != pC->m_pDecodedPlane)
+        {
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane[0].pac_data);
+            M4OSA_free((M4OSA_MemAddr32)pC->m_pDecodedPlane);
+            pC->m_pDecodedPlane = M4OSA_NULL;
+        }
+        return M4PTO3GPP_WAR_LAST_PICTURE;
+    }
+
+    M4OSA_TRACE1_0("M4xVSS_PictureCallbackFct: Leaving ");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_internalStartConvertPictureTo3gp(M4OSA_Context pContext)
+ * @brief    This function initializes Pto3GPP with the given parameters
+ * @note    The "Pictures to 3GPP" parameters are given by the internal xVSS
+ *            context. This context contains a pointer on the current element
+ *            of the chained list of Pto3GPP parameters.
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4PTO3GPP_WAR_LAST_PICTURE: The returned image is the last one
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalStartConvertPictureTo3gp(M4OSA_Context pContext)
+{
+    /************************************************************************/
+    /* Definitions to generate dummy AMR file used to add AMR silence in files generated
+     by Pto3GPP */
+    #define M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE     13
+    /* This constant is defined in M4VSS3GPP_InternalConfig.h */
+    extern const M4OSA_UInt8\
+         M4VSS3GPP_AMR_AU_SILENCE_FRAME_048[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE];
+
+    /* AMR file header ("#!AMR\n") written in front of the silence frame in the dummy AMR file */
+    #define M4VSS3GPP_AMR_HEADER_SIZE 6
+    const M4OSA_UInt8 M4VSS3GPP_AMR_HEADER[M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE] =
+    { 0x23, 0x21, 0x41, 0x4d, 0x52, 0x0a };
+    /************************************************************************/
+
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+    M4PTO3GPP_Context pM4PTO3GPP_Ctxt = M4OSA_NULL;
+    M4PTO3GPP_Params Params;
+     M4xVSS_PictureCallbackCtxt*    pCallBackCtxt;
+    M4OSA_Bool cmpResult=M4OSA_FALSE;
+    M4OSA_Context pDummyAMRFile;
+    M4OSA_Char out_amr[64];
+    /*UTF conversion support*/
+    M4OSA_Char* pDecodedPath = M4OSA_NULL;
+    M4OSA_UInt32 i;
+
+    /**
+     * Create a M4PTO3GPP instance */
+    err = M4PTO3GPP_Init( &pM4PTO3GPP_Ctxt, xVSS_context->pFileReadPtr,
+         xVSS_context->pFileWritePtr);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Init returned %ld\n",err);
+        return err;
+    }
+
+    /* replay recorded external encoder registrations on the PTO3GPP */
+    for (i=0; i<M4VE_kEncoderType_NB; i++)
+    {
+        if (xVSS_context->registeredExternalEncs[i].registered)
+        {
+            err = M4PTO3GPP_RegisterExternalVideoEncoder(pM4PTO3GPP_Ctxt, i,
+                    xVSS_context->registeredExternalEncs[i].pEncoderInterface,
+                    xVSS_context->registeredExternalEncs[i].pUserData);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile:\
+                     M4PTO3GPP_registerExternalVideoEncoder() returns 0x%x!", err);
+                M4PTO3GPP_CleanUp(pM4PTO3GPP_Ctxt);
+                return err;
+            }
+        }
+    }
+
+    pCallBackCtxt = (M4xVSS_PictureCallbackCtxt*)M4OSA_malloc(sizeof(M4xVSS_PictureCallbackCtxt),
+         M4VS,(M4OSA_Char *) "Pto3gpp callback struct");
+    if(pCallBackCtxt == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalStartConvertPictureTo3gp");
+        M4PTO3GPP_CleanUp(pM4PTO3GPP_Ctxt);
+        return M4ERR_ALLOC;
+    }
+
+    Params.OutputVideoFrameSize = xVSS_context->pSettings->xVSS.outputVideoSize;
+    Params.OutputVideoFormat = xVSS_context->pSettings->xVSS.outputVideoFormat;
+
+    /**
+     * Generate "dummy" amr file containing silence in temporary folder */
+    M4OSA_chrNCopy(out_amr, xVSS_context->pTempPath, 64);
+    M4OSA_chrNCat(out_amr, (M4OSA_Char *)"dummy.amr\0", 10);
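+    /* out_amr now holds "<temporary path>dummy.amr", the silence file generated below */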
+
+    /**
+     * UTF conversion: convert the temporary path into the customer format*/
+    pDecodedPath = out_amr;
+
+    if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+    {
+        M4OSA_UInt32 length = 0;
+        err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void*) out_amr,
+             (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &length);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalStartConvertPictureTo3gp:\
+                 M4xVSS_internalConvertFromUTF8 returns err: 0x%x",err);
+            return err;
+        }
+        pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+    }
+
+    /**
+    * End of the conversion, now use the converted path*/
+
+    err = xVSS_context->pFileWritePtr->openWrite(&pDummyAMRFile, pDecodedPath, M4OSA_kFileWrite);
+
+    /*Commented because of the use of the UTF conversion see above*/
+/*    err = xVSS_context->pFileWritePtr->openWrite(&pDummyAMRFile, out_amr, M4OSA_kFileWrite);
+ */
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: Can't open output dummy amr file %s,\
+             error: 0x%x\n",out_amr, err);
+        return err;
+    }
+
+    err =  xVSS_context->pFileWritePtr->writeData(pDummyAMRFile,
+        (M4OSA_Int8*)M4VSS3GPP_AMR_HEADER, M4VSS3GPP_AMR_HEADER_SIZE);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: Can't write output dummy amr file %s,\
+             error: 0x%x\n",out_amr, err);
+        return err;
+    }
+
+    err =  xVSS_context->pFileWritePtr->writeData(pDummyAMRFile,
+         (M4OSA_Int8*)M4VSS3GPP_AMR_AU_SILENCE_FRAME_048, M4VSS3GPP_AMR_AU_SILENCE_FRAME_048_SIZE);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: \
+            Can't write output dummy amr file %s, error: 0x%x\n",out_amr, err);
+        return err;
+    }
+
+    err =  xVSS_context->pFileWritePtr->closeWrite(pDummyAMRFile);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("M4xVSS_internalConvertPictureTo3gp: \
+            Can't close output dummy amr file %s, error: 0x%x\n",out_amr, err);
+        return err;
+    }
+
+    /**
+     * Fill parameters for Pto3GPP with the parameters contained in the current element of the
+     * Pto3GPP parameters chained list and with default parameters */
+/*+ New Encoder bitrates */
+    if(xVSS_context->pSettings->xVSS.outputVideoBitrate == 0) {
+        Params.OutputVideoBitrate    = M4VIDEOEDITING_kVARIABLE_KBPS;
+    }
+    else {
+          Params.OutputVideoBitrate = xVSS_context->pSettings->xVSS.outputVideoBitrate;
+    }
+    M4OSA_TRACE1_1("M4xVSS_internalStartConvertPicTo3GP: video bitrate = %d",
+        Params.OutputVideoBitrate);
+/*- New Encoder bitrates */
+    Params.OutputFileMaxSize    = M4PTO3GPP_kUNLIMITED;
+    Params.pPictureCallbackFct    = M4xVSS_PictureCallbackFct;
+    Params.pPictureCallbackCtxt    = pCallBackCtxt;
+    /*FB: change to use the converted path (UTF conversion) see the conversion above*/
+    /*Fix :- Adding Audio Track in Image as input :AudioTarckFile Setting to NULL */
+    Params.pInputAudioTrackFile    = M4OSA_NULL;//(M4OSA_Void*)pDecodedPath;//out_amr;
+    Params.AudioPaddingMode        = M4PTO3GPP_kAudioPaddingMode_Loop;
+    Params.AudioFileFormat        = M4VIDEOEDITING_kFileType_AMR;
+    Params.pOutput3gppFile        = xVSS_context->pPTo3GPPcurrentParams->pFileOut;
+    Params.pTemporaryFile        = xVSS_context->pPTo3GPPcurrentParams->pFileTemp;
+    /*+PR No:  blrnxpsw#223*/
+    /*Increasing frequency of Frame, calculating Nos of Frame = duration /FPS */
+    /*Other changes made is @ M4xVSS_API.c @ line 3841 in M4xVSS_SendCommand*/
+    /*If case check for PanZoom removed */
+    Params.NbVideoFrames            = (M4OSA_UInt32)
+        (xVSS_context->pPTo3GPPcurrentParams->duration \
+            / xVSS_context->pPTo3GPPcurrentParams->framerate);
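+    /* The 'framerate' field is used here as the per-picture duration: it is copied into
+     m_timeDuration (returned as pPictureDuration by the callback), and duration/framerate
+     gives the number of frames to encode */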
+    pCallBackCtxt->m_timeDuration    = xVSS_context->pPTo3GPPcurrentParams->framerate;
+    /*-PR No:  blrnxpsw#223*/
+    pCallBackCtxt->m_ImageCounter    = 0;
+    pCallBackCtxt->m_FileIn            = xVSS_context->pPTo3GPPcurrentParams->pFileIn;
+    pCallBackCtxt->m_NbImage        = Params.NbVideoFrames;
+    pCallBackCtxt->m_pFileReadPtr    = xVSS_context->pFileReadPtr;
+    pCallBackCtxt->m_pDecodedPlane    = M4OSA_NULL;
+    pCallBackCtxt->m_pPto3GPPparams    = xVSS_context->pPTo3GPPcurrentParams;
+    pCallBackCtxt->m_air_context    = M4OSA_NULL;
+    pCallBackCtxt->m_mediaRendering = xVSS_context->pPTo3GPPcurrentParams->MediaRendering;
+
+    /**
+     * Set the input and output files */
+    err = M4PTO3GPP_Open(pM4PTO3GPP_Ctxt, &Params);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Open returned: 0x%x\n",err);
+        if(pCallBackCtxt != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)pCallBackCtxt);
+            pCallBackCtxt = M4OSA_NULL;
+        }
+        M4PTO3GPP_CleanUp(pM4PTO3GPP_Ctxt);
+        return err;
+    }
+
+    /**
+     * Save context to be able to call Pto3GPP step function in M4xVSS_step function */
+    xVSS_context->pM4PTO3GPP_Ctxt = pM4PTO3GPP_Ctxt;
+    xVSS_context->pCallBackCtxt = pCallBackCtxt;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4xVSS_internalStopConvertPictureTo3gp(M4OSA_Context pContext)
+ * @brief    This function cleans up Pto3GPP
+ * @note
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalStopConvertPictureTo3gp(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+    M4OSA_Char out_amr[64];
+    /*UTF conversion support*/
+    M4OSA_Char* pDecodedPath = M4OSA_NULL;
+
+    /**
+    * Free the PTO3GPP callback context */
+    if(M4OSA_NULL != xVSS_context->pCallBackCtxt)
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCallBackCtxt);
+        xVSS_context->pCallBackCtxt = M4OSA_NULL;
+    }
+
+    /**
+     * Finalize the output file */
+    err = M4PTO3GPP_Close(xVSS_context->pM4PTO3GPP_Ctxt);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_Close returned 0x%x\n",err);
+        M4PTO3GPP_CleanUp(xVSS_context->pM4PTO3GPP_Ctxt);
+        return err;
+    }
+
+    /**
+     * Free this M4PTO3GPP instance */
+    err = M4PTO3GPP_CleanUp(xVSS_context->pM4PTO3GPP_Ctxt);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4PTO3GPP_CleanUp returned 0x%x\n",err);
+        return err;
+    }
+
+    /**
+     * Remove dummy.amr file */
+    M4OSA_chrNCopy(out_amr, xVSS_context->pTempPath, 64);
+    M4OSA_chrNCat(out_amr, (M4OSA_Char *)"dummy.amr\0", 10);
+
+    /**
+     * UTF conversion: convert the temporary path into the customer format*/
+    pDecodedPath = out_amr;
+
+    if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+    {
+        M4OSA_UInt32 length = 0;
+        err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void*) out_amr,
+             (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &length);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalStopConvertPictureTo3gp:\
+                 M4xVSS_internalConvertFromUTF8 returns err: 0x%x",err);
+            return err;
+        }
+        pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+    }
+    /**
+    * End of the conversion, now use the decoded path*/
+    M4OSA_fileExtraDelete(pDecodedPath);
+
+    /*Commented because of the use of the UTF conversion*/
+/*    M4OSA_fileExtraDelete(out_amr);
+ */
+
+    xVSS_context->pM4PTO3GPP_Ctxt = M4OSA_NULL;
+    xVSS_context->pCallBackCtxt = M4OSA_NULL;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx)
+ * @brief    This function converts an RGB565 plane to YUV420 planar
+ * @note    It is used only for framing effect
+ *            It allocates output YUV planes
+ * @param    framingCtx    (IN) The framing struct containing input RGB565 plane
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ * @return    M4ERR_ALLOC: Allocation error (no more memory)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalConvertRGBtoYUV(M4xVSS_FramingStruct* framingCtx)
+{
+    M4OSA_ERR err;
+
+    /**
+     * Allocate output YUV planes */
+    framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_malloc(3*sizeof(M4VIFI_ImagePlane),
+         M4VS, (M4OSA_Char *)"M4xVSS_internalConvertRGBtoYUV: Output plane YUV");
+    if(framingCtx->FramingYuv == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
+        return M4ERR_ALLOC;
+    }
+    framingCtx->FramingYuv[0].u_width = framingCtx->FramingRgb->u_width;
+    framingCtx->FramingYuv[0].u_height = framingCtx->FramingRgb->u_height;
+    framingCtx->FramingYuv[0].u_topleft = 0;
+    framingCtx->FramingYuv[0].u_stride = framingCtx->FramingRgb->u_width;
+    framingCtx->FramingYuv[0].pac_data =
+         (M4VIFI_UInt8*)M4OSA_malloc((framingCtx->FramingYuv[0].u_width\
+            *framingCtx->FramingYuv[0].u_height*3)>>1, M4VS, (M4OSA_Char *)\
+                "Alloc for the Convertion output YUV");;
+    if(framingCtx->FramingYuv[0].pac_data == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertRGBtoYUV");
+        return M4ERR_ALLOC;
+    }
+    framingCtx->FramingYuv[1].u_width = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[1].u_height = (framingCtx->FramingRgb->u_height)>>1;
+    framingCtx->FramingYuv[1].u_topleft = 0;
+    framingCtx->FramingYuv[1].u_stride = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[1].pac_data = framingCtx->FramingYuv[0].pac_data \
+        + framingCtx->FramingYuv[0].u_width * framingCtx->FramingYuv[0].u_height;
+    framingCtx->FramingYuv[2].u_width = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[2].u_height = (framingCtx->FramingRgb->u_height)>>1;
+    framingCtx->FramingYuv[2].u_topleft = 0;
+    framingCtx->FramingYuv[2].u_stride = (framingCtx->FramingRgb->u_width)>>1;
+    framingCtx->FramingYuv[2].pac_data = framingCtx->FramingYuv[1].pac_data \
+        + framingCtx->FramingYuv[1].u_width * framingCtx->FramingYuv[1].u_height;
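+    /* The three planes share one buffer of (width*height*3)/2 bytes: the Y plane (w x h) is
+     followed by the U and V planes (each w/2 x h/2), i.e. the standard YUV420 planar layout */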
+
+    /**
+     * Convert input RGB 565 to YUV 420 to be able to merge it with output video in framing
+      effect */
+    err = M4VIFI_xVSS_RGB565toYUV420(M4OSA_NULL, framingCtx->FramingRgb, framingCtx->FramingYuv);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalConvertRGBtoYUV:\
+             error when converting from RGB to YUV: 0x%x\n", err);
+    }
+
+    framingCtx->duration = 0;
+    framingCtx->previousClipTime = -1;
+    framingCtx->previewOffsetClipTime = -1;
+
+    /**
+     * Only one element in the chained list (no animated image with RGB buffer...) */
+    framingCtx->pCurrent = framingCtx;
+    framingCtx->pNext = framingCtx;
+
+    return M4NO_ERROR;
+}
+
+M4OSA_ERR M4xVSS_internalSetPlaneTransparent(M4OSA_UInt8* planeIn, M4OSA_UInt32 size)
+{
+    M4OSA_UInt32 i;
+    M4OSA_UInt8* plane = planeIn;
+    M4OSA_UInt8 transparent1 = (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
+    M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;
+
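+    /* Fill the buffer with the 16-bit TRANSPARENT_COLOR key, high byte then low byte;
+     'size' is in bytes, so size>>1 16-bit pixels are written */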
+    for(i=0; i<(size>>1); i++)
+    {
+        *plane++ = transparent1;
+        *plane++ = transparent2;
+    }
+
+    return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pContext,
+ *                                                M4VSS3GPP_EffectSettings* pEffect,
+ *                                                M4xVSS_FramingStruct* framingCtx,
+ *                                                M4VIDEOEDITING_VideoFrameSize OutputVideoResolution)
+ *
+ * @brief    This function converts an ARGB8888 input file to YUV420 when it is used for the
+ *           framing effect
+ * @note    The input ARGB8888 file path is contained in the pEffect structure
+ *            If the ARGB8888 must be resized to fit the output video size, this function
+ *            will do it.
+ * @param    pContext    (IN) The integrator own context
+ * @param    pEffect        (IN) The effect structure containing all information about
+ *                        the file to decode, resizing ...
+ * @param    framingCtx    (IN/OUT) Structure in which the output RGB will be stored
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ * @return    M4ERR_ALLOC: Allocation error (no more memory)
+ ******************************************************************************
+ */
+
+
+M4OSA_ERR M4xVSS_internalConvertARGB888toYUV420_FrammingEffect(M4OSA_Context pContext,
+                                                               M4VSS3GPP_EffectSettings* pEffect,
+                                                               M4xVSS_FramingStruct* framingCtx,
+                                                               M4VIDEOEDITING_VideoFrameSize\
+                                                                 OutputVideoResolution)
+{
+    M4OSA_ERR err;
+    M4OSA_Context pARGBIn;
+    M4OSA_UInt32 file_size;
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_UInt32 width, height, width_out, height_out;
+    M4OSA_Void* pFile = pEffect->xVSS.pFramingFilePath;
+    M4OSA_UInt8 transparent1 = (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
+    M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;
+    /*UTF conversion support*/
+    M4OSA_Char* pDecodedPath = M4OSA_NULL;
+    M4OSA_UInt32 i = 0,j = 0;
+    M4VIFI_ImagePlane rgbPlane;
+    M4OSA_UInt32 frameSize_argb=(framingCtx->width * framingCtx->height * 4);
+    M4OSA_UInt32 frameSize = (framingCtx->width * framingCtx->height * 3); //Size of RGB888 data
+    M4OSA_UInt8 *pTmpData = (M4OSA_UInt8*) M4OSA_malloc(frameSize_argb, M4VS, (M4OSA_Char*)\
+        "Image argb data");
+    M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: Entering ");
+    M4OSA_TRACE1_2("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect width and height %d %d ",
+        framingCtx->width,framingCtx->height);
+    if(pTmpData == M4OSA_NULL) {
+        M4OSA_TRACE1_0("Failed to allocate memory for Image clip");
+        return M4ERR_ALLOC;
+    }
+    /**
+     * UTF conversion: convert the file path into the customer format*/
+    pDecodedPath = pFile;
+
+    if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+            && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+    {
+        M4OSA_UInt32 length = 0;
+        err = M4xVSS_internalConvertFromUTF8(xVSS_context, (M4OSA_Void*) pFile,
+             (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &length);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalDecodePNG:\
+                 M4xVSS_internalConvertFromUTF8 returns err: 0x%x",err);
+            return err;
+        }
+        pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+    }
+
+    /**
+    * End of the conversion, now use the decoded path*/
+
+     /* Open input ARGB8888 file and store it into memory */
+    err = xVSS_context->pFileReadPtr->openRead(&pARGBIn, pDecodedPath, M4OSA_kFileRead);
+
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("Can't open input ARGB8888 file %s, error: 0x%x\n",pFile, err);
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        return err;
+    }
+
+    err = xVSS_context->pFileReadPtr->readData(pARGBIn,(M4OSA_MemAddr8)pTmpData, &frameSize_argb);
+    if(err != M4NO_ERROR)
+    {
+        xVSS_context->pFileReadPtr->closeRead(pARGBIn);
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        return err;
+    }
+
+
+    err =  xVSS_context->pFileReadPtr->closeRead(pARGBIn);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_2("Can't close input png file %s, error: 0x%x\n",pFile, err);
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        return err;
+    }
+
+    /* rgbPlane.pac_data = (M4VIFI_UInt8*)M4OSA_malloc(frameSize, M4VS,\
+        (M4OSA_Char*)"Image clip RGB888 data"); */
+    /* temp fix for crashing happening in filter :  allocation 2memory for 2 more width */
+    rgbPlane.pac_data = (M4VIFI_UInt8*)M4OSA_malloc(((frameSize)+ (2 * framingCtx->width)),
+         M4VS, (M4OSA_Char*)"Image clip RGB888 data");
+    if(rgbPlane.pac_data == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Failed to allocate memory for Image clip");
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        return M4ERR_ALLOC;
+    }
+
+    rgbPlane.u_height = ((framingCtx->height+1)>>1)<<1;
+    rgbPlane.u_width = ((framingCtx->width+1)>>1)<<1;
+    rgbPlane.u_stride = rgbPlane.u_width*3;
+    rgbPlane.u_topleft = 0;
+
+    M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+          Remove the alpha channel  ");
+      /** Remove the alpha channel */
+    for (i=0, j = 0; i < frameSize_argb; i++) {
+        if ((i % 4) == 0) continue;
+        rgbPlane.pac_data[j] = pTmpData[i];
+        j++;
+    }
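+    /* rgbPlane.pac_data now holds packed RGB888: every fourth byte of the ARGB8888 input
+     (the alpha component) has been dropped */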
+
+    M4OSA_free((M4OSA_MemAddr32)pTmpData);
+    /**
+     * Check if output sizes are odd */
+    if(rgbPlane.u_height % 2 != 0)
+    {
+
+        M4VIFI_UInt8* output_pac_data = rgbPlane.pac_data;
+        M4OSA_UInt32 i;
+        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+             output height is odd  ");
+        output_pac_data +=rgbPlane.u_width * rgbPlane.u_height*3;
+        for(i=0;i<rgbPlane.u_width;i++)
+        {
+            *output_pac_data++ = transparent1;
+            *output_pac_data++ = transparent2;
+        }
+
+        /**
+         * We just add an extra line filled with the transparency key color so the
+         height becomes even */
+        rgbPlane.u_height++;
+    }
+    if(rgbPlane.u_width % 2 != 0)
+    {
+
+        /**
+         * We add a new column filled with the transparency key color, which requires
+         copying every RGB line into a new buffer */
+        M4OSA_UInt32 i;
+        M4VIFI_UInt8* newRGBpac_data;
+        M4VIFI_UInt8* output_pac_data, *input_pac_data;
+
+        rgbPlane.u_width++;
+        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: \
+             output width is odd  ");
+        /**
+         * We need to allocate a new RGB output buffer in which all decoded data
+          + white line will be copied */
+        newRGBpac_data = (M4VIFI_UInt8*)M4OSA_malloc(rgbPlane.u_height*rgbPlane.u_width*3\
+            *sizeof(M4VIFI_UInt8), M4VS, (M4OSA_Char *)"New Framing GIF Output pac_data RGB");
+        if(newRGBpac_data == M4OSA_NULL)
+        {
+            M4OSA_TRACE1_0("Allocation error in \
+                M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+            /* Release the RGB plane allocated above before returning */
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane.pac_data);
+            rgbPlane.pac_data = M4OSA_NULL;
+            return M4ERR_ALLOC;
+        }
+
+        output_pac_data= newRGBpac_data;
+        input_pac_data = rgbPlane.pac_data;
+
+        for(i=0;i<rgbPlane.u_height;i++)
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8)output_pac_data, (M4OSA_MemAddr8)input_pac_data,
+                 (rgbPlane.u_width-1)*3);
+            output_pac_data += ((rgbPlane.u_width-1)*3);
+            /* Put the pixel to transparency color */
+            *output_pac_data++ = transparent1;
+            *output_pac_data++ = transparent2;
+            input_pac_data += ((rgbPlane.u_width-1)*3);
+        }
+
+        rgbPlane.pac_data = newRGBpac_data;
+    }
+
+    /**
+     * Initialize chained list parameters */
+    framingCtx->duration = 0;
+    framingCtx->previousClipTime = -1;
+    framingCtx->previewOffsetClipTime = -1;
+
+    /**
+     * Only one element in the chained list (no animated image ...) */
+    framingCtx->pCurrent = framingCtx;
+    framingCtx->pNext = framingCtx;
+
+    /**
+     * Get output width/height */
+     switch(OutputVideoResolution)
+    //switch(xVSS_context->pSettings->xVSS.outputVideoSize)
+    {
+    case M4VIDEOEDITING_kSQCIF:
+        width_out = 128;
+        height_out = 96;
+        break;
+    case M4VIDEOEDITING_kQQVGA:
+        width_out = 160;
+        height_out = 120;
+        break;
+    case M4VIDEOEDITING_kQCIF:
+        width_out = 176;
+        height_out = 144;
+        break;
+    case M4VIDEOEDITING_kQVGA:
+        width_out = 320;
+        height_out = 240;
+        break;
+    case M4VIDEOEDITING_kCIF:
+        width_out = 352;
+        height_out = 288;
+        break;
+    case M4VIDEOEDITING_kVGA:
+        width_out = 640;
+        height_out = 480;
+        break;
+    case M4VIDEOEDITING_kWVGA:
+        width_out = 800;
+        height_out = 480;
+        break;
+    case M4VIDEOEDITING_kNTSC:
+        width_out = 720;
+        height_out = 480;
+        break;
+    case M4VIDEOEDITING_k640_360:
+        width_out = 640;
+        height_out = 360;
+        break;
+    case M4VIDEOEDITING_k854_480:
+        // StageFright encoders require %16 resolution
+        width_out = M4ENCODER_854_480_Width;
+        height_out = 480;
+        break;
+    case M4VIDEOEDITING_kHD1280:
+        width_out = 1280;
+        height_out = 720;
+        break;
+    case M4VIDEOEDITING_kHD1080:
+        // StageFright encoders require %16 resolution
+        width_out = M4ENCODER_HD1080_Width;
+        height_out = 720;
+        break;
+    case M4VIDEOEDITING_kHD960:
+        width_out = 960;
+        height_out = 720;
+        break;
+
+    /**
+     * If the output video size is not given, we fall back to QCIF;
+     * this should not happen because the check is already done in M4xVSS_sendCommand */
+    default:
+        width_out = 176;
+        height_out = 144;
+        break;
+    }
+
+
+    /**
+     * Allocate output planes structures */
+    framingCtx->FramingRgb = (M4VIFI_ImagePlane*)M4OSA_malloc(sizeof(M4VIFI_ImagePlane), M4VS,
+         (M4OSA_Char *)"Framing Output plane RGB");
+    if(framingCtx->FramingRgb == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+        M4OSA_free((M4OSA_MemAddr32)pTmpData);
+        pTmpData = M4OSA_NULL;
+        return M4ERR_ALLOC;
+    }
+    /**
+     * Resize RGB if needed */
+    if((pEffect->xVSS.bResize) &&
+         (rgbPlane.u_width != width_out || rgbPlane.u_height != height_out))
+    {
+        width = width_out;
+        height = height_out;
+
+        M4OSA_TRACE1_2("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect: \
+             New Width and height %d %d  ",width,height);
+
+        framingCtx->FramingRgb->u_height = height_out;
+        framingCtx->FramingRgb->u_width = width_out;
+        framingCtx->FramingRgb->u_stride = framingCtx->FramingRgb->u_width*3;
+        framingCtx->FramingRgb->u_topleft = 0;
+
+        framingCtx->FramingRgb->pac_data =
+             (M4VIFI_UInt8*)M4OSA_malloc(framingCtx->FramingRgb->u_height*framingCtx->\
+                FramingRgb->u_width*3*sizeof(M4VIFI_UInt8), M4VS,
+                  (M4OSA_Char *)"Framing Output pac_data RGB");
+        if(framingCtx->FramingRgb->pac_data == M4OSA_NULL)
+        {
+            M4OSA_TRACE1_0("Allocation error in \
+                M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+            M4OSA_free((M4OSA_MemAddr32)pTmpData);
+            pTmpData = M4OSA_NULL;
+            return M4ERR_ALLOC;
+        }
+
+        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:  Resizing Needed ");
+        M4OSA_TRACE1_2("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+              rgbPlane.u_height & rgbPlane.u_width %d %d",rgbPlane.u_height,rgbPlane.u_width);
+        err = M4VIFI_ResizeBilinearRGB888toRGB888(M4OSA_NULL, &rgbPlane,framingCtx->FramingRgb);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect :\
+                when resizing RGB plane: 0x%x\n", err);
+            return err;
+        }
+
+        if(rgbPlane.pac_data != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)rgbPlane.pac_data);
+            rgbPlane.pac_data = M4OSA_NULL;
+
+        }
+
+    }
+    else
+    {
+
+        M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+              Resizing Not Needed ");
+        width = framingCtx->width;
+        height = framingCtx->height;
+        framingCtx->FramingRgb->u_height = height;
+        framingCtx->FramingRgb->u_width = width;
+        framingCtx->FramingRgb->u_stride = framingCtx->FramingRgb->u_width*3;
+        framingCtx->FramingRgb->u_topleft = 0;
+        framingCtx->FramingRgb->pac_data = rgbPlane.pac_data;
+    }
+
+
+    if(pEffect->xVSS.bResize)
+    {
+        /**
+         * Force topleft to 0 for pure framing effect */
+        framingCtx->topleft_x = 0;
+        framingCtx->topleft_y = 0;
+    }
+
+
+
+    /**
+     * Convert  RGB output to YUV 420 to be able to merge it with output video in framing
+     effect */
+    framingCtx->FramingYuv = (M4VIFI_ImagePlane*)M4OSA_malloc(3*sizeof(M4VIFI_ImagePlane), M4VS,
+         (M4OSA_Char *)"Framing Output plane YUV");
+    if(framingCtx->FramingYuv == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+        return M4ERR_ALLOC;
+    }
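+    /* ((x+1)>>1)<<1 rounds x up to the nearest even value (e.g. 177 -> 178, 176 -> 176), which
+       guarantees valid luma dimensions for the YUV 4:2:0 planes set up below. */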
+    framingCtx->FramingYuv[0].u_width = ((width+1)>>1)<<1;
+    framingCtx->FramingYuv[0].u_height = ((height+1)>>1)<<1;
+    framingCtx->FramingYuv[0].u_topleft = 0;
+    framingCtx->FramingYuv[0].u_stride = ((width+1)>>1)<<1;
+    framingCtx->FramingYuv[0].pac_data = (M4VIFI_UInt8*)M4OSA_malloc
+        ((framingCtx->FramingYuv[0].u_width*framingCtx->FramingYuv[0].u_height*3)>>1, M4VS,
+            (M4OSA_Char *)"Alloc for the output YUV");
+    if(framingCtx->FramingYuv[0].pac_data == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertARGB888toYUV420_FrammingEffect");
+        return M4ERR_ALLOC;
+    }
+    framingCtx->FramingYuv[1].u_width = (((width+1)>>1)<<1)>>1;
+    framingCtx->FramingYuv[1].u_height = (((height+1)>>1)<<1)>>1;
+    framingCtx->FramingYuv[1].u_topleft = 0;
+    framingCtx->FramingYuv[1].u_stride = (((width+1)>>1)<<1)>>1;
+
+    framingCtx->FramingYuv[1].pac_data = (M4VIFI_UInt8*)M4OSA_malloc
+        (((framingCtx->FramingYuv[0].u_width)/2*(framingCtx->FramingYuv[0].u_height)/2), M4VS,
+             (M4OSA_Char *)"Alloc for the output YUV");
+
+    framingCtx->FramingYuv[2].u_width = (((width+1)>>1)<<1)>>1;
+    framingCtx->FramingYuv[2].u_height = (((height+1)>>1)<<1)>>1;
+    framingCtx->FramingYuv[2].u_topleft = 0;
+    framingCtx->FramingYuv[2].u_stride = (((width+1)>>1)<<1)>>1;
+
+    framingCtx->FramingYuv[2].pac_data = (M4VIFI_UInt8*)M4OSA_malloc
+        (((framingCtx->FramingYuv[0].u_width)/2*(framingCtx->FramingYuv[0].u_height)/2), M4VS,
+            (M4OSA_Char *)"Alloc for the output YUV");
+
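+    /* Chroma plane sizes follow the YUV 4:2:0 layout: each of the U and V planes holds
+       (luma_width / 2) * (luma_height / 2) bytes, i.e. a quarter of the Y plane. */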
+
+
+    M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:\
+          convert RGB to YUV ");
+
+    err = M4VIFI_RGB888toYUV420(M4OSA_NULL, framingCtx->FramingRgb,  framingCtx->FramingYuv);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("SPS png: error when converting from RGB to YUV: 0x%x\n", err);
+    }
+
+    M4OSA_TRACE1_0("M4xVSS_internalConvertARGB888toYUV420_FrammingEffect:  Leaving ");
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalGenerateEditedFile(M4OSA_Context pContext)
+ *
+ * @brief    This function prepares VSS for editing
+ * @note    It also set special xVSS effect as external effects for the VSS
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ * @return    M4ERR_ALLOC: Allocation error (no more memory)
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalGenerateEditedFile(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4VSS3GPP_EditContext pVssCtxt;
+    M4OSA_UInt32 i,j;
+    M4OSA_ERR err;
+
+    /**
+     * Create a VSS 3GPP edition instance */
+    err = M4VSS3GPP_editInit( &pVssCtxt, xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile: M4VSS3GPP_editInit returned 0x%x\n",
+            err);
+        M4VSS3GPP_editCleanUp(pVssCtxt);
+        return err;
+    }
+
+#ifdef M4VSS_ENABLE_EXTERNAL_DECODERS
+    /* replay recorded external decoder registrations on the VSS3GPP */
+    for (i=0; i<M4VD_kVideoType_NB; i++)
+    {
+        if (xVSS_context->registeredExternalDecs[i].registered)
+        {
+            err = M4VSS3GPP_editRegisterExternalVideoDecoder(pVssCtxt, i,
+                    xVSS_context->registeredExternalDecs[i].pDecoderInterface,
+                    xVSS_context->registeredExternalDecs[i].pUserData);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile: \
+                    M4VSS3GPP_editRegisterExternalVideoDecoder() returns 0x%x!", err);
+                M4VSS3GPP_editCleanUp(pVssCtxt);
+                return err;
+            }
+        }
+    }
+#endif /* M4VSS_ENABLE_EXTERNAL_DECODERS */
+
+    /* replay recorded external encoder registrations on the VSS3GPP */
+    for (i=0; i<M4VE_kEncoderType_NB; i++)
+    {
+        if (xVSS_context->registeredExternalEncs[i].registered)
+        {
+            err = M4VSS3GPP_editRegisterExternalVideoEncoder(pVssCtxt, i,
+                    xVSS_context->registeredExternalEncs[i].pEncoderInterface,
+                    xVSS_context->registeredExternalEncs[i].pUserData);
+            if (M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile:\
+                     M4VSS3GPP_editRegisterExternalVideoEncoder() returns 0x%x!", err);
+                M4VSS3GPP_editCleanUp(pVssCtxt);
+                return err;
+            }
+        }
+    }
+
+    /* In case of MMS use case, we fill directly into the VSS context the targeted bitrate */
+    if(xVSS_context->targetedBitrate != 0)
+    {
+        M4VSS3GPP_InternalEditContext* pVSSContext = (M4VSS3GPP_InternalEditContext*)pVssCtxt;
+
+        pVSSContext->bIsMMS = M4OSA_TRUE;
+        pVSSContext->uiMMSVideoBitrate = xVSS_context->targetedBitrate;
+        pVSSContext->MMSvideoFramerate = xVSS_context->pSettings->videoFrameRate;
+    }
+
+    /* Warning: since the addition of the UTF conversion, pSettings has been replaced in the
+       following part by pCurrentEditSettings (there is a specific current editing structure
+       for saving, as for the preview) */
+
+    /**
+     * Set the external video effect functions, for saving mode (to be moved to
+      M4xVSS_saveStart() ?)*/
+    for (i=0; i<xVSS_context->pCurrentEditSettings->uiClipNumber; i++)
+    {
+        for (j=0; j<xVSS_context->pCurrentEditSettings->nbEffects; j++)
+        {
+            if (M4xVSS_kVideoEffectType_BlackAndWhite ==
+            xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                // (M4OSA_Void*)M4xVSS_kVideoEffectType_BlackAndWhite;
+                /*commented FB*/
+                /**
+                 * We do not need to set the color context, it is already set
+                 during sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Pink ==
+                xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                // (M4OSA_Void*)M4xVSS_kVideoEffectType_Pink; /**< we don't
+                // use any function context */
+                /*commented FB*/
+                /**
+                 * We do not need to set the color context,
+                  it is already set during sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Green ==
+                 xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                    M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                    // (M4OSA_Void*)M4xVSS_kVideoEffectType_Green;
+                     /**< we don't use any function context */
+                /*commented FB*/
+                /**
+                 * We do not need to set the color context, it is already set during
+                  sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Sepia ==
+                 xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                // (M4OSA_Void*)M4xVSS_kVideoEffectType_Sepia;
+                /**< we don't use any function context */
+                /*commented FB*/
+                /**
+                 * We do not need to set the color context, it is already set during
+                 sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Fifties ==
+             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectFifties;
+                /**
+                 * We do not need to set the framing context, it is already set during
+                 sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Negative ==
+             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                // (M4OSA_Void*)M4xVSS_kVideoEffectType_Negative;
+                 /**< we don't use any function context */
+                /*commented FB*/
+                /**
+                 * We do not need to set the color context, it is already set during
+                  sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Framing ==
+             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectFraming;
+                /**
+                 * We do not need to set the framing context, it is already set during
+                 sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_ZoomIn ==
+             xVSS_context->pSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectZoom;
+                xVSS_context->pCurrentEditSettings->Effects[j].pExtVideoEffectFctCtxt =
+                 (M4OSA_Void*)M4xVSS_kVideoEffectType_ZoomIn; /**< we don't use any
+                 function context */
+            }
+            if (M4xVSS_kVideoEffectType_ZoomOut ==
+             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectZoom;
+                xVSS_context->pCurrentEditSettings->Effects[j].pExtVideoEffectFctCtxt =
+                 (M4OSA_Void*)M4xVSS_kVideoEffectType_ZoomOut; /**< we don't use any
+                 function context */
+            }
+            if (M4xVSS_kVideoEffectType_ColorRGB16 ==
+             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                // (M4OSA_Void*)M4xVSS_kVideoEffectType_ColorRGB16;
+                /**< we don't use any function context */
+                /**
+                 * We do not need to set the color context, it is already set during
+                 sendCommand function */
+            }
+            if (M4xVSS_kVideoEffectType_Gradient ==
+             xVSS_context->pCurrentEditSettings->Effects[j].VideoEffectType)
+            {
+                xVSS_context->pCurrentEditSettings->Effects[j].ExtVideoEffectFct =
+                 M4VSS3GPP_externalVideoEffectColor;
+                //xVSS_context->pSettings->Effects[j].pExtVideoEffectFctCtxt =
+                // (M4OSA_Void*)M4xVSS_kVideoEffectType_ColorRGB16;
+                /**< we don't use any function context */
+                /**
+                 * We do not need to set the color context, it is already set during
+                 sendCommand function */
+            }
+
+        }
+    }
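+    /* All colour-based effects (black & white, pink, green, sepia, negative, RGB16, gradient)
+       share the same external function M4VSS3GPP_externalVideoEffectColor; their colour context
+       was already set during M4xVSS_sendCommand, so only the function pointer is filled in here.
+       Framing, Fifties and Zoom use their own dedicated external effect functions. */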
+
+    /**
+     * Open the VSS 3GPP */
+    err = M4VSS3GPP_editOpen(pVssCtxt, xVSS_context->pCurrentEditSettings);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalGenerateEditedFile:\
+             M4VSS3GPP_editOpen returned 0x%x\n",err);
+        M4VSS3GPP_editCleanUp(pVssCtxt);
+        return err;
+    }
+
+    /**
+     * Save VSS context to be able to close / free VSS later */
+    xVSS_context->pCurrentEditContext = pVssCtxt;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalCloseEditedFile(M4OSA_Context pContext)
+ *
+ * @brief    This function cleans up VSS
+ * @note
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalCloseEditedFile(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4VSS3GPP_EditContext pVssCtxt = xVSS_context->pCurrentEditContext;
+    M4OSA_ERR err;
+
+    if(xVSS_context->pCurrentEditContext != M4OSA_NULL)
+    {
+        /**
+         * Close the VSS 3GPP */
+        err = M4VSS3GPP_editClose(pVssCtxt);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalCloseEditedFile:\
+                 M4VSS3GPP_editClose returned 0x%x\n",err);
+            M4VSS3GPP_editCleanUp(pVssCtxt);
+            return err;
+        }
+
+        /**
+         * Free this VSS3GPP edition instance */
+        err = M4VSS3GPP_editCleanUp(pVssCtxt);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalCloseEditedFile: \
+                M4VSS3GPP_editCleanUp returned 0x%x\n",err);
+            return err;
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalGenerateAudioMixFile(M4OSA_Context pContext)
+ *
+ * @brief    This function prepares VSS for audio mixing
+ * @note    It takes its parameters from the BGM settings in the xVSS internal context
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ * @return    M4ERR_ALLOC: Allocation error (no more memory)
+ ******************************************************************************
+ */
+/***
+ * FB: this function has been modified because the structure used for saving is now
+ * pCurrentEditSettings and no longer pSettings.
+ * This change was added for the UTF support.
+ * All occurrences of "xVSS_context->pSettings" have been replaced by
+ * "xVSS_context->pCurrentEditSettings".
+ ***/
+M4OSA_ERR M4xVSS_internalGenerateAudioMixFile(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4VSS3GPP_AudioMixingSettings* pAudioMixSettings;
+    M4VSS3GPP_AudioMixingContext pAudioMixingCtxt;
+    M4OSA_ERR err;
+    M4VIDEOEDITING_ClipProperties fileProperties;
+
+    /**
+     * Allocate audio mixing settings structure and fill it with BGM parameters */
+    pAudioMixSettings = (M4VSS3GPP_AudioMixingSettings*)M4OSA_malloc
+        (sizeof(M4VSS3GPP_AudioMixingSettings), M4VS, (M4OSA_Char *)"pAudioMixSettings");
+    if(pAudioMixSettings == M4OSA_NULL)
+    {
+        M4OSA_TRACE1_0("Allocation error in M4xVSS_internalGenerateAudioMixFile");
+        return M4ERR_ALLOC;
+    }
+
+    if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->FileType ==
+         M4VIDEOEDITING_kFileType_3GPP)
+    {
+        err = M4xVSS_internalGetProperties((M4OSA_Context)xVSS_context,
+             (M4OSA_Char*)xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile,
+                 &fileProperties);
+        if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalGenerateAudioMixFile:\
+                 impossible to retrieve audio BGM properties ->\
+                     reencoding audio background music", err);
+            fileProperties.AudioStreamType =
+                 xVSS_context->pCurrentEditSettings->xVSS.outputAudioFormat+1;
+                  /* To force BGM encoding */
+        }
+    }
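+    /* If the BGM properties could not be read, the stream type above was deliberately set to a
+       value that differs from the requested output audio format, so that the background track
+       is always re-encoded (see the "To force BGM encoding" note above). */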
+
+    pAudioMixSettings->bRemoveOriginal = M4OSA_FALSE;
+    pAudioMixSettings->AddedAudioFileType =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->FileType;
+    pAudioMixSettings->pAddedAudioTrackFile =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile;
+    pAudioMixSettings->uiAddVolume =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiAddVolume;
+
+    pAudioMixSettings->outputAudioFormat = xVSS_context->pSettings->xVSS.outputAudioFormat;
+    pAudioMixSettings->outputASF = xVSS_context->pSettings->xVSS.outputAudioSamplFreq;
+    pAudioMixSettings->outputAudioBitrate = xVSS_context->pSettings->xVSS.outputAudioBitrate;
+    pAudioMixSettings->uiSamplingFrequency =
+     xVSS_context->pSettings->xVSS.pBGMtrack->uiSamplingFrequency;
+    pAudioMixSettings->uiNumChannels = xVSS_context->pSettings->xVSS.pBGMtrack->uiNumChannels;
+
+    pAudioMixSettings->b_DuckingNeedeed =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->b_DuckingNeedeed;
+    pAudioMixSettings->fBTVolLevel =
+     (M4OSA_Float )xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiAddVolume/100;
+    pAudioMixSettings->InDucking_threshold =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->InDucking_threshold;
+    pAudioMixSettings->InDucking_lowVolume =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->lowVolume/100;
+    pAudioMixSettings->fPTVolLevel =
+     (M4OSA_Float)xVSS_context->pSettings->PTVolLevel/100;
+    pAudioMixSettings->bLoop = xVSS_context->pSettings->xVSS.pBGMtrack->bLoop;
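+    /* uiAddVolume and PTVolLevel are expressed as percentages (0..100); the fBTVolLevel and
+       fPTVolLevel values handed to the audio mixer are the same quantities scaled to 0..1. */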
+
+    if(xVSS_context->pSettings->xVSS.bAudioMono)
+    {
+        pAudioMixSettings->outputNBChannels = 1;
+    }
+    else
+    {
+        pAudioMixSettings->outputNBChannels = 2;
+    }
+
+    /**
+     * Fill audio mix settings with BGM parameters */
+    pAudioMixSettings->uiBeginLoop =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiBeginLoop;
+    pAudioMixSettings->uiEndLoop =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiEndLoop;
+    pAudioMixSettings->uiAddCts =
+     xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->uiAddCts;
+
+    /**
+     * Output file of the audio mixer will be final file (audio mixing is the last step) */
+    pAudioMixSettings->pOutputClipFile = xVSS_context->pOutputFile;
+    pAudioMixSettings->pTemporaryFile = xVSS_context->pTemporaryFile;
+
+    /**
+     * Input file of the audio mixer is a temporary file containing all audio/video editions */
+    pAudioMixSettings->pOriginalClipFile = xVSS_context->pCurrentEditSettings->pOutputFile;
+
+    /**
+     * Save audio mixing settings pointer to be able to free it in
+     M4xVSS_internalCloseAudioMixedFile function */
+    xVSS_context->pAudioMixSettings = pAudioMixSettings;
+
+    /**
+     * Create a VSS 3GPP audio mixing instance */
+    err = M4VSS3GPP_audioMixingInit(&pAudioMixingCtxt, pAudioMixSettings,
+         xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
+
+    /**
+     * Save audio mixing context to be able to call audio mixing step function in
+      M4xVSS_step function */
+    xVSS_context->pAudioMixContext = pAudioMixingCtxt;
+
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalGenerateAudioMixFile:\
+             M4VSS3GPP_audioMixingInit returned 0x%x\n",err);
+        //M4VSS3GPP_audioMixingCleanUp(pAudioMixingCtxt);
+        return err;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalCloseAudioMixedFile(M4OSA_Context pContext)
+ *
+ * @brief    This function cleans up VSS for audio mixing
+ * @note
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalCloseAudioMixedFile(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+
+    /**
+     * Free this VSS3GPP audio mixing instance */
+    if(xVSS_context->pAudioMixContext != M4OSA_NULL)
+    {
+        err = M4VSS3GPP_audioMixingCleanUp(xVSS_context->pAudioMixContext);
+        if (err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalCloseAudioMixedFile:\
+                 M4VSS3GPP_audioMixingCleanUp returned 0x%x\n",err);
+            return err;
+        }
+    }
+
+    /**
+     * Free VSS audio mixing settings */
+    if(xVSS_context->pAudioMixSettings != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pAudioMixSettings);
+        xVSS_context->pAudioMixSettings = M4OSA_NULL;
+    }
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalFreePreview(M4OSA_Context pContext)
+ *
+ * @brief    This function cleans up preview edition structure used to generate
+ *            preview.3gp file given to the VPS
+ * @note    It also free the preview structure given to the VPS
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalFreePreview(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_UInt8 i;
+
+    /**
+     * Free clip/transition settings */
+    for(i=0; i<xVSS_context->pCurrentEditSettings->uiClipNumber; i++)
+    {
+        M4xVSS_FreeClipSettings(xVSS_context->pCurrentEditSettings->pClipList[i]);
+
+        M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pClipList[i]));
+        xVSS_context->pCurrentEditSettings->pClipList[i] = M4OSA_NULL;
+
+        /**
+         * Because there is 1 less transition than clip number */
+        if(i != xVSS_context->pCurrentEditSettings->uiClipNumber-1)
+        {
+            M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pTransitionList[i]));
+            xVSS_context->pCurrentEditSettings->pTransitionList[i] = M4OSA_NULL;
+        }
+    }
+
+    /**
+     * Free clip/transition list */
+    if(xVSS_context->pCurrentEditSettings->pClipList != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pClipList));
+        xVSS_context->pCurrentEditSettings->pClipList = M4OSA_NULL;
+    }
+    if(xVSS_context->pCurrentEditSettings->pTransitionList != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pTransitionList));
+        xVSS_context->pCurrentEditSettings->pTransitionList = M4OSA_NULL;
+    }
+
+    /**
+     * Free output preview file path */
+    if(xVSS_context->pCurrentEditSettings->pOutputFile != M4OSA_NULL)
+    {
+        M4OSA_free(xVSS_context->pCurrentEditSettings->pOutputFile);
+        xVSS_context->pCurrentEditSettings->pOutputFile = M4OSA_NULL;
+    }
+
+    /**
+     * Free temporary preview file path */
+    if(xVSS_context->pCurrentEditSettings->pTemporaryFile != M4OSA_NULL)
+    {
+        M4OSA_fileExtraDelete(xVSS_context->pCurrentEditSettings->pTemporaryFile);
+        M4OSA_free(xVSS_context->pCurrentEditSettings->pTemporaryFile);
+        xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
+    }
+
+    /**
+     * Free "local" BGM settings */
+    if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack != M4OSA_NULL)
+    {
+        if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL)
+        {
+            M4OSA_free(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile);
+            xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+        }
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack);
+        xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack = M4OSA_NULL;
+    }
+
+    /**
+     * Free current edit settings structure */
+    if(xVSS_context->pCurrentEditSettings != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCurrentEditSettings);
+        xVSS_context->pCurrentEditSettings = M4OSA_NULL;
+    }
+
+    /**
+     * Free preview effects given to application */
+    if(M4OSA_NULL != xVSS_context->pPreviewSettings->Effects)
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pPreviewSettings->Effects);
+        xVSS_context->pPreviewSettings->Effects = M4OSA_NULL;
+        xVSS_context->pPreviewSettings->nbEffects = 0;
+    }
+
+    return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalFreeSaving(M4OSA_Context pContext)
+ *
+ * @brief    This function cleans up saving edition structure used to generate
+ *            output.3gp file given to the VPS
+ * @note
+ * @param    pContext    (IN) The integrator own context
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalFreeSaving(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_UInt8 i;
+
+    if(xVSS_context->pCurrentEditSettings != M4OSA_NULL)
+    {
+        /**
+         * Free clip/transition settings */
+        for(i=0; i<xVSS_context->pCurrentEditSettings->uiClipNumber; i++)
+        {
+            M4xVSS_FreeClipSettings(xVSS_context->pCurrentEditSettings->pClipList[i]);
+
+            M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pClipList[i]));
+            xVSS_context->pCurrentEditSettings->pClipList[i] = M4OSA_NULL;
+
+            /**
+             * Because there is 1 less transition than clip number */
+            if(i != xVSS_context->pCurrentEditSettings->uiClipNumber-1)
+            {
+                M4OSA_free((M4OSA_MemAddr32)\
+                    (xVSS_context->pCurrentEditSettings->pTransitionList[i]));
+                xVSS_context->pCurrentEditSettings->pTransitionList[i] = M4OSA_NULL;
+            }
+        }
+
+        /**
+         * Free clip/transition list */
+        if(xVSS_context->pCurrentEditSettings->pClipList != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pClipList));
+            xVSS_context->pCurrentEditSettings->pClipList = M4OSA_NULL;
+        }
+        if(xVSS_context->pCurrentEditSettings->pTransitionList != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->pTransitionList));
+            xVSS_context->pCurrentEditSettings->pTransitionList = M4OSA_NULL;
+        }
+
+        if(xVSS_context->pCurrentEditSettings->Effects != M4OSA_NULL)
+        {
+            M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pCurrentEditSettings->Effects));
+            xVSS_context->pCurrentEditSettings->Effects = M4OSA_NULL;
+            xVSS_context->pCurrentEditSettings->nbEffects = 0;
+        }
+
+        /**
+         * Free output saving file path */
+        if(xVSS_context->pCurrentEditSettings->pOutputFile != M4OSA_NULL)
+        {
+            if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack != M4OSA_NULL)
+            {
+                M4OSA_fileExtraDelete(xVSS_context->pCurrentEditSettings->pOutputFile);
+                M4OSA_free(xVSS_context->pCurrentEditSettings->pOutputFile);
+            }
+            if(xVSS_context->pOutputFile != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)xVSS_context->pOutputFile);
+                xVSS_context->pOutputFile = M4OSA_NULL;
+            }
+            xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+            xVSS_context->pCurrentEditSettings->pOutputFile = M4OSA_NULL;
+        }
+
+        /**
+         * Free temporary saving file path */
+        if(xVSS_context->pCurrentEditSettings->pTemporaryFile != M4OSA_NULL)
+        {
+            M4OSA_fileExtraDelete(xVSS_context->pCurrentEditSettings->pTemporaryFile);
+            M4OSA_free(xVSS_context->pCurrentEditSettings->pTemporaryFile);
+            xVSS_context->pCurrentEditSettings->pTemporaryFile = M4OSA_NULL;
+        }
+
+        /**
+         * Free "local" BGM settings */
+        if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack != M4OSA_NULL)
+        {
+            if(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL)
+            {
+                M4OSA_free(xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile);
+                xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+            }
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack);
+            xVSS_context->pCurrentEditSettings->xVSS.pBGMtrack = M4OSA_NULL;
+        }
+
+        /**
+         * Free current edit settings structure */
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pCurrentEditSettings);
+        xVSS_context->pCurrentEditSettings = M4OSA_NULL;
+    }
+
+    return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_freeSettings(M4OSA_Context pContext)
+ *
+ * @brief    This function cleans up an M4VSS3GPP_EditSettings structure
+ * @note
+ * @param    pSettings    (IN) Pointer on M4VSS3GPP_EditSettings structure to free
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_freeSettings(M4VSS3GPP_EditSettings* pSettings)
+{
+    M4OSA_UInt8 i,j;
+
+    /**
+     * For each clip ... */
+    for(i=0; i<pSettings->uiClipNumber; i++)
+    {
+        /**
+         * ... free clip settings */
+        if(pSettings->pClipList[i] != M4OSA_NULL)
+        {
+            M4xVSS_FreeClipSettings(pSettings->pClipList[i]);
+
+            M4OSA_free((M4OSA_MemAddr32)(pSettings->pClipList[i]));
+            pSettings->pClipList[i] = M4OSA_NULL;
+        }
+
+        /**
+         * ... free transition settings */
+        if(i < pSettings->uiClipNumber-1) /* Because there is 1 less transition than clip number */
+        {
+            if(pSettings->pTransitionList[i] != M4OSA_NULL)
+            {
+                switch (pSettings->pTransitionList[i]->VideoTransitionType)
+                {
+                    case M4xVSS_kVideoTransitionType_AlphaMagic:
+
+                        /**
+                         * In case of Alpha Magic transition,
+                          some extra parameters need to be freed */
+                        if(pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt\
+                             != M4OSA_NULL)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)(((M4xVSS_internal_AlphaMagicSettings*)\
+                                pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt)->\
+                                    pPlane->pac_data));
+                            ((M4xVSS_internal_AlphaMagicSettings*)pSettings->pTransitionList[i\
+                                ]->pExtVideoTransitionFctCtxt)->pPlane->pac_data = M4OSA_NULL;
+
+                            M4OSA_free((M4OSA_MemAddr32)(((M4xVSS_internal_AlphaMagicSettings*)\
+                                pSettings->pTransitionList[i]->\
+                                    pExtVideoTransitionFctCtxt)->pPlane));
+                            ((M4xVSS_internal_AlphaMagicSettings*)pSettings->pTransitionList[i]\
+                                ->pExtVideoTransitionFctCtxt)->pPlane = M4OSA_NULL;
+
+                            M4OSA_free((M4OSA_MemAddr32)(pSettings->pTransitionList[i]->\
+                                pExtVideoTransitionFctCtxt));
+                            pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt = M4OSA_NULL;
+
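+                            /* The same Alpha Magic context may be shared by several transitions
+                               that use the same alpha file; clear the pointer in the remaining
+                               transitions so the context is not freed a second time. */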
+                            for(j=i+1;j<pSettings->uiClipNumber-1;j++)
+                            {
+                                if(pSettings->pTransitionList[j] != M4OSA_NULL)
+                                {
+                                    if(pSettings->pTransitionList[j]->VideoTransitionType ==
+                                     M4xVSS_kVideoTransitionType_AlphaMagic)
+                                    {
+                                        M4OSA_UInt32 pCmpResult=0;
+                                        M4OSA_chrCompare(pSettings->pTransitionList[i]->\
+                                            xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                                pAlphaFilePath,
+                                                pSettings->pTransitionList[j]->\
+                                                xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                                pAlphaFilePath, (M4OSA_Int32 *)&pCmpResult);
+                                        if(pCmpResult == 0)
+                                        {
+                                            /* Free extra internal alpha magic structure and put
+                                            it to NULL to avoid refreeing it */
+                                            M4OSA_free((M4OSA_MemAddr32)(pSettings->\
+                                                pTransitionList[j]->pExtVideoTransitionFctCtxt));
+                                            pSettings->pTransitionList[j]->\
+                                                pExtVideoTransitionFctCtxt = M4OSA_NULL;
+                                        }
+                                    }
+                                }
+                            }
+                        }
+
+                        if(pSettings->pTransitionList[i]->\
+                            xVSS.transitionSpecific.pAlphaMagicSettings != M4OSA_NULL)
+                        {
+                            if(pSettings->pTransitionList[i]->\
+                                xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                    pAlphaFilePath != M4OSA_NULL)
+                            {
+                                M4OSA_free((M4OSA_MemAddr32)pSettings->\
+                                    pTransitionList[i]->\
+                                        xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                            pAlphaFilePath);
+                                pSettings->pTransitionList[i]->\
+                                    xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                        pAlphaFilePath = M4OSA_NULL;
+                            }
+                            M4OSA_free((M4OSA_MemAddr32)pSettings->pTransitionList[i]->\
+                                xVSS.transitionSpecific.pAlphaMagicSettings);
+                            pSettings->pTransitionList[i]->\
+                                xVSS.transitionSpecific.pAlphaMagicSettings = M4OSA_NULL;
+
+                        }
+
+                    break;
+
+
+                    case M4xVSS_kVideoTransitionType_SlideTransition:
+                        if (M4OSA_NULL != pSettings->pTransitionList[i]->\
+                            xVSS.transitionSpecific.pSlideTransitionSettings)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)pSettings->pTransitionList[i]->\
+                                xVSS.transitionSpecific.pSlideTransitionSettings);
+                            pSettings->pTransitionList[i]->\
+                                xVSS.transitionSpecific.pSlideTransitionSettings = M4OSA_NULL;
+                        }
+                        if(pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt != M4OSA_NULL)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)(pSettings->pTransitionList[i]->\
+                                pExtVideoTransitionFctCtxt));
+                            pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt = M4OSA_NULL;
+                        }
+                    break;
+                    default:
+                    break;
+
+                }
+                /**
+                 * Free transition settings structure */
+                M4OSA_free((M4OSA_MemAddr32)(pSettings->pTransitionList[i]));
+                pSettings->pTransitionList[i] = M4OSA_NULL;
+            }
+        }
+    }
+
+    /**
+     * Free clip list */
+    if(pSettings->pClipList != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)(pSettings->pClipList));
+        pSettings->pClipList = M4OSA_NULL;
+    }
+
+    /**
+     * Free transition list */
+    if(pSettings->pTransitionList != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)(pSettings->pTransitionList));
+        pSettings->pTransitionList = M4OSA_NULL;
+    }
+
+    /**
+     * RC: Free effects list */
+    if(pSettings->Effects != M4OSA_NULL)
+    {
+        for(i=0; i<pSettings->nbEffects; i++)
+        {
+            /**
+             * For each clip, free framing structure if needed */
+            if(pSettings->Effects[i].VideoEffectType == M4xVSS_kVideoEffectType_Framing
+                || pSettings->Effects[i].VideoEffectType == M4xVSS_kVideoEffectType_Text)
+            {
+#ifdef DECODE_GIF_ON_SAVING
+                M4xVSS_FramingContext* framingCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
+#else
+                M4xVSS_FramingStruct* framingCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
+                M4xVSS_FramingStruct* framingCtx_save;
+                M4xVSS_Framing3102Struct* framingCtx_first = framingCtx;
+#endif
+
+#ifdef DECODE_GIF_ON_SAVING
+                if(framingCtx != M4OSA_NULL) /* Bugfix 1.2.0: crash when trying to free a
+                non-existent pointer */
+                {
+                    if(framingCtx->aFramingCtx != M4OSA_NULL)
+                    {
+                        if(pSettings->Effects[i].xVSS.pFramingBuffer == M4OSA_NULL)
+                        {
+                            if(framingCtx->aFramingCtx->FramingRgb != M4OSA_NULL)
+                            {
+                                M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx->\
+                                    FramingRgb->pac_data);
+                                framingCtx->aFramingCtx->FramingRgb->pac_data = M4OSA_NULL;
+                                M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx->FramingRgb);
+                                framingCtx->aFramingCtx->FramingRgb = M4OSA_NULL;
+                            }
+                        }
+                        if(framingCtx->aFramingCtx->FramingYuv != M4OSA_NULL)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx->\
+                                FramingYuv[0].pac_data);
+                            framingCtx->aFramingCtx->FramingYuv[0].pac_data = M4OSA_NULL;
+                           M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx->\
+                                FramingYuv[1].pac_data);
+                            framingCtx->aFramingCtx->FramingYuv[1].pac_data = M4OSA_NULL;
+                           M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx->\
+                                FramingYuv[2].pac_data);
+                            framingCtx->aFramingCtx->FramingYuv[2].pac_data = M4OSA_NULL;
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx->FramingYuv);
+                            framingCtx->aFramingCtx->FramingYuv = M4OSA_NULL;
+                        }
+                        M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx);
+                        framingCtx->aFramingCtx = M4OSA_NULL;
+                    }
+                    if(framingCtx->aFramingCtx_last != M4OSA_NULL)
+                    {
+                        if(pSettings->Effects[i].xVSS.pFramingBuffer == M4OSA_NULL)
+                        {
+                            if(framingCtx->aFramingCtx_last->FramingRgb != M4OSA_NULL)
+                            {
+                                M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx_last->\
+                                    FramingRgb->pac_data);
+                                framingCtx->aFramingCtx_last->FramingRgb->pac_data = M4OSA_NULL;
+                                M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx_last->\
+                                    FramingRgb);
+                                framingCtx->aFramingCtx_last->FramingRgb = M4OSA_NULL;
+                            }
+                        }
+                        if(framingCtx->aFramingCtx_last->FramingYuv != M4OSA_NULL)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx_last->\
+                                FramingYuv[0].pac_data);
+                            framingCtx->aFramingCtx_last->FramingYuv[0].pac_data = M4OSA_NULL;
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx_last->FramingYuv);
+                            framingCtx->aFramingCtx_last->FramingYuv = M4OSA_NULL;
+                        }
+                        M4OSA_free((M4OSA_MemAddr32)framingCtx->aFramingCtx_last);
+                        framingCtx->aFramingCtx_last = M4OSA_NULL;
+                    }
+                    if(framingCtx->pEffectFilePath != M4OSA_NULL)
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)framingCtx->pEffectFilePath);
+                        framingCtx->pEffectFilePath = M4OSA_NULL;
+                    }
+                    /*In case there are still allocated*/
+                    if(framingCtx->pSPSContext != M4OSA_NULL)
+                    {
+                    //    M4SPS_destroy(framingCtx->pSPSContext);
+                        framingCtx->pSPSContext = M4OSA_NULL;
+#if 0
+                        if(framingCtx->inputStream.data_buffer  != M4OSA_NULL)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->inputStream.data_buffer);
+                            framingCtx->inputStream.data_buffer = M4OSA_NULL;
+                        }
+#endif
+                    }
+                    /*Alpha blending structure*/
+                    if(framingCtx->alphaBlendingStruct  != M4OSA_NULL)
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)framingCtx->alphaBlendingStruct);
+                        framingCtx->alphaBlendingStruct = M4OSA_NULL;
+                    }
+
+                    M4OSA_free((M4OSA_MemAddr32)framingCtx);
+                    framingCtx = M4OSA_NULL;
+                }
+#else
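+                /* Without DECODE_GIF_ON_SAVING the framing contexts form a circular chained list
+                   (pNext eventually wraps back to the first element), so the loop below frees
+                   each context and stops once it reaches framingCtx_first again. */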
+                do
+                {
+                    if(framingCtx != M4OSA_NULL) /* Bugfix 1.2.0: crash when trying to free a
+                    non-existent pointer */
+                    {
+                        if(pSettings->Effects[i].xVSS.pFramingBuffer == M4OSA_NULL)
+                        {
+                            if(framingCtx->FramingRgb != M4OSA_NULL)
+                            {
+                                M4OSA_free((M4OSA_MemAddr32)framingCtx->FramingRgb->pac_data);
+                                framingCtx->FramingRgb->pac_data = M4OSA_NULL;
+                                M4OSA_free((M4OSA_MemAddr32)framingCtx->FramingRgb);
+                                framingCtx->FramingRgb = M4OSA_NULL;
+                            }
+                        }
+                        if(framingCtx->FramingYuv != M4OSA_NULL)
+                        {
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->FramingYuv[0].pac_data);
+                            framingCtx->FramingYuv[0].pac_data = M4OSA_NULL;
+                            M4OSA_free((M4OSA_MemAddr32)framingCtx->FramingYuv);
+                            framingCtx->FramingYuv = M4OSA_NULL;
+                        }
+                        framingCtx_save = framingCtx->pNext;
+                        M4OSA_free((M4OSA_MemAddr32)framingCtx);
+                        framingCtx = M4OSA_NULL;
+                        framingCtx = framingCtx_save;
+                    }
+                    else
+                    {
+                        /*FB: bug fix P4ME00003002*/
+                        break;
+                    }
+                } while(framingCtx_first != framingCtx);
+#endif
+            }
+            else if( M4xVSS_kVideoEffectType_Fifties == pSettings->Effects[i].VideoEffectType)
+            {
+                /* Free Fifties context */
+                M4xVSS_FiftiesStruct* FiftiesCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
+
+                if(FiftiesCtx != M4OSA_NULL)
+                {
+                    M4OSA_free((M4OSA_MemAddr32)FiftiesCtx);
+                    FiftiesCtx = M4OSA_NULL;
+                }
+
+            }
+            else if( M4xVSS_kVideoEffectType_ColorRGB16 == pSettings->Effects[i].VideoEffectType
+                || M4xVSS_kVideoEffectType_BlackAndWhite == pSettings->Effects[i].VideoEffectType
+                || M4xVSS_kVideoEffectType_Pink == pSettings->Effects[i].VideoEffectType
+                || M4xVSS_kVideoEffectType_Green == pSettings->Effects[i].VideoEffectType
+                || M4xVSS_kVideoEffectType_Sepia == pSettings->Effects[i].VideoEffectType
+                || M4xVSS_kVideoEffectType_Negative== pSettings->Effects[i].VideoEffectType
+                || M4xVSS_kVideoEffectType_Gradient== pSettings->Effects[i].VideoEffectType)
+            {
+                /* Free Color context */
+                M4xVSS_ColorStruct* ColorCtx = pSettings->Effects[i].pExtVideoEffectFctCtxt;
+
+                if(ColorCtx != M4OSA_NULL)
+                {
+                    M4OSA_free((M4OSA_MemAddr32)ColorCtx);
+                    ColorCtx = M4OSA_NULL;
+                }
+            }
+
+            /* Free simple fields */
+            if(pSettings->Effects[i].xVSS.pFramingFilePath != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pSettings->Effects[i].xVSS.pFramingFilePath);
+                pSettings->Effects[i].xVSS.pFramingFilePath = M4OSA_NULL;
+            }
+            if(pSettings->Effects[i].xVSS.pFramingBuffer != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pSettings->Effects[i].xVSS.pFramingBuffer);
+                pSettings->Effects[i].xVSS.pFramingBuffer = M4OSA_NULL;
+            }
+            if(pSettings->Effects[i].xVSS.pTextBuffer != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pSettings->Effects[i].xVSS.pTextBuffer);
+                pSettings->Effects[i].xVSS.pTextBuffer = M4OSA_NULL;
+            }
+        }
+        M4OSA_free((M4OSA_MemAddr32)pSettings->Effects);
+        pSettings->Effects = M4OSA_NULL;
+    }
+
+    return M4NO_ERROR;
+}
+
+M4OSA_ERR M4xVSS_freeCommand(M4OSA_Context pContext)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+//    M4OSA_UInt8 i,j;
+
+    /* Free "local" BGM settings */
+    if(xVSS_context->pSettings->xVSS.pBGMtrack != M4OSA_NULL)
+    {
+        if(xVSS_context->pSettings->xVSS.pBGMtrack->pFile != M4OSA_NULL)
+        {
+            M4OSA_free(xVSS_context->pSettings->xVSS.pBGMtrack->pFile);
+            xVSS_context->pSettings->xVSS.pBGMtrack->pFile = M4OSA_NULL;
+        }
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings->xVSS.pBGMtrack);
+        xVSS_context->pSettings->xVSS.pBGMtrack = M4OSA_NULL;
+    }
+#if 0
+    /* Parse transitions to free internal "alpha magic" settings structure */
+    /**
+     * In case there is twice or more the same Alpha Magic effect, the effect context
+     * may be freed twice or more.
+     * So, we parse all remaining transition settings to know if the context can be
+     * "re-freed", and if yes, we put its context to NULL to avoid freeing it again */
+    for(i=0; i<xVSS_context->pSettings->uiClipNumber-1; i++)
+    {
+        if(xVSS_context->pSettings->pTransitionList[i] != M4OSA_NULL)
+        {
+            switch (xVSS_context->pSettings->pTransitionList[i]->VideoTransitionType)
+            {
+                case M4xVSS_kVideoTransitionType_AlphaMagic:
+                    /**
+                     * In case of Alpha Magic transition, some extra parameters need to be freed */
+                    if(xVSS_context->pSettings->pTransitionList[i]->\
+                        pExtVideoTransitionFctCtxt != M4OSA_NULL)
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)(((M4xVSS_internal_AlphaMagicSettings*)\
+                            xVSS_context->pSettings->pTransitionList[i]->\
+                                pExtVideoTransitionFctCtxt)->pPlane->pac_data));
+                        ((M4xVSS_internal_AlphaMagicSettings*)xVSS_context->\
+                            pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt)->\
+                                pPlane->pac_data = M4OSA_NULL;
+
+                        M4OSA_free((M4OSA_MemAddr32)(((M4xVSS_internal_AlphaMagicSettings*)\
+                            xVSS_context->pSettings->pTransitionList[i]->\
+                                pExtVideoTransitionFctCtxt)->pPlane));
+                        ((M4xVSS_internal_AlphaMagicSettings*)xVSS_context->\
+                            pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt)->\
+                                pPlane = M4OSA_NULL;
+
+                        M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pSettings->\
+                            pTransitionList[i]->pExtVideoTransitionFctCtxt));
+                        xVSS_context->pSettings->pTransitionList[i]->pExtVideoTransitionFctCtxt
+                             = M4OSA_NULL;
+
+                        for(j=i+1;j<xVSS_context->pSettings->uiClipNumber-1;j++)
+                        {
+                            if(xVSS_context->pSettings->pTransitionList[j] != M4OSA_NULL)
+                            {
+                                if(xVSS_context->pSettings->pTransitionList[j]->\
+                                    VideoTransitionType == M4xVSS_kVideoTransitionType_AlphaMagic)
+                                {
+                                    M4OSA_UInt32 pCmpResult=0;
+                                    M4OSA_chrCompare(xVSS_context->pSettings->pTransitionList[i]->\
+                                        xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                            pAlphaFilePath,
+                                        xVSS_context->pSettings->pTransitionList[j]->\
+                                            xVSS.transitionSpecific.pAlphaMagicSettings->\
+                                                pAlphaFilePath, &pCmpResult);
+                                    if(pCmpResult == 0)
+                                        {
+                                        /* Free the extra internal alpha magic structure and set
+                                         it to NULL to avoid re-freeing it */
+                                        M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pSettings->\
+                                            pTransitionList[j]->pExtVideoTransitionFctCtxt));
+                                        xVSS_context->pSettings->pTransitionList[j]->\
+                                            pExtVideoTransitionFctCtxt = M4OSA_NULL;
+                                    }
+                                }
+                            }
+                        }
+                    }
+                break;
+
+                case M4xVSS_kVideoTransitionType_SlideTransition:
+                    if(xVSS_context->pSettings->pTransitionList[i]->\
+                        pExtVideoTransitionFctCtxt != M4OSA_NULL)
+                    {
+                        M4OSA_free((M4OSA_MemAddr32)(xVSS_context->pSettings->\
+                            pTransitionList[i]->pExtVideoTransitionFctCtxt));
+                        xVSS_context->pSettings->pTransitionList[i]->\
+                            pExtVideoTransitionFctCtxt = M4OSA_NULL;
+                    }
+                break;
+            }
+        }
+    }
+#endif
+
+    M4xVSS_freeSettings(xVSS_context->pSettings);
+
+    if(xVSS_context->pPTo3GPPparamsList != M4OSA_NULL)
+    {
+        M4xVSS_Pto3GPP_params* pParams = xVSS_context->pPTo3GPPparamsList;
+        M4xVSS_Pto3GPP_params* pParams_sauv;
+
+        while(pParams != M4OSA_NULL)
+        {
+            if(pParams->pFileIn != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pParams->pFileIn);
+                pParams->pFileIn = M4OSA_NULL;
+            }
+            if(pParams->pFileOut != M4OSA_NULL)
+            {
+                /* Delete temporary file */
+                M4OSA_fileExtraDelete(pParams->pFileOut);
+                M4OSA_free((M4OSA_MemAddr32)pParams->pFileOut);
+                pParams->pFileOut = M4OSA_NULL;
+            }
+            if(pParams->pFileTemp != M4OSA_NULL)
+            {
+                /* Delete temporary file */
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+                M4OSA_fileExtraDelete(pParams->pFileTemp);
+                M4OSA_free((M4OSA_MemAddr32)pParams->pFileTemp);
+#endif/*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+                pParams->pFileTemp = M4OSA_NULL;
+            }
+            pParams_sauv = pParams;
+            pParams = pParams->pNext;
+            M4OSA_free((M4OSA_MemAddr32)pParams_sauv);
+            pParams_sauv = M4OSA_NULL;
+        }
+    }
+
+    if(xVSS_context->pMCSparamsList != M4OSA_NULL)
+    {
+        M4xVSS_MCS_params* pParams = xVSS_context->pMCSparamsList;
+        M4xVSS_MCS_params* pParams_sauv;
+
+        while(pParams != M4OSA_NULL)
+        {
+            if(pParams->pFileIn != M4OSA_NULL)
+            {
+                M4OSA_free((M4OSA_MemAddr32)pParams->pFileIn);
+                pParams->pFileIn = M4OSA_NULL;
+            }
+            if(pParams->pFileOut != M4OSA_NULL)
+            {
+                /* Delete temporary file */
+                M4OSA_fileExtraDelete(pParams->pFileOut);
+                M4OSA_free((M4OSA_MemAddr32)pParams->pFileOut);
+                pParams->pFileOut = M4OSA_NULL;
+            }
+            if(pParams->pFileTemp != M4OSA_NULL)
+            {
+                /* Delete temporary file */
+#ifdef M4xVSS_RESERVED_MOOV_DISK_SPACE
+                M4OSA_fileExtraDelete(pParams->pFileTemp);
+                M4OSA_free((M4OSA_MemAddr32)pParams->pFileTemp);
+#endif/*M4xVSS_RESERVED_MOOV_DISK_SPACE*/
+                pParams->pFileTemp = M4OSA_NULL;
+            }
+            pParams_sauv = pParams;
+            pParams = pParams->pNext;
+            M4OSA_free((M4OSA_MemAddr32)pParams_sauv);
+            pParams_sauv = M4OSA_NULL;
+        }
+    }
+
+    if(xVSS_context->pcmPreviewFile != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pcmPreviewFile);
+        xVSS_context->pcmPreviewFile = M4OSA_NULL;
+    }
+    if(xVSS_context->pSettings->pOutputFile != M4OSA_NULL
+        && xVSS_context->pOutputFile != M4OSA_NULL)
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->pSettings->pOutputFile);
+        xVSS_context->pSettings->pOutputFile = M4OSA_NULL;
+        xVSS_context->pOutputFile = M4OSA_NULL;
+    }
+
+    /* Reinit all context variables */
+    xVSS_context->previousClipNumber = 0;
+    xVSS_context->editingStep = M4xVSS_kMicroStateEditing;
+    xVSS_context->analyseStep = M4xVSS_kMicroStateAnalysePto3GPP;
+    xVSS_context->pPTo3GPPparamsList = M4OSA_NULL;
+    xVSS_context->pPTo3GPPcurrentParams = M4OSA_NULL;
+    xVSS_context->pMCSparamsList = M4OSA_NULL;
+    xVSS_context->pMCScurrentParams = M4OSA_NULL;
+    xVSS_context->tempFileIndex = 0;
+    xVSS_context->targetedTimescale = 0;
+
+    return M4NO_ERROR;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalGetProperties(M4OSA_Context pContext,
+ *                                    M4OSA_Char* pFile,
+ *                                    M4VIDEOEDITING_ClipProperties *pFileProperties)
+ *
+ * @brief    This function retrieves the properties of an input 3GP file using the MCS
+ * @note
+ * @param    pContext        (IN) The integrator own context
+ * @param    pFile            (IN) 3GP file to analyse
+ * @param    pFileProperties    (IN/OUT) Pointer to a structure that will receive
+ *                            the 3GP file properties
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalGetProperties(M4OSA_Context pContext, M4OSA_Char* pFile,
+                                       M4VIDEOEDITING_ClipProperties *pFileProperties)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+    M4MCS_Context mcs_context;
+
+    err = M4MCS_init(&mcs_context, xVSS_context->pFileReadPtr, xVSS_context->pFileWritePtr);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalGetProperties: Error in M4MCS_init: 0x%x", err);
+        return err;
+    }
+
+    /*open the MCS in the "normal opening" mode to retrieve the exact duration*/
+    err = M4MCS_open_normalMode(mcs_context, pFile, M4VIDEOEDITING_kFileType_3GPP,
+        M4OSA_NULL, M4OSA_NULL);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalGetProperties: Error in M4MCS_open_normalMode: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    err = M4MCS_getInputFileProperties(mcs_context, pFileProperties);
+    if(err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("Error in M4MCS_getInputFileProperties: 0x%x", err);
+        M4MCS_abort(mcs_context);
+        return err;
+    }
+
+    err = M4MCS_abort(mcs_context);
+    if (err != M4NO_ERROR)
+    {
+        M4OSA_TRACE1_1("M4xVSS_internalGetProperties: Error in M4MCS_abort: 0x%x", err);
+        return err;
+    }
+
+    return M4NO_ERROR;
+}
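+
+/* Illustrative call (sketch only; xVSS_ctx, clipPath, props and duration are
+ * hypothetical names, not defined in this file):
+ *
+ *     M4VIDEOEDITING_ClipProperties props;
+ *     M4OSA_ERR e = M4xVSS_internalGetProperties(xVSS_ctx, clipPath, &props);
+ *     if (M4NO_ERROR == e)
+ *         duration = props.uiClipDuration;
+ */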
+
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalGetTargetedTimeScale(M4OSA_Context pContext,
+ *                                                M4VSS3GPP_EditSettings* pSettings,
+ *                                                M4OSA_UInt32* pTargetedTimeScale)
+ *
+ * @brief    This function retrieves the targeted time scale
+ * @note
+ * @param    pContext            (IN)    The integrator own context
+ * @param    pSettings            (IN)    Edit settings containing the clip list
+ * @param    pTargetedTimeScale    (OUT)    Targeted time scale
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalGetTargetedTimeScale(M4OSA_Context pContext,
+                                                 M4VSS3GPP_EditSettings* pSettings,
+                                                  M4OSA_UInt32* pTargetedTimeScale)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+    M4OSA_UInt32 totalDuration = 0;
+    M4OSA_UInt8 i = 0;
+    M4OSA_UInt32 tempTimeScale = 0, tempDuration = 0;
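+
+    /* Strategy (as implemented below): among the MPEG-4 clips, keep the video time scale
+     * of the clip with the longest edited duration (end cut - begin cut, or clip duration
+     * - begin cut when no end cut is set); an ARGB8888 still forces a time scale of 30,
+     * and a floor of 30 is applied at the end. */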
+
+    for(i=0;i<pSettings->uiClipNumber;i++)
+    {
+        /*search timescale only in mpeg4 case*/
+        if(pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_3GPP
+            || pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_MP4)
+        {
+            M4VIDEOEDITING_ClipProperties fileProperties;
+
+            /*UTF conversion support*/
+            M4OSA_Char* pDecodedPath = M4OSA_NULL;
+
+            /**
+            * UTF conversion: convert into the customer format, before being used*/
+            pDecodedPath = pSettings->pClipList[i]->pFile;
+
+            if(xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
+                && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+            {
+                M4OSA_UInt32 length = 0;
+                err = M4xVSS_internalConvertFromUTF8(xVSS_context,
+                     (M4OSA_Void*) pSettings->pClipList[i]->pFile,
+                        (M4OSA_Void*) xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
+                             &length);
+                if(err != M4NO_ERROR)
+                {
+                    M4OSA_TRACE1_1("M4xVSS_internalGetTargetedTimeScale:\
+                         M4xVSS_internalConvertFromUTF8 returns err: 0x%x",err);
+                    return err;
+                }
+                pDecodedPath = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+            }
+
+            /*End of the conversion: use the decoded path*/
+            err = M4xVSS_internalGetProperties(xVSS_context, pDecodedPath, &fileProperties);
+
+            /*get input file properties*/
+            /*err = M4xVSS_internalGetProperties(xVSS_context, pSettings->\
+                pClipList[i]->pFile, &fileProperties);*/
+            if(M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalGetTargetedTimeScale:\
+                     M4xVSS_internalGetProperties returned: 0x%x", err);
+                return err;
+            }
+            if(fileProperties.VideoStreamType == M4VIDEOEDITING_kMPEG4)
+            {
+                if(pSettings->pClipList[i]->uiEndCutTime > 0)
+                {
+                    if(tempDuration < (pSettings->pClipList[i]->uiEndCutTime \
+                        - pSettings->pClipList[i]->uiBeginCutTime))
+                    {
+                        tempTimeScale = fileProperties.uiVideoTimeScale;
+                        tempDuration = (pSettings->pClipList[i]->uiEndCutTime\
+                             - pSettings->pClipList[i]->uiBeginCutTime);
+                    }
+                }
+                else
+                {
+                    if(tempDuration < (fileProperties.uiClipDuration\
+                         - pSettings->pClipList[i]->uiBeginCutTime))
+                    {
+                        tempTimeScale = fileProperties.uiVideoTimeScale;
+                        tempDuration = (fileProperties.uiClipDuration\
+                             - pSettings->pClipList[i]->uiBeginCutTime);
+                    }
+                }
+            }
+        }
+        if(pSettings->pClipList[i]->FileType == M4VIDEOEDITING_kFileType_ARGB8888)
+        {
+            /*the timescale is 30 for PTO3GP*/
+            *pTargetedTimeScale = 30;
+            return M4NO_ERROR;
+
+        }
+    }
+
+    if(tempTimeScale >= 30)/* Enforce a minimum time scale of 30: a smaller time scale
+    can lead to an infinite loop in the shell encoder */
+    {
+        *pTargetedTimeScale = tempTimeScale;
+    }
+    else
+    {
+        *pTargetedTimeScale = 30;
+    }
+
+    return M4NO_ERROR;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype    M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
+ *                                                    M4VIFI_ImagePlane *PlaneIn,
+ *                                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                                    M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief    This function applies a color effect on an input YUV420 planar frame
+ * @note
+ * @param    pFunctionContext(IN) Contains which color to apply (not very clean ...)
+ * @param    PlaneIn            (IN) Input YUV420 planar
+ * @param    PlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-100)
+ * @param    uiEffectKind    (IN) Unused
+ *
+ * @return    M4VIFI_OK:    No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_externalVideoEffectColor(M4OSA_Void *pFunctionContext,
+                                             M4VIFI_ImagePlane *PlaneIn,
+                                             M4VIFI_ImagePlane *PlaneOut,
+                                             M4VSS3GPP_ExternalProgress *pProgress,
+                                             M4OSA_UInt32 uiEffectKind)
+{
+    M4VIFI_Int32 plane_number;
+    M4VIFI_UInt32 i,j;
+    M4VIFI_UInt8 *p_buf_src, *p_buf_dest;
+    M4xVSS_ColorStruct* ColorContext = (M4xVSS_ColorStruct*)pFunctionContext;
+
+    for (plane_number = 0; plane_number < 3; plane_number++)
+    {
+        p_buf_src = &(PlaneIn[plane_number].pac_data[PlaneIn[plane_number].u_topleft]);
+        p_buf_dest = &(PlaneOut[plane_number].pac_data[PlaneOut[plane_number].u_topleft]);
+        for (i = 0; i < PlaneOut[plane_number].u_height; i++)
+        {
+            /**
+             * Chrominance */
+            if(plane_number==1 || plane_number==2)
+            {
+                //switch ((M4OSA_UInt32)pFunctionContext)
+                // commented out because a dedicated structure now holds the effect context
+                switch (ColorContext->colorEffectType)
+                {
+                    case M4xVSS_kVideoEffectType_BlackAndWhite:
+                        M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                         PlaneIn[plane_number].u_width, 128);
+                        break;
+                    case M4xVSS_kVideoEffectType_Pink:
+                        M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                         PlaneIn[plane_number].u_width, 255);
+                        break;
+                    case M4xVSS_kVideoEffectType_Green:
+                        M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                         PlaneIn[plane_number].u_width, 0);
+                        break;
+                    case M4xVSS_kVideoEffectType_Sepia:
+                        if(plane_number==1)
+                        {
+                            M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                             PlaneIn[plane_number].u_width, 117);
+                        }
+                        else
+                        {
+                            M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                             PlaneIn[plane_number].u_width, 139);
+                        }
+                        break;
+                    case M4xVSS_kVideoEffectType_Negative:
+                        M4OSA_memcpy((M4OSA_MemAddr8)p_buf_dest,
+                         (M4OSA_MemAddr8)p_buf_src ,PlaneOut[plane_number].u_width);
+                        break;
+
+                    case M4xVSS_kVideoEffectType_ColorRGB16:
+                        {
+                            M4OSA_UInt16 r = 0,g = 0,b = 0,y = 0,u = 0,v = 0;
+
+                            /*first get the r, g, b*/
+                            b = (ColorContext->rgb16ColorData &  0x001f);
+                            g = (ColorContext->rgb16ColorData &  0x07e0)>>5;
+                            r = (ColorContext->rgb16ColorData &  0xf800)>>11;
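+                            /* rgb16ColorData is interpreted as RGB565 (r: bits 15-11,
+                               g: bits 10-5, b: bits 4-0); e.g. 0xF800 yields
+                               r=31, g=0, b=0 (pure red) */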
+
+                            /*keep y, but replace u and v*/
+                            if(plane_number==1)
+                            {
+                                /*then convert to u*/
+                                u = U16(r, g, b);
+                                M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                                 PlaneIn[plane_number].u_width, (M4OSA_UInt8)u);
+                            }
+                            if(plane_number==2)
+                            {
+                                /*then convert to v*/
+                                v = V16(r, g, b);
+                                M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                                 PlaneIn[plane_number].u_width, (M4OSA_UInt8)v);
+                            }
+                        }
+                        break;
+                    case M4xVSS_kVideoEffectType_Gradient:
+                        {
+                            M4OSA_UInt16 r = 0,g = 0,b = 0,y = 0,u = 0,v = 0;
+
+                            /*first get the r, g, b*/
+                            b = (ColorContext->rgb16ColorData &  0x001f);
+                            g = (ColorContext->rgb16ColorData &  0x07e0)>>5;
+                            r = (ColorContext->rgb16ColorData &  0xf800)>>11;
+
+                            /*for color gradation*/
+                            b = (M4OSA_UInt16)( b - ((b*i)/PlaneIn[plane_number].u_height));
+                            g = (M4OSA_UInt16)(g - ((g*i)/PlaneIn[plane_number].u_height));
+                            r = (M4OSA_UInt16)(r - ((r*i)/PlaneIn[plane_number].u_height));
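+                            /* Linear vertical fade of the colour: at row i each component
+                               is scaled by (1 - i/height), so the tint goes from full
+                               intensity at the top row to ~0 at the bottom row */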
+
+                            /*keep y, but replace u and v*/
+                            if(plane_number==1)
+                            {
+                                /*then convert to u*/
+                                u = U16(r, g, b);
+                                M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                                 PlaneIn[plane_number].u_width, (M4OSA_UInt8)u);
+                            }
+                            if(plane_number==2)
+                            {
+                                /*then convert to v*/
+                                v = V16(r, g, b);
+                                M4OSA_memset((M4OSA_MemAddr8)p_buf_dest,
+                                 PlaneIn[plane_number].u_width, (M4OSA_UInt8)v);
+                            }
+                        }
+                        break;
+                    default:
+                        break;
+                }
+            }
+            /**
+             * Luminance */
+            else
+            {
+                //switch ((M4OSA_UInt32)pFunctionContext)
+                // commented out because a dedicated structure now holds the effect context
+                switch (ColorContext->colorEffectType)
+                {
+                case M4xVSS_kVideoEffectType_Negative:
+                    for(j=0;j<PlaneOut[plane_number].u_width;j++)
+                    {
+                        p_buf_dest[j] = 255 - p_buf_src[j];
+                    }
+                    break;
+                default:
+                    M4OSA_memcpy((M4OSA_MemAddr8)p_buf_dest,
+                     (M4OSA_MemAddr8)p_buf_src ,PlaneOut[plane_number].u_width);
+                    break;
+                }
+            }
+            p_buf_src += PlaneIn[plane_number].u_stride;
+            p_buf_dest += PlaneOut[plane_number].u_stride;
+        }
+    }
+
+    return M4VIFI_OK;
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4VSS3GPP_externalVideoEffectFraming(M4OSA_Void *userData,
+ *                                                    M4VIFI_ImagePlane PlaneIn[3],
+ *                                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                                    M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief    This function adds a fixed or animated image on top of an input YUV420 planar frame
+ * @note
+ * @param    userData        (IN) Contains a pointer to the framing settings structure
+ * @param    PlaneIn            (IN) Input YUV420 planar
+ * @param    PlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-100)
+ * @param    uiEffectKind    (IN) Unused
+ *
+ * @return    M4VIFI_OK:    No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_externalVideoEffectFraming( M4OSA_Void *userData,
+                                                M4VIFI_ImagePlane PlaneIn[3],
+                                                M4VIFI_ImagePlane *PlaneOut,
+                                                M4VSS3GPP_ExternalProgress *pProgress,
+                                                M4OSA_UInt32 uiEffectKind )
+{
+    M4VIFI_UInt32 x,y;
+
+    M4VIFI_UInt8 *p_in_Y = PlaneIn[0].pac_data;
+    M4VIFI_UInt8 *p_in_U = PlaneIn[1].pac_data;
+    M4VIFI_UInt8 *p_in_V = PlaneIn[2].pac_data;
+
+    M4xVSS_FramingStruct* Framing = M4OSA_NULL;
+    M4xVSS_FramingStruct* currentFraming = M4OSA_NULL;
+    M4VIFI_UInt8 *FramingRGB = M4OSA_NULL;
+
+    M4VIFI_UInt8 *p_out0;
+    M4VIFI_UInt8 *p_out1;
+    M4VIFI_UInt8 *p_out2;
+
+    M4VIFI_UInt32 topleft[2];
+
+    M4OSA_UInt8 transparent1 = (M4OSA_UInt8)((TRANSPARENT_COLOR & 0xFF00)>>8);
+    M4OSA_UInt8 transparent2 = (M4OSA_UInt8)TRANSPARENT_COLOR;
+
+#ifndef DECODE_GIF_ON_SAVING
+    Framing = (M4xVSS_FramingStruct *)userData;
+    currentFraming = (M4xVSS_FramingStruct *)Framing->pCurrent;
+    FramingRGB = Framing->FramingRgb->pac_data;
+#endif /*DECODE_GIF_ON_SAVING*/
+
+    /*FB*/
+#ifdef DECODE_GIF_ON_SAVING
+    M4OSA_ERR err;
+    Framing = (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
+#if 0
+    if(Framing == M4OSA_NULL)
+    {
+        ((M4xVSS_FramingContext*)userData)->clipTime = pProgress->uiOutputTime;
+        err = M4xVSS_internalDecodeGIF(userData);
+        if(M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4VSS3GPP_externalVideoEffectFraming:\
+             Error in M4xVSS_internalDecodeGIF: 0x%x", err);
+            return err;
+        }
+        Framing = (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
+        /* Initializes first GIF time */
+        ((M4xVSS_FramingContext*)userData)->current_gif_time = pProgress->uiOutputTime;
+    }
+#endif
+    currentFraming = (M4xVSS_FramingStruct *)Framing;
+    FramingRGB = Framing->FramingRgb->pac_data;
+#endif /*DECODE_GIF_ON_SAVING*/
+    /*end FB*/
+
+    /**
+     * Initialize input / output plane pointers */
+    p_in_Y += PlaneIn[0].u_topleft;
+    p_in_U += PlaneIn[1].u_topleft;
+    p_in_V += PlaneIn[2].u_topleft;
+
+    p_out0 = PlaneOut[0].pac_data;
+    p_out1 = PlaneOut[1].pac_data;
+    p_out2 = PlaneOut[2].pac_data;
+
+    /**
+     * Depending on time, initialize Framing frame to use */
+    if(Framing->previousClipTime == -1)
+    {
+        Framing->previousClipTime = pProgress->uiOutputTime;
+    }
+
+    /**
+     * If the current clip time has reached the duration of one frame of the framing picture,
+     * we need to step to the next framing picture */
+#if 0
+    if(((M4xVSS_FramingContext*)userData)->b_animated == M4OSA_TRUE)
+    {
+        while((((M4xVSS_FramingContext*)userData)->current_gif_time + currentFraming->duration)\
+         < pProgress->uiOutputTime)
+        {
+#ifdef DECODE_GIF_ON_SAVING
+            ((M4xVSS_FramingContext*)userData)->clipTime = pProgress->uiOutputTime;
+            err = M4xVSS_internalDecodeGIF(userData);
+            if(M4NO_ERROR != err)
+            {
+                M4OSA_TRACE1_1("M4VSS3GPP_externalVideoEffectFraming:\
+                 Error in M4xVSS_internalDecodeGIF: 0x%x", err);
+                return err;
+            }
+            if(currentFraming->duration != 0)
+            {
+                ((M4xVSS_FramingContext*)userData)->current_gif_time += currentFraming->duration;
+            }
+            else
+            {
+                ((M4xVSS_FramingContext*)userData)->current_gif_time \
+                 += pProgress->uiOutputTime - Framing->previousClipTime;
+            }
+            Framing = (M4xVSS_FramingStruct *)((M4xVSS_FramingContext*)userData)->aFramingCtx;
+            currentFraming = (M4xVSS_FramingStruct *)Framing;
+            FramingRGB = Framing->FramingRgb->pac_data;
+#else
+            Framing->pCurrent = currentFraming->pNext;
+            currentFraming = Framing->pCurrent;
+#endif /*DECODE_GIF_ON_SAVING*/
+        }
+    }
+#endif
+
+    Framing->previousClipTime = pProgress->uiOutputTime;
+    FramingRGB = currentFraming->FramingRgb->pac_data;
+    topleft[0] = currentFraming->topleft_x;
+    topleft[1] = currentFraming->topleft_y;
+
+    for( x=0 ;x < PlaneIn[0].u_height ; x++)
+    {
+        for( y=0 ;y < PlaneIn[0].u_width ; y++)
+        {
+            /**
+             * To handle framing with input size != output size:
+             * the framing is applied only where the coordinates fall inside the framing
+             * rectangle (top-left position + framing size) of the input plane */
+            if( y < (topleft[0] + currentFraming->FramingYuv[0].u_width)  &&
+                y >= topleft[0] &&
+                x < (topleft[1] + currentFraming->FramingYuv[0].u_height) &&
+                x >= topleft[1])
+            {
+                /*Alpha blending support*/
+                M4OSA_Float alphaBlending = 1;
+                M4xVSS_internalEffectsAlphaBlending*  alphaBlendingStruct =\
+                 (M4xVSS_internalEffectsAlphaBlending*)\
+                    ((M4xVSS_FramingContext*)userData)->alphaBlendingStruct;
+
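+                /* Piecewise alpha ramp driven by uiProgress (which appears to run 0..1000 here):
+                   - fade-in  phase [0, m_fadeInTime*10):      m_start  -> m_middle
+                   - steady   phase:                           m_middle
+                   - fade-out phase (last m_fadeOutTime*10):   m_middle -> m_end
+                   m_start/m_middle/m_end appear to be expressed in percent, hence the /100 */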
+                if(alphaBlendingStruct != M4OSA_NULL)
+                {
+                    if(pProgress->uiProgress >= 0 && pProgress->uiProgress \
+                    < (M4OSA_UInt32)(alphaBlendingStruct->m_fadeInTime*10))
+                    {
+                        alphaBlending = ((M4OSA_Float)(alphaBlendingStruct->m_middle\
+                         - alphaBlendingStruct->m_start)\
+                            *pProgress->uiProgress/(alphaBlendingStruct->m_fadeInTime*10));
+                        alphaBlending += alphaBlendingStruct->m_start;
+                        alphaBlending /= 100;
+                    }
+                    else if(pProgress->uiProgress >= (M4OSA_UInt32)(alphaBlendingStruct->\
+                    m_fadeInTime*10) && pProgress->uiProgress < 1000\
+                     - (M4OSA_UInt32)(alphaBlendingStruct->m_fadeOutTime*10))
+                    {
+                        alphaBlending = (M4OSA_Float)\
+                        ((M4OSA_Float)alphaBlendingStruct->m_middle/100);
+                    }
+                    else if(pProgress->uiProgress >= 1000 - (M4OSA_UInt32)\
+                    (alphaBlendingStruct->m_fadeOutTime*10))
+                    {
+                        alphaBlending = ((M4OSA_Float)(alphaBlendingStruct->m_middle \
+                        - alphaBlendingStruct->m_end))*(1000 - pProgress->uiProgress)\
+                        /(alphaBlendingStruct->m_fadeOutTime*10);
+                        alphaBlending += alphaBlendingStruct->m_end;
+                        alphaBlending /= 100;
+                    }
+                }
+                /**/
+
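+                /* FramingRGB walks the framing bitmap two bytes per pixel; a pixel whose two
+                   bytes match TRANSPARENT_COLOR is treated as fully transparent, so the input
+                   video pixel is copied through unchanged */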
+                if((*(FramingRGB)==transparent1) && (*(FramingRGB+1)==transparent2))
+                {
+                    *( p_out0+y+x*PlaneOut[0].u_stride)=(*(p_in_Y+y+x*PlaneIn[0].u_stride));
+                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=
+                        (*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride));
+                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=
+                        (*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride));
+                }
+                else
+                {
+                    *( p_out0+y+x*PlaneOut[0].u_stride)=
+                        (*(currentFraming->FramingYuv[0].pac_data+(y-topleft[0])\
+                            +(x-topleft[1])*currentFraming->FramingYuv[0].u_stride))*alphaBlending;
+                    *( p_out0+y+x*PlaneOut[0].u_stride)+=
+                        (*(p_in_Y+y+x*PlaneIn[0].u_stride))*(1-alphaBlending);
+                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=
+                        (*(currentFraming->FramingYuv[1].pac_data+((y-topleft[0])>>1)\
+                            +((x-topleft[1])>>1)*currentFraming->FramingYuv[1].u_stride))\
+                                *alphaBlending;
+                    *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)+=
+                        (*(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride))*(1-alphaBlending);
+                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=
+                        (*(currentFraming->FramingYuv[2].pac_data+((y-topleft[0])>>1)\
+                            +((x-topleft[1])>>1)*currentFraming->FramingYuv[2].u_stride))\
+                                *alphaBlending;
+                    *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)+=
+                        (*(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride))*(1-alphaBlending);
+                }
+                if( PlaneIn[0].u_width < (topleft[0] + currentFraming->FramingYuv[0].u_width) &&
+                    y == PlaneIn[0].u_width-1)
+                {
+                    FramingRGB = FramingRGB + 2 \
+                        * (topleft[0] + currentFraming->FramingYuv[0].u_width \
+                            - PlaneIn[0].u_width + 1);
+                }
+                else
+                {
+                    FramingRGB = FramingRGB + 2;
+                }
+            }
+            /**
+             * Just copy input plane to output plane */
+            else
+            {
+                *( p_out0+y+x*PlaneOut[0].u_stride)=*(p_in_Y+y+x*PlaneIn[0].u_stride);
+                *( p_out1+(y>>1)+(x>>1)*PlaneOut[1].u_stride)=
+                    *(p_in_U+(y>>1)+(x>>1)*PlaneIn[1].u_stride);
+                *( p_out2+(y>>1)+(x>>1)*PlaneOut[2].u_stride)=
+                    *(p_in_V+(y>>1)+(x>>1)*PlaneIn[2].u_stride);
+            }
+        }
+    }
+
+#ifdef DECODE_GIF_ON_SAVING
+#if 0
+    if(pProgress->bIsLast == M4OSA_TRUE
+        && (M4OSA_Bool)((M4xVSS_FramingContext*)userData)->b_IsFileGif == M4OSA_TRUE)
+    {
+        M4xVSS_internalDecodeGIF_Cleaning((M4xVSS_FramingContext*)userData);
+    }
+#endif
+#endif /*DECODE_GIF_ON_SAVING*/
+
+    return M4VIFI_OK;
+}
+
+
+/**
+ ******************************************************************************
+ * prototype    M4VSS3GPP_externalVideoEffectFifties(M4OSA_Void *pUserData,
+ *                                                    M4VIFI_ImagePlane *pPlaneIn,
+ *                                                    M4VIFI_ImagePlane *pPlaneOut,
+ *                                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                                    M4OSA_UInt32 uiEffectKind)
+ *
+ * @brief    This function makes the video look as if it had been shot in the fifties
+ * @note
+ * @param    pUserData       (IN) Context
+ * @param    pPlaneIn        (IN) Input YUV420 planar
+ * @param    pPlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-100)
+ * @param    uiEffectKind    (IN) Unused
+ *
+ * @return    M4VIFI_OK:            No error
+ * @return  M4ERR_PARAMETER:    pFiftiesData, pPlaneOut or pProgress are NULL (DEBUG only)
+ ******************************************************************************
+ */
+M4OSA_ERR M4VSS3GPP_externalVideoEffectFifties( M4OSA_Void *pUserData,
+                                                M4VIFI_ImagePlane *pPlaneIn,
+                                                M4VIFI_ImagePlane *pPlaneOut,
+                                                M4VSS3GPP_ExternalProgress *pProgress,
+                                                M4OSA_UInt32 uiEffectKind )
+{
+    M4VIFI_UInt32 x, y, xShift;
+    M4VIFI_UInt8 *pInY = pPlaneIn[0].pac_data;
+    M4VIFI_UInt8 *pOutY, *pInYbegin;
+    M4VIFI_UInt8 *pInCr,* pOutCr;
+    M4VIFI_Int32 plane_number;
+
+    /* Internal context*/
+    M4xVSS_FiftiesStruct* p_FiftiesData = (M4xVSS_FiftiesStruct *)pUserData;
+
+    /* Check the inputs (debug only) */
+    M4OSA_DEBUG_IF2((p_FiftiesData == M4OSA_NULL),M4ERR_PARAMETER,
+         "xVSS: p_FiftiesData is M4OSA_NULL in M4VSS3GPP_externalVideoEffectFifties");
+    M4OSA_DEBUG_IF2((pPlaneOut == M4OSA_NULL),M4ERR_PARAMETER,
+         "xVSS: p_PlaneOut is M4OSA_NULL in M4VSS3GPP_externalVideoEffectFifties");
+    M4OSA_DEBUG_IF2((pProgress == M4OSA_NULL),M4ERR_PARAMETER,
+        "xVSS: p_Progress is M4OSA_NULL in M4VSS3GPP_externalVideoEffectFifties");
+
+    /* Initialize input / output plane pointers */
+    pInY += pPlaneIn[0].u_topleft;
+    pOutY = pPlaneOut[0].pac_data;
+    pInYbegin  = pInY;
+
+    /* Initialize the random */
+    if(p_FiftiesData->previousClipTime < 0)
+    {
+        M4OSA_randInit();
+        M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->shiftRandomValue), (pPlaneIn[0].u_height) >> 4);
+        M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->stripeRandomValue), (pPlaneIn[0].u_width)<< 2);
+        p_FiftiesData->previousClipTime = pProgress->uiOutputTime;
+    }
+
+    /* Choose random values if we have reached the duration of a partial effect */
+    else if( (pProgress->uiOutputTime - p_FiftiesData->previousClipTime)\
+         > p_FiftiesData->fiftiesEffectDuration)
+    {
+        M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->shiftRandomValue), (pPlaneIn[0].u_height) >> 4);
+        M4OSA_rand((M4OSA_Int32 *)&(p_FiftiesData->stripeRandomValue), (pPlaneIn[0].u_width)<< 2);
+        p_FiftiesData->previousClipTime = pProgress->uiOutputTime;
+    }
+
+    /* Put in Sepia the chrominance */
+    for (plane_number = 1; plane_number < 3; plane_number++)
+    {
+        pInCr  = pPlaneIn[plane_number].pac_data  + pPlaneIn[plane_number].u_topleft;
+        pOutCr = pPlaneOut[plane_number].pac_data + pPlaneOut[plane_number].u_topleft;
+
+        for (x = 0; x < pPlaneOut[plane_number].u_height; x++)
+        {
+            if (1 == plane_number)
+                M4OSA_memset((M4OSA_MemAddr8)pOutCr, pPlaneIn[plane_number].u_width,
+                     117); /* U value */
+            else
+                M4OSA_memset((M4OSA_MemAddr8)pOutCr, pPlaneIn[plane_number].u_width,
+                     139); /* V value */
+
+            pInCr  += pPlaneIn[plane_number].u_stride;
+            pOutCr += pPlaneOut[plane_number].u_stride;
+        }
+    }
+
+    /* Compute the new pixels values */
+    for( x = 0 ; x < pPlaneIn[0].u_height ; x++)
+    {
+        M4VIFI_UInt8 *p_outYtmp, *p_inYtmp;
+
+        /* Compute the xShift (random value) */
+        if (0 == (p_FiftiesData->shiftRandomValue % 5 ))
+            xShift = (x + p_FiftiesData->shiftRandomValue ) % (pPlaneIn[0].u_height - 1);
+        else
+            xShift = (x + (pPlaneIn[0].u_height - p_FiftiesData->shiftRandomValue) ) \
+                % (pPlaneIn[0].u_height - 1);
+
+        /* Initialize the pointers */
+        p_outYtmp = pOutY + 1;                                    /* yShift of 1 pixel */
+        p_inYtmp  = pInYbegin + (xShift * pPlaneIn[0].u_stride);  /* Apply the xShift */
+
+        for( y = 0 ; y < pPlaneIn[0].u_width ; y++)
+        {
+            /* Set Y value */
+            if (xShift > (pPlaneIn[0].u_height - 4))
+                *p_outYtmp = 40;        /* Add some horizontal black lines between the
+                                        two parts of the image */
+            else if ( y == p_FiftiesData->stripeRandomValue)
+                *p_outYtmp = 90;        /* Add a random vertical line for the bulk */
+            else
+                *p_outYtmp = *p_inYtmp;
+
+
+            /* Go to the next pixel */
+            p_outYtmp++;
+            p_inYtmp++;
+
+            /* Restart at the beginning of the line for the last pixel*/
+            if (y == (pPlaneIn[0].u_width - 2))
+                p_outYtmp = pOutY;
+        }
+
+        /* Go to the next line */
+        pOutY += pPlaneOut[0].u_stride;
+    }
+
+    return M4VIFI_OK;
+}
+
+/**
+ ******************************************************************************
+ * M4OSA_ERR M4VSS3GPP_externalVideoEffectZoom( )
+ * @brief    Zoom in/out video effect functions.
+ * @note    The external video function is used only if VideoEffectType is set to
+ * M4VSS3GPP_kVideoEffectType_ZoomIn or M4VSS3GPP_kVideoEffectType_ZoomOut.
+ *
+ * @param   pFunctionContext    (IN) The function context, previously set by the integrator
+ * @param    pInputPlanes        (IN) Input YUV420 image: pointer to an array of three valid
+ *                                    image planes (Y, U and V)
+ * @param    pOutputPlanes        (IN/OUT) Output (filtered) YUV420 image: pointer to an array of
+ *                                        three valid image planes (Y, U and V)
+ * @param    pProgress            (IN) Set of information about the video transition progress.
+ * @return    M4NO_ERROR:            No error
+ * @return    M4ERR_PARAMETER:    At least one parameter is M4OSA_NULL (debug only)
+ ******************************************************************************
+ */
+
+M4OSA_ERR M4VSS3GPP_externalVideoEffectZoom(
+    M4OSA_Void *pFunctionContext,
+    M4VIFI_ImagePlane *pInputPlanes,
+    M4VIFI_ImagePlane *pOutputPlanes,
+    M4VSS3GPP_ExternalProgress *pProgress,
+    M4OSA_UInt32 uiEffectKind
+)
+{
+    M4OSA_UInt32 boxWidth;
+    M4OSA_UInt32 boxHeight;
+    M4OSA_UInt32 boxPosX;
+    M4OSA_UInt32 boxPosY;
+    M4OSA_UInt32 ratio = 0;
+    /* factor of ~1.189207 (2^(1/4)) between consecutive ratios */
+    /* zoom between x1 and x16 */
+    M4OSA_UInt32 ratiotab[17] ={1024,1218,1448,1722,2048,2435,2896,3444,4096,4871,5793,\
+                                6889,8192,9742,11585,13777,16384};
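+    /* ratiotab[i] is approximately 1024 * 2^(i/4) (Q10 fixed point), i.e. the zoom factor
+       grows by ~1.189207 per step; e.g. ratiotab[4] = 2048 corresponds to a 2x zoom and
+       ratiotab[16] = 16384 to a 16x zoom */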
+    M4OSA_UInt32 ik;
+
+    M4VIFI_ImagePlane boxPlane[3];
+
+    if(M4xVSS_kVideoEffectType_ZoomOut == (M4OSA_UInt32)pFunctionContext)
+    {
+        //ratio = 16 - (15 * pProgress->uiProgress)/1000;
+        ratio = 16 - pProgress->uiProgress / 66 ;
+    }
+    else if(M4xVSS_kVideoEffectType_ZoomIn == (M4OSA_UInt32)pFunctionContext)
+    {
+        //ratio = 1 + (15 * pProgress->uiProgress)/1000;
+        ratio = 1 + pProgress->uiProgress / 66 ;
+    }
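+
+    /* uiProgress appears to run from 0 to 1000 (see the /1000 normalisations elsewhere in
+       this file), so uiProgress/66 sweeps roughly 0..15 and ratio goes 16->1 for zoom out
+       and 1->16 for zoom in over the effect duration */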
+
+    for(ik=0;ik<3;ik++){
+
+        boxPlane[ik].u_stride = pInputPlanes[ik].u_stride;
+        boxPlane[ik].pac_data = pInputPlanes[ik].pac_data;
+
+        boxHeight = ( pInputPlanes[ik].u_height << 10 ) / ratiotab[ratio];
+        boxWidth = ( pInputPlanes[ik].u_width << 10 ) / ratiotab[ratio];
+        boxPlane[ik].u_height = (boxHeight)&(~1);
+        boxPlane[ik].u_width = (boxWidth)&(~1);
+
+        boxPosY = (pInputPlanes[ik].u_height >> 1) - (boxPlane[ik].u_height >> 1);
+        boxPosX = (pInputPlanes[ik].u_width >> 1) - (boxPlane[ik].u_width >> 1);
+        boxPlane[ik].u_topleft = boxPosY * boxPlane[ik].u_stride + boxPosX;
+    }
+
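+    /* boxPlane describes a centred crop of the input whose size is input_size / zoom_factor
+       (kept even); resizing that crop back to the full output size produces the zoom */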
+    M4VIFI_ResizeBilinearYUV420toYUV420(M4OSA_NULL, (M4VIFI_ImagePlane*)&boxPlane, pOutputPlanes);
+
+    /**
+     * Return */
+    return(M4NO_ERROR);
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_AlphaMagic( M4OSA_Void *userData,
+ *                                    M4VIFI_ImagePlane PlaneIn1[3],
+ *                                    M4VIFI_ImagePlane PlaneIn2[3],
+ *                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                    M4OSA_UInt32 uiTransitionKind)
+ *
+ * @brief    This function applies an alpha magic transition between two input YUV420 planar frames
+ * @note
+ * @param    userData        (IN) Contains a pointer to a settings structure
+ * @param    PlaneIn1        (IN) Input YUV420 planar from video 1
+ * @param    PlaneIn2        (IN) Input YUV420 planar from video 2
+ * @param    PlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-100)
+ * @param    uiTransitionKind(IN) Unused
+ *
+ * @return    M4VIFI_OK:    No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_AlphaMagic( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+                             M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+                             M4VSS3GPP_ExternalProgress *pProgress, M4OSA_UInt32 uiTransitionKind)
+{
+
+    M4OSA_ERR err;
+
+    M4xVSS_internal_AlphaMagicSettings* alphaContext;
+    M4VIFI_Int32 alphaProgressLevel;
+
+    M4VIFI_ImagePlane* planeswap;
+    M4VIFI_UInt32 x,y;
+
+    M4VIFI_UInt8 *p_out0;
+    M4VIFI_UInt8 *p_out1;
+    M4VIFI_UInt8 *p_out2;
+    M4VIFI_UInt8 *alphaMask;
+    /* "Old image" */
+    M4VIFI_UInt8 *p_in1_Y;
+    M4VIFI_UInt8 *p_in1_U;
+    M4VIFI_UInt8 *p_in1_V;
+    /* "New image" */
+    M4VIFI_UInt8 *p_in2_Y;
+    M4VIFI_UInt8 *p_in2_U;
+    M4VIFI_UInt8 *p_in2_V;
+
+    err = M4NO_ERROR;
+
+    alphaContext = (M4xVSS_internal_AlphaMagicSettings*)userData;
+
+    alphaProgressLevel = (pProgress->uiProgress * 255)/1000;
+
+    if( alphaContext->isreverse != M4OSA_FALSE)
+    {
+        alphaProgressLevel = 255 - alphaProgressLevel;
+        planeswap = PlaneIn1;
+        PlaneIn1 = PlaneIn2;
+        PlaneIn2 = planeswap;
+    }
+
+    p_out0 = PlaneOut[0].pac_data;
+    p_out1 = PlaneOut[1].pac_data;
+    p_out2 = PlaneOut[2].pac_data;
+
+    alphaMask = alphaContext->pPlane->pac_data;
+
+    /* "Old image" */
+    p_in1_Y = PlaneIn1[0].pac_data;
+    p_in1_U = PlaneIn1[1].pac_data;
+    p_in1_V = PlaneIn1[2].pac_data;
+    /* "New image" */
+    p_in2_Y = PlaneIn2[0].pac_data;
+    p_in2_U = PlaneIn2[1].pac_data;
+    p_in2_V = PlaneIn2[2].pac_data;
+
+    /**
+     * For each row ... */
+    for( y=0; y<PlaneOut->u_height; y++ )
+    {
+        /**
+         * ... and each column of the alpha mask */
+        for( x=0; x<PlaneOut->u_width; x++ )
+        {
+            /**
+             * If the value of the current pixel of the alpha mask is greater than the current
+             * progress level (the progress is normalized to [0-255]) */
+            if( alphaProgressLevel < alphaMask[x+y*PlaneOut->u_width] )
+            {
+                /* We keep "old image" in output plane */
+                *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in1_Y+x+y*PlaneIn1[0].u_stride);
+                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
+                    *(p_in1_U+(x>>1)+(y>>1)*PlaneIn1[1].u_stride);
+                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+                    *(p_in1_V+(x>>1)+(y>>1)*PlaneIn1[2].u_stride);
+            }
+            else
+            {
+                /* We take "new image" in output plane */
+                *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in2_Y+x+y*PlaneIn2[0].u_stride);
+                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
+                    *(p_in2_U+(x>>1)+(y>>1)*PlaneIn2[1].u_stride);
+                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+                    *(p_in2_V+(x>>1)+(y>>1)*PlaneIn2[2].u_stride);
+            }
+        }
+    }
+
+    return(err);
+}
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_AlphaMagicBlending( M4OSA_Void *userData,
+ *                                    M4VIFI_ImagePlane PlaneIn1[3],
+ *                                    M4VIFI_ImagePlane PlaneIn2[3],
+ *                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                    M4OSA_UInt32 uiTransitionKind)
+ *
+ * @brief    This function applies an alpha magic transition with blending between two input
+ *            YUV420 planar frames
+ * @note
+ * @param    userData        (IN) Contains a pointer to a settings structure
+ * @param    PlaneIn1        (IN) Input YUV420 planar from video 1
+ * @param    PlaneIn2        (IN) Input YUV420 planar from video 2
+ * @param    PlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-100)
+ * @param    uiTransitionKind(IN) Unused
+ *
+ * @return    M4VIFI_OK:    No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_AlphaMagicBlending( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+                                     M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+                                     M4VSS3GPP_ExternalProgress *pProgress,
+                                     M4OSA_UInt32 uiTransitionKind)
+{
+    M4OSA_ERR err;
+
+    M4xVSS_internal_AlphaMagicSettings* alphaContext;
+    M4VIFI_Int32 alphaProgressLevel;
+    M4VIFI_Int32 alphaBlendLevelMin;
+    M4VIFI_Int32 alphaBlendLevelMax;
+    M4VIFI_Int32 alphaBlendRange;
+
+    M4VIFI_ImagePlane* planeswap;
+    M4VIFI_UInt32 x,y;
+    M4VIFI_Int32 alphaMaskValue;
+
+    M4VIFI_UInt8 *p_out0;
+    M4VIFI_UInt8 *p_out1;
+    M4VIFI_UInt8 *p_out2;
+    M4VIFI_UInt8 *alphaMask;
+    /* "Old image" */
+    M4VIFI_UInt8 *p_in1_Y;
+    M4VIFI_UInt8 *p_in1_U;
+    M4VIFI_UInt8 *p_in1_V;
+    /* "New image" */
+    M4VIFI_UInt8 *p_in2_Y;
+    M4VIFI_UInt8 *p_in2_U;
+    M4VIFI_UInt8 *p_in2_V;
+
+
+    err = M4NO_ERROR;
+
+    alphaContext = (M4xVSS_internal_AlphaMagicSettings*)userData;
+
+    alphaProgressLevel = (pProgress->uiProgress * 255)/1000;
+
+    if( alphaContext->isreverse != M4OSA_FALSE)
+    {
+        alphaProgressLevel = 255 - alphaProgressLevel;
+        planeswap = PlaneIn1;
+        PlaneIn1 = PlaneIn2;
+        PlaneIn2 = planeswap;
+    }
+
+    alphaBlendLevelMin = alphaProgressLevel-alphaContext->blendingthreshold;
+
+    alphaBlendLevelMax = alphaProgressLevel+alphaContext->blendingthreshold;
+
+    alphaBlendRange = (alphaContext->blendingthreshold)*2;
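+
+    /* Pixels whose mask value lies inside [alphaBlendLevelMin, alphaBlendLevelMax] are
+       blended; the band is centred on the current progress level and is
+       2*blendingthreshold wide */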
+
+    p_out0 = PlaneOut[0].pac_data;
+    p_out1 = PlaneOut[1].pac_data;
+    p_out2 = PlaneOut[2].pac_data;
+
+    alphaMask = alphaContext->pPlane->pac_data;
+
+    /* "Old image" */
+    p_in1_Y = PlaneIn1[0].pac_data;
+    p_in1_U = PlaneIn1[1].pac_data;
+    p_in1_V = PlaneIn1[2].pac_data;
+    /* "New image" */
+    p_in2_Y = PlaneIn2[0].pac_data;
+    p_in2_U = PlaneIn2[1].pac_data;
+    p_in2_V = PlaneIn2[2].pac_data;
+
+    /* apply Alpha Magic on each pixel */
+    for( y=0; y<PlaneOut->u_height; y++ )
+    {
+        for( x=0; x<PlaneOut->u_width; x++ )
+        {
+            alphaMaskValue = alphaMask[x+y*PlaneOut->u_width];
+            if( alphaBlendLevelMax < alphaMaskValue )
+            {
+                /* We keep "old image" in output plane */
+                *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in1_Y+x+y*PlaneIn1[0].u_stride);
+                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
+                    *(p_in1_U+(x>>1)+(y>>1)*PlaneIn1[1].u_stride);
+                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+                    *(p_in1_V+(x>>1)+(y>>1)*PlaneIn1[2].u_stride);
+            }
+            else if( (alphaBlendLevelMin < alphaMaskValue)&&
+                    (alphaMaskValue <= alphaBlendLevelMax ) )
+            {
+                /* We blend "old and new image" in output plane */
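+                /* Linear interpolation: the two weights (alphaMaskValue - alphaBlendLevelMin)
+                   and (alphaBlendLevelMax - alphaMaskValue) always sum to alphaBlendRange,
+                   so the result is a weighted average of the old and new pixels */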
+                *( p_out0+x+y*PlaneOut[0].u_stride)=(M4VIFI_UInt8)
+                    (( (alphaMaskValue-alphaBlendLevelMin)*( *(p_in1_Y+x+y*PlaneIn1[0].u_stride))
+                        +(alphaBlendLevelMax-alphaMaskValue)\
+                            *( *(p_in2_Y+x+y*PlaneIn2[0].u_stride)) )/alphaBlendRange );
+
+                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=(M4VIFI_UInt8)\
+                    (( (alphaMaskValue-alphaBlendLevelMin)*( *(p_in1_U+(x>>1)+(y>>1)\
+                        *PlaneIn1[1].u_stride))
+                            +(alphaBlendLevelMax-alphaMaskValue)*( *(p_in2_U+(x>>1)+(y>>1)\
+                                *PlaneIn2[1].u_stride)) )/alphaBlendRange );
+
+                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+                    (M4VIFI_UInt8)(( (alphaMaskValue-alphaBlendLevelMin)\
+                        *( *(p_in1_V+(x>>1)+(y>>1)*PlaneIn1[2].u_stride))
+                                +(alphaBlendLevelMax-alphaMaskValue)*( *(p_in2_V+(x>>1)+(y>>1)\
+                                    *PlaneIn2[2].u_stride)) )/alphaBlendRange );
+
+            }
+            else
+            {
+                /* We take "new image" in output plane */
+                *( p_out0+x+y*PlaneOut[0].u_stride)=*(p_in2_Y+x+y*PlaneIn2[0].u_stride);
+                *( p_out1+(x>>1)+(y>>1)*PlaneOut[1].u_stride)=
+                    *(p_in2_U+(x>>1)+(y>>1)*PlaneIn2[1].u_stride);
+                *( p_out2+(x>>1)+(y>>1)*PlaneOut[2].u_stride)=
+                    *(p_in2_V+(x>>1)+(y>>1)*PlaneIn2[2].u_stride);
+            }
+        }
+    }
+
+    return(err);
+}
+
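+/* Returns the address of the sample at column x, row y of an image plane:
+   pac_data + u_topleft + y * u_stride + x */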
+#define M4XXX_SampleAddress(plane, x, y)  ( (plane).pac_data + (plane).u_topleft + (y)\
+     * (plane).u_stride + (x) )
+
+static void M4XXX_CopyPlane(M4VIFI_ImagePlane* dest, M4VIFI_ImagePlane* source)
+{
+    M4OSA_UInt32    height, width, sourceStride, destStride, y;
+    M4OSA_MemAddr8    sourceWalk, destWalk;
+
+    /* cache the vars used in the loop so as to avoid them being repeatedly fetched and
+     recomputed from memory. */
+    height = dest->u_height;
+    width = dest->u_width;
+
+    sourceWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(*source, 0, 0);
+    sourceStride = source->u_stride;
+
+    destWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(*dest, 0, 0);
+    destStride = dest->u_stride;
+
+    for (y=0; y<height; y++)
+    {
+        M4OSA_memcpy((M4OSA_MemAddr8)destWalk, (M4OSA_MemAddr8)sourceWalk, width);
+        destWalk += destStride;
+        sourceWalk += sourceStride;
+    }
+}
+
+static M4OSA_ERR M4xVSS_VerticalSlideTransition(M4VIFI_ImagePlane* topPlane,
+                                                M4VIFI_ImagePlane* bottomPlane,
+                                                M4VIFI_ImagePlane *PlaneOut,
+                                                M4OSA_UInt32    shiftUV)
+{
+    M4OSA_UInt32 i;
+
+    /* Do three loops, one for each plane type, in order to avoid having too many buffers
+    "hot" at the same time (better for cache). */
+    for (i=0; i<3; i++)
+    {
+        M4OSA_UInt32    topPartHeight, bottomPartHeight, width, sourceStride, destStride, y;
+        M4OSA_MemAddr8    sourceWalk, destWalk;
+
+        /* cache the vars used in the loop so as to avoid them being repeatedly fetched and
+         recomputed from memory. */
+        if (0 == i) /* Y plane */
+        {
+            bottomPartHeight = 2*shiftUV;
+        }
+        else /* U and V planes */
+        {
+            bottomPartHeight = shiftUV;
+        }
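+        /* shiftUV is computed by the caller on the chroma plane, so the luma plane
+           moves by twice that amount */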
+        topPartHeight = PlaneOut[i].u_height - bottomPartHeight;
+        width = PlaneOut[i].u_width;
+
+        sourceWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(topPlane[i], 0, bottomPartHeight);
+        sourceStride = topPlane[i].u_stride;
+
+        destWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(PlaneOut[i], 0, 0);
+        destStride = PlaneOut[i].u_stride;
+
+        /* First the part from the top source clip frame. */
+        for (y=0; y<topPartHeight; y++)
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8)destWalk, (M4OSA_MemAddr8)sourceWalk, width);
+            destWalk += destStride;
+            sourceWalk += sourceStride;
+        }
+
+        /* and now change the vars to copy the part from the bottom source clip frame. */
+        sourceWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(bottomPlane[i], 0, 0);
+        sourceStride = bottomPlane[i].u_stride;
+
+        /* destWalk is already at M4XXX_SampleAddress(PlaneOut[i], 0, topPartHeight) */
+
+        for (y=0; y<bottomPartHeight; y++)
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8)destWalk, (M4OSA_MemAddr8)sourceWalk, width);
+            destWalk += destStride;
+            sourceWalk += sourceStride;
+        }
+    }
+    return M4NO_ERROR;
+}
+
+static M4OSA_ERR M4xVSS_HorizontalSlideTransition(M4VIFI_ImagePlane* leftPlane,
+                                                  M4VIFI_ImagePlane* rightPlane,
+                                                  M4VIFI_ImagePlane *PlaneOut,
+                                                  M4OSA_UInt32    shiftUV)
+{
+    M4OSA_UInt32 i, y;
+    /* If we shifted by exactly 0, or by the width of the target image, then we would get the left
+    frame or the right frame, respectively. These cases are not handled well by the general code
+    path, since they result in zero-size memcopies, so it is worth special-casing them. */
+
+    if (0 == shiftUV)    /* output left frame */
+    {
+        for (i = 0; i<3; i++) /* for each YUV plane */
+        {
+            M4XXX_CopyPlane(&(PlaneOut[i]), &(leftPlane[i]));
+        }
+
+        return M4NO_ERROR;
+    }
+
+    if (PlaneOut[1].u_width == shiftUV) /* output right frame */
+    {
+        for (i = 0; i<3; i++) /* for each YUV plane */
+        {
+            M4XXX_CopyPlane(&(PlaneOut[i]), &(rightPlane[i]));
+        }
+
+        return M4NO_ERROR;
+    }
+
+
+    /* Do three loops, one for each plane type, in order to avoid having too many buffers
+    "hot" at the same time (better for cache). */
+    for (i=0; i<3; i++)
+    {
+        M4OSA_UInt32    height, leftPartWidth, rightPartWidth;
+        M4OSA_UInt32    leftStride,    rightStride,    destStride;
+        M4OSA_MemAddr8    leftWalk,    rightWalk,    destWalkLeft, destWalkRight;
+
+        /* cache the vars used in the loop so as to avoid them being repeatedly fetched
+        and recomputed from memory. */
+        height = PlaneOut[i].u_height;
+
+        if (0 == i) /* Y plane */
+        {
+            rightPartWidth = 2*shiftUV;
+        }
+        else /* U and V planes */
+        {
+            rightPartWidth = shiftUV;
+        }
+        leftPartWidth = PlaneOut[i].u_width - rightPartWidth;
+
+        leftWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(leftPlane[i], rightPartWidth, 0);
+        leftStride = leftPlane[i].u_stride;
+
+        rightWalk = (M4OSA_MemAddr8)M4XXX_SampleAddress(rightPlane[i], 0, 0);
+        rightStride = rightPlane[i].u_stride;
+
+        destWalkLeft = (M4OSA_MemAddr8)M4XXX_SampleAddress(PlaneOut[i], 0, 0);
+        destWalkRight = (M4OSA_MemAddr8)M4XXX_SampleAddress(PlaneOut[i], leftPartWidth, 0);
+        destStride = PlaneOut[i].u_stride;
+
+        for (y=0; y<height; y++)
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8)destWalkLeft, (M4OSA_MemAddr8)leftWalk, leftPartWidth);
+            leftWalk += leftStride;
+
+            M4OSA_memcpy((M4OSA_MemAddr8)destWalkRight, (M4OSA_MemAddr8)rightWalk, rightPartWidth);
+            rightWalk += rightStride;
+
+            destWalkLeft += destStride;
+            destWalkRight += destStride;
+        }
+    }
+
+    return M4NO_ERROR;
+}
+
+
+M4OSA_ERR M4xVSS_SlideTransition( M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+                                  M4VIFI_ImagePlane PlaneIn2[3], M4VIFI_ImagePlane *PlaneOut,
+                                  M4VSS3GPP_ExternalProgress *pProgress,
+                                  M4OSA_UInt32 uiTransitionKind)
+{
+    M4xVSS_internal_SlideTransitionSettings* settings =
+         (M4xVSS_internal_SlideTransitionSettings*)userData;
+    M4OSA_UInt32    shiftUV;
+
+    M4OSA_TRACE1_0("inside M4xVSS_SlideTransition");
+    if ((M4xVSS_SlideTransition_RightOutLeftIn == settings->direction)
+        || (M4xVSS_SlideTransition_LeftOutRightIn == settings->direction) )
+    {
+        /* horizontal slide */
+        shiftUV = ((PlaneOut[1]).u_width * pProgress->uiProgress)/1000;
+        M4OSA_TRACE1_2("M4xVSS_SlideTransition upper: shiftUV = %d,progress = %d",
+            shiftUV,pProgress->uiProgress );
+        if (M4xVSS_SlideTransition_RightOutLeftIn == settings->direction)
+        {
+            /* Put the previous clip frame on the right and the next clip frame on the left, and
+            reverse shiftUV (since it is a shift from the left frame) so that we start out on the
+            right frame, i.e. the one coming from the previous clip. */
+            return M4xVSS_HorizontalSlideTransition(PlaneIn2, PlaneIn1, PlaneOut,
+                 (PlaneOut[1]).u_width - shiftUV);
+        }
+        else /* Left out, right in*/
+        {
+            return M4xVSS_HorizontalSlideTransition(PlaneIn1, PlaneIn2, PlaneOut, shiftUV);
+        }
+    }
+    else
+    {
+        /* vertical slide */
+        shiftUV = ((PlaneOut[1]).u_height * pProgress->uiProgress)/1000;
+        M4OSA_TRACE1_2("M4xVSS_SlideTransition bottom: shiftUV = %d,progress = %d",shiftUV,
+            pProgress->uiProgress );
+        if (M4xVSS_SlideTransition_TopOutBottomIn == settings->direction)
+        {
+            /* Put the previous clip frame on top and the next clip frame at the bottom. */
+            return M4xVSS_VerticalSlideTransition(PlaneIn1, PlaneIn2, PlaneOut, shiftUV);
+        }
+        else /* Bottom out, top in */
+        {
+            return M4xVSS_VerticalSlideTransition(PlaneIn2, PlaneIn1, PlaneOut,
+                (PlaneOut[1]).u_height - shiftUV);
+        }
+    }
+
+    /* Note: it might be worthwhile to do some parameter checking, see if dimensions match, etc.,
+    at least in debug mode. */
+}
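+
+/* Minimal illustrative sketch (compiled out): how uiProgress in [0..1000] maps
+   to the UV-plane shift used above, and why the shift is reversed for the
+   RightOutLeftIn direction so that the previous clip still fills the output at
+   progress 0. The helper name below is hypothetical. */
+#if 0
+static M4OSA_UInt32 M4xVSS_SketchHorizontalShiftUV(M4OSA_UInt32 uvWidth,
+                                                   M4OSA_UInt32 uiProgress, /* 0..1000 */
+                                                   M4OSA_Bool bRightOutLeftIn)
+{
+    M4OSA_UInt32 shiftUV = (uvWidth * uiProgress) / 1000;
+    return (M4OSA_TRUE == bRightOutLeftIn) ? (uvWidth - shiftUV) : shiftUV;
+}
+#endif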
+
+
+/**
+ ******************************************************************************
+ * prototype    M4xVSS_FadeBlackTransition(M4OSA_Void *userData,
+ *                                                    M4VIFI_ImagePlane PlaneIn1[3],
+ *                                                    M4VIFI_ImagePlane PlaneIn2[3],
+ *                                                    M4VIFI_ImagePlane *PlaneOut,
+ *                                                    M4VSS3GPP_ExternalProgress *pProgress,
+ *                                                    M4OSA_UInt32 uiTransitionKind)
+ *
+ * @brief    This function applies a fade to black on the outgoing clip, then a fade
+ *           from black on the incoming clip
+ * @note
+ * @param    userData        (IN) User data (not used by this transition)
+ * @param    PlaneIn1        (IN) First input YUV420 planar (outgoing clip)
+ * @param    PlaneIn2        (IN) Second input YUV420 planar (incoming clip)
+ * @param    PlaneOut        (IN/OUT) Output YUV420 planar
+ * @param    pProgress        (IN/OUT) Progress indication (0-1000)
+ * @param    uiTransitionKind    (IN) Unused
+ *
+ * @return    M4VIFI_OK:    No error
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_FadeBlackTransition(M4OSA_Void *userData, M4VIFI_ImagePlane PlaneIn1[3],
+                                     M4VIFI_ImagePlane PlaneIn2[3],
+                                     M4VIFI_ImagePlane *PlaneOut,
+                                     M4VSS3GPP_ExternalProgress *pProgress,
+                                     M4OSA_UInt32 uiTransitionKind)
+{
+    M4OSA_Int32 tmp = 0;
+    M4OSA_ERR err = M4NO_ERROR;
+
+
+    if((pProgress->uiProgress) < 500)
+    {
+        /**
+         * Compute where we are in the effect (scale is 0->1024) */
+        tmp = (M4OSA_Int32)((1.0 - ((M4OSA_Float)(pProgress->uiProgress*2)/1000)) * 1024 );
+
+        /**
+         * Apply the darkening effect */
+        err = M4VFL_modifyLumaWithScale( (M4ViComImagePlane*)PlaneIn1,
+             (M4ViComImagePlane*)PlaneOut, tmp, M4OSA_NULL);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4xVSS_FadeBlackTransition: M4VFL_modifyLumaWithScale returns\
+                 error 0x%x, returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR", err);
+            return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
+        }
+    }
+    else
+    {
+        /**
+         * Compute where we are in the effect (scale is 0->1024). */
+        tmp = (M4OSA_Int32)( (((M4OSA_Float)(((pProgress->uiProgress-500)*2))/1000)) * 1024 );
+
+        /**
+         * Apply the darkening effect */
+        err = M4VFL_modifyLumaWithScale((M4ViComImagePlane*)PlaneIn2,
+             (M4ViComImagePlane*)PlaneOut, tmp, M4OSA_NULL);
+        if (M4NO_ERROR != err)
+        {
+            M4OSA_TRACE1_1("M4xVSS_FadeBlackTransition:\
+                 M4VFL_modifyLumaWithScale returns error 0x%x,\
+                     returning M4VSS3GPP_ERR_LUMA_FILTER_ERROR", err);
+            return M4VSS3GPP_ERR_LUMA_FILTER_ERROR;
+        }
+    }
+
+
+    return M4VIFI_OK;
+}
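+
+/* Minimal illustrative sketch (compiled out): the luma scale fed to
+   M4VFL_modifyLumaWithScale() above runs over 0..1024. For uiProgress in
+   [0..1000], the first half maps 1024 -> 0 (fade the outgoing clip to black)
+   and the second half maps 0 -> 1024 (fade the incoming clip up from black).
+   The helper below only restates that mapping and is hypothetical. */
+#if 0
+static M4OSA_Int32 M4xVSS_SketchFadeLumaScale(M4OSA_UInt32 uiProgress)
+{
+    if (uiProgress < 500)
+        return (M4OSA_Int32)((1.0 - ((M4OSA_Float)(uiProgress * 2) / 1000)) * 1024);
+    return (M4OSA_Int32)(((M4OSA_Float)((uiProgress - 500) * 2) / 1000) * 1024);
+}
+#endif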
+
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalConvertToUTF8(M4OSA_Context pContext,
+ *                                                        M4OSA_Void* pBufferIn,
+ *                                                        M4OSA_Void* pBufferOut,
+ *                                                        M4OSA_UInt32* convertedSize)
+ *
+ * @brief    This function converts from the customer character format to UTF-8
+ * @note
+ * @param    pContext        (IN)    The integrator's own context
+ * @param    pBufferIn        (IN)    Buffer to convert
+ * @param    pBufferOut        (OUT)    Converted buffer
+ * @param    convertedSize    (OUT)    Size of the converted buffer
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalConvertToUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
+                                       M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+
+    pBufferOut = pBufferIn;
+    if(xVSS_context->UTFConversionContext.pConvToUTF8Fct != M4OSA_NULL
+        && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+    {
+        M4OSA_UInt32 ConvertedSize = xVSS_context->UTFConversionContext.m_TempOutConversionSize;
+
+        M4OSA_memset((M4OSA_MemAddr8)xVSS_context->UTFConversionContext.pTempOutConversionBuffer
+            ,(M4OSA_UInt32)xVSS_context->UTFConversionContext.m_TempOutConversionSize,0);
+
+        err = xVSS_context->UTFConversionContext.pConvToUTF8Fct((M4OSA_Void*)pBufferIn,
+            (M4OSA_UInt8*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
+                 (M4OSA_UInt32*)&ConvertedSize);
+        if(err == M4xVSSWAR_BUFFER_OUT_TOO_SMALL)
+        {
+            M4OSA_TRACE2_1("M4xVSS_internalConvertToUTF8: pConvToUTF8Fct return 0x%x",err);
+
+            /*free too small buffer*/
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->\
+                UTFConversionContext.pTempOutConversionBuffer);
+
+            /*re-allocate the buffer*/
+            xVSS_context->UTFConversionContext.pTempOutConversionBuffer    =
+                 (M4OSA_Void*)M4OSA_malloc(ConvertedSize*sizeof(M4OSA_UInt8), M4VA,
+                     (M4OSA_Char *)"M4xVSS_internalConvertToUTF8: UTF conversion buffer");
+            if(M4OSA_NULL == xVSS_context->UTFConversionContext.pTempOutConversionBuffer)
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertToUTF8");
+                return M4ERR_ALLOC;
+            }
+            xVSS_context->UTFConversionContext.m_TempOutConversionSize = ConvertedSize;
+
+            M4OSA_memset((M4OSA_MemAddr8)xVSS_context->\
+                UTFConversionContext.pTempOutConversionBuffer,(M4OSA_UInt32)xVSS_context->\
+                    UTFConversionContext.m_TempOutConversionSize,0);
+
+            err = xVSS_context->UTFConversionContext.pConvToUTF8Fct((M4OSA_Void*)pBufferIn,
+                (M4OSA_Void*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
+                    (M4OSA_UInt32*)&ConvertedSize);
+            if(err != M4NO_ERROR)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalConvertToUTF8: pConvToUTF8Fct return 0x%x",err);
+                return err;
+            }
+        }
+        else if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalConvertToUTF8: pConvToUTF8Fct return 0x%x",err);
+            return err;
+        }
+        /*decoded path*/
+        pBufferOut = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+        (*convertedSize) = ConvertedSize;
+    }
+    return M4NO_ERROR;
+}
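+
+/* Minimal illustrative sketch (compiled out) of the grow-and-retry pattern used
+   above: call the converter with the current temporary buffer; if it reports
+   M4xVSSWAR_BUFFER_OUT_TOO_SMALL, free the buffer, reallocate it to the size
+   returned by the converter, and call it once more. The function and variable
+   names below are hypothetical. */
+#if 0
+static M4OSA_ERR SketchConvertWithRetry(M4xVSS_Context* xVSS_context,
+                                        M4OSA_Void* pIn, M4OSA_UInt32* pOutSize)
+{
+    M4OSA_UInt32 size = xVSS_context->UTFConversionContext.m_TempOutConversionSize;
+    M4OSA_ERR err = xVSS_context->UTFConversionContext.pConvToUTF8Fct(pIn,
+        (M4OSA_UInt8*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &size);
+    if (M4xVSSWAR_BUFFER_OUT_TOO_SMALL == err)
+    {
+        M4OSA_free((M4OSA_MemAddr32)xVSS_context->UTFConversionContext.pTempOutConversionBuffer);
+        xVSS_context->UTFConversionContext.pTempOutConversionBuffer =
+            (M4OSA_Void*)M4OSA_malloc(size, M4VA,
+                (M4OSA_Char*)"sketch: grown UTF conversion buffer");
+        if (M4OSA_NULL == xVSS_context->UTFConversionContext.pTempOutConversionBuffer)
+            return M4ERR_ALLOC;
+        xVSS_context->UTFConversionContext.m_TempOutConversionSize = size;
+        err = xVSS_context->UTFConversionContext.pConvToUTF8Fct(pIn,
+            (M4OSA_UInt8*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer, &size);
+    }
+    *pOutSize = size;
+    return err;
+}
+#endif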
+
+
+/**
+ ******************************************************************************
+ * prototype    M4OSA_ERR M4xVSS_internalConvertFromUTF8(M4OSA_Context pContext,
+ *                                                        M4OSA_Void* pBufferIn,
+ *                                                        M4OSA_Void* pBufferOut,
+ *                                                        M4OSA_UInt32* convertedSize)
+ *
+ * @brief    This function converts from UTF-8 to the customer character format
+ * @note
+ * @param    pContext        (IN)    The integrator's own context
+ * @param    pBufferIn        (IN)    Buffer to convert
+ * @param    pBufferOut        (OUT)    Converted buffer
+ * @param    convertedSize    (OUT)    Size of the converted buffer
+ *
+ * @return    M4NO_ERROR:    No error
+ * @return    M4ERR_PARAMETER: At least one of the function parameters is null
+ ******************************************************************************
+ */
+M4OSA_ERR M4xVSS_internalConvertFromUTF8(M4OSA_Context pContext, M4OSA_Void* pBufferIn,
+                                        M4OSA_Void* pBufferOut, M4OSA_UInt32* convertedSize)
+{
+    M4xVSS_Context* xVSS_context = (M4xVSS_Context*)pContext;
+    M4OSA_ERR err;
+
+    pBufferOut = pBufferIn;
+    if(xVSS_context->UTFConversionContext.pConvFromUTF8Fct != M4OSA_NULL
+        && xVSS_context->UTFConversionContext.pTempOutConversionBuffer != M4OSA_NULL)
+    {
+        M4OSA_UInt32 ConvertedSize = xVSS_context->UTFConversionContext.m_TempOutConversionSize;
+
+        M4OSA_memset((M4OSA_MemAddr8)xVSS_context->\
+            UTFConversionContext.pTempOutConversionBuffer,(M4OSA_UInt32)xVSS_context->\
+                UTFConversionContext.m_TempOutConversionSize,0);
+
+        err = xVSS_context->UTFConversionContext.pConvFromUTF8Fct\
+            ((M4OSA_Void*)pBufferIn,(M4OSA_UInt8*)xVSS_context->\
+                UTFConversionContext.pTempOutConversionBuffer, (M4OSA_UInt32*)&ConvertedSize);
+        if(err == M4xVSSWAR_BUFFER_OUT_TOO_SMALL)
+        {
+            M4OSA_TRACE2_1("M4xVSS_internalConvertFromUTF8: pConvFromUTF8Fct return 0x%x",err);
+
+            /*free too small buffer*/
+            M4OSA_free((M4OSA_MemAddr32)xVSS_context->\
+                UTFConversionContext.pTempOutConversionBuffer);
+
+            /*re-allocate the buffer*/
+            xVSS_context->UTFConversionContext.pTempOutConversionBuffer    =
+                (M4OSA_Void*)M4OSA_malloc(ConvertedSize*sizeof(M4OSA_UInt8), M4VA,
+                     (M4OSA_Char *)"M4xVSS_internalConvertFromUTF8: UTF conversion buffer");
+            if(M4OSA_NULL == xVSS_context->UTFConversionContext.pTempOutConversionBuffer)
+            {
+                M4OSA_TRACE1_0("Allocation error in M4xVSS_internalConvertFromUTF8");
+                return M4ERR_ALLOC;
+            }
+            xVSS_context->UTFConversionContext.m_TempOutConversionSize = ConvertedSize;
+
+            M4OSA_memset((M4OSA_MemAddr8)xVSS_context->\
+                UTFConversionContext.pTempOutConversionBuffer,(M4OSA_UInt32)xVSS_context->\
+                    UTFConversionContext.m_TempOutConversionSize,0);
+
+            err = xVSS_context->UTFConversionContext.pConvFromUTF8Fct((M4OSA_Void*)pBufferIn,
+                (M4OSA_Void*)xVSS_context->UTFConversionContext.pTempOutConversionBuffer,
+                     (M4OSA_UInt32*)&ConvertedSize);
+            if(err != M4NO_ERROR)
+            {
+                M4OSA_TRACE1_1("M4xVSS_internalConvertFromUTF8: pConvFromUTF8Fct return 0x%x",err);
+                return err;
+            }
+        }
+        else if(err != M4NO_ERROR)
+        {
+            M4OSA_TRACE1_1("M4xVSS_internalConvertFromUTF8: pConvFromUTF8Fct return 0x%x",err);
+            return err;
+        }
+        /*decoded path*/
+        pBufferOut = xVSS_context->UTFConversionContext.pTempOutConversionBuffer;
+        (*convertedSize) = ConvertedSize;
+    }
+
+
+    return M4NO_ERROR;
+}
diff --git a/libvideoeditor/vss/src/glvaudioresampler.c b/libvideoeditor/vss/src/glvaudioresampler.c
new file mode 100755
index 0000000..852329e
--- /dev/null
+++ b/libvideoeditor/vss/src/glvaudioresampler.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file    glvaudioresampler.c
+ * @brief   Low-quality (linear interpolation) audio resampler for the VSS
+ * @note
+ ******************************************************************************
+ */
+/**
+ * OSAL headers */
+#include "M4OSA_Memory.h"        /**< OSAL memory management */
+#include "M4OSA_Debug.h"        /**< OSAL debug management */
+#include "M4OSA_CoreID.h"
+#include "gLVAudioResampler.h"
+
+
+static void resampleStereo16(int32_t* out, int16_t* input, long outFrameCount,
+                                LVAudioResampler *resampler) ;
+static void resampleMono16(int32_t* out, int16_t* input, long outFrameCount,
+                             LVAudioResampler *resampler) ;
+
+int32_t LVAudioResamplerCreate(int bitDepth, int inChannelCount,
+        int32_t sampleRate, int quality)
+{
+    int32_t context;
+    LVAudioResampler *resampler;
+
+    resampler = (LVAudioResampler *)M4OSA_malloc(sizeof(LVAudioResampler), M4VSS3GPP,
+         (M4OSA_Char *)"LVAudioResampler");
+    context = (int32_t)resampler;
+
+    if (quality == DEFAULT)
+        quality = LOW_QUALITY;
+
+
+    switch (quality) {
+    default:
+    case LOW_QUALITY:
+        resampler->mQuality = LOW_QUALITY;
+        LVResampler_LowQualityInit(bitDepth, inChannelCount, sampleRate, context);
+        break;
+    case MED_QUALITY:
+        resampler->mQuality = MED_QUALITY;
+        break;
+    case HIGH_QUALITY:
+        resampler->mQuality = HIGH_QUALITY;
+        break;
+    }
+
+    return (context);
+}
+
+static int32_t Interp(int32_t x0, int32_t x1, uint32_t f) {
+    int32_t t_datta;
+    t_datta = x0 + (((x1 - x0) * (int32_t)(f >> kPreInterpShift)) >> kNumInterpBits);
+    return t_datta;
+}
+static void Advance(long* index, uint32_t* frac, uint32_t inc) {
+    *frac += inc;
+    *index += (long)(*frac >> kNumPhaseBits);
+    *frac &= kPhaseMask;
+}
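+
+/* Minimal illustrative sketch (compiled out): Interp() and Advance() together
+   form a fixed-point phase accumulator. 'frac' holds the fractional position
+   between two input samples, 'inc' is the input/output rate ratio in the same
+   fixed-point format, and each output sample interpolates linearly between the
+   two neighbouring input samples before stepping the phase. The helper name is
+   hypothetical. */
+#if 0
+static int16_t SketchResampleOneSample(const int16_t* in, long* index,
+                                       uint32_t* frac, uint32_t inc)
+{
+    int32_t sample = Interp(in[*index], in[*index + 1], *frac);
+    Advance(index, frac, inc);
+    return (int16_t)sample;
+}
+#endif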
+
+void LVResampler_LowQualityInit(int bitDepth, int inChannelCount,
+        int32_t sampleRate, int32_t context )
+{
+    LVAudioResampler *resampler = (LVAudioResampler *) context;
+    resampler->mBitDepth = bitDepth;
+    resampler->mChannelCount = inChannelCount;
+    resampler->mSampleRate = sampleRate;
+    resampler->mInSampleRate = sampleRate;
+    resampler->mInputIndex = 0;
+    resampler->mPhaseFraction = 0;
+    // sanity check on format
+    if ((bitDepth != 16) ||(inChannelCount < 1) || (inChannelCount > 2))
+    {
+        //LOGE("Unsupported sample format, %d bits, %d channels", bitDepth,
+        //  inChannelCount);
+        // LOG_ASSERT(0);
+    }
+    // initialize common members
+    resampler->mVolume[0] =
+        resampler->mVolume[1] = 0;
+    resampler->mBuffer.frameCount = 0;
+    // save format for quick lookup
+    if (inChannelCount == 1)
+    {
+        resampler->mFormat = 1;//MONO_16_BIT;
+    }
+    else
+    {
+        resampler->mFormat = 2;//STEREO_16_BIT;
+    }
+}
+
+void LVAudiosetSampleRate(int32_t context,int32_t inSampleRate)
+{
+    LVAudioResampler *resampler = (LVAudioResampler *)context;
+    long temp;
+    temp = kPhaseMultiplier;
+
+    resampler->mInSampleRate = inSampleRate;
+    resampler->mPhaseIncrement = (uint32_t)((temp / resampler->mSampleRate)* inSampleRate );
+}
+void LVAudiosetVolume(int32_t context, int16_t left, int16_t right)
+{
+    LVAudioResampler *resampler = (LVAudioResampler *)context;
+    // TODO: Implement anti-zipper filter
+    resampler->mVolume[0] = left;
+    resampler->mVolume[1] = right;
+}
+
+
+
+static  int16_t clamp16(int32_t sample)
+{
+    if ((sample>>15) ^ (sample>>31))
+        sample = 0x7FFF ^ (sample>>31);
+    return sample;
+}
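+
+/* clamp16() above saturates a 32-bit accumulated sample to the signed 16-bit
+   range without branching: (sample>>15) ^ (sample>>31) is non-zero exactly when
+   the value does not fit in 16 bits, and 0x7FFF ^ (sample>>31) then yields
+   0x7FFF (32767) for positive overflow and 0x8000 (-32768) for negative
+   overflow. Worked values in a compiled-out sketch: */
+#if 0
+static void SketchClamp16Examples(void)
+{
+    int16_t a = clamp16(1234);     /* in range  -> 1234   */
+    int16_t b = clamp16(40000);    /* too large -> 32767  */
+    int16_t c = clamp16(-40000);   /* too small -> -32768 */
+    (void)a; (void)b; (void)c;
+}
+#endif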
+
+
+static void DitherAndClamp(int32_t* out, int32_t const *sums, long c)
+{
+    long i;
+        //ditherAndClamp((int32_t*)reSampledBuffer, pTmpBuffer, outBufferSize/2);
+    for ( i=0 ; i<c ; i++)
+    {
+        int32_t l = *sums++;
+        int32_t r = *sums++;
+        int32_t nl = l >> 12;
+        int32_t nr = r >> 12;
+        l = clamp16(nl);
+        r = clamp16(nr);
+        *out++ = (r<<16) | (l & 0xFFFF);
+    }
+
+}
+
+void LVAudioresample_LowQuality(int16_t* out,
+                                int16_t* input,
+                                long outFrameCount,
+                                int32_t context)
+{
+    LVAudioResampler *resampler = (LVAudioResampler *)context;
+
+    int32_t     *tempBuff = (int32_t *)M4OSA_malloc(
+                (outFrameCount * sizeof(int32_t) * 2),
+                M4VSS3GPP, (M4OSA_Char *)"tempBuff");
+
+    M4OSA_memset((M4OSA_MemAddr8)tempBuff,
+                (outFrameCount * sizeof(int32_t) * 2), 0);
+
+    switch (resampler->mChannelCount)
+    {
+        case 1:
+             resampleMono16(tempBuff, input, outFrameCount, resampler);
+            break;
+        case 2:
+            resampleStereo16(tempBuff, input, outFrameCount, resampler);
+            break;
+    }
+
+    // Dither and Clamp
+    DitherAndClamp((int32_t*)out, tempBuff, outFrameCount);
+
+    M4OSA_free((M4OSA_MemAddr32)tempBuff);
+}
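+
+/* Minimal illustrative sketch (compiled out) of how the entry points above are
+   meant to be chained: create a low-quality resampler for the output rate, set
+   the input rate and the left/right gains, then convert one block. The rates,
+   gain values and buffer sizes are example values only; the output buffer must
+   hold outFrameCount interleaved stereo frames, and since no destroy entry
+   point is defined in this file the sketch frees the context directly. */
+#if 0
+static void SketchResampleBlock(int16_t* inStereo44k1, int16_t* outInterleaved,
+                                long outFrameCount)
+{
+    int32_t ctx = LVAudioResamplerCreate(16 /* bitDepth */, 2 /* channels */,
+                                         32000 /* output rate */, LOW_QUALITY);
+    LVAudiosetSampleRate(ctx, 44100);      /* input sample rate */
+    LVAudiosetVolume(ctx, 0x1000, 0x1000); /* example L/R gains */
+    LVAudioresample_LowQuality(outInterleaved, inStereo44k1, outFrameCount, ctx);
+    M4OSA_free((M4OSA_MemAddr32)ctx);
+}
+#endif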
+
+void resampleStereo16(int32_t* out, int16_t* input,long outFrameCount,
+                        LVAudioResampler *resampler)
+{
+
+    int32_t vl = resampler->mVolume[0];
+    int32_t vr = resampler->mVolume[1];
+
+    long inputIndex = resampler->mInputIndex;
+    uint32_t phaseFraction = resampler->mPhaseFraction;
+    uint32_t phaseIncrement = resampler->mPhaseIncrement;
+    long outputIndex = 0;
+
+
+    long outputSampleCount = outFrameCount * 2;
+    long inFrameCount = (outFrameCount* resampler->mInSampleRate)/resampler->mSampleRate;
+    int16_t *in;
+
+    resampler->mBuffer.i16 = input;
+
+    // LOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d\n",
+    //      outFrameCount, inputIndex, phaseFraction, phaseIncrement);
+
+    while (outputIndex < outputSampleCount)
+    {
+        resampler->mBuffer.frameCount = inFrameCount;
+        resampler->mX0L = 0;
+        resampler->mX0R = 0;
+        inputIndex = 0;
+
+        in = resampler->mBuffer.i16;
+
+        // handle boundary case
+        while (inputIndex == 0) {
+            // LOGE("boundary case\n");
+            out[outputIndex++] += vl * Interp(resampler->mX0L, in[0], phaseFraction);
+            out[outputIndex++] += vr * Interp(resampler->mX0R, in[1], phaseFraction);
+            Advance(&inputIndex, &phaseFraction, phaseIncrement);
+            if (outputIndex == outputSampleCount)
+                break;
+        }
+
+        // process input samples
+        while (outputIndex < outputSampleCount && inputIndex < resampler->mBuffer.frameCount) {
+            out[outputIndex++] += vl * Interp(in[inputIndex*2-2],
+                    in[inputIndex*2], phaseFraction);
+            out[outputIndex++] += vr * Interp(in[inputIndex*2-1],
+                    in[inputIndex*2+1], phaseFraction);
+            Advance(&inputIndex, &phaseFraction, phaseIncrement);
+        }
+
+        resampler->mX0L = resampler->mBuffer.i16[resampler->mBuffer.frameCount*2-2];
+        resampler->mX0R = resampler->mBuffer.i16[resampler->mBuffer.frameCount*2-1];
+    }
+
+resampleStereo16_exit:
+    // save state
+    resampler->mInputIndex = inputIndex;
+    resampler->mPhaseFraction = phaseFraction;
+}
+
+
+void resampleMono16(int32_t* out, int16_t* input,long outFrameCount, LVAudioResampler *resampler/*,
+        AudioBufferProvider* provider*/)
+{
+
+    int32_t vl = resampler->mVolume[0];
+    int32_t vr = resampler->mVolume[1];
+    int16_t *in;
+
+    long inputIndex = resampler->mInputIndex;
+    uint32_t phaseFraction = resampler->mPhaseFraction;
+    uint32_t phaseIncrement = resampler->mPhaseIncrement;
+    long outputIndex = 0;
+    long outputSampleCount = outFrameCount * 2;
+    long inFrameCount = (outFrameCount*resampler->mInSampleRate)/resampler->mSampleRate;
+
+    resampler->mBuffer.i16 = input;
+    resampler->mBuffer.i8 = (int8_t *)input;
+    resampler->mBuffer.raw = (void *)input;
+
+    // LOGE("starting resample %d frames, inputIndex=%d, phaseFraction=%d, phaseIncrement=%d\n",
+    //      outFrameCount, inputIndex, phaseFraction, phaseIncrement);
+    while (outputIndex < outputSampleCount) {
+        // buffer is empty, fetch a new one
+        while (resampler->mBuffer.frameCount == 0) {
+            resampler->mBuffer.frameCount = inFrameCount;
+            //provider->getNextBuffer(&mBuffer);
+
+            if (resampler->mBuffer.raw == M4OSA_NULL) {
+                resampler->mInputIndex = inputIndex;
+                resampler->mPhaseFraction = phaseFraction;
+                goto resampleMono16_exit;
+            }
+            resampler->mX0L = 0;
+            // LOGE("New buffer fetched: %d frames\n", mBuffer.frameCount);
+            if (resampler->mBuffer.frameCount >  inputIndex)
+                break;
+
+            inputIndex -= resampler->mBuffer.frameCount;
+            resampler->mX0L = resampler->mBuffer.i16[resampler->mBuffer.frameCount-1];
+            //provider->releaseBuffer(&resampler->mBuffer);
+            // mBuffer.frameCount == 0 now so we reload a new buffer
+        }
+
+        in = resampler->mBuffer.i16;
+
+        // handle boundary case
+        while (inputIndex == 0) {
+            // LOGE("boundary case\n");
+            int32_t sample = Interp(resampler->mX0L, in[0], phaseFraction);
+            out[outputIndex++] += vl * sample;
+            out[outputIndex++] += vr * sample;
+            Advance(&inputIndex, &phaseFraction, phaseIncrement);
+            if (outputIndex == outputSampleCount)
+                break;
+        }
+
+        // process input samples
+        while (outputIndex < outputSampleCount && inputIndex < resampler->mBuffer.frameCount) {
+            int32_t sample = Interp(in[inputIndex-1], in[inputIndex],
+                    phaseFraction);
+            out[outputIndex++] += vl * sample;
+            out[outputIndex++] += vr * sample;
+            Advance(&inputIndex, &phaseFraction, phaseIncrement);
+        }
+
+        // LOGE("loop done - outputIndex=%d, inputIndex=%d\n", outputIndex, inputIndex);
+        // if done with buffer, save samples
+        if (inputIndex >= resampler->mBuffer.frameCount) {
+            inputIndex -= resampler->mBuffer.frameCount;
+
+            // LOGE("buffer done, new input index %d", inputIndex);
+            resampler->mX0L = resampler->mBuffer.i16[resampler->mBuffer.frameCount-1];
+        }
+    }
+
+resampleMono16_exit:
+    // save state
+    resampler->mInputIndex = inputIndex;
+    resampler->mPhaseFraction = phaseFraction;
+}
+
diff --git a/libvideoeditor/vss/stagefrightshells/Android.mk b/libvideoeditor/vss/stagefrightshells/Android.mk
new file mode 100755
index 0000000..7ae3545
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/Android.mk
@@ -0,0 +1 @@
+include $(call all-subdir-makefiles)

diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditor3gpReader.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditor3gpReader.h
new file mode 100755
index 0000000..73f3958
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditor3gpReader.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditor3gpReader.h
+* @brief  StageFright shell 3GP Reader
+*************************************************************************
+*/
+#ifndef VIDEOEDITOR_3GPREADER_H
+#define VIDEOEDITOR_3GPREADER_H
+
+#include "M4READER_Common.h"
+
+M4OSA_ERR VideoEditor3gpReader_getInterface(
+        M4READER_MediaType *pMediaType,
+        M4READER_GlobalInterface **pRdrGlobalInterface,
+        M4READER_DataInterface **pRdrDataInterface);
+
+#endif /* VIDEOEDITOR_3GPREADER_H */
+
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioDecoder.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioDecoder.h
new file mode 100755
index 0000000..3a0418d
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioDecoder.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditorAudioDecoder.h
+* @brief  StageFright shell Audio Decoder
+*************************************************************************
+*/
+#ifndef VIDEOEDITOR_AUDIODECODER_H
+#define VIDEOEDITOR_AUDIODECODER_H
+
+#include "M4AD_Common.h"
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_AAC(M4AD_Type* pDecoderType,
+        M4AD_Interface** pDecoderInterface);
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_AMRNB(M4AD_Type* pDecoderType,
+        M4AD_Interface** pDecoderInterface);
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_AMRWB(M4AD_Type* pDecoderType,
+        M4AD_Interface** pDecoderInterface);
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_MP3(M4AD_Type* pDecoderType,
+        M4AD_Interface** pDecoderInterface);
+
+#endif /* VIDEOEDITOR_AUDIODECODER_H */
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioEncoder.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioEncoder.h
new file mode 100755
index 0000000..009605a
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorAudioEncoder.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditorAudioEncoder.h
+* @brief  StageFright shell Audio Encoder
+*************************************************************************
+*/
+#ifndef VIDEOEDITOR_AUDIOENCODER_H
+#define VIDEOEDITOR_AUDIOENCODER_H
+
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Memory.h"
+#include "M4ENCODER_AudioCommon.h"
+
+M4OSA_ERR VideoEditorAudioEncoder_getInterface_AAC(
+        M4ENCODER_AudioFormat* pFormat,
+        M4ENCODER_AudioGlobalInterface** pEncoderInterface);
+
+M4OSA_ERR VideoEditorAudioEncoder_getInterface_AMRNB(
+        M4ENCODER_AudioFormat* pFormat,
+        M4ENCODER_AudioGlobalInterface** pEncoderInterface);
+
+M4OSA_ERR VideoEditorAudioEncoder_getInterface_MP3(
+        M4ENCODER_AudioFormat* pFormat,
+        M4ENCODER_AudioGlobalInterface** pEncoderInterface);
+
+#endif /* VIDEOEDITOR_AUDIOENCODER_H */
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorBuffer.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorBuffer.h
new file mode 100755
index 0000000..62433d0
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorBuffer.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditorBuffer.h
+* @brief  StageFright shell Buffer
+*************************************************************************
+*/
+#ifndef   VIDEOEDITOR_BUFFER_H
+#define   VIDEOEDITOR_BUFFER_H
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_Memory.h"
+#include "M4OSA_CharStar.h"
+#include "M4_Utils.h"
+
+#include "LV_Macros.h"
+
+/*--- Core id for VIDEOEDITOR Buffer allocations  ---*/
+#define VIDEOEDITOR_BUFFER_EXTERNAL 0x012F
+
+/* ----- errors  -----*/
+#define M4ERR_NO_BUFFER_AVAILABLE \
+    M4OSA_ERR_CREATE(M4_ERR,VIDEOEDITOR_BUFFER_EXTERNAL,0x000001)
+#define M4ERR_NO_BUFFER_MATCH \
+    M4OSA_ERR_CREATE(M4_ERR,VIDEOEDITOR_BUFFER_EXTERNAL,0x000002)
+
+typedef enum {
+    VIDEOEDITOR_BUFFER_kEmpty = 0,
+    VIDEOEDITOR_BUFFER_kFilled,
+} VIDEOEDITOR_BUFFER_State;
+
+/**
+ ************************************************************************
+ * Structure    VIDEOEDITOR_BUFFER_Buffer
+ * @brief       One OMX Buffer and data related to it
+ ************************************************************************
+*/
+typedef struct {
+    M4OSA_Void* pData;              /**< Pointer to the data*/
+    M4OSA_UInt32 size;
+    VIDEOEDITOR_BUFFER_State state; /**< Buffer state */
+    M4OSA_UInt32 idx;               /**< Index of the buffer inside the pool */
+    M4_MediaTime    buffCTS;        /**< Time stamp of the buffer */
+} VIDEOEDITOR_BUFFER_Buffer;
+
+/**
+ ************************************************************************
+ * Structure    VIDEOEDITOR_BUFFER_Pool
+ * @brief       Structure to manage buffers
+ ************************************************************************
+*/
+typedef struct {
+    VIDEOEDITOR_BUFFER_Buffer* pNXPBuffer;
+    M4OSA_UInt32 NB;
+    M4OSA_Char* poolName;
+} VIDEOEDITOR_BUFFER_Pool;
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif //__cplusplus
+
+/**
+ ************************************************************************
+ M4OSA_ERR VIDEOEDITOR_BUFFER_allocatePool(VIDEOEDITOR_BUFFER_Pool** ppool,
+ *         M4OSA_UInt32 nbBuffers)
+ * @brief   Allocate a pool of nbBuffers buffers
+ *
+ * @param   ppool      : OUT The buffer pool to create
+ * @param   nbBuffers  : IN The number of buffers in the pool
+ * @param   poolName   : IN a name given to the pool
+ * @return  Error code
+ ************************************************************************
+*/
+M4OSA_ERR VIDEOEDITOR_BUFFER_allocatePool(VIDEOEDITOR_BUFFER_Pool** ppool,
+        M4OSA_UInt32 nbBuffers, M4OSA_Char* poolName);
+
+/**
+ ************************************************************************
+ M4OSA_ERR VIDEOEDITOR_BUFFER_freePool(VIDEOEDITOR_BUFFER_Pool* ppool)
+ * @brief   Deallocate a buffer pool
+ *
+ * @param   ppool      : IN The buffer pool to free
+ * @return  Error code
+ ************************************************************************
+*/
+M4OSA_ERR VIDEOEDITOR_BUFFER_freePool(VIDEOEDITOR_BUFFER_Pool* ppool);
+
+/**
+ ************************************************************************
+ M4OSA_ERR VIDEOEDITOR_BUFFER_getBuffer(VIDEOEDITOR_BUFFER_Pool* ppool,
+ *         VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer)
+ * @brief   Returns a buffer in a given state
+ *
+ * @param   ppool      : IN The buffer pool
+ * @param   desiredState : IN The buffer state
+ * @param   pNXPBuffer : OUT The selected buffer
+ * @return  Error code
+ ************************************************************************
+*/
+M4OSA_ERR VIDEOEDITOR_BUFFER_getBuffer(VIDEOEDITOR_BUFFER_Pool* ppool,
+        VIDEOEDITOR_BUFFER_State desiredState,
+        VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer);
+
+
+M4OSA_ERR VIDEOEDITOR_BUFFER_initPoolBuffers(VIDEOEDITOR_BUFFER_Pool* ppool,
+        M4OSA_UInt32 lSize);
+
+M4OSA_ERR VIDEOEDITOR_BUFFER_getOldestBuffer(VIDEOEDITOR_BUFFER_Pool *pool,
+        VIDEOEDITOR_BUFFER_State desiredState,
+        VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer);
+
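+/* Minimal illustrative sketch (compiled out) of the expected pool life cycle:
+   allocate a pool of N descriptors, give each buffer its payload, fetch an
+   empty buffer, fill it and mark it, then free the whole pool. The pool size,
+   buffer size and error handling below are placeholder values. */
+#if 0
+static M4OSA_ERR SketchUsePool(void)
+{
+    VIDEOEDITOR_BUFFER_Pool* pPool = M4OSA_NULL;
+    VIDEOEDITOR_BUFFER_Buffer* pBuffer = M4OSA_NULL;
+    M4OSA_ERR err;
+
+    err = VIDEOEDITOR_BUFFER_allocatePool(&pPool, 5, (M4OSA_Char*)"sketch pool");
+    if (M4NO_ERROR != err) return err;
+
+    err = VIDEOEDITOR_BUFFER_initPoolBuffers(pPool, 4096 /* bytes per buffer */);
+    if (M4NO_ERROR != err) return err;
+
+    err = VIDEOEDITOR_BUFFER_getBuffer(pPool, VIDEOEDITOR_BUFFER_kEmpty, &pBuffer);
+    if (M4NO_ERROR == err) {
+        /* ...write into pBuffer->pData and set pBuffer->size... */
+        pBuffer->state = VIDEOEDITOR_BUFFER_kFilled;
+    }
+    return VIDEOEDITOR_BUFFER_freePool(pPool);
+}
+#endif
+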
+#ifdef __cplusplus
+}
+#endif //__cplusplus
+#endif /*VIDEOEDITOR_BUFFER_H*/
+
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMp3Reader.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMp3Reader.h
new file mode 100755
index 0000000..03bb41d
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorMp3Reader.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditorMp3Reader.h
+* @brief  StageFright shell MP3 Reader
+*************************************************************************
+*/
+#ifndef VIDEOEDITOR_MP3READER_H
+#define VIDEOEDITOR_MP3READER_H
+
+#include "M4READER_Common.h"
+
+M4OSA_ERR VideoEditorMp3Reader_getInterface(
+        M4READER_MediaType *pMediaType,
+        M4READER_GlobalInterface **pRdrGlobalInterface,
+        M4READER_DataInterface **pRdrDataInterface);
+
+#endif /* VIDEOEDITOR_MP3READER_H */
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorUtils.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorUtils.h
new file mode 100755
index 0000000..e06fcbc
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorUtils.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditorUtils.h
+* @brief  StageFright shell Utilities
+*************************************************************************
+*/
+#ifndef ANDROID_UTILS_H_
+#define ANDROID_UTILS_H_
+
+/*******************
+ *     HEADERS     *
+ *******************/
+
+#include "M4OSA_Debug.h"
+
+#include "utils/Log.h"
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+
+/**
+ *************************************************************************
+ * VIDEOEDITOR_CHECK(test, errCode)
+ * @note This macro displays an error message and goes to function cleanUp label
+ *       if the test fails.
+ *************************************************************************
+ */
+#define VIDEOEDITOR_CHECK(test, errCode) \
+{ \
+    if( !(test) ) { \
+        LOGV("!!! %s (L%d) check failed : " #test ", yields error 0x%.8x", \
+            __FILE__, __LINE__, errCode); \
+        err = (errCode); \
+        goto cleanUp; \
+    } \
+}
+
+/**
+ *************************************************************************
+ * SAFE_FREE(p)
+ * @note This macro calls free and makes sure the pointer is set to NULL.
+ *************************************************************************
+ */
+#define SAFE_FREE(p) \
+{ \
+    if(M4OSA_NULL != (p)) { \
+        M4OSA_free((M4OSA_MemAddr32)(p)) ; \
+        (p) = M4OSA_NULL ; \
+    } \
+}
+
+/**
+ *************************************************************************
+ * SAFE_MALLOC(p, type, count, comment)
+ * @note This macro allocates a buffer, checks for success and fills the buffer
+ *       with 0.
+ *************************************************************************
+ */
+#define SAFE_MALLOC(p, type, count, comment) \
+{ \
+    (p) = (type*)M4OSA_malloc(sizeof(type)*(count), 0xFF,(M4OSA_Char*)comment);\
+    VIDEOEDITOR_CHECK(M4OSA_NULL != (p), M4ERR_ALLOC); \
+    M4OSA_memset((M4OSA_MemAddr8)(p), sizeof(type)*(count), 0); \
+}
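+
+/* Minimal illustrative sketch (compiled out) of the error-handling pattern the
+   macros above assume: the function declares an M4OSA_ERR named err and a
+   cleanUp label, VIDEOEDITOR_CHECK() jumps to cleanUp on failure, SAFE_MALLOC()
+   allocates and zero-fills, and SAFE_FREE() releases on the error path. The
+   function and buffer names below are hypothetical. */
+#if 0
+static M4OSA_ERR sketchAllocateBuffer(M4OSA_UInt8** ppOut, M4OSA_UInt32 count)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt8* pBuffer = M4OSA_NULL;
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != ppOut, M4ERR_PARAMETER);
+    SAFE_MALLOC(pBuffer, M4OSA_UInt8, count, "sketch buffer");
+    *ppOut = pBuffer;
+
+cleanUp:
+    if (M4NO_ERROR != err) {
+        SAFE_FREE(pBuffer);
+    }
+    return err;
+}
+#endif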
+
+
+    /********************
+     *    UTILITIES     *
+     ********************/
+
+
+namespace android {
+
+/*--------------------------*/
+/* DISPLAY METADATA CONTENT */
+/*--------------------------*/
+void displayMetaData(const sp<MetaData> meta);
+
+// Build the AVC codec specific info from the StageFright encoder's output
+status_t buildAVCCodecSpecificData(uint8_t **outputData, size_t *outputSize,
+        const uint8_t *data, size_t size, MetaData *param);
+
+}//namespace android
+
+
+#endif //ANDROID_UTILS_H_
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder.h
new file mode 100755
index 0000000..b27b596
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditorVideoDecoder.h
+* @brief  StageFright shell video decoder
+*************************************************************************
+*/
+#ifndef VIDEOEDITOR_VIDEODECODER_H
+#define VIDEOEDITOR_VIDEODECODER_H
+
+#include "M4DECODER_Common.h"
+
+M4OSA_ERR VideoEditorVideoDecoder_getInterface_MPEG4(
+        M4DECODER_VideoType *pDecoderType,
+        M4OSA_Context *pDecoderInterface);
+
+M4OSA_ERR VideoEditorVideoDecoder_getInterface_H264(
+        M4DECODER_VideoType *pDecoderType,
+        M4OSA_Context *pDecoderInterface);
+
+#endif // VIDEOEDITOR_VIDEODECODER_H
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h
new file mode 100755
index 0000000..224a6d9
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoDecoder_internal.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+*************************************************************************
+* @file   VideoEditorVideoDecoder_internal.h
+* @brief  StageFright shell video decoder internal header file
+*************************************************************************
+*/
+
+#include "M4OSA_Types.h"
+#include "M4OSA_Debug.h"
+#include "M4OSA_Memory.h"
+#include "M4_Common.h"
+#include "M4OSA_CoreID.h"
+
+#include "M4DA_Types.h"
+#include "M4READER_Common.h"
+#include "M4VIFI_FiltersAPI.h"
+#include "M4TOOL_VersionInfo.h"
+#include "M4DECODER_Common.h"
+#include "M4OSA_Semaphore.h"
+#include "VideoEditorBuffer.h"
+#include "M4VD_Tools.h"
+
+#include <utils/RefBase.h>
+#include <OMX_Video.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+
+#define VIDEOEDITOR_VIDEC_SHELL_VER_MAJOR     0
+#define VIDEOEDITOR_VIDEC_SHELL_VER_MINOR     0
+#define VIDEOEDITOR_VIDEC_SHELL_VER_REVISION  1
+
+/* ERRORS */
+#define M4ERR_SF_DECODER_RSRC_FAIL M4OSA_ERR_CREATE(M4_ERR, 0xFF, 0x0001)
+
+namespace android {
+
+typedef enum {
+    VIDEOEDITOR_kMpeg4VideoDec,
+    VIDEOEDITOR_kH263VideoDec,
+    VIDEOEDITOR_kH264VideoDec
+} VIDEOEDITOR_CodecType;
+
+
+/*typedef struct{
+    M4OSA_UInt32 stream_byte;
+    M4OSA_UInt32 stream_index;
+    M4OSA_MemAddr8 in;
+
+} VIDEOEDITOR_VIDEO_Bitstream_ctxt;*/
+
+typedef M4VS_Bitstream_ctxt VIDEOEDITOR_VIDEO_Bitstream_ctxt;
+
+typedef struct {
+
+    /** Stagefright params */
+    OMXClient               mClient; /**< OMX Client session instance. */
+    sp<MediaSource>         mVideoDecoder; /**< Stagefright decoder instance */
+    sp<MediaSource>         mReaderSource; /**< Reader source access */
+
+    /* READER */
+    M4READER_DataInterface  *m_pReader;
+    M4_AccessUnit           *m_pNextAccessUnitToDecode;
+
+    /* STREAM PARAMS */
+    M4_VideoStreamHandler*  m_pVideoStreamhandler;
+
+    /* User filter params. */
+    M4VIFI_PlanConverterFunctionType *m_pFilter;
+    M4OSA_Void              *m_pFilterUserData;
+
+    M4_MediaTime            m_lastDecodedCTS;
+    M4_MediaTime            m_lastRenderCts;
+    M4OSA_Bool              mReachedEOS;
+    VIDEOEDITOR_CodecType   mDecoderType;
+    M4DECODER_VideoSize     m_VideoSize;
+    M4DECODER_MPEG4_DecoderConfigInfo m_Dci; /**< Decoder Config info */
+    VIDEOEDITOR_BUFFER_Pool *m_pDecBufferPool; /**< Decoded buffer pool */
+    OMX_COLOR_FORMATTYPE    decOuputColorFormat;
+
+    M4OSA_UInt32            mNbInputFrames;
+    M4OSA_Double            mFirstInputCts;
+    M4OSA_Double            mLastInputCts;
+    M4OSA_UInt32            mNbRenderedFrames;
+    M4OSA_Double            mFirstRenderedCts;
+    M4OSA_Double            mLastRenderedCts;
+    M4OSA_UInt32            mNbOutputFrames;
+    M4OSA_Double            mFirstOutputCts;
+    M4OSA_Double            mLastOutputCts;
+
+} VideoEditorVideoDecoder_Context;
+
+} //namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoEncoder.h b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoEncoder.h
new file mode 100755
index 0000000..8ba9cd0
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/inc/VideoEditorVideoEncoder.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditorVideoEncoder.h
+* @brief  StageFright shell video encoder
+*************************************************************************
+*/
+#ifndef VIDEOEDITOR_VIDEOENCODER_H
+#define VIDEOEDITOR_VIDEOENCODER_H
+
+#include "M4ENCODER_common.h"
+
+M4OSA_ERR VideoEditorVideoEncoder_getInterface_H263(M4ENCODER_Format* pFormat,
+        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode);
+
+M4OSA_ERR VideoEditorVideoEncoder_getInterface_MPEG4(M4ENCODER_Format* pFormat,
+        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode);
+
+M4OSA_ERR VideoEditorVideoEncoder_getInterface_H264(M4ENCODER_Format* pFormat,
+        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode);
+
+#endif //VIDEOEDITOR_VIDEOENCODER_H
diff --git a/libvideoeditor/vss/stagefrightshells/src/Android.mk b/libvideoeditor/vss/stagefrightshells/src/Android.mk
new file mode 100755
index 0000000..297bfa7
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/Android.mk
@@ -0,0 +1,79 @@
+#
+# Copyright (C) 2011 NXP Software
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+    VideoEditorVideoDecoder.cpp \
+    VideoEditorAudioDecoder.cpp \
+    VideoEditorMp3Reader.cpp \
+    VideoEditor3gpReader.cpp \
+    VideoEditorUtils.cpp \
+    VideoEditorBuffer.c \
+    VideoEditorVideoEncoder.cpp \
+    VideoEditorAudioEncoder.cpp
+
+LOCAL_C_INCLUDES += \
+    $(TOP)/frameworks/base/core/jni \
+    $(TOP)/frameworks/base/include \
+    $(TOP)/frameworks/base/include/media \
+    $(TOP)/frameworks/base/media/libmediaplayerservice \
+    $(TOP)/frameworks/base/media/libstagefright \
+    $(TOP)/frameworks/base/media/libstagefright/include \
+    $(TOP)/frameworks/base/media/libstagefright/rtsp \
+    $(JNI_H_INCLUDE) \
+    $(call include-path-for, corecg graphics) \
+    $(TOP)/external/opencore/extern_libs_v2/khronos/openmax/include \
+    $(TOP)/external/opencore/android \
+    $(TOP)/vendor/qcom/proprietary/qdsp6/mm-core/omxcore/inc \
+    $(TOP)/frameworks/base/core/jni/mediaeditor \
+    $(TOP)/frameworks/media/libvideoeditor/vss/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/common/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/mcs/inc \
+    $(TOP)/frameworks/media/libvideoeditor/lvpp \
+    $(TOP)/frameworks/media/libvideoeditor/osal/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/stagefrightshells/inc
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libutils \
+    libandroid_runtime \
+    libnativehelper \
+    libmedia \
+    libbinder \
+    libstagefright \
+    libstagefright_omx \
+    libsurfaceflinger_client \
+    libvideoeditorplayer
+
+LOCAL_CFLAGS += \
+
+
+
+LOCAL_LDFLAGS += -fuse-ld=bfd
+
+LOCAL_STATIC_LIBRARIES := \
+    libvideoeditor_osal \
+    libstagefright_color_conversion
+
+
+LOCAL_MODULE:= libvideoeditor_stagefrightshells
+
+LOCAL_MODULE_TAGS := eng development
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditor3gpReader.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditor3gpReader.cpp
new file mode 100755
index 0000000..70a5a81
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditor3gpReader.cpp
@@ -0,0 +1,1967 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ * 
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditor3gpReader.cpp
+* @brief  StageFright shell 3GP Reader
+*************************************************************************
+*/
+
+#define LOG_NDEBUG 1
+#define LOG_TAG "VIDEOEDITOR_3GPREADER"
+
+/**
+ * HEADERS
+ *
+ */
+#define VIDEOEDITOR_BITSTREAM_PARSER
+
+#include "M4OSA_Debug.h"
+#include "VideoEditor3gpReader.h"
+#include "M4SYS_AccessUnit.h"
+#include "VideoEditorUtils.h"
+#include "M4READER_3gpCom.h"
+#include "M4_Common.h"
+#include "M4OSA_FileWriter.h"
+
+#ifdef VIDEOEDITOR_BITSTREAM_PARSER
+#include "M4OSA_CoreID.h"
+#include "M4OSA_Error.h"
+#include "M4OSA_Memory.h"
+#include "M4_Utils.h"
+#endif
+
+#include "ESDS.h"
+#include "utils/Log.h"
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/FileSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+
+/**
+ * SOURCE CLASS
+ */
+namespace android {
+/**
+ * ENGINE INTERFACE
+ */
+
+/**
+ ************************************************************************
+ * @brief   Array of AMR NB/WB bitrates
+ * @note    Array to match the mode and the bit rate
+ ************************************************************************
+*/
+const M4OSA_UInt32 VideoEditor3gpReader_AmrBitRate [2 /* 8kHz / 16kHz     */]
+                                                   [9 /* the bitrate mode */] =
+{
+    {4750, 5150, 5900,  6700,  7400,  7950,  10200, 12200, 0},
+    {6600, 8850, 12650, 14250, 15850, 18250, 19850, 23050, 23850}
+};
+
+/**
+ *******************************************************************************
+ * structure VideoEditor3gpReader_Context
+ * @brief   This structure defines the context of the StageFright 3GP shell Reader
+ *******************************************************************************
+*/
+typedef struct {
+    sp<DataSource>              mDataSource;
+    sp<MediaExtractor>          mExtractor;
+    sp<MediaSource>             mAudioSource;
+    sp<MediaSource>             mVideoSource;
+    M4_StreamHandler*           mAudioStreamHandler;
+    M4_StreamHandler*           mVideoStreamHandler;
+    M4SYS_AccessUnit            mAudioAu;
+    M4SYS_AccessUnit            mVideoAu;
+    M4OSA_Time                  mMaxDuration;
+    int32_t                     mFileSize;
+    M4_StreamType               mStreamType;
+    M4OSA_UInt32                mStreamId;
+    int32_t                     mTracks;
+    int32_t                     mCurrTrack;
+    M4OSA_Bool                  mAudioSeeking;
+    M4OSA_Time                  mAudioSeekTime;
+    M4OSA_Bool                  mVideoSeeking;
+    M4OSA_Time                  mVideoSeekTime;
+
+} VideoEditor3gpReader_Context;
+
+#ifdef VIDEOEDITOR_BITSTREAM_PARSER
+/**
+ ************************************************************************
+ * structure    VideoEditor3gpReader_BitStreamParserContext
+ * @brief       Internal BitStreamParser context
+ ************************************************************************
+*/
+typedef struct {
+    M4OSA_UInt32*   mPbitStream;   /**< bitstream pointer (32bits aligned) */
+    M4OSA_Int32     mSize;         /**< bitstream size in bytes */
+    M4OSA_Int32     mIndex;        /**< byte index */
+    M4OSA_Int32     mBitIndex;     /**< bit index */
+    M4OSA_Int32     mStructSize;   /**< size of structure */
+} VideoEditor3gpReader_BitStreamParserContext;
+
+/**
+ *******************************************************************************
+ * @brief   Allocates the context and initializes internal data.
+ * @param   pContext    (OUT)  Pointer to the BitStreamParser context to create.
+ * @param   bitStream   A pointer to the bitstream
+ * @param   size        The size of the bitstream in bytes
+ *******************************************************************************
+*/
+static void VideoEditor3gpReader_BitStreamParserInit(void** pContext,
+        void* pBitStream, M4OSA_Int32 size) {
+    VideoEditor3gpReader_BitStreamParserContext* pStreamContext;
+
+    *pContext=M4OSA_NULL;
+    pStreamContext = (VideoEditor3gpReader_BitStreamParserContext*)M4OSA_malloc(
+        sizeof(VideoEditor3gpReader_BitStreamParserContext), M4READER_3GP,
+            (M4OSA_Char*)"3GP BitStreamParser Context");
+    if (M4OSA_NULL == pStreamContext) {
+        return;
+    }
+    pStreamContext->mPbitStream=(M4OSA_UInt32*)pBitStream;
+    pStreamContext->mSize=size;
+    pStreamContext->mIndex=0;
+    pStreamContext->mBitIndex=0;
+    pStreamContext->mStructSize =
+        sizeof(VideoEditor3gpReader_BitStreamParserContext);
+
+    *pContext=pStreamContext;
+}
+/**
+ **********************************************************************
+ * @brief   Clean up context
+ * @param   pContext    (IN/OUT)  BitStreamParser context.
+ **********************************************************************
+*/
+static void VideoEditor3gpReader_BitStreamParserCleanUp(void* pContext) {
+    M4OSA_free((M4OSA_Int32*)pContext);
+}
+/**
+ *****************************************************************************
+ * @brief   Read the next <length> bits in the bitstream.
+ * @note    The function does not update the bitstream pointer.
+ * @param   pContext    (IN/OUT) BitStreamParser context.
+ * @param   length      (IN) The number of bits to extract from the bitstream
+ * @return  the read bits
+ *****************************************************************************
+*/
+static M4OSA_UInt32 VideoEditor3gpReader_BitStreamParserShowBits(void* pContext,
+        M4OSA_Int32 length) {
+    VideoEditor3gpReader_BitStreamParserContext* pStreamContext =
+        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
+
+    M4OSA_UInt32 u_mask;
+    M4OSA_UInt32 retval;
+    M4OSA_Int32 i_ovf;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL==pStreamContext), 0,
+        "VideoEditor3gpReader_BitStreamParserShowBits:invalid context pointer");
+
+    retval=(M4OSA_UInt32)GET_MEMORY32(pStreamContext->\
+        mPbitStream[ pStreamContext->mIndex ]);
+    i_ovf = pStreamContext->mBitIndex + length - 32;
+    u_mask = (length >= 32) ? 0xffffffff: (1 << length) - 1;
+
+    /* do we have enough bits availble in the current word(32bits)*/
+    if (i_ovf <= 0) {
+        retval=(retval >> (- i_ovf)) & u_mask;
+    } else {
+        M4OSA_UInt32 u_nextword = (M4OSA_UInt32)GET_MEMORY32(
+            pStreamContext->mPbitStream[ pStreamContext->mIndex + 1 ]);
+        M4OSA_UInt32 u_msb_mask, u_msb_value, u_lsb_mask, u_lsb_value;
+
+        u_msb_mask = ((1 << (32 - pStreamContext->mBitIndex)) - 1) << i_ovf;
+        u_msb_value = retval << i_ovf;
+        u_lsb_mask = (1 << i_ovf) - 1;
+        u_lsb_value = u_nextword >> (32 - i_ovf);
+        retval= (u_msb_value & u_msb_mask ) | (u_lsb_value & u_lsb_mask);
+    }
+    /* return the bits...*/
+    return retval;
+}
+/**
+ ************************************************************************
+ * @brief   Advance the bitstream pointer by <length> bits.
+ * @param   pContext    (IN/OUT) BitStreamParser context.
+ * @param   length      (IN) The number of bits to advance the bitstream by
+ ************************************************************************
+*/
+static void VideoEditor3gpReader_BitStreamParserFlushBits(void* pContext,
+        M4OSA_Int32 length) {
+    VideoEditor3gpReader_BitStreamParserContext* pStreamContext=(
+        VideoEditor3gpReader_BitStreamParserContext*)pContext;
+    M4OSA_Int32 val;
+
+    if (M4OSA_NULL == pStreamContext) {
+        return;
+    }
+    val=pStreamContext->mBitIndex + length;
+    /* update the bits...*/
+    pStreamContext->mBitIndex += length;
+
+    if (val - 32 >= 0) {
+        /* update the bits...*/
+        pStreamContext->mBitIndex -= 32;
+        /* update the words*/
+        pStreamContext->mIndex++;
+    }
+}
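+
+/* Illustrative usage of the parser primitives above (a sketch, not called
+ * anywhere in this file): bits are inspected with ShowBits and consumed
+ * with FlushBits, always in that order.
+ *
+ *   void* parser = M4OSA_NULL;
+ *   VideoEditor3gpReader_BitStreamParserInit(&parser, pDsiBuffer, dsiSize);
+ *   if (M4OSA_NULL != parser) {
+ *       M4OSA_UInt32 profile =
+ *           VideoEditor3gpReader_BitStreamParserShowBits(parser, 8);
+ *       VideoEditor3gpReader_BitStreamParserFlushBits(parser, 8);
+ *       VideoEditor3gpReader_BitStreamParserCleanUp(parser);
+ *   }
+ *
+ * pDsiBuffer and dsiSize are placeholder names for a 32-bit aligned buffer
+ * and its size in bytes.
+ */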
+
+static M4OSA_UInt32 VideoEditor3gpReader_BitStreamParserGetBits(
+        void* pContext,M4OSA_Int32 bitPos, M4OSA_Int32 bitLength) {
+    VideoEditor3gpReader_BitStreamParserContext* pStreamContext =
+        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
+
+    M4OSA_Int32 bitLocation, bitIndex;
+    M4OSA_UInt32 retval=0;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL==pStreamContext), 0,
+        "VideoEditor3gpReader_BitStreamParserGetBits: invalid context pointer");
+
+    /* computes the word location*/
+    bitLocation=bitPos/32;
+    bitIndex=(bitPos) % 32;
+
+    if (bitLocation < pStreamContext->mSize) {
+        M4OSA_UInt32 u_mask;
+        M4OSA_Int32 i_ovf = bitIndex + bitLength - 32;
+        retval=(M4OSA_UInt32)GET_MEMORY32(
+            pStreamContext->mPbitStream[ bitLocation ]);
+
+        u_mask = (bitLength >= 32) ? 0xffffffff: (1 << bitLength) - 1;
+
+        if (i_ovf <= 0) {
+            retval=(retval >> (- i_ovf)) & u_mask;
+        } else {
+            M4OSA_UInt32 u_nextword = (M4OSA_UInt32)GET_MEMORY32(
+                pStreamContext->mPbitStream[ bitLocation + 1 ]);
+            M4OSA_UInt32 u_msb_mask, u_msb_value, u_lsb_mask, u_lsb_value;
+
+            u_msb_mask = ((1 << (32 - bitIndex)) - 1) << i_ovf;
+            u_msb_value = retval << i_ovf;
+            u_lsb_mask = (1 << i_ovf) - 1;
+            u_lsb_value = u_nextword >> (32 - i_ovf);
+            retval= (u_msb_value & u_msb_mask ) | (u_lsb_value & u_lsb_mask);
+        }
+    }
+    return retval;
+}
+
+static void VideoEditor3gpReader_BitStreamParserRestart(void* pContext) {
+    VideoEditor3gpReader_BitStreamParserContext* pStreamContext =
+        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
+
+    if (M4OSA_NULL == pStreamContext) {
+        return;
+    }
+    /* resets the bitstream pointers*/
+    pStreamContext->mIndex=0;
+    pStreamContext->mBitIndex=0;
+}
+/**
+ *******************************************************************************
+ * @brief  Get a pointer to the current byte pointed by the bitstream pointer.
+ * @note   It should be used carefully as the pointer is in the bitstream itself
+ *         and no copy is made.
+ * @param  pContext    (IN/OUT)  BitStreamParser context.
+ * @return Pointer to the current location in the bitstream
+ *******************************************************************************
+*/
+static M4OSA_UInt8*  VideoEditor3gpReader_GetCurrentbitStreamPointer(
+        void* pContext) {
+    VideoEditor3gpReader_BitStreamParserContext* pStreamContext =
+        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
+    M4OSA_DEBUG_IF1((M4OSA_NULL==pStreamContext), 0, "invalid context pointer");
+
+    return (M4OSA_UInt8*)((M4OSA_UInt8*)pStreamContext->mPbitStream + \
+        pStreamContext->mIndex * sizeof(M4OSA_UInt32) + \
+        pStreamContext->mBitIndex/8) ;
+}
+
+static M4OSA_Int32 VideoEditor3gpReader_BitStreamParserGetSize(void* pContext) {
+    VideoEditor3gpReader_BitStreamParserContext* pStreamContext =
+        (VideoEditor3gpReader_BitStreamParserContext*)pContext;
+    M4OSA_DEBUG_IF1((M4OSA_NULL==pStreamContext), 0, "invalid context pointer");
+
+    return pStreamContext->mSize;
+}
+
+
+static void VideoEditor3gpReader_MPEG4BitStreamParserInit(void** pContext,
+        void* pBitStream, M4OSA_Int32 size) {
+    VideoEditor3gpReader_BitStreamParserInit(pContext, pBitStream, size);
+}
+static M4OSA_Int32 VideoEditor3gpReader_GetMpegLengthFromInteger(void* pContext,
+        M4OSA_UInt32 val) {
+    M4OSA_UInt32 length=0;
+    M4OSA_UInt32 numBytes=0;
+    M4OSA_UInt32 b=0;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL==pContext), 0, "invalid context pointer");
+
+    /* the length is encoded as a sequence of bytes. The highest bit is used
+    to indicate that the length continues on the next byte.
+
+    The length can be: 0x80 0x80 0x80 0x22
+    or just            0x22 (highest bit not set)
+
+    */
+
+    do {
+        b=(val & ((0xff)<< (8 * numBytes)))>> (8 * numBytes);
+        length=(length << 7) | (b & 0x7f);
+        numBytes++;
+    } while ((b & 0x80) && numBytes < 4);
+
+    return length;
+}
+
+/**
+ *******************************************************************************
+ * @brief  Decode an MPEG4 Systems descriptor size from an encoded SDL size data
+ * @note   The value is read from the current bitstream location.
+ * @param  pContext    (IN/OUT)  BitStreamParser context.
+ * @return The decoded size value
+ *******************************************************************************
+*/
+static M4OSA_Int32 VideoEditor3gpReader_GetMpegLengthFromStream(void* pContext){
+    M4OSA_UInt32 length=0;
+    M4OSA_UInt32 numBytes=0;
+    M4OSA_UInt32 b=0;
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL==pContext), 0, "invalid context pointer");
+
+    /* the length is encoded as a sequence of bytes. The highest bit is used
+    to indicate that the length continues on the next byte.
+
+    The length can be: 0x80 0x80 0x80 0x22
+    or just            0x22 (highest bit not set)
+    */
+
+    do {
+        b=VideoEditor3gpReader_BitStreamParserShowBits(pContext, 8);
+        VideoEditor3gpReader_BitStreamParserFlushBits(pContext, 8);
+        length=(length << 7) | (b & 0x7f);
+        numBytes++;
+    } while ((b & 0x80) && numBytes < 4);
+
+    return length;
+}
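+
+/* Worked example for the expandable size coding above (illustrative only):
+ * reading the byte sequence 0x80 0x80 0x80 0x22 from the stream gives
+ *   0x80 -> length = (0 << 7) | 0x00 = 0, continue (high bit set)
+ *   0x80 -> length = 0, continue
+ *   0x80 -> length = 0, continue
+ *   0x22 -> length = (0 << 7) | 0x22 = 34, stop (high bit clear)
+ * so both encodings mentioned in the comment decode to 34 bytes.
+ */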
+#endif /* VIDEOEDITOR_BITSTREAM_PARSER */
+/**
+************************************************************************
+* @brief    create an instance of the 3gp reader
+* @note     allocates the context
+*
+* @param    pContext:       (OUT)   pointer to a reader context
+*
+* @return   M4NO_ERROR              there is no error
+* @return   M4ERR_ALLOC             a memory allocation has failed
+* @return   M4ERR_PARAMETER         at least one parameter is not valid
+************************************************************************
+*/
+
+M4OSA_ERR VideoEditor3gpReader_create(M4OSA_Context *pContext) {
+    VideoEditor3gpReader_Context* pC = NULL;
+    M4OSA_ERR err = M4NO_ERROR;
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext , M4ERR_PARAMETER);
+
+    LOGV("VideoEditor3gpReader_create begin");
+
+    /* Context allocation & initialization */
+    SAFE_MALLOC(pC, VideoEditor3gpReader_Context, 1, "VideoEditor3gpReader");
+
+    memset(pC, 0, sizeof(VideoEditor3gpReader_Context));
+
+    pC->mAudioStreamHandler  = M4OSA_NULL;
+    pC->mAudioAu.dataAddress = M4OSA_NULL;
+    pC->mVideoStreamHandler  = M4OSA_NULL;
+    pC->mVideoAu.dataAddress = M4OSA_NULL;
+
+    pC->mAudioSeeking = M4OSA_FALSE;
+    pC->mAudioSeekTime = 0;
+
+    pC->mVideoSeeking = M4OSA_FALSE;
+    pC->mVideoSeekTime = 0;
+
+    M4OSA_INT64_FROM_INT32(pC->mMaxDuration, 0);
+    *pContext=pC;
+
+cleanUp:
+    if ( M4NO_ERROR == err ) {
+        LOGV("VideoEditor3gpReader_create no error");
+    } else {
+        LOGV("VideoEditor3gpReader_create ERROR 0x%X", err);
+    }
+    LOGV("VideoEditor3gpReader_create end ");
+    return err;
+}
+
+/**
+**************************************************************************
+* @brief    destroy the instance of the 3gp reader
+* @note after this call the context is invalid
+* @param    context:        (IN)    Context of the reader
+* @return   M4NO_ERROR              there is no error
+* @return   M4ERR_PARAMETER         pContext parameter is not properly set
+**************************************************************************
+*/
+
+M4OSA_ERR VideoEditor3gpReader_destroy(M4OSA_Context pContext) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditor3gpReader_Context* pC = M4OSA_NULL;
+
+    LOGV("VideoEditor3gpReader_destroy begin");
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    pC = (VideoEditor3gpReader_Context*)pContext;
+
+    SAFE_FREE(pC->mAudioAu.dataAddress);
+    pC->mAudioAu.dataAddress = M4OSA_NULL;
+    SAFE_FREE(pC->mVideoAu.dataAddress);
+    pC->mVideoAu.dataAddress = M4OSA_NULL;
+    SAFE_FREE(pC);
+    pContext = M4OSA_NULL;
+
+cleanUp:
+    if ( M4NO_ERROR == err ) {
+        LOGV("VideoEditor3gpReader_destroy no error");
+    } else {
+        LOGV("VideoEditor3gpReader_destroy ERROR 0x%X", err);
+    }
+
+    LOGV("VideoEditor3gpReader_destroy end ");
+    return err;
+}
+
+/**
+************************************************************************
+* @brief    open the reader and initializes its created instance
+* @note     this function opens the media file
+* @param    context:            (IN)    Context of the reader
+* @param    pFileDescriptor:    (IN)    Pointer to proprietary data identifying
+*                                       the media to open
+* @return   M4NO_ERROR                  there is no error
+* @return   M4ERR_PARAMETER             the context is NULL
+************************************************************************
+*/
+
+M4OSA_ERR VideoEditor3gpReader_open(M4OSA_Context pContext,
+        M4OSA_Void* pFileDescriptor) {
+    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)pContext;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    LOGV("VideoEditor3gpReader_open start ");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC),  M4ERR_PARAMETER,
+        "VideoEditor3gpReader_open: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_open: invalid pointer pFileDescriptor");
+
+    LOGV("VideoEditor3gpReader_open Datasource start %s",
+        (char*)pFileDescriptor);
+    pC->mDataSource = DataSource::CreateFromURI((char*)pFileDescriptor);
+
+    if (pC->mDataSource == NULL) {
+        LOGV("VideoEditor3gpReader_open Datasource error");
+        return M4ERR_PARAMETER;
+    }
+
+    pC->mExtractor = MediaExtractor::Create(pC->mDataSource,
+        MEDIA_MIMETYPE_CONTAINER_MPEG4);
+
+    if (pC->mExtractor == NULL) {
+        LOGV("VideoEditor3gpReader_open extractor error");
+        return M4ERR_PARAMETER;
+    }
+
+    LOGV("VideoEditor3gpReader_open end ");
+    return err;
+}
+
+/**
+************************************************************************
+* @brief    close the reader
+* @note     close the 3GP file
+* @param    context:        (IN)    Context of the reader
+* @return   M4NO_ERROR              there is no error
+* @return   M4ERR_PARAMETER         the context is NULL
+* @return   M4ERR_BAD_CONTEXT       provided context is not a valid one
+************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_close(M4OSA_Context context) {
+    VideoEditor3gpReader_Context *pC = (VideoEditor3gpReader_Context*)context;
+    M4READER_AudioSbrUserdata *pAudioSbrUserData;
+    M4_AccessUnit *pAU;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    LOGV("VideoEditor3gpReader_close begin");
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_close: invalid context pointer");
+
+    if (pC->mAudioStreamHandler) {
+        LOGV("VideoEditor3gpReader_close Audio");
+
+        if (M4OSA_NULL != pC->mAudioStreamHandler->m_pDecoderSpecificInfo) {
+            M4OSA_free((M4OSA_MemAddr32)pC->mAudioStreamHandler->\
+                m_pDecoderSpecificInfo);
+            pC->mAudioStreamHandler->m_decoderSpecificInfoSize = 0;
+            pC->mAudioStreamHandler->m_pDecoderSpecificInfo = M4OSA_NULL;
+        }
+
+        if ((M4DA_StreamTypeAudioAac == pC->mAudioStreamHandler->m_streamType)
+            && (M4OSA_NULL != pC->mAudioStreamHandler->m_pUserData)) {
+            pAudioSbrUserData = (M4READER_AudioSbrUserdata*)(\
+                pC->mAudioStreamHandler->m_pUserData);
+
+            pAU = (M4_AccessUnit*)pAudioSbrUserData->m_pFirstAU;
+            if (M4OSA_NULL != pAU) {
+                M4OSA_free((M4OSA_MemAddr32)pAU);
+            }
+
+            if (M4OSA_NULL != pAudioSbrUserData->m_pAacDecoderUserConfig) {
+                M4OSA_free((M4OSA_MemAddr32)pAudioSbrUserData->\
+                    m_pAacDecoderUserConfig);
+            }
+            M4OSA_free((M4OSA_MemAddr32)pAudioSbrUserData);
+            pC->mAudioStreamHandler->m_pUserData = M4OSA_NULL;
+        }
+
+        if (pC->mAudioStreamHandler->m_pESDSInfo != M4OSA_NULL) {
+            M4OSA_free((M4OSA_MemAddr32)pC->mAudioStreamHandler->m_pESDSInfo);
+            pC->mAudioStreamHandler->m_pESDSInfo = M4OSA_NULL;
+            pC->mAudioStreamHandler->m_ESDSInfoSize = 0;
+        }
+        /* Finally destroy the stream handler */
+        M4OSA_free((M4OSA_MemAddr32)pC->mAudioStreamHandler);
+        pC->mAudioStreamHandler = M4OSA_NULL;
+
+        pC->mAudioSource->stop();
+        pC->mAudioSource.clear();
+    }
+    if (pC->mVideoStreamHandler) {
+        LOGV("VideoEditor3gpReader_close Video ");
+
+        if(M4OSA_NULL != pC->mVideoStreamHandler->m_pDecoderSpecificInfo) {
+            M4OSA_free((M4OSA_MemAddr32)pC->mVideoStreamHandler->\
+                m_pDecoderSpecificInfo);
+            pC->mVideoStreamHandler->m_decoderSpecificInfoSize = 0;
+            pC->mVideoStreamHandler->m_pDecoderSpecificInfo = M4OSA_NULL;
+        }
+
+        if(M4OSA_NULL != pC->mVideoStreamHandler->m_pH264DecoderSpecificInfo) {
+            M4OSA_free((M4OSA_MemAddr32)pC->mVideoStreamHandler->\
+                m_pH264DecoderSpecificInfo);
+            pC->mVideoStreamHandler->m_H264decoderSpecificInfoSize = 0;
+            pC->mVideoStreamHandler->m_pH264DecoderSpecificInfo = M4OSA_NULL;
+        }
+
+        if(pC->mVideoStreamHandler->m_pESDSInfo != M4OSA_NULL) {
+            M4OSA_free((M4OSA_MemAddr32)pC->mVideoStreamHandler->m_pESDSInfo);
+            pC->mVideoStreamHandler->m_pESDSInfo = M4OSA_NULL;
+            pC->mVideoStreamHandler->m_ESDSInfoSize = 0;
+        }
+
+        /* Finally destroy the stream handler */
+        M4OSA_free((M4OSA_MemAddr32)pC->mVideoStreamHandler);
+        pC->mVideoStreamHandler = M4OSA_NULL;
+
+        pC->mVideoSource->stop();
+        pC->mVideoSource.clear();
+    }
+    pC->mDataSource.clear();
+
+    LOGV("VideoEditor3gpReader_close end");
+    return err;
+}
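+
+/* Reader lifecycle sketch (illustrative only; error handling omitted and the
+ * file path is a placeholder):
+ *
+ *   M4OSA_Context ctx = M4OSA_NULL;
+ *   VideoEditor3gpReader_create(&ctx);
+ *   VideoEditor3gpReader_open(ctx, (M4OSA_Void*)"/sdcard/clip.3gp");
+ *   ... getNextStreamHandler / fillAuStruct / getNextAu calls ...
+ *   VideoEditor3gpReader_close(ctx);
+ *   VideoEditor3gpReader_destroy(ctx);
+ */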
+
+/**
+************************************************************************
+* @brief    get an option from the 3gp reader
+* @note     it allows the caller to retrieve a property value:
+*
+* @param    context:        (IN)    Context of the reader
+* @param    optionId:       (IN)    indicates the option to get
+* @param    pValue:         (OUT)   pointer to structure or value (allocated
+*                                   by user) where option is stored
+*
+* @return   M4NO_ERROR              there is no error
+* @return   M4ERR_BAD_CONTEXT       provided context is not a valid one
+* @return   M4ERR_PARAMETER         at least one parameter is not properly set
+* @return   M4ERR_BAD_OPTION_ID     when the option ID is not a valid one
+* @return   M4ERR_VIDEO_NOT_H263    no H263 video stream in the file
+* @return   M4ERR_NO_VIDEO_STREAM_RETRIEVED_YET
+*           3gpReader_getNextStreamHandler must be called beforehand
+************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_getOption(M4OSA_Context context,
+        M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
+    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    LOGV("VideoEditor3gpReader_getOption begin %d", optionId);
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_getOption: invalid pointer on value");
+
+    switch (optionId) {
+    case M4READER_kOptionID_Duration:
+        {
+            LOGV("VideoEditor3gpReader_getOption duration %d",pC->mMaxDuration);
+            M4OSA_TIME_SET(*(M4OSA_Time*)pValue, pC->mMaxDuration);
+        }
+        break;
+    case M4READER_kOptionID_Version:
+        /* not used */
+        LOGV("VideoEditor3gpReader_getOption: M4READER_kOptionID_Version");
+        break;
+
+    case M4READER_kOptionID_Copyright:
+        /* not used */
+        LOGV(">>>>>>>   M4READER_kOptionID_Copyright");
+        break;
+
+    case M4READER_kOptionID_CreationTime:
+        /* not used */
+        LOGV("VideoEditor3gpReader_getOption M4READER_kOptionID_CreationTime");
+    break;
+
+    case M4READER_kOptionID_Bitrate:
+        {
+            M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
+
+            if (pC->mMaxDuration != 0) {
+                M4OSA_UInt32 ui32Tmp = (M4OSA_UInt32)pC->mMaxDuration;
+                *pBitrate = (M4OSA_UInt32)((M4OSA_Double)pC->mFileSize * \
+                    8000.0 / (M4OSA_Double)ui32Tmp);
+                LOGV("3gpReader_getOption bitrate:  %d", *pBitrate);
+            }
+            *pBitrate = 384000; //check
+            LOGV("VideoEditor3gpReader_getOption bitrate %ld", *pBitrate);
+        }
+    break;
+    case M4READER_3GP_kOptionID_H263Properties:
+        {
+#if 0
+            if(M4OSA_NULL == pC->mVideoStreamHandler) {
+                LOGV("VideoEditor3gpReader_getOption no videoStream retrieved");
+
+                err = M4ERR_NO_VIDEO_STREAM_RETRIEVED_YET;
+                break;
+            }
+            if((M4DA_StreamTypeVideoH263 != pC->mVideoStreamHandler->\
+                mStreamType) || (pC->mVideoStreamHandler->\
+                m_decoderSpecificInfoSize < 7)) {
+                LOGV("VideoEditor3gpReader_getOption DSI Size %d",
+                    pC->mVideoStreamHandler->m_decoderSpecificInfoSize);
+
+                err = M4ERR_VIDEO_NOT_H263;
+                break;
+            }
+
+            /* In the H263 decoder config, the 7th byte is the profile
+             * number and the 6th byte is the level number */
+            ((M4READER_3GP_H263Properties *)pValue)->uiProfile =
+                pC->mVideoStreamHandler->m_pDecoderSpecificInfo[6];
+            ((M4READER_3GP_H263Properties *)pValue)->uiLevel =
+                pC->mVideoStreamHandler->m_pDecoderSpecificInfo[5];
+#endif
+            LOGV("VideoEditor3gpReader_getOption M4READER_3GP_kOptionID_\
+            H263Properties end");
+        }
+        break;
+    case M4READER_3GP_kOptionID_PurpleLabsDrm:
+        LOGV("VideoEditor3gpReaderOption M4READER_3GP_kOptionID_PurpleLabsDrm");
+        /* not used */
+        break;
+
+    case M4READER_kOptionID_GetNumberOfAudioAu:
+        /* not used */
+        LOGV("VideoEditor3gpReadeOption M4READER_kOptionID_GetNumberOfAudioAu");
+    break;
+
+    case M4READER_kOptionID_GetNumberOfVideoAu:
+        /* not used */
+        LOGV("VideoEditor3gpReader_getOption :GetNumberOfVideoAu");
+    break;
+
+    case M4READER_kOptionID_GetMetadata:
+        /* not used */
+        LOGV("VideoEditor3gpReader_getOption M4READER_kOptionID_GetMetadata");
+    break;
+
+    case M4READER_kOptionID_3gpFtypBox:
+        /* used only for SEMC */
+        LOGV("VideoEditor3gpReader_getOption M4READER_kOptionID_3gpFtypBox");
+        err = M4ERR_BAD_OPTION_ID; //check this
+        break;
+
+#ifdef OPTIONID_GET_NEXT_VIDEO_CTS
+    case M4READER_3GP_kOptionID_getNextVideoCTS:
+        /* not used */
+        LOGV("VideoEditor3gpReader_getOption: getNextVideoCTS");
+        break;
+#endif
+    default:
+        {
+            err = M4ERR_BAD_OPTION_ID;
+            LOGV("VideoEditor3gpReader_getOption M4ERR_BAD_OPTION_ID");
+        }
+        break;
+    }
+    LOGV("VideoEditor3gpReader_getOption end: optionID: x%x", optionId);
+    return err;
+}
+/**
+************************************************************************
+* @brief    set an option on the 3gp reader
+* @note No option can be set yet.
+* @param    context:        (IN)    Context of the reader
+* @param    optionId:       (IN)    indicates the option to set
+* @param    pValue:         (IN)    pointer to structure or value (allocated
+*                                   by user) where option is stored
+* @return   M4NO_ERROR              there is no error
+* @return   M4ERR_BAD_CONTEXT       provided context is not a valid one
+* @return   M4ERR_PARAMETER         at least one parameter is not properly set
+* @return   M4ERR_BAD_OPTION_ID     when the option ID is not a valid one
+************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_setOption(M4OSA_Context context,
+        M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
+    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pC), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
+        "invalid value pointer");
+
+    LOGV("VideoEditor3gpReader_setOption begin %d",optionId);
+
+    switch(optionId) {
+        case M4READER_kOptionID_SetOsaFileReaderFctsPtr:
+        break;
+
+        case M4READER_3GP_kOptionID_AudioOnly:
+        break;
+
+        case M4READER_3GP_kOptionID_VideoOnly:
+        break;
+
+        case M4READER_3GP_kOptionID_FastOpenMode:
+        break;
+
+        case M4READER_kOptionID_MaxMetadataSize:
+        break;
+
+        default:
+        {
+            LOGV("VideoEditor3gpReader_setOption: returns M4ERR_BAD_OPTION_ID");
+            err = M4ERR_BAD_OPTION_ID;
+        }
+        break;
+    }
+    LOGV("VideoEditor3gpReader_setOption end ");
+    return err;
+}
+/**
+ ************************************************************************
+ * @brief   fill the access unit structure with initialization values
+ * @param   context:        (IN)     Context of the reader
+ * @param   pStreamHandler: (IN)     pointer to the stream handler to which
+ *                                   the access unit will be associated
+ * @param   pAccessUnit:    (IN/OUT) pointer to the access unit (allocated
+ *                                   by the caller) to initialize
+ * @return  M4NO_ERROR               there is no error
+ * @return  M4ERR_PARAMETER          at least one parameter is not properly set
+ ************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_fillAuStruct(M4OSA_Context context,
+        M4_StreamHandler *pStreamHandler, M4_AccessUnit *pAccessUnit) {
+    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
+    M4OSA_ERR err= M4NO_ERROR;
+
+    M4OSA_DEBUG_IF1((pC == 0),             M4ERR_PARAMETER,
+        "VideoEditor3gpReader_fillAuStruct: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_fillAuStruc invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
+        "VideoEditor3gpReader_fillAuStruct: invalid pointer to M4_AccessUnit");
+
+    LOGV("VideoEditor3gpReader_fillAuStruct begin");
+
+    /* Initialize pAccessUnit structure */
+    pAccessUnit->m_size         = 0;
+    pAccessUnit->m_CTS          = 0;
+    pAccessUnit->m_DTS          = 0;
+    pAccessUnit->m_attribute    = 0;
+    pAccessUnit->m_dataAddress  = M4OSA_NULL;
+    pAccessUnit->m_maxsize      = pStreamHandler->m_maxAUSize;
+    pAccessUnit->m_streamID     = pStreamHandler->m_streamId;
+    pAccessUnit->m_structSize   = sizeof(M4_AccessUnit);
+
+    LOGV("VideoEditor3gpReader_fillAuStruct end");
+    return M4NO_ERROR;
+}
+
+/**
+********************************************************************************
+* @brief    jump into the stream at the specified time
+* @note
+* @param    context:        (IN)   Context of the reader
+* @param    pStreamHandler  (IN)    the stream handler of the stream to jump in
+* @param    pTime           (IN/OUT) IN: the time to jump to (in ms)
+*                                    OUT: the time the stream really jumped to
+* @return   M4NO_ERROR             there is no error
+* @return   M4ERR_PARAMETER        at least one parameter is not properly set
+********************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_jump(M4OSA_Context context,
+        M4_StreamHandler *pStreamHandler, M4OSA_Int32* pTime) {
+    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4SYS_AccessUnit* pAu;
+    M4OSA_Time time64;
+    M4OSA_Double timeDouble;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_jump: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_jump: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_jump: invalid time pointer");
+
+    LOGV("VideoEditor3gpReader_jump begin");
+
+    if (*pTime == (pStreamHandler->m_duration)) {
+        *pTime -= 1;
+    }
+    M4OSA_INT64_FROM_INT32(time64, *pTime);
+
+    LOGV("VideoEditor3gpReader_jump time us %ld ", time64);
+
+    if ((pC->mAudioStreamHandler != M4OSA_NULL) &&
+        (pStreamHandler->m_streamId == pC->mAudioStreamHandler->m_streamId)) {
+        pAu = &pC->mAudioAu;
+        pAu->CTS = time64;
+        pAu->DTS = time64;
+
+        time64 = time64 * 1000; /* Convert the time into micro sec */
+        pC->mAudioSeeking = M4OSA_TRUE;
+        pC->mAudioSeekTime = time64;
+        LOGV("VideoEditor3gpReader_jump AUDIO time us %ld ", time64);
+    } else if ((pC->mVideoStreamHandler != M4OSA_NULL) &&
+        (pStreamHandler->m_streamId == pC->mVideoStreamHandler->m_streamId)) {
+        pAu = &pC->mVideoAu;
+        pAu->CTS = time64;
+        pAu->DTS = time64;
+
+        time64 = time64 * 1000; /* Convert the time into micro sec */
+        pC->mVideoSeeking = M4OSA_TRUE;
+        pC->mVideoSeekTime = time64;
+        LOGV("VideoEditor3gpReader_jump VIDEO time us %ld ", time64);
+    } else {
+        LOGV("VideoEditor3gpReader_jump passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+    time64 = time64 / 1000; /* Convert the time into milli sec */
+    LOGV("VideoEditor3gpReader_jump time ms before seekset %ld ", time64);
+
+    M4OSA_INT64_TO_DOUBLE(timeDouble, time64);
+    *pTime = (M4OSA_Int32)timeDouble;
+
+    LOGV("VideoEditor3gpReader_jump end");
+    err = M4NO_ERROR;
+    return err;
+}
+/**
+********************************************************************************
+* @brief    reset the stream, that is seek it to beginning and make it ready
+* @note
+* @param    context:        (IN)    Context of the reader
+* @param    pStreamHandler  (IN)    The stream handler of the stream to reset
+* @return   M4NO_ERROR              there is no error
+* @return   M4ERR_PARAMETER         at least one parameter is not properly set
+********************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_reset(M4OSA_Context context,
+        M4_StreamHandler *pStreamHandler) {
+    VideoEditor3gpReader_Context* pC = (VideoEditor3gpReader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4SYS_StreamID streamIdArray[2];
+    M4SYS_AccessUnit* pAu;
+    M4OSA_Time time64;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_reset: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_reset: invalid pointer to M4_StreamHandler");
+
+    M4OSA_INT64_FROM_INT32(time64, 0);
+
+    LOGV("VideoEditor3gpReader_reset begin");
+
+    if (pStreamHandler == (M4_StreamHandler*)pC->mAudioStreamHandler) {
+        pAu = &pC->mAudioAu;
+    } else if (pStreamHandler == (M4_StreamHandler*)pC->mVideoStreamHandler) {
+        pAu = &pC->mVideoAu;
+    } else {
+        LOGV("VideoEditor3gpReader_reset passed StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    pAu->CTS = time64;
+    pAu->DTS = time64;
+
+    LOGV("VideoEditor3gpReader_reset end");
+    return err;
+}
+
+/**
+********************************************************************************
+* @brief  Gets an access unit (AU) from the stream handler source.
+* @note   An AU is the smallest possible amount of data to be decoded by a decoder
+*
+* @param    context:        (IN) Context of the reader
+* @param    pStreamHandler  (IN) The stream handler of the stream to read from
+* @param    pAccessUnit     (IO) Pointer to access unit to fill with read data
+* @return   M4NO_ERROR           there is no error
+* @return   M4ERR_PARAMETER      at least one parameter is not properly set
+* @return   M4ERR_ALLOC          memory allocation failed
+* @return   M4WAR_NO_MORE_AU     there are no more access units in the stream
+********************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_getNextAu(M4OSA_Context context,
+        M4_StreamHandler *pStreamHandler, M4_AccessUnit *pAccessUnit) {
+    VideoEditor3gpReader_Context* pC=(VideoEditor3gpReader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4SYS_AccessUnit* pAu;
+    int64_t tempTime64 = 0;
+    MediaBuffer *mMediaBuffer = NULL;
+    MediaSource::ReadOptions options;
+    M4OSA_Bool flag = M4OSA_FALSE;
+    status_t error;
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_getNextAu: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_getNextAu: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
+        "VideoEditor3gpReader_getNextAu: invalid pointer to M4_AccessUnit");
+
+    LOGV("VideoEditor3gpReader_getNextAu begin");
+
+    if (pStreamHandler == (M4_StreamHandler*)pC->mAudioStreamHandler) {
+        LOGV("VideoEditor3gpReader_getNextAu audio stream");
+        pAu = &pC->mAudioAu;
+        if (pC->mAudioSeeking == M4OSA_TRUE) {
+            LOGV("VideoEditor3gpReader_getNextAu audio seek time: %ld",
+                pC->mAudioSeekTime);
+            options.setSeekTo(pC->mAudioSeekTime);
+            pC->mAudioSource->read(&mMediaBuffer, &options);
+
+            mMediaBuffer->meta_data()->findInt64(kKeyTime,
+                (int64_t*)&tempTime64);
+            options.clearSeekTo();
+            pC->mAudioSeeking = M4OSA_FALSE;
+            flag = M4OSA_TRUE;
+        } else {
+            LOGV("VideoEditor3gpReader_getNextAu audio no seek:");
+            pC->mAudioSource->read(&mMediaBuffer, &options);
+            if (mMediaBuffer != NULL) {
+                mMediaBuffer->meta_data()->findInt64(kKeyTime,
+                    (int64_t*)&tempTime64);
+            }
+        }
+    } else if (pStreamHandler == (M4_StreamHandler*)pC->mVideoStreamHandler) {
+        LOGV("VideoEditor3gpReader_getNextAu video steram ");
+        pAu = &pC->mVideoAu;
+        if(pC->mVideoSeeking == M4OSA_TRUE) {
+            flag = M4OSA_TRUE;
+            LOGV("VideoEditor3gpReader_getNextAu seek: %ld",pC->mVideoSeekTime);
+            options.setSeekTo(pC->mVideoSeekTime,
+                MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
+            do {
+                if (mMediaBuffer != NULL) {
+                    LOGV("VideoEditor3gpReader_getNextAu free the MediaBuffer");
+                    mMediaBuffer->release();
+                }
+                error = pC->mVideoSource->read(&mMediaBuffer, &options);
+                LOGV("VE3gpReader_getNextAu MediaBuffer %x , error %d",
+                    mMediaBuffer, error);
+                if (mMediaBuffer != NULL) {
+                    mMediaBuffer->meta_data()->findInt64(kKeyTime,
+                        (int64_t*)&tempTime64);
+                } else {
+                    break;
+                }
+                options.clearSeekTo();
+            } while(tempTime64 < pC->mVideoSeekTime);
+
+            LOGV("VE3gpReader_getNextAu: video  time with seek  = %lld:",
+                tempTime64);
+            pC->mVideoSeeking = M4OSA_FALSE;
+        } else {
+            LOGV("VideoEditor3gpReader_getNextAu video no seek:");
+            pC->mVideoSource->read(&mMediaBuffer, &options);
+
+            if(mMediaBuffer != NULL) {
+                mMediaBuffer->meta_data()->findInt64(kKeyTime,
+                    (int64_t*)&tempTime64);
+                LOGV("VE3gpReader_getNextAu: video no seek time = %lld:",
+                    tempTime64);
+            } else {
+                LOGV("VE3gpReader_getNextAu:video no seek time buffer is NULL");
+            }
+        }
+    } else {
+        LOGV("VideoEditor3gpReader_getNextAu M4ERR_PARAMETER");
+        return M4ERR_PARAMETER;
+    }
+
+    if (mMediaBuffer != NULL) {
+        if( (pAu->dataAddress == NULL) ||  (pAu->size < \
+            mMediaBuffer->range_length())) {
+            if(pAu->dataAddress != NULL) {
+                M4OSA_free((M4OSA_Int32*)pAu->dataAddress);
+                pAu->dataAddress = NULL;
+            }
+            LOGV("Buffer lenght = %d ,%d",(mMediaBuffer->range_length() +\
+                3) & ~0x3,(mMediaBuffer->range_length()));
+
+            pAu->dataAddress = (M4OSA_Int32*)M4OSA_malloc(
+                (mMediaBuffer->range_length() + 3) & ~0x3,M4READER_3GP,
+                    (M4OSA_Char*)"pAccessUnit->m_dataAddress" );
+            if(pAu->dataAddress == NULL) {
+                LOGV("VideoEditor3gpReader_getNextAu malloc failed");
+                return M4ERR_ALLOC;
+            }
+        }
+        pAu->size = mMediaBuffer->range_length();
+
+        memcpy((M4OSA_MemAddr8)pAu->dataAddress,
+            (const char *)mMediaBuffer->data() + mMediaBuffer->range_offset(),
+            mMediaBuffer->range_length());
+
+        if( (pStreamHandler == (M4_StreamHandler*)pC->mVideoStreamHandler)  &&
+            (pStreamHandler->m_streamType == M4DA_StreamTypeVideoMpeg4Avc) ) {
+            M4OSA_UInt32 size = mMediaBuffer->range_length();
+            M4OSA_UInt8 *lbuffer;
+
+            lbuffer = (M4OSA_UInt8 *) pAu->dataAddress;
+            LOGV("pAccessUnit->m_dataAddress size = %x",size);
+
+            lbuffer[0] = (size >> 24) & 0xFF;
+            lbuffer[1] = (size >> 16) & 0xFF;
+            lbuffer[2] = (size >> 8) & 0xFF;
+            lbuffer[3] = (size) & 0xFF;
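+            /* Illustrative example: for an AU of size 0x0000012F the code
+             * above overwrites the first four bytes of the copied data with
+             * 00 00 01 2F, i.e. the AU size in big-endian byte order. */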
+        }
+
+        pAu->CTS = tempTime64;
+
+        pAu->CTS = pAu->CTS / 1000; // convert microseconds to milliseconds
+        LOGV("VideoEditor3gpReader_getNextAu CTS = %ld",pAu->CTS);
+
+        pAu->DTS  = pAu->CTS;
+        pAu->attribute = M4SYS_kFragAttrOk;
+        mMediaBuffer->release();
+
+        pAccessUnit->m_dataAddress = (M4OSA_Int8*) pAu->dataAddress;
+        pAccessUnit->m_size = pAu->size;
+        pAccessUnit->m_maxsize = pAu->size;
+        pAccessUnit->m_CTS = pAu->CTS;
+        pAccessUnit->m_DTS = pAu->DTS;
+        pAccessUnit->m_attribute = pAu->attribute;
+
+    } else {
+        LOGV("VideoEditor3gpReader_getNextAu: M4WAR_NO_MORE_AU (EOS) reached");
+        pAccessUnit->m_size = 0;
+        err = M4WAR_NO_MORE_AU;
+    }
+    options.clearSeekTo();
+
+    pAu->nbFrag = 0;
+    mMediaBuffer = NULL;
+    LOGV("VideoEditor3gpReader_getNextAu end ");
+
+    return err;
+}
+/**
+ *******************************************************************************
+ * @brief   Split the AVC DSI into its components and write them into a
+ *          single memory buffer
+ * @note
+ * @param   pStreamHandler:         (IN/OUT) The MPEG4-AVC stream
+ * @param   pDecoderConfigLocal:    (IN) The DSI buffer
+ * @param   decoderConfigSizeLocal: (IN) The DSI buffer size
+ * @return  M4NO_ERROR              there is no error
+ * @return  ERR_FILE_SYNTAX_ERROR   pDecoderConfigLocal is NULL
+ *******************************************************************************
+*/
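+/* Layout of the avcC box parsed below (AVCDecoderConfigurationRecord,
+ * ISO/IEC 14496-15); given here as a reading aid, the parser code below is
+ * the reference:
+ *
+ *   8 bits  configurationVersion
+ *   8 bits  AVCProfileIndication
+ *   8 bits  profile_compatibility
+ *   8 bits  AVCLevelIndication
+ *   6 bits  reserved (111111b) + 2 bits lengthSizeMinusOne
+ *   3 bits  reserved (111b)    + 5 bits numOfSequenceParameterSets
+ *   for each SPS:  16-bit length followed by the SPS NAL unit
+ *   8 bits  numOfPictureParameterSets
+ *   for each PPS:  16-bit length followed by the PPS NAL unit
+ */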
+static M4OSA_ERR VideoEditor3gpReader_AnalyseAvcDsi(
+        M4_StreamHandler *pStreamHandler, M4OSA_Int32* pDecoderConfigLocal,
+        M4OSA_Int32 decoderConfigSizeLocal) {
+    struct _avcSpecificInfo *pAvcSpecInfo = M4OSA_NULL;
+    M4OSA_UInt32 uiSpecInfoSize;
+    M4OSA_Context pBitParserContext = M4OSA_NULL;
+    M4OSA_MemAddr8 pPos;
+
+    /**
+     * First pass: compute the total allocation size (a single malloc is
+     * done instead of several small ones) */
+    {
+        M4OSA_Int32 val;
+        M4OSA_UInt32 i,j;
+        M4OSA_UInt8 nalUnitLength;
+        M4OSA_UInt8  numOfSequenceParameterSets;
+        M4OSA_UInt32 uiTotalSizeOfSPS = 0;
+        M4OSA_UInt8  numOfPictureParameterSets;
+        M4OSA_UInt32 uiTotalSizeOfPPS = 0;
+        M4OSA_UInt32 uiSize;
+        struct _avcSpecificInfo avcSpIf;
+
+        avcSpIf.m_nalUnitLength = 0;
+
+        if (M4OSA_NULL == pDecoderConfigLocal) {
+            return M4ERR_READER3GP_DECODER_CONFIG_ERROR;
+        }
+
+        VideoEditor3gpReader_MPEG4BitStreamParserInit(&pBitParserContext,
+            pDecoderConfigLocal, decoderConfigSizeLocal);
+
+        if (M4OSA_NULL == pBitParserContext) {
+            return M4ERR_ALLOC;
+        }
+
+        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+                                       /* 8 bits -- configuration version */
+        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+                                       /* 8 bits -- avc profile indication*/
+        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+                                       /* 8 bits -- profile compatibility */
+        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+                                       /* 8 bits -- avc level indication*/
+        val=VideoEditor3gpReader_BitStreamParserShowBits(pBitParserContext, 8);
+                       /* 6 bits reserved (111111b), 2 bits lengthSizeMinusOne */
+        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+                                       /* m_nalUnitLength */
+
+        nalUnitLength = (M4OSA_UInt8)((val & 0x03) + 1); /* low 2 bits + 1 */
+        if (nalUnitLength > 4) {
+            pStreamHandler->m_decoderSpecificInfoSize = 0;
+            pStreamHandler->m_pDecoderSpecificInfo = M4OSA_NULL;
+            VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
+        } else {
+            /**
+             * SPS table */
+            val=VideoEditor3gpReader_BitStreamParserShowBits(pBitParserContext,
+            8); /* 3 bits reserved (111b), 5 bits numOfSequenceParameterSets */
+            numOfSequenceParameterSets = val & 0x1F;
+            /* mask the low 5 bits; the top 3 bits are reserved */
+            VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+            for (i=0; i < numOfSequenceParameterSets; i++) {
+                /**
+                 * Get the size of this element */
+                uiSize =
+                    (M4OSA_UInt32)VideoEditor3gpReader_BitStreamParserShowBits(
+                    pBitParserContext, 16);
+                uiTotalSizeOfSPS += uiSize;
+                VideoEditor3gpReader_BitStreamParserFlushBits(
+                    pBitParserContext, 16);
+                /**
+                 * Read the element (don't keep it, we only want the size now) */
+                for (j=0; j<uiSize; j++) {
+                    VideoEditor3gpReader_BitStreamParserFlushBits(
+                        pBitParserContext, 8);
+                }
+            }
+
+            /**
+             * PPS table */
+            numOfPictureParameterSets=(M4OSA_UInt8)\
+                VideoEditor3gpReader_BitStreamParserShowBits(pBitParserContext,
+                    8);
+            VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+            for (i=0; i < numOfPictureParameterSets; i++) {
+                /**
+                 * Get the size of this element */
+                uiSize = (M4OSA_UInt32)
+                    VideoEditor3gpReader_BitStreamParserShowBits(
+                    pBitParserContext, 16);
+                uiTotalSizeOfPPS += uiSize;
+                VideoEditor3gpReader_BitStreamParserFlushBits(
+                    pBitParserContext, 16);
+                /**
+                 * Read the element (don't keep it, we only want the size now) */
+                for (j=0; j<uiSize; j++) {
+                    VideoEditor3gpReader_BitStreamParserFlushBits(
+                        pBitParserContext, 8);
+                }
+            }
+
+            /**
+             * Compute the size of the full buffer */
+            uiSpecInfoSize = sizeof(struct _avcSpecificInfo) +
+                     numOfSequenceParameterSets * sizeof(struct _parameterSet)
+                     + /**< size of the table of SPS elements */
+                     numOfPictureParameterSets  * sizeof(struct _parameterSet)
+                     + /**< size of the table of PPS elements */
+                     uiTotalSizeOfSPS +
+                     uiTotalSizeOfPPS;
+            /**
+             * Allocate the buffer */
+            pAvcSpecInfo =(struct _avcSpecificInfo*)M4OSA_malloc(uiSpecInfoSize,
+                M4READER_3GP, (M4OSA_Char*)"MPEG-4 AVC DecoderSpecific");
+            if (M4OSA_NULL == pAvcSpecInfo) {
+                VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
+                return M4ERR_ALLOC;
+            }
+
+            /**
+             * Set the pointers to the correct part of the buffer */
+            pAvcSpecInfo->m_nalUnitLength = nalUnitLength;
+            pAvcSpecInfo->m_numOfSequenceParameterSets =
+                numOfSequenceParameterSets;
+            pAvcSpecInfo->m_numOfPictureParameterSets  =
+                numOfPictureParameterSets;
+
+            /* We place the SPS param sets table after m_pPictureParameterSet */
+            pAvcSpecInfo->m_pSequenceParameterSet= (struct _parameterSet*)(
+                (M4OSA_MemAddr8)(&pAvcSpecInfo->m_pPictureParameterSet) +
+                sizeof(pAvcSpecInfo->m_pPictureParameterSet));
+            /*We place the PPS param sets table after the SPS param sets table*/
+            pAvcSpecInfo->m_pPictureParameterSet = (struct _parameterSet*)(
+                (M4OSA_MemAddr8)(pAvcSpecInfo->m_pSequenceParameterSet) +
+                (numOfSequenceParameterSets * sizeof(struct _parameterSet)));
+            /**< The data will be placed after the PPS param sets table */
+            pPos = (M4OSA_MemAddr8)pAvcSpecInfo->m_pPictureParameterSet +
+                (numOfPictureParameterSets * sizeof(struct _parameterSet));
+
+            /**
+             * reset the bit parser */
+            VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
+        }
+    }
+
+    /**
+     * Second parsing to copy the data */
+    if (M4OSA_NULL != pAvcSpecInfo) {
+        M4OSA_Int32 i,j;
+
+        VideoEditor3gpReader_MPEG4BitStreamParserInit(&pBitParserContext,
+            pDecoderConfigLocal, decoderConfigSizeLocal);
+
+        if (M4OSA_NULL == pBitParserContext) {
+            M4OSA_free((M4OSA_MemAddr32)pAvcSpecInfo);
+            return M4ERR_ALLOC;
+        }
+
+        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+            /* 8 bits -- configuration version */
+        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+            /* 8 bits -- avc profile indication*/
+        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+            /* 8 bits -- profile compatibility */
+        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+            /* 8 bits -- avc level indication*/
+        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+            /* m_nalUnitLength */
+        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+        /* 3 bits -- reserved 111b -- 5 bits number of sequence parameter set*/
+
+        for (i=0; i < pAvcSpecInfo->m_numOfSequenceParameterSets; i++) {
+            pAvcSpecInfo->m_pSequenceParameterSet[i].m_length =
+                (M4OSA_UInt16)VideoEditor3gpReader_BitStreamParserShowBits(
+                pBitParserContext, 16);
+            VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext,16);
+
+            pAvcSpecInfo->m_pSequenceParameterSet[i].m_pParameterSetUnit =
+                (M4OSA_UInt8*)pPos;  /**< current position in the buffer */
+            pPos += pAvcSpecInfo->m_pSequenceParameterSet[i].m_length;
+                /**< increment the position in the buffer */
+            for (j=0; j<pAvcSpecInfo->m_pSequenceParameterSet[i].m_length;j++){
+                pAvcSpecInfo->m_pSequenceParameterSet[i].m_pParameterSetUnit[j]=
+                    (M4OSA_UInt8)VideoEditor3gpReader_BitStreamParserShowBits(
+                    pBitParserContext, 8);
+                VideoEditor3gpReader_BitStreamParserFlushBits(
+                    pBitParserContext, 8);
+            }
+        }
+
+        VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext, 8);
+            /* number of picture parameter sets */
+
+        for (i=0; i < pAvcSpecInfo->m_numOfPictureParameterSets; i++) {
+            pAvcSpecInfo->m_pPictureParameterSet[i].m_length =
+                (M4OSA_UInt16)VideoEditor3gpReader_BitStreamParserShowBits(
+                pBitParserContext, 16);
+            VideoEditor3gpReader_BitStreamParserFlushBits(pBitParserContext,16);
+
+            pAvcSpecInfo->m_pPictureParameterSet[i].m_pParameterSetUnit =
+                (M4OSA_UInt8*)pPos;   /**< current position in the buffer */
+            pPos += pAvcSpecInfo->m_pPictureParameterSet[i].m_length;
+                /**< increment the position in the buffer */
+            for (j=0; j<pAvcSpecInfo->m_pPictureParameterSet[i].m_length; j++) {
+                pAvcSpecInfo->m_pPictureParameterSet[i].m_pParameterSetUnit[j] =
+                    (M4OSA_UInt8)VideoEditor3gpReader_BitStreamParserShowBits(
+                    pBitParserContext, 8);
+                VideoEditor3gpReader_BitStreamParserFlushBits(
+                    pBitParserContext, 8);
+            }
+        }
+        VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
+        pStreamHandler->m_decoderSpecificInfoSize = uiSpecInfoSize;
+        pStreamHandler->m_pDecoderSpecificInfo = (M4OSA_UInt8*)pAvcSpecInfo;
+    }
+    pStreamHandler->m_H264decoderSpecificInfoSize  =  decoderConfigSizeLocal;
+    pStreamHandler->m_pH264DecoderSpecificInfo  = (M4OSA_UInt8*)M4OSA_malloc(
+        decoderConfigSizeLocal, M4READER_3GP,
+        (M4OSA_Char*)"MPEG-4 AVC DecoderSpecific");
+    if (M4OSA_NULL == pStreamHandler->m_pH264DecoderSpecificInfo) {
+        goto cleanup;
+    }
+
+    M4OSA_memcpy((M4OSA_MemAddr8 ) pStreamHandler->m_pH264DecoderSpecificInfo,
+        (M4OSA_MemAddr8 )pDecoderConfigLocal,
+        pStreamHandler->m_H264decoderSpecificInfoSize);
+    return M4NO_ERROR;
+cleanup:
+    VideoEditor3gpReader_BitStreamParserCleanUp(pBitParserContext);
+    return M4ERR_READER3GP_DECODER_CONFIG_ERROR;
+}
+/**
+********************************************************************************
+* @brief    Get the next stream found in the 3gp file
+* @note
+* @param    context:     (IN)    Context of the reader
+* @param    pMediaFamily: (OUT)  pointer to a user allocated
+*                                M4READER_MediaFamily that will be filled
+*                                with the media family of the found stream
+* @param    pStreamHandler:(OUT) pointer to StreamHandler that will be allocated
+*                                and filled with the found stream description
+* @return   M4NO_ERROR              there is no error
+* @return   M4ERR_BAD_CONTEXT       provided context is not a valid one
+* @return   M4ERR_PARAMETER         at least one parameter is not properly set
+* @return   M4WAR_NO_MORE_STREAM    no more available stream in the media
+********************************************************************************
+*/
+M4OSA_ERR VideoEditor3gpReader_getNextStreamHandler(M4OSA_Context context,
+        M4READER_MediaFamily *pMediaFamily,
+        M4_StreamHandler **pStreamHandler) {
+    VideoEditor3gpReader_Context* pC=(VideoEditor3gpReader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4SYS_StreamID streamIdArray[2];
+    M4SYS_StreamDescription streamDesc;
+    M4_AudioStreamHandler* pAudioStreamHandler;
+    M4_VideoStreamHandler* pVideoStreamHandler;
+    M4OSA_Int8 *DecoderSpecificInfo = M4OSA_NULL;
+    M4OSA_Int32 decoderSpecificInfoSize =0, maxAUSize = 0;
+
+    M4_StreamType streamType = M4DA_StreamTypeUnknown;
+    M4OSA_UInt8 temp, i, trackCount;
+    M4OSA_Bool haveAudio = M4OSA_FALSE;
+    M4OSA_Bool haveVideo = M4OSA_FALSE;
+    sp<MetaData> meta  = NULL;
+    int64_t Duration = 0;
+    M4OSA_UInt8* DecoderSpecific = M4OSA_NULL ;
+    uint32_t type;
+    const void *data;
+    size_t size;
+    const void *codec_specific_data;
+    size_t codec_specific_data_size;
+    M4OSA_Int32  ptempTime;
+
+    LOGV("VideoEditor3gpReader_getNextStreamHandler begin");
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_getNextStreamHandler: invalid context");
+    M4OSA_DEBUG_IF1((pMediaFamily   == 0), M4ERR_PARAMETER,
+        "getNextStreamHandler: invalid pointer to MediaFamily");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "getNextStreamHandler: invalid pointer to StreamHandler");
+
+    trackCount = pC->mExtractor->countTracks();
+    temp = pC->mCurrTrack;
+
+    if(temp >= trackCount) {
+        LOGV("VideoEditor3gpReader_getNextStreamHandler error = %d",
+            M4WAR_NO_MORE_STREAM);
+        return (M4WAR_NO_MORE_STREAM);
+    } else {
+        const char *mime;
+        meta = pC->mExtractor->getTrackMetaData(temp);
+        CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+        if (!haveVideo && !strncasecmp(mime, "video/", 6)) {
+            pC->mVideoSource = pC->mExtractor->getTrack(temp);
+            pC->mVideoSource->start();
+
+            *pMediaFamily = M4READER_kMediaFamilyVideo;
+            haveVideo = true;
+            LOGV("VideoEditor3gpReader_getNextStreamHandler getTrack called");
+            if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
+                streamType = M4DA_StreamTypeVideoMpeg4Avc;
+            } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_H263)) {
+                streamType = M4DA_StreamTypeVideoH263;
+            } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)) {
+                streamType = M4DA_StreamTypeVideoMpeg4;
+            } else {
+                LOGV("VideoEditor3gpReaderGetNextStreamHandler streamTypeNONE");
+            }
+            LOGV("VideoEditor3gpReader_getNextStreamHandler: stream type: %d ",
+                streamType);
+
+            if(streamType != M4DA_StreamTypeUnknown) {
+                pC->mStreamType = streamType;
+                pC->mStreamId = pC->mCurrTrack;
+
+                pVideoStreamHandler = (M4_VideoStreamHandler*)M4OSA_malloc
+                    (sizeof(M4_VideoStreamHandler), M4READER_3GP,
+                    (M4OSA_Char*)"M4_VideoStreamHandler");
+                if (M4OSA_NULL == pVideoStreamHandler) {
+                    return M4ERR_ALLOC;
+                }
+                pVideoStreamHandler->m_structSize=sizeof(M4_VideoStreamHandler);
+
+                meta->findInt32(kKeyWidth,
+                    (int32_t*)&(pVideoStreamHandler->m_videoWidth));
+                meta->findInt32(kKeyHeight,
+                    (int32_t*)&(pVideoStreamHandler->m_videoHeight));
+
+                (*pStreamHandler)  = (M4_StreamHandler*)(pVideoStreamHandler);
+                meta->findInt64(kKeyDuration,
+                    (int64_t*)&(Duration));
+                ((*pStreamHandler)->m_duration) =
+                    (int32_t)((Duration)/1000); // conversion to mS
+                pC->mMaxDuration = ((*pStreamHandler)->m_duration);
+                LOGV("VideoEditor3gpReader_getNextStreamHandler m_duration %d",
+                    (*pStreamHandler)->m_duration);
+
+                pC->mFileSize  = 0;
+
+                meta->findInt32(kKeyMaxInputSize, (int32_t*)&(maxAUSize));
+                if(maxAUSize == 0) {
+                    maxAUSize = 70000;
+                }
+                (*pStreamHandler)->m_maxAUSize = maxAUSize;
+                LOGV("<<<<<<<<<<   video: mMaxAUSize from MP4 extractor: %d",
+                    (*pStreamHandler)->m_maxAUSize);
+
+                //check this
+                pVideoStreamHandler->m_averageFrameRate = 15;
+                if( (M4DA_StreamTypeVideoH263       == streamType) ||
+                    (M4DA_StreamTypeVideoMpeg4Avc   == streamType)){
+                    ((M4_StreamHandler*)pVideoStreamHandler)->m_averageBitRate =
+                        384000;
+                }
+                pC->mVideoStreamHandler =
+                    (M4_StreamHandler*)(pVideoStreamHandler);
+
+                /* Get the DSI info */
+                if(M4DA_StreamTypeVideoH263 == streamType) {
+                    if (meta->findData(kKeyESDS, &type, &data, &size)) {
+                        ESDS esds((const char *)data, size);
+                        CHECK_EQ(esds.InitCheck(), OK);
+
+                        esds.getCodecSpecificInfo(
+                            &codec_specific_data, &codec_specific_data_size);
+                        (*pStreamHandler)->m_decoderSpecificInfoSize =
+                            codec_specific_data_size;
+                        if ((*pStreamHandler)->m_decoderSpecificInfoSize != 0) {
+                            DecoderSpecific = (M4OSA_UInt8*)M4OSA_malloc(
+                                (*pStreamHandler)->m_decoderSpecificInfoSize,
+                                M4READER_3GP,(M4OSA_Char*)"H263 DSI");
+                            if (M4OSA_NULL == DecoderSpecific) {
+                                return M4ERR_ALLOC;
+                            }
+                            M4OSA_memcpy((M4OSA_MemAddr8)DecoderSpecific,
+                                (M4OSA_MemAddr8)codec_specific_data,
+                                codec_specific_data_size);
+                            (*pStreamHandler)->m_pDecoderSpecificInfo =
+                                DecoderSpecific;
+                        }
+                        else {
+                            (*pStreamHandler)->m_pDecoderSpecificInfo =
+                                M4OSA_NULL;
+                        }
+                    } else {
+                        LOGV("VE_getNextStreamHandler: H263 dsi not found");
+                        (*pStreamHandler)->m_pDecoderSpecificInfo = M4OSA_NULL;
+                        (*pStreamHandler)->m_decoderSpecificInfoSize = 0;
+                        (*pStreamHandler)->m_H264decoderSpecificInfoSize = 0;
+                        (*pStreamHandler)->m_pH264DecoderSpecificInfo =
+                            M4OSA_NULL;
+                        (*pStreamHandler)->m_pESDSInfo = M4OSA_NULL;
+                        (*pStreamHandler)->m_ESDSInfoSize = 0;
+                    }
+                }
+                else if(M4DA_StreamTypeVideoMpeg4Avc == streamType) {
+                    if(meta->findData(kKeyAVCC, &type, &data, &size)) {
+                        decoderSpecificInfoSize = size;
+                        if (decoderSpecificInfoSize != 0) {
+                            DecoderSpecificInfo = (M4OSA_Int8*)M4OSA_malloc(
+                                decoderSpecificInfoSize, M4READER_3GP,
+                                (M4OSA_Char*)"H264 DecoderSpecific" );
+                            if (M4OSA_NULL == DecoderSpecificInfo) {
+                                LOGV("VideoEditor3gp_getNextStream: DSI alloc failed");
+                                return M4ERR_ALLOC;
+                            }
+                            M4OSA_memcpy((M4OSA_MemAddr8)DecoderSpecificInfo,
+                                (M4OSA_MemAddr8)data, decoderSpecificInfoSize);
+                        } else {
+                            LOGV("DSI Size %d", decoderSpecificInfoSize);
+                            DecoderSpecificInfo = M4OSA_NULL;
+                        }
+                    }
+                    (*pStreamHandler)->m_pESDSInfo = M4OSA_NULL;
+                    (*pStreamHandler)->m_ESDSInfoSize = 0;
+
+                    err = VideoEditor3gpReader_AnalyseAvcDsi(*pStreamHandler,
+                    (M4OSA_Int32*)DecoderSpecificInfo, decoderSpecificInfoSize);
+
+                    if (M4NO_ERROR != err) {
+                        return err;
+                    }
+                    LOGV("decsize %d, h264decsize %d",
+                        (*pStreamHandler)->m_decoderSpecificInfoSize,
+                        (*pStreamHandler)->m_H264decoderSpecificInfoSize);
+
+                    if(M4OSA_NULL != DecoderSpecificInfo) {
+                        M4OSA_free((M4OSA_MemAddr32)DecoderSpecificInfo);
+                        DecoderSpecificInfo = M4OSA_NULL;
+                    }
+                } else if( (M4DA_StreamTypeVideoMpeg4 == streamType) ) {
+                    if (meta->findData(kKeyESDS, &type, &data, &size)) {
+                        ESDS esds((const char *)data, size);
+                        CHECK_EQ(esds.InitCheck(), OK);
+
+                        (*pStreamHandler)->m_ESDSInfoSize = size;
+                        (*pStreamHandler)->m_pESDSInfo = (M4OSA_UInt8*)\
+                        M4OSA_malloc((*pStreamHandler)->m_ESDSInfoSize,
+                        M4READER_3GP, (M4OSA_Char*)"MPEG4 ESDS info" );
+                        if (M4OSA_NULL == (*pStreamHandler)->m_pESDSInfo) {
+                            return M4ERR_ALLOC;
+                        }
+                        M4OSA_memcpy((M4OSA_MemAddr8)(*pStreamHandler)->\
+                            m_pESDSInfo, (M4OSA_MemAddr8)data, size);
+
+                        esds.getCodecSpecificInfo(&codec_specific_data,
+                            &codec_specific_data_size);
+                        LOGV("VE MP4 dsisize: %d, %x", codec_specific_data_size,
+                            codec_specific_data);
+
+                        (*pStreamHandler)->m_decoderSpecificInfoSize =
+                            codec_specific_data_size;
+                        if ((*pStreamHandler)->m_decoderSpecificInfoSize != 0) {
+                            DecoderSpecific = (M4OSA_UInt8*)M4OSA_malloc(
+                                (*pStreamHandler)->m_decoderSpecificInfoSize,
+                                M4READER_3GP, (M4OSA_Char*)" DecoderSpecific" );
+                            if (M4OSA_NULL == DecoderSpecific) {
+                                return M4ERR_ALLOC;
+                            }
+                            M4OSA_memcpy((M4OSA_MemAddr8)DecoderSpecific,
+                                (M4OSA_MemAddr8)codec_specific_data,
+                                codec_specific_data_size);
+                            (*pStreamHandler)->m_pDecoderSpecificInfo =
+                                DecoderSpecific;
+                        }
+                        else {
+                            (*pStreamHandler)->m_pDecoderSpecificInfo =
+                                M4OSA_NULL;
+                        }
+                        (*pStreamHandler)->m_pH264DecoderSpecificInfo =
+                            M4OSA_NULL;
+                        (*pStreamHandler)->m_H264decoderSpecificInfoSize = 0;
+                    }
+                } else {
+                    LOGV("VideoEditor3gpReader_getNextStream: bad video type");
+                    return M4ERR_READER_UNKNOWN_STREAM_TYPE;
+                }
+            }
+            else {
+                LOGV("VideoEditor3gpReader_getNextStream NO video stream");
+                return M4ERR_READER_UNKNOWN_STREAM_TYPE;
+            }
+
+        } else if (!haveAudio && !strncasecmp(mime, "audio/", 6)) {
+            LOGV("VideoEditor3gpReader_getNextStream audio getTrack called");
+            pC->mAudioSource = pC->mExtractor->getTrack(pC->mCurrTrack);
+            pC->mAudioSource->start();
+            *pMediaFamily = M4READER_kMediaFamilyAudio;
+
+            if(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) {
+                streamType = M4DA_StreamTypeAudioAmrNarrowBand;
+            } else if(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
+                streamType = M4DA_StreamTypeAudioAmrWideBand;
+            }
+            else if(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
+                streamType = M4DA_StreamTypeAudioAac;
+            } else {
+                LOGV("VideoEditor3gpReader_getNextStream streamtype Unknown ");
+            }
+            if(streamType != M4DA_StreamTypeUnknown) {
+                pC->mStreamType = streamType;
+                pC->mStreamId = pC->mCurrTrack;
+
+                LOGV("VE streamtype %d ,id %d",  streamType, pC->mCurrTrack);
+
+                pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_malloc
+                    (sizeof(M4_AudioStreamHandler), M4READER_3GP,
+                    (M4OSA_Char*)"M4_AudioStreamHandler");
+                if (M4OSA_NULL == pAudioStreamHandler) {
+                    return M4ERR_ALLOC;
+                }
+                pAudioStreamHandler->m_structSize=sizeof(M4_AudioStreamHandler);
+                pAudioStreamHandler->m_byteSampleSize   = 0;
+                pAudioStreamHandler->m_nbChannels       = 0;
+                pAudioStreamHandler->m_samplingFrequency= 0;
+                pAudioStreamHandler->m_byteFrameLength  = 0;
+
+                (*pStreamHandler) = (M4_StreamHandler*)(pAudioStreamHandler);
+                pC->mAudioStreamHandler =
+                    (M4_StreamHandler*)(pAudioStreamHandler);
+                (*pStreamHandler)->m_averageBitRate = 0;
+                haveAudio = true;
+                pC->mAudioStreamHandler=(M4_StreamHandler*)pAudioStreamHandler;
+                pC->mAudioStreamHandler->m_pESDSInfo = M4OSA_NULL;
+                pC->mAudioStreamHandler->m_ESDSInfoSize = 0;
+
+                meta->findInt32(kKeyMaxInputSize, (int32_t*)&(maxAUSize));
+                if(maxAUSize == 0) {
+                    maxAUSize = 70000;
+                }
+                (*pStreamHandler)->m_maxAUSize = maxAUSize;
+                LOGV("VE Audio mMaxAUSize from MP4 extractor: %d", maxAUSize);
+            }
+            if((M4DA_StreamTypeAudioAmrNarrowBand == streamType) ||
+                (M4DA_StreamTypeAudioAmrWideBand == streamType)) {
+                M4OSA_UInt32 freqIndex = 0; /**< AMR NB */
+                M4OSA_UInt32 modeSet;
+                M4OSA_UInt32 i;
+                M4OSA_Context pBitParserContext = M4OSA_NULL;
+
+                if(M4DA_StreamTypeAudioAmrWideBand == streamType) {
+                    freqIndex = 1; /**< AMR WB */
+                }
+
+                if (meta->findData(kKeyESDS, &type, &data, &size)) {
+                    ESDS esds((const char *)data, size);
+                    CHECK_EQ(esds.InitCheck(), OK);
+
+                    esds.getCodecSpecificInfo(&codec_specific_data,
+                        &codec_specific_data_size);
+                    (*pStreamHandler)->m_decoderSpecificInfoSize =
+                        codec_specific_data_size;
+
+                    if ((*pStreamHandler)->m_decoderSpecificInfoSize != 0) {
+                        DecoderSpecific = (M4OSA_UInt8*)M4OSA_malloc(
+                            (*pStreamHandler)->m_decoderSpecificInfoSize,
+                            M4READER_3GP, (M4OSA_Char*)"AMR DecoderSpecific" );
+                        if (M4OSA_NULL == DecoderSpecific) {
+                            return M4ERR_ALLOC;
+                        }
+                        M4OSA_memcpy((M4OSA_MemAddr8)DecoderSpecific,
+                            (M4OSA_MemAddr8)codec_specific_data,
+                            codec_specific_data_size);
+                        (*pStreamHandler)->m_pDecoderSpecificInfo =
+                            DecoderSpecific;
+                    } else {
+                        (*pStreamHandler)->m_pDecoderSpecificInfo = M4OSA_NULL;
+                    }
+                } else {
+                    M4OSA_UChar AmrDsi[] =
+                        {'P','H','L','P',0x00, 0x00, 0x80, 0x00, 0x01,};
+                    (*pStreamHandler)->m_decoderSpecificInfoSize = 9;
+                    DecoderSpecific = (M4OSA_UInt8*)M4OSA_malloc(
+                        (*pStreamHandler)->m_decoderSpecificInfoSize,
+                        M4READER_3GP, (M4OSA_Char*)"AMR DecoderSpecific" );
+                    if (M4OSA_NULL == DecoderSpecific) {
+                        return M4ERR_ALLOC;
+                    }
+                    if(freqIndex ==0) {
+                        AmrDsi[8] = 0x01;
+                    } else {
+                        AmrDsi[8] = 0x02;
+                    }
+                    for(i = 0; i< 9; i++) {
+                        DecoderSpecific[i] = AmrDsi[i];
+                    }
+                    (*pStreamHandler)->m_pDecoderSpecificInfo = DecoderSpecific;
+                }
+                (*pStreamHandler)->m_averageBitRate =
+                    VideoEditor3gpReader_AmrBitRate[freqIndex][7];
+            } else if((M4DA_StreamTypeAudioAac == streamType)) {
+                if (meta->findData(kKeyESDS, &type, &data, &size)) {
+                    ESDS esds((const char *)data, size);
+                    CHECK_EQ(esds.InitCheck(), OK);
+
+                    (*pStreamHandler)->m_ESDSInfoSize = size;
+                    (*pStreamHandler)->m_pESDSInfo = (M4OSA_UInt8*)M4OSA_malloc(
+                        (*pStreamHandler)->m_ESDSInfoSize, M4READER_3GP,
+                        (M4OSA_Char*)"AAC ESDS info" );
+                    if (M4OSA_NULL == (*pStreamHandler)->m_pESDSInfo) {
+                        return M4ERR_ALLOC;
+                    }
+                    M4OSA_memcpy((M4OSA_MemAddr8)(*pStreamHandler)->m_pESDSInfo,
+                    (M4OSA_MemAddr8)data, size);
+                    esds.getCodecSpecificInfo(&codec_specific_data,
+                        &codec_specific_data_size);
+
+                    LOGV("VEdsi %d,%x",codec_specific_data_size,
+                        codec_specific_data);
+
+                    (*pStreamHandler)->m_decoderSpecificInfoSize =
+                        codec_specific_data_size;
+                    if ((*pStreamHandler)->m_decoderSpecificInfoSize != 0) {
+                        DecoderSpecific = (M4OSA_UInt8*)M4OSA_malloc(
+                            (*pStreamHandler)->m_decoderSpecificInfoSize,
+                            M4READER_3GP, (M4OSA_Char*)"AAC DecoderSpecific" );
+                        if (M4OSA_NULL == DecoderSpecific) {
+                            return M4ERR_ALLOC;
+                        }
+                        M4OSA_memcpy((M4OSA_MemAddr8)DecoderSpecific,
+                            (M4OSA_MemAddr8)codec_specific_data,
+                            codec_specific_data_size);
+                        (*pStreamHandler)->m_pDecoderSpecificInfo =
+                            DecoderSpecific;
+                    } else {
+                        (*pStreamHandler)->m_pDecoderSpecificInfo = M4OSA_NULL;
+                    }
+                }
+            } else {
+                LOGV("VideoEditor3gpReader_getNextStream mStreamType: none ");
+                return M4ERR_READER_UNKNOWN_STREAM_TYPE;
+            }
+        } else {
+            LOGV("VE: no audio/video stream, mCurrTrack = %d", pC->mCurrTrack);
+            pC->mCurrTrack++; //Increment current track to get the next track
+            return M4ERR_READER_UNKNOWN_STREAM_TYPE;
+        }
+        LOGV("VE StreamType: %d, streamHandler %x",streamType, *pStreamHandler );
+        (*pStreamHandler)->m_streamType = streamType;
+        (*pStreamHandler)->m_streamId   = pC->mStreamId;
+        (*pStreamHandler)->m_pUserData  = M4OSA_NULL;
+        (*pStreamHandler)->m_structSize = sizeof(M4_StreamHandler);
+        (*pStreamHandler)->m_bStreamIsOK = M4OSA_TRUE;
+
+        meta->findInt64(kKeyDuration,
+            (int64_t*)&(Duration));
+
+        (*pStreamHandler)->m_duration = (int32_t)(Duration / 1000);
+
+        pC->mMaxDuration = ((*pStreamHandler)->m_duration);
+        LOGV("VE stream duration: %d ", (*pStreamHandler)->m_duration);
+
+        /* In the AAC case, store the first AU in pAudioStreamHandler->m_pUserData
+         * since the decoder has to know whether the stream contains SBR data
+         * (implicit signaling) */
+        if(M4DA_StreamTypeAudioAac == (*pStreamHandler)->m_streamType) {
+            M4READER_AudioSbrUserdata*  pAudioSbrUserdata;
+
+            pAudioSbrUserdata = (M4READER_AudioSbrUserdata*)M4OSA_malloc(
+                sizeof(M4READER_AudioSbrUserdata),M4READER_3GP,
+                (M4OSA_Char*)"M4READER_AudioSbrUserdata");
+            if (M4OSA_NULL == pAudioSbrUserdata) {
+                err = M4ERR_ALLOC;
+                goto Error;
+            }
+            (*pStreamHandler)->m_pUserData = pAudioSbrUserdata;
+            pAudioSbrUserdata->m_bIsSbrEnabled = M4OSA_FALSE;
+
+            pAudioSbrUserdata->m_pFirstAU = (M4_AccessUnit*)M4OSA_malloc(
+                sizeof(M4_AccessUnit),M4READER_3GP, (M4OSA_Char*)"1st AAC AU");
+            if (M4OSA_NULL == pAudioSbrUserdata->m_pFirstAU) {
+                pAudioSbrUserdata->m_pAacDecoderUserConfig = M4OSA_NULL;
+                err = M4ERR_ALLOC;
+                goto Error;
+            }
+            pAudioSbrUserdata->m_pAacDecoderUserConfig = (M4_AacDecoderConfig*)\
+                M4OSA_malloc(sizeof(M4_AacDecoderConfig),M4READER_3GP,
+                (M4OSA_Char*)"m_pAacDecoderUserConfig");
+            if (M4OSA_NULL == pAudioSbrUserdata->m_pAacDecoderUserConfig) {
+                err = M4ERR_ALLOC;
+                goto Error;
+            }
+        }
+        if(M4DA_StreamTypeAudioAac == (*pStreamHandler)->m_streamType) {
+            M4_AudioStreamHandler* pAudioStreamHandler =
+                (M4_AudioStreamHandler*)(*pStreamHandler);
+            M4READER_AudioSbrUserdata* pUserData = (M4READER_AudioSbrUserdata*)\
+                (pAudioStreamHandler->m_basicProperties.m_pUserData);
+
+            err = VideoEditor3gpReader_fillAuStruct(pC, (*pStreamHandler),
+                (M4_AccessUnit*)pUserData->m_pFirstAU);
+            if (M4NO_ERROR != err) {
+                goto Error;
+            }
+            err = VideoEditor3gpReader_getNextAu(pC, (*pStreamHandler),
+                (M4_AccessUnit*)pUserData->m_pFirstAU);
+            if (M4NO_ERROR != err) {
+                goto Error;
+            }
+            err = VideoEditor3gpReader_reset(pC, (*pStreamHandler));
+            if (M4NO_ERROR != err) {
+                goto Error;
+            }
+        }
+    }
+    pC->mCurrTrack++; //Increment the current track to get next track
+    LOGV("pC->mCurrTrack = %d",pC->mCurrTrack);
+
+    if (!haveAudio && !haveVideo) {
+        *pMediaFamily=M4READER_kMediaFamilyUnknown;
+        return M4ERR_READER_UNKNOWN_STREAM_TYPE;
+    }
+Error:
+    LOGV("VideoEditor3gpReader_getNextStreamHandler end error = %d",err);
+    return err;
+}
+
+M4OSA_ERR VideoEditor3gpReader_getPrevRapTime(M4OSA_Context context,
+    M4_StreamHandler *pStreamHandler, M4OSA_Int32* pTime)
+{
+    VideoEditor3gpReader_Context *pC = (VideoEditor3gpReader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    MediaBuffer *mMediaBuffer = M4OSA_NULL;
+    MediaSource::ReadOptions options;
+    M4OSA_Time time64;
+    int64_t tempTime64 = 0;
+    status_t error;
+
+    LOGV("VideoEditor3gpReader_getPrevRapTime begin");
+
+    M4OSA_DEBUG_IF1((pC == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_getPrevRapTime: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_getPrevRapTime invalid pointer to StreamHandler");
+    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER,
+        "VideoEditor3gpReader_getPrevRapTime: invalid time pointer");
+    if (*pTime == (pStreamHandler->m_duration)) {
+        *pTime -= 1;
+    }
+    M4OSA_INT64_FROM_INT32(time64, *pTime);
+    time64 = time64 * 1000;
+
+    LOGV("VideoEditor3gpReader_getPrevRapTime seek time: %ld",time64);
+    options.setSeekTo(time64, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC);
+    error = pC->mVideoSource->read(&mMediaBuffer, &options);
+    if (error != OK) {
+        // Cannot find a previous sync sample; must be the end of the stream.
+        return M4WAR_NO_MORE_AU;
+    }
+
+    mMediaBuffer->meta_data()->findInt64(kKeyTime, (int64_t*)&tempTime64);
+    LOGV("VideoEditor3gpReader_getPrevRapTime read time %ld, %x", tempTime64,
+        mMediaBuffer);
+
+    (*pTime) =  (tempTime64) / 1000;
+
+    if(mMediaBuffer != M4OSA_NULL) {
+        LOGV(" mMediaBuffer size = %d length %d", mMediaBuffer->size(),
+            mMediaBuffer->range_length());
+        mMediaBuffer->release();
+        mMediaBuffer = M4OSA_NULL;
+    }
+    options.clearSeekTo();
+
+    // Read errors were already handled above, so report success
+    LOGV("VideoEditor3gpReader_getPrevRapTime end: err %x", err);
+    err = M4NO_ERROR;
+    return err;
+}
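+
+/* Illustration only: a minimal sketch (assumption, not called by the engine) of how
+ * a caller could snap a cut point back to the preceding sync sample with the
+ * function above, so that decoding restarts on an I-frame. Times are in ms. */
+static M4OSA_ERR VideoEditor3gpReader_sketchSnapToPrevRap(M4OSA_Context context,
+        M4_StreamHandler* pVideoHandler, M4OSA_Int32* pCutTimeMs) {
+    M4OSA_ERR err = VideoEditor3gpReader_getPrevRapTime(context, pVideoHandler,
+        pCutTimeMs);
+    if (M4WAR_NO_MORE_AU == err) {
+        // No preceding sync sample was found (seek at/after the end of stream);
+        // leave the requested time untouched and let the caller decide.
+        err = M4NO_ERROR;
+    }
+    return err;
+}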
+
+extern "C" {
+M4OSA_ERR VideoEditor3gpReader_getInterface(M4READER_MediaType *pMediaType,
+        M4READER_GlobalInterface **pRdrGlobalInterface,
+        M4READER_DataInterface **pRdrDataInterface) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pMediaType,      M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pRdrGlobalInterface, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pRdrDataInterface, M4ERR_PARAMETER);
+
+    LOGV("VideoEditor3gpReader_getInterface begin");
+    LOGV("VideoEditor3gpReader_getInterface %d 0x%x 0x%x", *pMediaType,
+        *pRdrGlobalInterface,*pRdrDataInterface);
+
+    SAFE_MALLOC(*pRdrGlobalInterface, M4READER_GlobalInterface, 1,
+        "VideoEditor3gpReader_getInterface");
+    SAFE_MALLOC(*pRdrDataInterface, M4READER_DataInterface, 1,
+        "VideoEditor3gpReader_getInterface");
+
+    *pMediaType = M4READER_kMediaType3GPP;
+
+    (*pRdrGlobalInterface)->m_pFctCreate       = VideoEditor3gpReader_create;
+    (*pRdrGlobalInterface)->m_pFctDestroy      = VideoEditor3gpReader_destroy;
+    (*pRdrGlobalInterface)->m_pFctOpen         = VideoEditor3gpReader_open;
+    (*pRdrGlobalInterface)->m_pFctClose        = VideoEditor3gpReader_close;
+    (*pRdrGlobalInterface)->m_pFctGetOption    = VideoEditor3gpReader_getOption;
+    (*pRdrGlobalInterface)->m_pFctSetOption    = VideoEditor3gpReader_setOption;
+    (*pRdrGlobalInterface)->m_pFctGetNextStream =
+        VideoEditor3gpReader_getNextStreamHandler;
+    (*pRdrGlobalInterface)->m_pFctFillAuStruct =
+        VideoEditor3gpReader_fillAuStruct;
+    (*pRdrGlobalInterface)->m_pFctStart        = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctStop         = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctJump         = VideoEditor3gpReader_jump;
+    (*pRdrGlobalInterface)->m_pFctReset        = VideoEditor3gpReader_reset;
+    (*pRdrGlobalInterface)->m_pFctGetPrevRapTime =
+        VideoEditor3gpReader_getPrevRapTime;
+    (*pRdrDataInterface)->m_pFctGetNextAu      = VideoEditor3gpReader_getNextAu;
+    (*pRdrDataInterface)->m_readerContext      = M4OSA_NULL;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditor3gpReader_getInterface no error");
+    } else {
+        SAFE_FREE(*pRdrGlobalInterface);
+        SAFE_FREE(*pRdrDataInterface);
+
+        LOGV("VideoEditor3gpReader_getInterface ERROR 0x%X", err);
+    }
+    LOGV("VideoEditor3gpReader_getInterface end");
+    return err;
+}
+
+}  /* extern "C" */
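+
+/* Illustration only: a minimal sketch (assumption, not used by the engine) of how a
+ * caller obtains the reader's function tables through the factory above. The engine
+ * keeps both tables for the whole session and frees them at teardown. */
+static M4OSA_ERR VideoEditor3gpReader_sketchGetInterface() {
+    M4READER_MediaType mediaType;
+    M4READER_GlobalInterface* pGlobalItf = M4OSA_NULL;
+    M4READER_DataInterface* pDataItf = M4OSA_NULL;
+
+    M4OSA_ERR err = VideoEditor3gpReader_getInterface(&mediaType, &pGlobalItf,
+        &pDataItf);
+    if (M4NO_ERROR == err) {
+        // mediaType is now M4READER_kMediaType3GPP; the engine drives the reader
+        // through pGlobalItf->m_pFctCreate, m_pFctOpen, m_pFctGetNextStream, ...
+        LOGV("sketch: got 3GP reader interface, media type %d", mediaType);
+    }
+    return err;
+}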
+
+}  /* namespace android */
+
+
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioDecoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioDecoder.cpp
new file mode 100755
index 0000000..2e88147
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioDecoder.cpp
@@ -0,0 +1,907 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditorAudioDecoder.cpp
+* @brief  StageFright shell Audio Decoder
+*************************************************************************
+*/
+
+#define LOG_NDEBUG 1
+#define LOG_TAG "VIDEOEDITOR_AUDIODECODER"
+
+#include "M4OSA_Debug.h"
+#include "VideoEditorAudioDecoder.h"
+#include "VideoEditorUtils.h"
+#include "M4MCS_InternalTypes.h"
+
+#include "utils/Log.h"
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/OMXCodec.h>
+
+/********************
+ *   DEFINITIONS    *
+ ********************/
+// Version
+#define VIDEOEDITOR_AUDIO_DECODER_VERSION_MAJOR 1
+#define VIDEOEDITOR_AUDIO_DECODER_VERSION_MINOR 0
+#define VIDEOEDITOR_AUDIO_DECODER_VERSION_REV   0
+
+// Force using software decoder as engine does not support prefetch
+#define VIDEOEDITOR_FORCECODEC kSoftwareCodecsOnly
+
+namespace android {
+
+struct VideoEditorAudioDecoderSource : public MediaSource {
+    public:
+        static sp<VideoEditorAudioDecoderSource> Create(
+                const sp<MetaData>& format);
+        virtual status_t start(MetaData *params = NULL);
+        virtual status_t stop();
+        virtual sp<MetaData> getFormat();
+        virtual status_t read(MediaBuffer **buffer,
+        const ReadOptions *options = NULL);
+        virtual int32_t storeBuffer(MediaBuffer *buffer);
+
+    protected:
+        virtual ~VideoEditorAudioDecoderSource();
+
+    private:
+        struct MediaBufferChain {
+            MediaBuffer* buffer;
+            MediaBufferChain* nextLink;
+        };
+        enum State {
+            CREATED,
+            STARTED,
+            ERROR
+        };
+        VideoEditorAudioDecoderSource(const sp<MetaData>& format);
+        sp<MetaData> mFormat;
+        MediaBufferChain* mFirstBufferLink;
+        MediaBufferChain* mLastBufferLink;
+        int32_t mNbBuffer;
+        bool mIsEOS;
+        State mState;
+};
+
+sp<VideoEditorAudioDecoderSource> VideoEditorAudioDecoderSource::Create(
+        const sp<MetaData>& format) {
+
+    sp<VideoEditorAudioDecoderSource> aSource =
+        new VideoEditorAudioDecoderSource(format);
+
+    return aSource;
+}
+
+VideoEditorAudioDecoderSource::VideoEditorAudioDecoderSource(
+        const sp<MetaData>& format):
+        mFormat(format),
+        mFirstBufferLink(NULL),
+        mLastBufferLink(NULL),
+        mNbBuffer(0),
+        mIsEOS(false),
+        mState(CREATED) {
+}
+
+VideoEditorAudioDecoderSource::~VideoEditorAudioDecoderSource() {
+
+    if( STARTED == mState ) {
+        stop();
+    }
+}
+
+status_t VideoEditorAudioDecoderSource::start(MetaData *meta) {
+    status_t err = OK;
+
+    if( CREATED != mState ) {
+        LOGV("VideoEditorAudioDecoderSource::start: invalid state %d", mState);
+        return UNKNOWN_ERROR;
+    }
+
+    mState = STARTED;
+
+cleanUp:
+    LOGV("VideoEditorAudioDecoderSource::start END (0x%x)", err);
+    return err;
+}
+
+status_t VideoEditorAudioDecoderSource::stop() {
+    status_t err = OK;
+    int32_t i = 0;
+
+    LOGV("VideoEditorAudioDecoderSource::stop begin");
+
+    if( STARTED != mState ) {
+        LOGV("VideoEditorAudioDecoderSource::stop: invalid state %d", mState);
+        return UNKNOWN_ERROR;
+    }
+
+    // Release the buffer chain
+    MediaBufferChain* tmpLink = NULL;
+    while( mFirstBufferLink ) {
+        i++;
+        tmpLink = mFirstBufferLink;
+        mFirstBufferLink = mFirstBufferLink->nextLink;
+        delete tmpLink;
+    }
+    LOGV("VideoEditorAudioDecoderSource::stop : %d buffers remained", i);
+    mFirstBufferLink = NULL;
+    mLastBufferLink = NULL;
+
+    mState = CREATED;
+
+    LOGV("VideoEditorAudioDecoderSource::stop END (0x%x)", err);
+    return err;
+}
+
+sp<MetaData> VideoEditorAudioDecoderSource::getFormat() {
+
+    LOGV("VideoEditorAudioDecoderSource::getFormat");
+    return mFormat;
+}
+
+status_t VideoEditorAudioDecoderSource::read(MediaBuffer **buffer,
+        const ReadOptions *options) {
+    MediaSource::ReadOptions readOptions;
+    status_t err = OK;
+    MediaBufferChain* tmpLink = NULL;
+
+    LOGV("VideoEditorAudioDecoderSource::read begin");
+
+    if ( STARTED != mState ) {
+        LOGV("VideoEditorAudioDecoderSource::read invalid state %d", mState);
+        return UNKNOWN_ERROR;
+    }
+
+    // Get a buffer from the chain
+    if( NULL == mFirstBufferLink ) {
+        *buffer = NULL;
+        if( mIsEOS ) {
+            LOGV("VideoEditorAudioDecoderSource::read : EOS");
+            return ERROR_END_OF_STREAM;
+        } else {
+            LOGV("VideoEditorAudioDecoderSource::read : no buffer available");
+            return UNKNOWN_ERROR;
+        }
+    }
+    *buffer = mFirstBufferLink->buffer;
+
+    tmpLink = mFirstBufferLink;
+    mFirstBufferLink = mFirstBufferLink->nextLink;
+    if( NULL == mFirstBufferLink ) {
+        mLastBufferLink = NULL;
+    }
+    delete tmpLink;
+    mNbBuffer--;
+
+    LOGV("VideoEditorAudioDecoderSource::read END (0x%x)", err);
+    return err;
+}
+
+int32_t VideoEditorAudioDecoderSource::storeBuffer(MediaBuffer *buffer) {
+    status_t err = OK;
+
+    LOGV("VideoEditorAudioDecoderSource::storeBuffer begin");
+
+    // A NULL input buffer means that the end of stream was reached
+    if( NULL == buffer ) {
+        mIsEOS = true;
+    } else {
+        MediaBufferChain* newLink = new MediaBufferChain;
+        newLink->buffer = buffer;
+        newLink->nextLink = NULL;
+        if( NULL != mLastBufferLink ) {
+            mLastBufferLink->nextLink = newLink;
+        } else {
+            mFirstBufferLink = newLink;
+        }
+        mLastBufferLink = newLink;
+        mNbBuffer++;
+    }
+    LOGV("VideoEditorAudioDecoderSource::storeBuffer END");
+    return mNbBuffer;
+}
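+
+/* Illustration only: a minimal usage sketch (assumption, not part of the shell) of
+ * the source above. Buffers handed to storeBuffer() come back from read() in FIFO
+ * order, and storing a NULL buffer marks end of stream, so read() returns
+ * ERROR_END_OF_STREAM once the queue is drained. */
+static void VideoEditorAudioDecoderSource_sketchUsage(
+        const sp<VideoEditorAudioDecoderSource>& source, MediaBuffer* accessUnit) {
+    source->start();
+    source->storeBuffer(accessUnit);   // queue one compressed access unit
+    source->storeBuffer(NULL);         // signal end of stream
+    MediaBuffer* out = NULL;
+    source->read(&out, NULL);          // returns accessUnit (caller releases it)
+    source->read(&out, NULL);          // queue empty + EOS -> ERROR_END_OF_STREAM
+    source->stop();
+}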
+
+/********************
+ *      TOOLS       *
+ ********************/
+
+M4OSA_ERR VideoEditorAudioDecoder_getBits(M4OSA_Int8* pData,
+        M4OSA_UInt32 dataSize, M4OSA_UInt8 nbBits, M4OSA_Int32* pResult,
+        M4OSA_UInt32* pOffset) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 startByte = 0;
+    M4OSA_UInt32 startBit = 0;
+    M4OSA_UInt32 endByte = 0;
+    M4OSA_UInt32 endBit = 0;
+    M4OSA_UInt32 currentByte = 0;
+    M4OSA_UInt32 result = 0;
+    M4OSA_UInt32 ui32Tmp = 0;
+    M4OSA_UInt32 ui32Mask = 0;
+
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pData, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pOffset, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(32 >= nbBits, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK((*pOffset + nbBits) <= 8*dataSize, M4ERR_PARAMETER);
+
+    LOGV("VideoEditorAudioDecoder_getBits begin");
+
+    startByte   = (*pOffset) >> 3;
+    endByte     = (*pOffset + nbBits) >> 3;
+    startBit    = (*pOffset) % 8;
+    endBit      = (*pOffset + nbBits) % 8;
+    currentByte = startByte;
+
+    // Extract the requested number of bits from memory
+    while( currentByte <= endByte) {
+        ui32Mask = 0x000000FF;
+        if( currentByte == startByte ) {
+            ui32Mask >>= startBit;
+        }
+        ui32Tmp = ui32Mask & ((M4OSA_UInt32)pData[currentByte]);
+        if( currentByte == endByte ) {
+            ui32Tmp >>= (8-endBit);
+            result <<= endBit;
+        } else {
+            result <<= 8;
+        }
+        result |= ui32Tmp;
+        currentByte++;
+    }
+
+    *pResult = result;
+    *pOffset += nbBits;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioDecoder_getBits no error");
+    } else {
+        LOGV("VideoEditorAudioDecoder_getBits ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioDecoder_getBits end");
+    return err;
+}
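+
+/* Illustration only: a worked example (assumption, not called by the decoder) of the
+ * bit reader above. The first 5 bits of the byte 0x12 (binary 00010010) are 00010,
+ * i.e. 2, which is how the DSI parser below reads the AAC audio object type. */
+static void VideoEditorAudioDecoder_sketchGetBits() {
+    M4OSA_Int8 dsi = 0x12;
+    M4OSA_Int32 objectType = 0;
+    M4OSA_UInt32 offset = 0;
+    VideoEditorAudioDecoder_getBits(&dsi, 1, 5, &objectType, &offset);
+    // objectType is now 2 (AAC LC) and offset has advanced to 5
+    LOGV("sketch getBits: objectType %d, offset %d", objectType, offset);
+}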
+
+
+#define FREQ_TABLE_SIZE 16
+const M4OSA_UInt32 AD_AAC_FREQ_TABLE[FREQ_TABLE_SIZE] =
+    {96000, 88200, 64000, 48000, 44100,
+    32000, 24000, 22050, 16000, 12000, 11025, 8000, 7350, 0, 0, 0};
+
+
+M4OSA_ERR VideoEditorAudioDecoder_parse_AAC_DSI(M4OSA_Int8* pDSI,
+        M4OSA_UInt32 dsiSize, AAC_DEC_STREAM_PROPS* pProperties) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_UInt32 offset = 0;
+    M4OSA_Int32 result = 0;
+
+    LOGV("VideoEditorAudioDecoder_parse_AAC_DSI begin");
+
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pDSI, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pProperties, M4ERR_PARAMETER);
+
+    // Get the object type
+    err = VideoEditorAudioDecoder_getBits(pDSI, dsiSize, 5, &result, &offset);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+    switch( result ) {
+        case 2:
+            pProperties->aPSPresent  = 0;
+            pProperties->aSBRPresent = 0;
+            break;
+        default:
+            LOGV("parse_AAC_DSI ERROR : object type %d is not supported",
+                result);
+            VIDEOEDITOR_CHECK(!"invalid AAC object type", M4ERR_BAD_OPTION_ID);
+            break;
+    }
+    pProperties->aAudioObjectType = (M4OSA_Int32)result;
+
+    // Get the frequency index
+    err = VideoEditorAudioDecoder_getBits(pDSI, dsiSize, 4, &result, &offset);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+    VIDEOEDITOR_CHECK((0 <= result) && (FREQ_TABLE_SIZE > result),
+        M4ERR_PARAMETER);
+    pProperties->aSampFreq = AD_AAC_FREQ_TABLE[result];
+    pProperties->aExtensionSampFreq = 0;
+
+    // Get the number of channels
+    err = VideoEditorAudioDecoder_getBits(pDSI, dsiSize, 4, &result, &offset);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+    pProperties->aNumChan = (M4OSA_UInt32)result;
+
+    // Set the max PCM samples per channel
+    pProperties->aMaxPCMSamplesPerCh = (pProperties->aSBRPresent) ? 2048 : 1024;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioDecoder_parse_AAC_DSI no error");
+    } else {
+        LOGV("VideoEditorAudioDecoder_parse_AAC_DSI ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioDecoder_parse_AAC_DSI end");
+    return err;
+}
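+
+/* Illustration only: a worked example (assumption, not called by the decoder) for
+ * the parser above. The common 2-byte AAC-LC AudioSpecificConfig 0x12 0x10 decodes
+ * to object type 2 (AAC LC), frequency index 4 (44100 Hz) and 2 channels, so
+ * aMaxPCMSamplesPerCh ends up as 1024 (no SBR). */
+static void VideoEditorAudioDecoder_sketchParseAacDsi() {
+    M4OSA_Int8 dsi[2] = {0x12, 0x10};
+    AAC_DEC_STREAM_PROPS props;
+    if (M4NO_ERROR == VideoEditorAudioDecoder_parse_AAC_DSI(dsi, 2, &props)) {
+        LOGV("sketch DSI: %d Hz, %d channel(s), %d samples per channel",
+            (int)props.aSampFreq, (int)props.aNumChan,
+            (int)props.aMaxPCMSamplesPerCh);
+    }
+}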
+
+/********************
+ * ENGINE INTERFACE *
+ ********************/
+
+/**
+ ******************************************************************************
+ * structure VideoEditorAudioDecoder_Context
+ * @brief    This structure defines the context of the StageFright audio decoder
+ *           shell
+ ******************************************************************************
+*/
+typedef struct {
+    M4AD_Type                          mDecoderType;
+    M4_AudioStreamHandler*             mAudioStreamHandler;
+    sp<VideoEditorAudioDecoderSource>  mDecoderSource;
+    OMXClient                          mClient;
+    sp<MediaSource>                    mDecoder;
+    int32_t                            mNbOutputChannels;
+    uint32_t                           mNbInputFrames;
+    uint32_t                           mNbOutputFrames;
+} VideoEditorAudioDecoder_Context;
+
+M4OSA_ERR VideoEditorAudioDecoder_destroy(M4AD_Context pContext) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+
+    LOGV("VideoEditorAudioDecoder_destroy begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
+
+    // Stop the graph
+    if( M4OSA_NULL != pDecoderContext->mDecoder.get() ) {
+        pDecoderContext->mDecoder->stop();
+    }
+
+    // Destroy the graph
+    pDecoderContext->mDecoderSource.clear();
+    pDecoderContext->mDecoder.clear();
+    pDecoderContext->mClient.disconnect();
+
+    SAFE_FREE(pDecoderContext);
+    pContext = M4OSA_NULL;
+    LOGV("VideoEditorAudioDecoder_destroy : DONE");
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioDecoder_destroy no error");
+    } else {
+        LOGV("VideoEditorAudioDecoder_destroy ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioDecoder_destroy : end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_create(M4AD_Type decoderType,
+        M4AD_Context* pContext, M4_AudioStreamHandler* pStreamHandler,
+        void* pUserData) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+    AAC_DEC_STREAM_PROPS aacProperties;
+    status_t result = OK;
+    sp<MetaData> decoderMetaData = NULL;
+    const char* mime = NULL;
+    uint32_t codecFlags = 0;
+
+    LOGV("VideoEditorAudioDecoder_create begin: decoderType %d", decoderType);
+
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext,       M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pStreamHandler, M4ERR_PARAMETER);
+
+    // Context allocation & initialization
+    SAFE_MALLOC(pDecoderContext, VideoEditorAudioDecoder_Context, 1,
+        "AudioDecoder");
+    pDecoderContext->mDecoderType = decoderType;
+    pDecoderContext->mAudioStreamHandler = pStreamHandler;
+
+    pDecoderContext->mNbInputFrames  = 0;
+    pDecoderContext->mNbOutputFrames = 0;
+
+    LOGV("VideoEditorAudioDecoder_create : maxAUSize %d",
+        pDecoderContext->mAudioStreamHandler->m_basicProperties.m_maxAUSize);
+
+    // Create the meta data for the decoder
+    decoderMetaData = new MetaData;
+    switch( pDecoderContext->mDecoderType ) {
+        case M4AD_kTypeAMRNB:
+            // StageFright parameters
+            mime = MEDIA_MIMETYPE_AUDIO_AMR_NB;
+            // Engine parameters
+            pDecoderContext->mAudioStreamHandler->m_byteFrameLength = 160;
+            pDecoderContext->mAudioStreamHandler->m_byteSampleSize = 16;
+            pDecoderContext->mAudioStreamHandler->m_samplingFrequency = 8000;
+            pDecoderContext->mAudioStreamHandler->m_nbChannels = 1;
+            break;
+
+        case M4AD_kTypeAMRWB:
+            // StageFright parameters
+            mime = MEDIA_MIMETYPE_AUDIO_AMR_WB;
+
+            pDecoderContext->mAudioStreamHandler->m_byteFrameLength = 160;
+            pDecoderContext->mAudioStreamHandler->m_byteSampleSize = 16;
+            pDecoderContext->mAudioStreamHandler->m_samplingFrequency = 16000;
+            pDecoderContext->mAudioStreamHandler->m_nbChannels = 1;
+            break;
+
+        case M4AD_kTypeAAC:
+            // Reject ADTS & ADIF (or any incorrect type)
+            VIDEOEDITOR_CHECK(M4DA_StreamTypeAudioAac ==
+                pDecoderContext->mAudioStreamHandler->\
+                m_basicProperties.m_streamType,M4ERR_PARAMETER);
+
+            // StageFright parameters
+            mime = MEDIA_MIMETYPE_AUDIO_AAC;
+
+            decoderMetaData->setData(kKeyESDS, kTypeESDS,
+                pStreamHandler->m_basicProperties.m_pESDSInfo,
+                pStreamHandler->m_basicProperties.m_ESDSInfoSize);
+
+            // Engine parameters
+            // Retrieve sampling frequency and number of channels from the DSI
+            err = VideoEditorAudioDecoder_parse_AAC_DSI(
+                (M4OSA_Int8*)pStreamHandler->m_basicProperties.\
+                    m_pDecoderSpecificInfo,
+                pStreamHandler->m_basicProperties.m_decoderSpecificInfoSize,
+                &aacProperties);
+
+            VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+            pDecoderContext->mAudioStreamHandler->m_byteFrameLength = 1024;
+            pDecoderContext->mAudioStreamHandler->m_byteSampleSize = 16;
+            pDecoderContext->mAudioStreamHandler->m_samplingFrequency =
+                aacProperties.aSampFreq;
+            pDecoderContext->mAudioStreamHandler->m_nbChannels =
+                aacProperties.aNumChan;
+
+            // Copy the stream properties into userdata
+            if( M4OSA_NULL != pUserData ) {
+                M4OSA_memcpy((M4OSA_MemAddr8)pUserData,
+                    (M4OSA_MemAddr8)&aacProperties,
+                    sizeof(AAC_DEC_STREAM_PROPS));
+            }
+            break;
+
+        case M4AD_kTypeMP3:
+            // StageFright parameters
+            mime = MEDIA_MIMETYPE_AUDIO_MPEG;
+            break;
+
+        default:
+            VIDEOEDITOR_CHECK(!"AudioDecoder_open : incorrect input format",
+                M4ERR_STATE);
+            break;
+    }
+    decoderMetaData->setCString(kKeyMIMEType, mime);
+    decoderMetaData->setInt32(kKeySampleRate,
+        (int32_t)pDecoderContext->mAudioStreamHandler->m_samplingFrequency);
+    decoderMetaData->setInt32(kKeyChannelCount,
+        pDecoderContext->mAudioStreamHandler->m_nbChannels);
+    decoderMetaData->setInt64(kKeyDuration,
+        (int64_t)pDecoderContext->mAudioStreamHandler->\
+        m_basicProperties.m_duration);
+
+    // Create the decoder source
+    pDecoderContext->mDecoderSource = VideoEditorAudioDecoderSource::Create(
+        decoderMetaData);
+    VIDEOEDITOR_CHECK(NULL != pDecoderContext->mDecoderSource.get(),
+        M4ERR_STATE);
+
+    // Connect to the OMX client
+    result = pDecoderContext->mClient.connect();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    // Create the OMX codec
+#ifdef VIDEOEDITOR_FORCECODEC
+    codecFlags |= OMXCodec::VIDEOEDITOR_FORCECODEC;
+#endif /* VIDEOEDITOR_FORCECODEC */
+
+    pDecoderContext->mDecoder = OMXCodec::Create(
+        pDecoderContext->mClient.interface(), decoderMetaData, false,
+        pDecoderContext->mDecoderSource, NULL, codecFlags);
+    VIDEOEDITOR_CHECK(NULL != pDecoderContext->mDecoder.get(), M4ERR_STATE);
+
+    // Get the output channels, the decoder might overwrite the input metadata
+    pDecoderContext->mDecoder->getFormat()->findInt32(kKeyChannelCount,
+        &pDecoderContext->mNbOutputChannels);
+    LOGV("VideoEditorAudioDecoder_create : output chan %d",
+        pDecoderContext->mNbOutputChannels);
+
+    // Start the decoder
+    result = pDecoderContext->mDecoder->start();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    *pContext = pDecoderContext;
+    LOGV("VideoEditorAudioDecoder_create : DONE");
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioDecoder_create no error");
+    } else {
+        VideoEditorAudioDecoder_destroy(pDecoderContext);
+        *pContext = M4OSA_NULL;
+        LOGV("VideoEditorAudioDecoder_create ERROR 0x%X", err);
+    }
+    return err;
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_create_AAC(M4AD_Context* pContext,
+        M4_AudioStreamHandler* pStreamHandler, void* pUserData) {
+
+    return VideoEditorAudioDecoder_create(
+        M4AD_kTypeAAC, pContext, pStreamHandler,pUserData);
+}
+
+
+M4OSA_ERR VideoEditorAudioDecoder_create_AMRNB(M4AD_Context* pContext,
+        M4_AudioStreamHandler* pStreamHandler, void* pUserData) {
+
+    return VideoEditorAudioDecoder_create(
+        M4AD_kTypeAMRNB, pContext, pStreamHandler, pUserData);
+}
+
+
+M4OSA_ERR VideoEditorAudioDecoder_create_AMRWB(M4AD_Context* pContext,
+        M4_AudioStreamHandler* pStreamHandler, void* pUserData) {
+
+    return VideoEditorAudioDecoder_create(
+        M4AD_kTypeAMRWB, pContext, pStreamHandler, pUserData);
+}
+
+
+M4OSA_ERR VideoEditorAudioDecoder_create_MP3(M4AD_Context* pContext,
+        M4_AudioStreamHandler* pStreamHandler, void* pUserData) {
+
+    return VideoEditorAudioDecoder_create(
+        M4AD_kTypeMP3, pContext, pStreamHandler, pUserData);
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_processInputBuffer(
+        M4AD_Context pContext, M4AD_Buffer* pInputBuffer) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+    MediaBuffer* buffer = NULL;
+    int32_t nbBuffer = 0;
+
+    LOGV("VideoEditorAudioDecoder_processInputBuffer begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
+
+    if( M4OSA_NULL != pInputBuffer ) {
+        buffer = new MediaBuffer((size_t)pInputBuffer->m_bufferSize);
+        M4OSA_memcpy((M4OSA_Int8*)buffer->data() + buffer->range_offset(),
+            pInputBuffer->m_dataAddress, pInputBuffer->m_bufferSize);
+    }
+    nbBuffer = pDecoderContext->mDecoderSource->storeBuffer(buffer);
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioDecoder_processInputBuffer no error");
+    } else {
+        LOGV("VideoEditorAudioDecoder_processInputBuffer ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioDecoder_processInputBuffer end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_processOutputBuffer(M4AD_Context pContext,
+        MediaBuffer* buffer, M4AD_Buffer* pOuputBuffer) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+    int32_t i32Tmp = 0;
+    int64_t i64Tmp = 0;
+    status_t result = OK;
+
+    LOGV("VideoEditorAudioDecoder_processOutputBuffer begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != buffer, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pOuputBuffer, M4ERR_PARAMETER);
+
+    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
+
+    // Process the returned data
+    if( 0 == buffer->range_length() ) {
+        // Decoder has no data yet, nothing unusual
+        goto cleanUp;
+    }
+
+    pDecoderContext->mNbOutputFrames++;
+
+    if( pDecoderContext->mAudioStreamHandler->m_nbChannels ==
+        (M4OSA_UInt32)pDecoderContext->mNbOutputChannels ) {
+        // Just copy the PCMs
+        pOuputBuffer->m_bufferSize = (M4OSA_UInt32)buffer->range_length();
+        M4OSA_memcpy(pOuputBuffer->m_dataAddress,
+            ((M4OSA_MemAddr8)buffer->data())+buffer->range_offset(),
+            buffer->range_length());
+    } else if( pDecoderContext->mAudioStreamHandler->m_nbChannels <
+        (M4OSA_UInt32)pDecoderContext->mNbOutputChannels ) {
+        // The decoder forces stereo output: keep one channel of each sample pair
+        pOuputBuffer->m_bufferSize = (M4OSA_UInt32)(buffer->range_length()/2);
+        M4OSA_Int16* pDataIn  = ((M4OSA_Int16*)buffer->data()) +
+            buffer->range_offset();
+        M4OSA_Int16* pDataOut = (M4OSA_Int16*)pOuputBuffer->m_dataAddress;
+        M4OSA_Int16* pDataEnd = pDataIn + \
+            (buffer->range_length()/sizeof(M4OSA_Int16));
+        while( pDataIn < pDataEnd ) {
+            *pDataOut = *pDataIn;
+            pDataIn+=2;
+            pDataOut++;
+        }
+    } else {
+        // The decoder forces mono output, not supported
+        VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
+    }
+
+cleanUp:
+    // Release the buffer (it is NULL if the parameter check above failed)
+    if( M4OSA_NULL != buffer ) {
+        buffer->release();
+    }
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioDecoder_processOutputBuffer no error");
+    } else {
+        pOuputBuffer->m_bufferSize = 0;
+        LOGV("VideoEditorAudioDecoder_processOutputBuffer ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioDecoder_processOutputBuffer end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_step(M4AD_Context pContext,
+        M4AD_Buffer* pInputBuffer, M4AD_Buffer* pOutputBuffer,
+        M4OSA_Bool bJump) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+    status_t result = OK;
+    MediaBuffer* outputBuffer = NULL;
+
+    LOGV("VideoEditorAudioDecoder_step begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
+    pDecoderContext->mNbInputFrames++;
+
+    // Push the input buffer to the decoder source
+    err = VideoEditorAudioDecoder_processInputBuffer(pDecoderContext,
+        pInputBuffer);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+    // Read
+    result = pDecoderContext->mDecoder->read(&outputBuffer, NULL);
+    if (OK != result) {
+        LOGE("VideoEditorAudioDecoder_step: decoder read failed (err %d)", result);
+    }
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    // Convert the PCM buffer
+    err = VideoEditorAudioDecoder_processOutputBuffer(pDecoderContext,
+        outputBuffer, pOutputBuffer);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioDecoder_step no error");
+    } else {
+        LOGV("VideoEditorAudioDecoder_step ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioDecoder_step end");
+    return err;
+}
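+
+/* Illustration only: a minimal decode sketch (assumption, not part of the shell API)
+ * showing how the functions above chain together for one AAC access unit. The
+ * stream handler and the buffers are assumed to come from the reader shell and the
+ * engine respectively; a real caller loops over _step() until the stream ends. */
+static M4OSA_ERR VideoEditorAudioDecoder_sketchDecodeOneAu(
+        M4_AudioStreamHandler* pStreamHandler, M4AD_Buffer* pCompressedAu,
+        M4AD_Buffer* pPcmOut) {
+    M4AD_Context context = M4OSA_NULL;
+    M4OSA_ERR err = VideoEditorAudioDecoder_create_AAC(&context, pStreamHandler,
+        M4OSA_NULL);
+    if (M4NO_ERROR != err) {
+        return err;
+    }
+    // One compressed AU in, one PCM buffer out (no jump)
+    err = VideoEditorAudioDecoder_step(context, pCompressedAu, pPcmOut,
+        M4OSA_FALSE);
+    VideoEditorAudioDecoder_destroy(context);
+    return err;
+}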
+
+M4OSA_ERR VideoEditorAudioDecoder_getVersion(M4_VersionInfo* pVersionInfo) {
+    M4OSA_ERR err = M4NO_ERROR;
+
+    LOGV("VideoEditorAudioDecoder_getVersion begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pVersionInfo, M4ERR_PARAMETER);
+
+    pVersionInfo->m_major      = VIDEOEDITOR_AUDIO_DECODER_VERSION_MAJOR;
+    pVersionInfo->m_minor      = VIDEOEDITOR_AUDIO_DECODER_VERSION_MINOR;
+    pVersionInfo->m_revision   = VIDEOEDITOR_AUDIO_DECODER_VERSION_REV;
+    pVersionInfo->m_structSize = sizeof(M4_VersionInfo);
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioDecoder_getVersion no error");
+    } else {
+        LOGV("VideoEditorAudioDecoder_getVersion ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioDecoder_getVersion end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_setOption(M4AD_Context pContext,
+        M4OSA_UInt32 optionID, M4OSA_DataOption optionValue) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+
+    LOGV("VideoEditorAudioDecoder_setOption begin 0x%X", optionID);
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
+
+    switch( optionID ) {
+        case M4AD_kOptionID_UserParam:
+            LOGV("VideoEditorAudioDecoder_setOption UserParam is not supported");
+            err = M4ERR_NOT_IMPLEMENTED;
+            break;
+        default:
+            LOGV("VideoEditorAudioDecoder_setOption  unsupported optionId 0x%X",
+                optionID);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
+            break;
+    }
+
+cleanUp:
+    if( ((M4OSA_UInt32)M4NO_ERROR == err) ||
+        ((M4OSA_UInt32)M4ERR_NOT_IMPLEMENTED == err) ) {
+        LOGV("VideoEditorAudioDecoder_setOption end with status 0x%X", err);
+    } else {
+        LOGV("VideoEditorAudioDecoder_setOption ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioDecoder_setOption end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_getOption(M4AD_Context pContext,
+        M4OSA_UInt32 optionID, M4OSA_DataOption optionValue) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioDecoder_Context* pDecoderContext = M4OSA_NULL;
+
+    LOGV("VideoEditorAudioDecoder_getOption begin: optionID 0x%X", optionID);
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pDecoderContext = (VideoEditorAudioDecoder_Context*)pContext;
+
+    switch( optionID ) {
+        default:
+            LOGV("VideoEditorAudioDecoder_getOption unsupported optionId 0x%X",
+                optionID);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
+            break;
+    }
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioDecoder_getOption no error");
+    } else {
+        LOGV("VideoEditorAudioDecoder_getOption ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioDecoder_getOption end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface(M4AD_Type decoderType,
+        M4AD_Type* pDecoderType, M4AD_Interface** pDecoderInterface) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pDecoderType, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pDecoderInterface, M4ERR_PARAMETER);
+
+    LOGV("VideoEditorAudioDecoder_getInterface begin %d 0x%x 0x%x",
+        decoderType, pDecoderType, pDecoderInterface);
+
+    SAFE_MALLOC(*pDecoderInterface, M4AD_Interface, 1,
+        "VideoEditorAudioDecoder");
+
+    *pDecoderType = decoderType;
+
+    switch( decoderType ) {
+        case M4AD_kTypeAMRNB:
+            (*pDecoderInterface)->m_pFctCreateAudioDec =
+                VideoEditorAudioDecoder_create_AMRNB;
+            break;
+        case M4AD_kTypeAMRWB:
+            (*pDecoderInterface)->m_pFctCreateAudioDec =
+                VideoEditorAudioDecoder_create_AMRWB;
+            break;
+        case M4AD_kTypeAAC:
+            (*pDecoderInterface)->m_pFctCreateAudioDec =
+                VideoEditorAudioDecoder_create_AAC;
+            break;
+        case M4AD_kTypeMP3:
+            (*pDecoderInterface)->m_pFctCreateAudioDec =
+                VideoEditorAudioDecoder_create_MP3;
+            break;
+        default:
+            LOGV("VEAD_getInterface ERROR: unsupported type %d", decoderType);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
+        break;
+    }
+    (*pDecoderInterface)->m_pFctDestroyAudioDec   =
+        VideoEditorAudioDecoder_destroy;
+    (*pDecoderInterface)->m_pFctResetAudioDec     = M4OSA_NULL;
+    (*pDecoderInterface)->m_pFctStartAudioDec     = M4OSA_NULL;
+    (*pDecoderInterface)->m_pFctStepAudioDec      =
+        VideoEditorAudioDecoder_step;
+    (*pDecoderInterface)->m_pFctGetVersionAudioDec =
+        VideoEditorAudioDecoder_getVersion;
+    (*pDecoderInterface)->m_pFctSetOptionAudioDec =
+        VideoEditorAudioDecoder_setOption;
+    (*pDecoderInterface)->m_pFctGetOptionAudioDec =
+        VideoEditorAudioDecoder_getOption;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioDecoder_getInterface no error");
+    } else {
+        *pDecoderInterface = M4OSA_NULL;
+        LOGV("VideoEditorAudioDecoder_getInterface ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioDecoder_getInterface end");
+    return err;
+}
+
+
+extern "C" {
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_AAC(M4AD_Type* pDecoderType,
+        M4AD_Interface** pDecoderInterface) {
+    LOGV("TEST: AAC VideoEditorAudioDecoder_getInterface no error");
+    return VideoEditorAudioDecoder_getInterface(
+        M4AD_kTypeAAC, pDecoderType, pDecoderInterface);
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_AMRNB(M4AD_Type* pDecoderType,
+        M4AD_Interface** pDecoderInterface) {
+    LOGV("TEST: AMR VideoEditorAudioDecoder_getInterface no error");
+    return VideoEditorAudioDecoder_getInterface(
+        M4AD_kTypeAMRNB, pDecoderType, pDecoderInterface);
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_AMRWB(M4AD_Type* pDecoderType,
+        M4AD_Interface** pDecoderInterface) {
+
+    return VideoEditorAudioDecoder_getInterface(
+        M4AD_kTypeAMRWB, pDecoderType, pDecoderInterface);
+}
+
+M4OSA_ERR VideoEditorAudioDecoder_getInterface_MP3(M4AD_Type* pDecoderType,
+        M4AD_Interface** pDecoderInterface) {
+
+    return VideoEditorAudioDecoder_getInterface(
+        M4AD_kTypeMP3, pDecoderType, pDecoderInterface);
+}
+
+}  // extern "C"
+
+}  // namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioEncoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioEncoder.cpp
new file mode 100755
index 0000000..718881f
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorAudioEncoder.cpp
@@ -0,0 +1,739 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditorAudioEncoder.cpp
+* @brief  StageFright shell Audio Encoder
+*************************************************************************
+*/
+
+#define LOG_NDEBUG 1
+#define LOG_TAG "VIDEOEDITOR_AUDIOENCODER"
+
+#include "M4OSA_Debug.h"
+#include "VideoEditorAudioEncoder.h"
+#include "VideoEditorUtils.h"
+
+#include "utils/Log.h"
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/OMXCodec.h>
+
+/*** DEFINITIONS ***/
+// Force using software encoder as engine does not support prefetch
+#define VIDEOEDITOR_FORCECODEC kSoftwareCodecsOnly
+
+namespace android {
+struct VideoEditorAudioEncoderSource : public MediaSource {
+    public:
+        static sp<VideoEditorAudioEncoderSource> Create();
+        virtual status_t start(MetaData *params = NULL);
+        virtual status_t stop();
+        virtual sp<MetaData> getFormat();
+        virtual status_t read(MediaBuffer **buffer,
+        const ReadOptions *options = NULL);
+        virtual int32_t storeBuffer(MediaBuffer *buffer);
+
+    protected:
+        virtual ~VideoEditorAudioEncoderSource();
+
+    private:
+        struct MediaBufferChain {
+            MediaBuffer* buffer;
+            MediaBufferChain* nextLink;
+        };
+        enum State {
+            CREATED,
+            STARTED,
+            ERROR
+        };
+        VideoEditorAudioEncoderSource();
+        MediaBufferChain* mFirstBufferLink;
+        MediaBufferChain* mLastBufferLink;
+        int32_t mNbBuffer;
+        State mState;
+};
+
+sp<VideoEditorAudioEncoderSource> VideoEditorAudioEncoderSource::Create() {
+
+    LOGV("VideoEditorAudioEncoderSource::Create");
+    sp<VideoEditorAudioEncoderSource> aSource =
+        new VideoEditorAudioEncoderSource();
+
+    return aSource;
+}
+
+VideoEditorAudioEncoderSource::VideoEditorAudioEncoderSource():
+        mFirstBufferLink(NULL),
+        mLastBufferLink(NULL),
+        mNbBuffer(0),
+        mState(CREATED) {
+    LOGV("VideoEditorAudioEncoderSource::VideoEditorAudioEncoderSource");
+}
+
+
+VideoEditorAudioEncoderSource::~VideoEditorAudioEncoderSource() {
+    LOGV("VideoEditorAudioEncoderSource::~VideoEditorAudioEncoderSource");
+
+    if( STARTED == mState ) {
+        stop();
+    }
+}
+
+status_t VideoEditorAudioEncoderSource::start(MetaData *meta) {
+    status_t err = OK;
+
+    LOGV("VideoEditorAudioEncoderSource::start");
+
+    if( CREATED != mState ) {
+        LOGV("VideoEditorAudioEncoderSource::start ERROR : invalid state %d",
+            mState);
+        return UNKNOWN_ERROR;
+    }
+
+    mState = STARTED;
+
+cleanUp:
+    LOGV("VideoEditorAudioEncoderSource::start END (0x%x)", err);
+    return err;
+}
+
+status_t VideoEditorAudioEncoderSource::stop() {
+    status_t err = OK;
+
+    LOGV("VideoEditorAudioEncoderSource::stop");
+
+    if( STARTED != mState ) {
+        LOGV("VideoEditorAudioEncoderSource::stop ERROR: invalid state %d",
+            mState);
+        return UNKNOWN_ERROR;
+    }
+
+    int32_t i = 0;
+    MediaBufferChain* tmpLink = NULL;
+    while( mFirstBufferLink ) {
+        i++;
+        tmpLink = mFirstBufferLink;
+        mFirstBufferLink = mFirstBufferLink->nextLink;
+        delete tmpLink;
+    }
+    LOGV("VideoEditorAudioEncoderSource::stop : %d buffers remained", i);
+    mFirstBufferLink = NULL;
+    mLastBufferLink = NULL;
+
+    mState = CREATED;
+
+    LOGV("VideoEditorAudioEncoderSource::stop END (0x%x)", err);
+    return err;
+}
+
+sp<MetaData> VideoEditorAudioEncoderSource::getFormat() {
+    LOGV("VideoEditorAudioEncoderSource::getFormat");
+
+   LOGV("VideoEditorAudioEncoderSource::getFormat :THIS IS NOT IMPLEMENTED");
+
+    return NULL;
+}
+
+status_t VideoEditorAudioEncoderSource::read(MediaBuffer **buffer,
+        const ReadOptions *options) {
+    MediaSource::ReadOptions readOptions;
+    status_t err = OK;
+    MediaBufferChain* tmpLink = NULL;
+
+    LOGV("VideoEditorAudioEncoderSource::read");
+
+    if ( STARTED != mState ) {
+        LOGV("VideoEditorAudioEncoderSource::read ERROR : invalid state %d",
+            mState);
+        return UNKNOWN_ERROR;
+    }
+
+    if( NULL == mFirstBufferLink ) {
+        *buffer = NULL;
+        LOGV("VideoEditorAudioEncoderSource::read : EOS");
+        return ERROR_END_OF_STREAM;
+    }
+    *buffer = mFirstBufferLink->buffer;
+
+    tmpLink = mFirstBufferLink;
+    mFirstBufferLink = mFirstBufferLink->nextLink;
+    if( NULL == mFirstBufferLink ) {
+        mLastBufferLink = NULL;
+    }
+    delete tmpLink;
+    mNbBuffer--;
+
+    LOGV("VideoEditorAudioEncoderSource::read END (0x%x)", err);
+    return err;
+}
+
+int32_t VideoEditorAudioEncoderSource::storeBuffer(MediaBuffer *buffer) {
+    status_t err = OK;
+
+    LOGV("VideoEditorAudioEncoderSource::storeBuffer");
+
+    MediaBufferChain* newLink = new MediaBufferChain;
+    newLink->buffer = buffer;
+    newLink->nextLink = NULL;
+    if( NULL != mLastBufferLink ) {
+        mLastBufferLink->nextLink = newLink;
+    } else {
+        mFirstBufferLink = newLink;
+    }
+    mLastBufferLink = newLink;
+    mNbBuffer++;
+
+    LOGV("VideoEditorAudioEncoderSource::storeBuffer END");
+    return mNbBuffer;
+}
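+
+// Note on the source above: VideoEditorAudioEncoderSource is a minimal
+// push-style MediaSource. The engine pushes raw PCM through storeBuffer()
+// and OMXCodec pulls it back through read(); buffers are kept in a simple
+// singly linked FIFO. A usage sketch (illustrative only, hypothetical names):
+//
+//     sp<VideoEditorAudioEncoderSource> src =
+//         VideoEditorAudioEncoderSource::Create();
+//     src->start();
+//     src->storeBuffer(pcmBuffer);   // producer side (engine)
+//     MediaBuffer* out = NULL;
+//     src->read(&out);               // consumer side (OMXCodec), FIFO order
+//     src->stop();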
+
+/********************
+ * ENGINE INTERFACE *
+ ********************/
+/**
+ ******************************************************************************
+ * structure VideoEditorAudioEncoder_Context
+ * @brief    This structure defines the context of the StageFright audio
+ *           encoder shell
+ ******************************************************************************
+*/
+typedef struct {
+    M4ENCODER_AudioFormat             mFormat;
+    M4ENCODER_AudioParams*            mCodecParams;
+    M4ENCODER_AudioDecSpecificInfo    mDSI;
+    sp<VideoEditorAudioEncoderSource> mEncoderSource;
+    OMXClient                         mClient;
+    sp<MediaSource>                   mEncoder;
+    uint32_t                          mNbInputFrames;
+    uint32_t                          mNbOutputFrames;
+    int64_t                           mFirstOutputCts;
+    int64_t                           mLastOutputCts;
+} VideoEditorAudioEncoder_Context;
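+
+// Typical call sequence for this shell from the engine side (hedged sketch
+// only; encCtx, params, dsi, inBuffer and outBuffer are hypothetical names,
+// the function pointers are the ones filled in by _getInterface below):
+//
+//     M4ENCODER_AudioGlobalInterface* itf = M4OSA_NULL;
+//     M4ENCODER_AudioFormat fmt;
+//     VideoEditorAudioEncoder_getInterface_AAC(&fmt, &itf);
+//     M4OSA_Context encCtx = M4OSA_NULL;
+//     itf->pFctInit(&encCtx, M4OSA_NULL);
+//     itf->pFctOpen(encCtx, &params, &dsi, M4OSA_NULL);
+//     itf->pFctStep(encCtx, &inBuffer, &outBuffer);   // one AU per call
+//     itf->pFctClose(encCtx);
+//     itf->pFctCleanUp(encCtx);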
+
+M4OSA_ERR VideoEditorAudioEncoder_cleanup(M4OSA_Context pContext) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    LOGV("VideoEditorAudioEncoder_cleanup begin");
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+
+    SAFE_FREE(pEncoderContext->mDSI.pInfo);
+    SAFE_FREE(pEncoderContext);
+    pContext = M4OSA_NULL;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioEncoder_cleanup no error");
+    } else {
+        LOGV("VideoEditorAudioEncoder_cleanup ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioEncoder_cleanup end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_init(M4ENCODER_AudioFormat format,
+        M4OSA_Context* pContext, M4OSA_Void* pUserData) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    LOGV(" VideoEditorAudioEncoder_init begin: format %d", format);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    SAFE_MALLOC(pEncoderContext, VideoEditorAudioEncoder_Context, 1,
+        "VideoEditorAudioEncoder");
+    pEncoderContext->mFormat = format;
+
+    *pContext = pEncoderContext;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioEncoder_init no error");
+    } else {
+        VideoEditorAudioEncoder_cleanup(pEncoderContext);
+        *pContext = M4OSA_NULL;
+        LOGV("VideoEditorAudioEncoder_init ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioEncoder_init end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_init_AAC(M4OSA_Context* pContext,
+        M4OSA_Void* pUserData) {
+    return VideoEditorAudioEncoder_init(M4ENCODER_kAAC, pContext, pUserData);
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_init_AMRNB(M4OSA_Context* pContext,
+        M4OSA_Void* pUserData) {
+    return VideoEditorAudioEncoder_init(M4ENCODER_kAMRNB, pContext, pUserData);
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_init_MP3(M4OSA_Context* pContext,
+        M4OSA_Void* pUserData) {
+    return VideoEditorAudioEncoder_init(M4ENCODER_kMP3, pContext, pUserData);
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_close(M4OSA_Context pContext) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    LOGV("VideoEditorAudioEncoder_close begin");
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+
+    SAFE_FREE(pEncoderContext->mCodecParams);
+
+    pEncoderContext->mEncoder->stop();
+    pEncoderContext->mEncoder.clear();
+    pEncoderContext->mClient.disconnect();
+    pEncoderContext->mEncoderSource.clear();
+
+    LOGV("AudioEncoder_close:IN %d frames,OUT %d frames from %lld to %lld",
+        pEncoderContext->mNbInputFrames,
+        pEncoderContext->mNbOutputFrames, pEncoderContext->mFirstOutputCts,
+        pEncoderContext->mLastOutputCts);
+
+    if( pEncoderContext->mNbInputFrames != pEncoderContext->mNbInputFrames ) {
+        LOGV("VideoEditorAudioEncoder_close:some frames were not encoded %d %d",
+            pEncoderContext->mNbInputFrames, pEncoderContext->mNbInputFrames);
+    }
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioEncoder_close no error");
+    } else {
+        LOGV("VideoEditorAudioEncoder_close ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioEncoder_close begin end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_open(M4OSA_Context pContext,
+        M4ENCODER_AudioParams *pParams, M4ENCODER_AudioDecSpecificInfo *pDSI,
+        M4OSA_Context pGrabberContext) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+    status_t result = OK;
+    sp<MetaData> encoderMetadata = NULL;
+    const char* mime = NULL;
+    int32_t iNbChannel = 0;
+    uint32_t codecFlags = 0;
+
+    LOGV("VideoEditorAudioEncoder_open begin");
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pParams,  M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pDSI,     M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+    pDSI->pInfo = M4OSA_NULL;
+    pDSI->infoSize = 0;
+
+    pEncoderContext->mNbInputFrames  = 0;
+    pEncoderContext->mNbOutputFrames = 0;
+    pEncoderContext->mFirstOutputCts = -1;
+    pEncoderContext->mLastOutputCts  = -1;
+
+    // Allocate & initialize the encoding parameters
+    LOGV("VideoEditorAudioEncoder_open : params F=%d CN=%d BR=%d F=%d",
+        pParams->Frequency, pParams->ChannelNum, pParams->Bitrate,
+        pParams->Format);
+    SAFE_MALLOC(pEncoderContext->mCodecParams, M4ENCODER_AudioParams, 1,
+        "VIDEOEDITOR CodecParams");
+    pEncoderContext->mCodecParams->Frequency  = pParams->Frequency;
+    pEncoderContext->mCodecParams->ChannelNum = pParams->ChannelNum;
+    pEncoderContext->mCodecParams->Bitrate    = pParams->Bitrate;
+    pEncoderContext->mCodecParams->Format     = pParams->Format;
+
+    // Check output format consistency
+    VIDEOEDITOR_CHECK(pEncoderContext->mCodecParams->Format ==
+        pEncoderContext->mFormat, M4ERR_PARAMETER);
+
+    /**
+     * StageFright graph building
+     */
+    // Create the meta data for the encoder
+    encoderMetadata = new MetaData;
+    switch( pEncoderContext->mCodecParams->Format ) {
+        case M4ENCODER_kAAC:
+        {
+            mime = MEDIA_MIMETYPE_AUDIO_AAC;
+            break;
+        }
+        case M4ENCODER_kAMRNB:
+        {
+            mime = MEDIA_MIMETYPE_AUDIO_AMR_NB;
+            break;
+        }
+        default:
+        {
+            VIDEOEDITOR_CHECK(!"AudioEncoder_open : incorrect input format",
+            M4ERR_PARAMETER);
+            break;
+        }
+    }
+    encoderMetadata->setCString(kKeyMIMEType, mime);
+    encoderMetadata->setInt32(kKeySampleRate,
+        (int32_t)pEncoderContext->mCodecParams->Frequency);
+    encoderMetadata->setInt32(kKeyBitRate,
+        (int32_t)pEncoderContext->mCodecParams->Bitrate);
+
+    switch( pEncoderContext->mCodecParams->ChannelNum ) {
+        case M4ENCODER_kMono:
+        {
+            iNbChannel = 1;
+            break;
+        }
+        case M4ENCODER_kStereo:
+        {
+            iNbChannel = 2;
+            break;
+        }
+        default:
+        {
+            VIDEOEDITOR_CHECK(!"AudioEncoder_open : incorrect channel number",
+                M4ERR_STATE);
+            break;
+        }
+    }
+    encoderMetadata->setInt32(kKeyChannelCount, iNbChannel);
+
+    // Create the encoder source
+    pEncoderContext->mEncoderSource = VideoEditorAudioEncoderSource::Create();
+    VIDEOEDITOR_CHECK(NULL != pEncoderContext->mEncoderSource.get(),
+        M4ERR_STATE);
+
+    // Connect to the OMX client
+    result = pEncoderContext->mClient.connect();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    // Create the OMX codec
+#ifdef VIDEOEDITOR_FORCECODEC
+    codecFlags |= OMXCodec::VIDEOEDITOR_FORCECODEC;
+#endif /* VIDEOEDITOR_FORCECODEC */
+    pEncoderContext->mEncoder = OMXCodec::Create(
+        pEncoderContext->mClient.interface(), encoderMetadata, true,
+        pEncoderContext->mEncoderSource, NULL, codecFlags);
+    VIDEOEDITOR_CHECK(NULL != pEncoderContext->mEncoder.get(), M4ERR_STATE);
+
+    // Start the graph
+    result = pEncoderContext->mEncoder->start();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    // Get AAC DSI, this code can only work with software encoder
+    if( M4ENCODER_kAAC == pEncoderContext->mCodecParams->Format ) {
+        int32_t      isCodecConfig = 0;
+        MediaBuffer* buffer        = NULL;
+
+        // Read once to get the DSI
+        result = pEncoderContext->mEncoder->read(&buffer, NULL);
+        VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+        VIDEOEDITOR_CHECK(buffer->meta_data()->findInt32(kKeyIsCodecConfig,
+            &isCodecConfig) && isCodecConfig, M4ERR_STATE);
+
+        // Save the DSI
+        pEncoderContext->mDSI.infoSize = (M4OSA_UInt32)buffer->range_length();
+        SAFE_MALLOC(pEncoderContext->mDSI.pInfo, M4OSA_Int8,
+            pEncoderContext->mDSI.infoSize, "Encoder header");
+
+        M4OSA_memcpy(pEncoderContext->mDSI.pInfo,
+            (M4OSA_MemAddr8)(buffer->data())+buffer->range_offset(),
+            pEncoderContext->mDSI.infoSize);
+
+        buffer->release();
+        *pDSI = pEncoderContext->mDSI;
+    }
+    LOGV("VideoEditorAudioEncoder_open : DONE");
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioEncoder_open no error");
+    } else {
+        VideoEditorAudioEncoder_close(pEncoderContext);
+        LOGV("VideoEditorAudioEncoder_open ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioEncoder_open end");
+    return err;
+}
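+
+// Note on the DSI read above: for AAC the first buffer flagged with
+// kKeyIsCodecConfig carries the AudioSpecificConfig (ISO/IEC 14496-3),
+// which the 3gp writer typically stores in the 'esds' box. Its leading
+// bits are audioObjectType(5) | samplingFrequencyIndex(4) |
+// channelConfiguration(4); e.g. a 2-byte config 0x12 0x10 would decode to
+// AAC-LC, 44100 Hz, stereo (illustrative values only).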
+
+M4OSA_ERR VideoEditorAudioEncoder_processInputBuffer(M4OSA_Context pContext,
+        M4ENCODER_AudioBuffer* pInBuffer) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+    M4OSA_Int8* pData = M4OSA_NULL;
+    MediaBuffer* buffer = NULL;
+    int32_t nbBuffer = 0;
+
+    LOGV("VideoEditorAudioEncoder_processInputBuffer begin");
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+
+    switch( pEncoderContext->mCodecParams->ChannelNum ) {
+        case M4ENCODER_kMono:
+        case M4ENCODER_kStereo:
+            // Let the MediaBuffer own the data so we don't have to free it
+            buffer = new MediaBuffer((size_t)pInBuffer->pTableBufferSize[0]);
+            pData = (M4OSA_Int8*)buffer->data() + buffer->range_offset();
+            M4OSA_memcpy(pData, pInBuffer->pTableBuffer[0],
+                pInBuffer->pTableBufferSize[0]);
+            break;
+        default:
+            LOGV("VEAE_processInputBuffer unsupported channel configuration %d",
+                pEncoderContext->mCodecParams->ChannelNum);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
+            break;
+    }
+
+    LOGV("VideoEditorAudioEncoder_processInputBuffer : store %d bytes",
+        buffer->range_length());
+    // Push the buffer to the source
+    nbBuffer = pEncoderContext->mEncoderSource->storeBuffer(buffer);
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioEncoder_processInputBuffer no error");
+    } else {
+        if( NULL != buffer ) {
+            buffer->release();
+        }
+        LOGV("VideoEditorAudioEncoder_processInputBuffer ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioEncoder_processInputBuffer end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_processOutputBuffer(M4OSA_Context pContext,
+        MediaBuffer* buffer, M4ENCODER_AudioBuffer* pOutBuffer) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+    M4OSA_UInt32 Cts = 0;
+    int32_t i32Tmp = 0;
+    int64_t i64Tmp = 0;
+    status_t result = OK;
+
+    LOGV("VideoEditorAudioEncoder_processOutputBuffer begin");
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext,   M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != buffer,     M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pOutBuffer, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+
+    // Process the returned AU
+    if( 0 == buffer->range_length() ) {
+        // Encoder has no data yet, nothing unusual
+        LOGV("VideoEditorAudioEncoder_processOutputBuffer : buffer is empty");
+        pOutBuffer->pTableBufferSize[0] = 0;
+        goto cleanUp;
+    }
+    if( buffer->meta_data()->findInt32(kKeyIsCodecConfig, &i32Tmp) && i32Tmp ) {
+        /* This should not happen with software encoder,
+         * DSI was retrieved beforehand */
+        VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_STATE);
+    } else {
+        // Check the CTS
+        VIDEOEDITOR_CHECK(buffer->meta_data()->findInt64(kKeyTime, &i64Tmp),
+            M4ERR_STATE);
+        Cts = (M4OSA_UInt32)(i64Tmp/1000);
+
+        pEncoderContext->mNbOutputFrames++;
+        if( 0 > pEncoderContext->mFirstOutputCts ) {
+            pEncoderContext->mFirstOutputCts = i64Tmp;
+        }
+        pEncoderContext->mLastOutputCts = i64Tmp;
+
+        // Format the AU
+        M4OSA_memcpy(pOutBuffer->pTableBuffer[0],
+            (M4OSA_MemAddr8)(buffer->data())+buffer->range_offset(),
+            buffer->range_length());
+        pOutBuffer->pTableBufferSize[0] = (M4OSA_UInt32)buffer->range_length();
+    }
+
+cleanUp:
+    // Release the buffer
+    buffer->release();
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioEncoder_processOutputBuffer no error");
+    } else {
+        LOGV("VideoEditorAudioEncoder_processOutputBuffer ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioEncoder_processOutputBuffer end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_step(M4OSA_Context pContext,
+        M4ENCODER_AudioBuffer* pInBuffer, M4ENCODER_AudioBuffer* pOutBuffer) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+    status_t result = OK;
+    MediaBuffer* buffer = NULL;
+
+    LOGV("VideoEditorAudioEncoder_step begin");
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext,   M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pInBuffer,  M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pOutBuffer, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+    pEncoderContext->mNbInputFrames++;
+
+    // Push the input buffer to the encoder source
+    err = VideoEditorAudioEncoder_processInputBuffer(pEncoderContext,pInBuffer);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+    // Read
+    result = pEncoderContext->mEncoder->read(&buffer, NULL);
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    // Provide the encoded AU to the writer
+    err = VideoEditorAudioEncoder_processOutputBuffer(pEncoderContext, buffer,
+        pOutBuffer);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioEncoder_step no error");
+    } else {
+        LOGV("VideoEditorAudioEncoder_step ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioEncoder_step end");
+    return err;
+}
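+
+// The step above is strictly one-in/one-out: the engine feeds one frame of
+// PCM per call and the blocking read() returns one encoded AU. As a rough
+// sizing reminder (an assumption about the engine's buffers, not enforced
+// here): AMR-NB consumes 160 samples per AU (20 ms at 8 kHz, i.e. 320 bytes
+// of 16-bit mono PCM), AAC consumes 1024 samples per channel per AU
+// (4096 bytes for 16-bit stereo).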
+
+M4OSA_ERR VideoEditorAudioEncoder_getOption(M4OSA_Context pContext,
+        M4OSA_OptionID optionID, M4OSA_DataOption* optionValue) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorAudioEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    LOGV("VideoEditorAudioEncoder_getOption begin optionID 0x%X", optionID);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorAudioEncoder_Context*)pContext;
+
+    switch( optionID ) {
+        default:
+            LOGV("VideoEditorAudioEncoder_getOption: unsupported optionId 0x%X",
+                optionID);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
+            break;
+    }
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioEncoder_getOption no error");
+    } else {
+        LOGV("VideoEditorAudioEncoder_getOption ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorAudioEncoder_getOption end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_getInterface(
+        M4ENCODER_AudioFormat format, M4ENCODER_AudioFormat* pFormat,
+        M4ENCODER_AudioGlobalInterface** pEncoderInterface) {
+    M4OSA_ERR err = M4NO_ERROR;
+
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pFormat,           M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pEncoderInterface, M4ERR_PARAMETER);
+
+    LOGV("VideoEditorAudioEncoder_getInterface 0x%x 0x%x",pFormat,
+        pEncoderInterface);
+    SAFE_MALLOC(*pEncoderInterface, M4ENCODER_AudioGlobalInterface, 1,
+        "AudioEncoder");
+
+    *pFormat = format;
+
+    switch( format ) {
+        case M4ENCODER_kAAC:
+        {
+            (*pEncoderInterface)->pFctInit = VideoEditorAudioEncoder_init_AAC;
+            break;
+        }
+        case M4ENCODER_kAMRNB:
+        {
+            (*pEncoderInterface)->pFctInit = VideoEditorAudioEncoder_init_AMRNB;
+            break;
+        }
+        case M4ENCODER_kMP3:
+        {
+            (*pEncoderInterface)->pFctInit = VideoEditorAudioEncoder_init_MP3;
+            break;
+        }
+        default:
+        {
+            LOGV("VideoEditorAudioEncoder_getInterface: unsupported format %d",
+                format);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
+            break;
+        }
+    }
+    (*pEncoderInterface)->pFctCleanUp      = VideoEditorAudioEncoder_cleanup;
+    (*pEncoderInterface)->pFctOpen         = VideoEditorAudioEncoder_open;
+    (*pEncoderInterface)->pFctClose        = VideoEditorAudioEncoder_close;
+    (*pEncoderInterface)->pFctStep         = VideoEditorAudioEncoder_step;
+    (*pEncoderInterface)->pFctGetOption    = VideoEditorAudioEncoder_getOption;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorAudioEncoder_getInterface no error");
+    } else {
+        *pEncoderInterface = M4OSA_NULL;
+        LOGV("VideoEditorAudioEncoder_getInterface ERROR 0x%X", err);
+    }
+    return err;
+}
+extern "C" {
+
+M4OSA_ERR VideoEditorAudioEncoder_getInterface_AAC(
+        M4ENCODER_AudioFormat* pFormat,
+        M4ENCODER_AudioGlobalInterface** pEncoderInterface) {
+    return VideoEditorAudioEncoder_getInterface(
+        M4ENCODER_kAAC, pFormat, pEncoderInterface);
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_getInterface_AMRNB(
+        M4ENCODER_AudioFormat* pFormat,
+        M4ENCODER_AudioGlobalInterface** pEncoderInterface) {
+
+    return VideoEditorAudioEncoder_getInterface(
+        M4ENCODER_kAMRNB, pFormat, pEncoderInterface);
+}
+
+M4OSA_ERR VideoEditorAudioEncoder_getInterface_MP3(
+        M4ENCODER_AudioFormat* pFormat,
+        M4ENCODER_AudioGlobalInterface** pEncoderInterface) {
+    LOGV("VideoEditorAudioEncoder_getInterface_MP3 no error");
+
+    return VideoEditorAudioEncoder_getInterface(
+        M4ENCODER_kMP3, pFormat, pEncoderInterface);
+}
+
+}  // extern "C"
+
+}  // namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorBuffer.c b/libvideoeditor/vss/stagefrightshells/src/VideoEditorBuffer.c
new file mode 100755
index 0000000..9f50a58
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorBuffer.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditorBuffer.c
+* @brief  StageFright shell Buffer
+*************************************************************************
+*/
+#undef M4OSA_TRACE_LEVEL
+#define M4OSA_TRACE_LEVEL 1
+
+#include "VideoEditorBuffer.h"
+#include "utils/Log.h"
+
+#define VIDEOEDITOR_BUFFEPOOL_MAX_NAME_SIZE 40
+
+#define VIDEOEDITOR_SAFE_FREE(p) \
+{ \
+    if(M4OSA_NULL != p) \
+    { \
+        M4OSA_free((M4OSA_MemAddr32)p); \
+        p = M4OSA_NULL; \
+    } \
+}
+
+/**
+ ************************************************************************
+ M4OSA_ERR VIDEOEDITOR_BUFFER_allocatePool(VIDEOEDITOR_BUFFER_Pool** ppool,
+ *                                         M4OSA_UInt32 nbBuffers)
+ * @brief   Allocate a pool of nbBuffers buffers
+ *
+ * @param   ppool      : IN The buffer pool to create
+ * @param   nbBuffers  : IN The number of buffers in the pool
+ * @param   poolName   : IN a name given to the pool
+ * @return  Error code
+ ************************************************************************
+*/
+M4OSA_ERR VIDEOEDITOR_BUFFER_allocatePool(VIDEOEDITOR_BUFFER_Pool** ppool,
+        M4OSA_UInt32 nbBuffers, M4OSA_Char* poolName)
+{
+    M4OSA_ERR lerr = M4NO_ERROR;
+    VIDEOEDITOR_BUFFER_Pool* pool;
+
+    LOGV("VIDEOEDITOR_BUFFER_allocatePool : ppool = 0x%x nbBuffers = %d ",
+        ppool, nbBuffers);
+
+    pool = M4OSA_NULL;
+    pool = (VIDEOEDITOR_BUFFER_Pool*)M4OSA_malloc(
+            sizeof(VIDEOEDITOR_BUFFER_Pool), VIDEOEDITOR_BUFFER_EXTERNAL,
+            (M4OSA_Char*)("VIDEOEDITOR_BUFFER_allocatePool: pool"));
+    if (M4OSA_NULL == pool)
+    {
+        lerr = M4ERR_ALLOC;
+        goto VIDEOEDITOR_BUFFER_allocatePool_Cleanup;
+    }
+
+    LOGV("VIDEOEDITOR_BUFFER_allocatePool : Allocating Pool buffers");
+    pool->pNXPBuffer = M4OSA_NULL;
+    pool->poolName = M4OSA_NULL;
+    pool->pNXPBuffer = (VIDEOEDITOR_BUFFER_Buffer*)M4OSA_malloc(
+                            sizeof(VIDEOEDITOR_BUFFER_Buffer)*nbBuffers,
+                            VIDEOEDITOR_BUFFER_EXTERNAL,
+                            (M4OSA_Char*)("BUFFER_allocatePool: pNXPBuffer"));
+    if(M4OSA_NULL == pool->pNXPBuffer)
+    {
+        lerr = M4ERR_ALLOC;
+        goto VIDEOEDITOR_BUFFER_allocatePool_Cleanup;
+    }
+
+    LOGV("VIDEOEDITOR_BUFFER_allocatePool : Allocating Pool name buffer");
+    pool->poolName = M4OSA_NULL;
+    pool->poolName = (M4OSA_Char*)M4OSA_malloc(
+        VIDEOEDITOR_BUFFEPOOL_MAX_NAME_SIZE,VIDEOEDITOR_BUFFER_EXTERNAL,
+        (M4OSA_Char*)("VIDEOEDITOR_BUFFER_allocatePool: poolname"));
+    if(pool->poolName == M4OSA_NULL)
+    {
+        lerr = M4ERR_ALLOC;
+        goto VIDEOEDITOR_BUFFER_allocatePool_Cleanup;
+    }
+
+    LOGV("VIDEOEDITOR_BUFFER_allocatePool : Assigning Pool name buffer");
+
+    M4OSA_memset(pool->poolName, VIDEOEDITOR_BUFFEPOOL_MAX_NAME_SIZE, 0);
+    M4OSA_memcpy(pool->poolName, poolName,
+        VIDEOEDITOR_BUFFEPOOL_MAX_NAME_SIZE-1);
+
+    pool->NB = nbBuffers;
+
+VIDEOEDITOR_BUFFER_allocatePool_Cleanup:
+    if(M4NO_ERROR != lerr)
+    {
+        if(M4OSA_NULL != pool)
+        {
+            VIDEOEDITOR_SAFE_FREE(pool->pNXPBuffer);
+            VIDEOEDITOR_SAFE_FREE(pool->poolName);
+        }
+        VIDEOEDITOR_SAFE_FREE(pool);
+    }
+    *ppool = pool;
+    LOGV("VIDEOEDITOR_BUFFER_allocatePool END");
+
+    return lerr;
+}
+
+/**
+ ************************************************************************
+ M4OSA_ERR VIDEOEDITOR_BUFFER_freePool(VIDEOEDITOR_BUFFER_Pool* ppool)
+ * @brief   Deallocate a buffer pool
+ *
+ * @param   ppool      : IN The buffer pool to free
+ * @return  Error code
+ ************************************************************************
+*/
+M4OSA_ERR VIDEOEDITOR_BUFFER_freePool(VIDEOEDITOR_BUFFER_Pool* ppool)
+{
+    M4OSA_ERR err;
+    M4OSA_UInt32  j = 0;
+
+    LOGV("VIDEOEDITOR_BUFFER_freePool : ppool = 0x%x", ppool);
+
+    err = M4NO_ERROR;
+
+    if(M4OSA_NULL == ppool)
+    {
+        return M4ERR_PARAMETER;
+    }
+
+    for (j = 0; j < ppool->NB; j++)
+    {
+        if(M4OSA_NULL != ppool->pNXPBuffer[j].pData)
+        {
+            M4OSA_free((M4OSA_MemAddr32)ppool->pNXPBuffer[j].pData);
+            ppool->pNXPBuffer[j].pData = M4OSA_NULL;
+        }
+    }
+
+    VIDEOEDITOR_SAFE_FREE(ppool->pNXPBuffer);
+    VIDEOEDITOR_SAFE_FREE(ppool->poolName);
+    VIDEOEDITOR_SAFE_FREE(ppool);
+
+    return(err);
+}
+
+/**
+ ************************************************************************
+ M4OSA_ERR VIDEOEDITOR_BUFFER_getBuffer(VIDEOEDITOR_BUFFER_Pool* ppool,
+ *         VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer)
+ * @brief   Returns a buffer in a given state
+ *
+ * @param   ppool      : IN The buffer pool
+ * @param   desiredState : IN The buffer state
+ * @param   pNXPBuffer : IN The selected buffer
+ * @return  Error code
+ ************************************************************************
+*/
+M4OSA_ERR VIDEOEDITOR_BUFFER_getBuffer(VIDEOEDITOR_BUFFER_Pool* ppool,
+        VIDEOEDITOR_BUFFER_State desiredState,
+        VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer)
+{
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_Bool bFound = M4OSA_FALSE;
+    M4OSA_UInt32 i, ibuf;
+
+    LOGV("VIDEOEDITOR_BUFFER_getBuffer from %s in state=%d",
+        ppool->poolName, desiredState);
+
+    ibuf = 0;
+
+    for (i=0; i < ppool->NB; i++)
+    {
+        bFound = (ppool->pNXPBuffer[i].state == desiredState);
+        if (bFound)
+        {
+            ibuf = i;
+            break;
+        }
+    }
+
+    if(!bFound)
+    {
+        LOGV("VIDEOEDITOR_BUFFER_getBuffer No buffer available in state %d",
+            desiredState);
+        *pNXPBuffer = M4OSA_NULL;
+        return M4ERR_NO_BUFFER_AVAILABLE;
+    }
+
+    /* case where a buffer has been found */
+    *pNXPBuffer = &(ppool->pNXPBuffer[ibuf]);
+
+    LOGV("VIDEOEDITOR_BUFFER_getBuffer: idx = %d", ibuf);
+
+    return(err);
+}
+
+M4OSA_ERR VIDEOEDITOR_BUFFER_initPoolBuffers(VIDEOEDITOR_BUFFER_Pool* pool,
+    M4OSA_UInt32 lSize)
+{
+    M4OSA_ERR     err = M4NO_ERROR;
+    M4OSA_UInt32  index, j;
+
+    /**
+     * Initialize all the buffers in the pool */
+    for(index = 0; index< pool->NB; index++)
+    {
+        pool->pNXPBuffer[index].pData = M4OSA_NULL;
+        pool->pNXPBuffer[index].pData = (M4OSA_Void*)M4OSA_malloc(
+            lSize, VIDEOEDITOR_BUFFER_EXTERNAL,
+            (M4OSA_Char*)("BUFFER_initPoolBuffers: Buffer data"));
+        if(M4OSA_NULL == pool->pNXPBuffer[index].pData)
+        {
+            for (j = 0; j < index; j++)
+            {
+                if(M4OSA_NULL != pool->pNXPBuffer[j].pData)
+                {
+                    M4OSA_free((M4OSA_MemAddr32)pool->pNXPBuffer[j].pData);
+                    pool->pNXPBuffer[j].pData = M4OSA_NULL;
+                }
+            }
+            err = M4ERR_ALLOC;
+            return err;
+        }
+        pool->pNXPBuffer[index].size = 0;
+        pool->pNXPBuffer[index].state = VIDEOEDITOR_BUFFER_kEmpty;
+        pool->pNXPBuffer[index].idx = index;
+        pool->pNXPBuffer[index].buffCTS = -1;
+    }
+    return err;
+}
+
+M4OSA_ERR VIDEOEDITOR_BUFFER_getOldestBuffer(VIDEOEDITOR_BUFFER_Pool *pool,
+        VIDEOEDITOR_BUFFER_State desiredState,
+        VIDEOEDITOR_BUFFER_Buffer** pNXPBuffer)
+{
+    M4OSA_ERR     err = M4NO_ERROR;
+    M4OSA_UInt32  index, j;
+    M4_MediaTime  candidateTimeStamp = (M4_MediaTime)0x7ffffff;
+    M4OSA_Bool    bFound = M4OSA_FALSE;
+
+    *pNXPBuffer = M4OSA_NULL;
+    for(index = 0; index< pool->NB; index++)
+    {
+        if(pool->pNXPBuffer[index].state == desiredState)
+        {
+            if(pool->pNXPBuffer[index].buffCTS <= candidateTimeStamp)
+            {
+                bFound = M4OSA_TRUE;
+                candidateTimeStamp = pool->pNXPBuffer[index].buffCTS;
+                *pNXPBuffer = &(pool->pNXPBuffer[index]);
+            }
+        }
+    }
+    if(M4OSA_FALSE == bFound)
+    {
+        LOGV("VIDEOEDITOR_BUFFER_getOldestBuffer WARNING no buffer available");
+        err = M4ERR_NO_BUFFER_AVAILABLE;
+    }
+    return err;
+}
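+
+/* Hedged usage sketch for the pool API above (illustrative only; the pool
+ * name, buffer count and buffer size below are hypothetical):
+ *
+ *     VIDEOEDITOR_BUFFER_Pool*   pPool = M4OSA_NULL;
+ *     VIDEOEDITOR_BUFFER_Buffer* pBuf  = M4OSA_NULL;
+ *     VIDEOEDITOR_BUFFER_allocatePool(&pPool, 5, (M4OSA_Char*)"DecOut");
+ *     VIDEOEDITOR_BUFFER_initPoolBuffers(pPool, 64 * 1024);
+ *     VIDEOEDITOR_BUFFER_getBuffer(pPool, VIDEOEDITOR_BUFFER_kEmpty, &pBuf);
+ *     ... fill pBuf->pData, then update pBuf->size, state and buffCTS ...
+ *     VIDEOEDITOR_BUFFER_freePool(pPool);
+ */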
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorMp3Reader.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorMp3Reader.cpp
new file mode 100755
index 0000000..a07bf47
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorMp3Reader.cpp
@@ -0,0 +1,801 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditorMp3Reader.cpp
+* @brief  StageFright shell MP3 Reader
+*************************************************************************
+*/
+#define LOG_NDEBUG 1
+#define LOG_TAG "VIDEOEDITOR_MP3READER"
+
+/**
+ * HEADERS
+ *
+ */
+#include "M4OSA_Debug.h"
+#include "M4SYS_AccessUnit.h"
+#include "VideoEditorMp3Reader.h"
+#include "VideoEditorUtils.h"
+
+#include "utils/Log.h"
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/FileSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+
+/**
+ * SOURCE CLASS
+ */
+
+namespace android {
+/**
+ * ENGINE INTERFACE
+ */
+
+/**
+ **************************************************************************
+ * structure VideoEditorMp3Reader_Context
+ * @brief    This structure defines the context of the SF MP3 reader shell.
+ **************************************************************************
+ */
+typedef struct {
+    sp<DataSource>              mDataSource;
+    sp<MediaExtractor>          mExtractor;
+    sp<MediaSource>             mMediaSource;
+    M4_AudioStreamHandler*      mAudioStreamHandler;
+    M4SYS_AccessUnit            mAudioAu;
+    M4OSA_Time                  mMaxDuration;
+    M4OSA_UInt8                 mStreamNumber;
+    M4OSA_Bool                  mSeeking;
+    M4OSA_Time                  mSeekTime;
+    uint32_t                    mExtractorFlags;
+} VideoEditorMp3Reader_Context;
+
+/**
+ ****************************************************************************
+ * @brief    create an instance of the MP3 reader
+ * @note     allocates the context
+ *
+ * @param    pContext:        (OUT)    pointer on a reader context
+ *
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_ALLOC                a memory allocation has failed
+ * @return    M4ERR_PARAMETER            at least one parameter is not valid
+ ****************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_create(M4OSA_Context *pContext) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorMp3Reader_Context *pReaderContext = M4OSA_NULL;
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    LOGV("VideoEditorMp3Reader_create begin");
+
+    /* Context allocation & initialization */
+    SAFE_MALLOC(pReaderContext, VideoEditorMp3Reader_Context, 1,
+        "VideoEditorMp3Reader");
+
+    pReaderContext->mAudioStreamHandler  = M4OSA_NULL;
+    pReaderContext->mAudioAu.dataAddress = M4OSA_NULL;
+    M4OSA_INT64_FROM_INT32(pReaderContext->mMaxDuration, 0);
+    *pContext = pReaderContext;
+
+cleanUp:
+    if (M4NO_ERROR == err) {
+        LOGV("VideoEditorMp3Reader_create no error");
+    } else {
+        LOGV("VideoEditorMp3Reader_create ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorMp3Reader_create end");
+    return err;
+}
+
+/**
+ *******************************************************************************
+ * @brief     destroy the instance of the MP3 reader
+ * @note      after this call the context is invalid
+ * @param     context:        (IN)    Context of the reader
+ * @return    M4NO_ERROR                 there is no error
+ * @return    M4ERR_PARAMETER            The input parameter is not properly set
+ *******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_destroy(M4OSA_Context pContext) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorMp3Reader_Context *pReaderContext =
+        (VideoEditorMp3Reader_Context*)pContext;
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pReaderContext, M4ERR_PARAMETER);
+    LOGV("VideoEditorMp3Reader_destroy begin");
+
+    SAFE_FREE(pReaderContext);
+cleanUp:
+    if (M4NO_ERROR == err) {
+        LOGV("VideoEditorMp3Reader_destroy no error");
+    } else {
+        LOGV("VideoEditorMp3Reader_destroy ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorMp3Reader_destroy end");
+    return err;
+}
+/**
+ ******************************************************************************
+ * @brief    open the reader and initializes its created instance
+ * @note    this function opens the MP3 file
+ * @param    context:            (IN)    Context of the reader
+ * @param    pFileDescriptor:    (IN)    Pointer to proprietary data identifying
+ *                                       the media to open
+ *
+ * @return    M4NO_ERROR                     there is no error
+ * @return    M4ERR_PARAMETER                the context is NULL
+ * @return    M4ERR_BAD_CONTEXT            provided context is not a valid one
+ ******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_open(M4OSA_Context context,
+        M4OSA_Void* pFileDescriptor){
+    VideoEditorMp3Reader_Context *pReaderContext =
+    (VideoEditorMp3Reader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    LOGV("VideoEditorMp3Reader_open begin");
+    /* Check function parameters*/
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext),  M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_open: invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pFileDescriptor), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_open: invalid pointer pFileDescriptor");
+
+    LOGV("VideoEditorMp3Reader_open Datasource start %s",
+        (char*)pFileDescriptor);
+    pReaderContext->mDataSource = DataSource::CreateFromURI(
+        (char*)pFileDescriptor);
+    LOGV("VideoEditorMp3Reader_open Datasource end");
+
+    if (pReaderContext->mDataSource == NULL) {
+        LOGV("VideoEditorMp3Reader_open Datasource error");
+        return UNKNOWN_ERROR;
+    }
+
+    LOGV("VideoEditorMp3Reader_open extractor start");
+    pReaderContext->mExtractor = MediaExtractor::Create(
+        pReaderContext->mDataSource,MEDIA_MIMETYPE_AUDIO_MPEG);
+    LOGV("VideoEditorMp3Reader_open extractor end");
+
+    if (pReaderContext->mExtractor == NULL)    {
+        LOGV("VideoEditorMp3Reader_open extractor error");
+        return UNKNOWN_ERROR;
+    }
+    pReaderContext->mStreamNumber = 0;
+
+    LOGV("VideoEditorMp3Reader_open end");
+    return err;
+}
+/**
+ **************************************************************************
+ * @brief    close the reader
+ * @note    this function closes the MP3 reader
+ * @param    context:        (IN)      Context of the reader
+ * @return    M4NO_ERROR               there is no error
+ * @return    M4ERR_PARAMETER          the context is NULL
+ **************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_close(M4OSA_Context context) {
+    VideoEditorMp3Reader_Context *pReaderContext =
+        (VideoEditorMp3Reader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    LOGV("VideoEditorMp3Reader_close begin");
+    /* Check function parameters */
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER,
+            "VideoEditorMp3Reader_close: invalid context pointer");
+
+    if (pReaderContext->mAudioStreamHandler != NULL) {
+        if (M4OSA_NULL != pReaderContext->mAudioStreamHandler->\
+        m_basicProperties.m_pDecoderSpecificInfo) {
+            M4OSA_free((M4OSA_MemAddr32)pReaderContext->mAudioStreamHandler->\
+                m_basicProperties.m_pDecoderSpecificInfo);
+            pReaderContext->mAudioStreamHandler->m_basicProperties.\
+                m_decoderSpecificInfoSize = 0;
+            pReaderContext->mAudioStreamHandler->m_basicProperties.\
+                m_pDecoderSpecificInfo = M4OSA_NULL;
+        }
+
+        /* Finally destroy the stream handler */
+        M4OSA_free((M4OSA_MemAddr32)pReaderContext->mAudioStreamHandler);
+        pReaderContext->mAudioStreamHandler = M4OSA_NULL;
+
+        if (pReaderContext->mAudioAu.dataAddress != NULL) {
+            M4OSA_free((M4OSA_MemAddr32)pReaderContext->mAudioAu.dataAddress);
+            pReaderContext->mAudioAu.dataAddress = NULL;
+        }
+    }
+
+    pReaderContext->mMediaSource->stop();
+    pReaderContext->mMediaSource.clear();
+    pReaderContext->mDataSource.clear();
+
+    LOGV("VideoEditorMp3Reader_close end ");
+    return err;
+}
+/**
+ ******************************************************************************
+ * @brief    get an option value from the reader
+ * @note
+ *          it allows the caller to retrieve a property value:
+ *
+ * @param    context:        (IN)    Context of the reader
+ * @param    optionId:       (IN)    indicates the option to get
+ * @param    pValue:         (OUT)   pointer to structure or value (allocated
+ *                                   by user) where option is stored
+ *
+ * @return    M4NO_ERROR             there is no error
+ * @return    M4ERR_PARAMETER        at least one parameter is not properly set
+ * @return    M4ERR_BAD_OPTION_ID    when the option ID is not a valid one
+ ******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_getOption(M4OSA_Context context,
+          M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
+    VideoEditorMp3Reader_Context *pReaderContext =
+        (VideoEditorMp3Reader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    LOGV("VideoEditorMp3Reader_getOption begin: optionId= %d ",(int)optionId);
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER,
+        "invalid value pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
+        "invalid value pointer");
+
+    switch(optionId) {
+    case M4READER_kOptionID_Duration:
+        {
+            LOGV("Mp3Reader duration=%ld",pReaderContext->mMaxDuration);
+            M4OSA_TIME_SET(*(M4OSA_Time*)pValue, pReaderContext->mMaxDuration);
+        }
+        break;
+
+    case M4READER_kOptionID_Bitrate:
+        {
+            M4OSA_UInt32* pBitrate = (M4OSA_UInt32*)pValue;
+            if (M4OSA_NULL != pReaderContext->mAudioStreamHandler) {
+                *pBitrate = pReaderContext->mAudioStreamHandler->\
+                    m_basicProperties.m_averageBitRate;
+            } else {
+                *pBitrate = 0;
+                err = M4ERR_PARAMETER;
+            }
+        }
+        break;
+
+    case M4READER_kOptionID_Mp3Id3v1Tag:
+        break;
+
+    case M4READER_kOptionID_Mp3Id3v2Tag:
+        break;
+
+    case M4READER_kOptionID_GetMetadata:
+        break;
+
+    default :
+        {
+            LOGV("VideoEditorMp3Reader_getOption:  M4ERR_BAD_OPTION_ID");
+            err = M4ERR_BAD_OPTION_ID;
+        }
+    }
+    LOGV("VideoEditorMp3Reader_getOption end ");
+    return err;
+}
+/**
+ ******************************************************************************
+ * @brief   set an option value of the reader
+ * @note
+ *          it allows the caller to set a property value:
+ *
+ * @param   context:    (IN)        Context of the reader
+ * @param   optionId:   (IN)        Identifier indicating the option to set
+ * @param   pValue:     (IN)        Pointer to structure or value (allocated
+ *                                  by user) where option is stored
+ *
+ * @return  M4NO_ERROR              There is no error
+ * @return  M4ERR_BAD_OPTION_ID     The option ID is not a valid one
+ * @return  M4ERR_STATE             State automaton is not applied
+ * @return  M4ERR_PARAMETER         The option parameter is invalid
+ ******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_setOption(M4OSA_Context context,
+        M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
+    VideoEditorMp3Reader_Context *pReaderContext =
+        (VideoEditorMp3Reader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+
+    LOGV("VideoEditorMp3Reader_Context begin: optionId: %d Value: %d ",
+        (int)optionId,(int)pValue);
+
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pReaderContext), M4ERR_PARAMETER,
+        "invalid context pointer");
+    M4OSA_DEBUG_IF1((M4OSA_NULL == pValue), M4ERR_PARAMETER,
+        "invalid value pointer");
+
+    switch(optionId) {
+        case M4READER_kOptionID_SetOsaFileReaderFctsPtr:
+        default :
+        {
+            err = M4NO_ERROR;
+        }
+    }
+    LOGV("VideoEditorMp3Reader_Context end ");
+    return err;
+}
+/**
+ ******************************************************************************
+ * @brief    jump into the stream at the specified time
+ * @note
+ * @param    context:      (IN)   Context of the reader
+ * @param    pStreamHandler(IN)   stream description of the stream to make jump
+ * @param    pTime         (I/O)IN:the time to jump to (in ms)
+ *                              OUT: the time to which the stream really jumped
+ * @return    M4NO_ERROR           there is no error
+ * @return    M4ERR_PARAMETER      at least one parameter is not properly set
+ ******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_jump(M4OSA_Context context,
+        M4_StreamHandler *pStreamHandler, M4OSA_Int32* pTime) {
+    VideoEditorMp3Reader_Context *pReaderContext =
+        (VideoEditorMp3Reader_Context*)context;
+    M4SYS_StreamID streamIdArray[2];
+    M4OSA_ERR err = M4NO_ERROR;
+    M4SYS_AccessUnit* pAu;
+    M4OSA_Time time64;
+    M4OSA_Double timeDouble;
+
+    LOGV("VideoEditorMp3Reader_jump begin");
+    M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_jump: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_jump: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pTime == 0), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_jump: invalid time pointer");
+
+    M4OSA_INT64_FROM_INT32(time64, *pTime);
+
+    if(pStreamHandler == (M4_StreamHandler*)pReaderContext->\
+        mAudioStreamHandler){
+        pAu = &pReaderContext->mAudioAu;
+    } else {
+        LOGV("VideoEditorMp3Reader_jump: passed StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+
+    streamIdArray[0] = pStreamHandler->m_streamId;
+    streamIdArray[1] = 0;
+
+    LOGV("VideoEditorMp3Reader_jump time ms %ld ", time64);
+
+    pAu->CTS = time64;
+    pAu->DTS = time64;
+
+    time64 = time64 * 1000; /* Convert the time into micro sec */
+    LOGV("VideoEditorMp3Reader_jump time us %ld ", time64);
+
+    pReaderContext->mSeeking = M4OSA_TRUE;
+    pReaderContext->mSeekTime = time64;
+
+    time64 = time64 / 1000; /* Convert the time into milli sec */
+    M4OSA_INT64_TO_DOUBLE(timeDouble, time64);
+    *pTime = (M4OSA_Int32)timeDouble;
+    LOGV("VideoEditorMp3Reader_jump end ");
+    return err;
+}
+/**
+ *******************************************************************************
+ * @brief   Get the next stream found in the media file
+ *
+ * @param    context:        (IN)  Context of the reader
+ * @param    pMediaFamily:   (OUT) pointer to a user allocated
+ *                                 M4READER_MediaFamily that will be filled with
+ *                                 the media family of the found stream
+ * @param    pStreamHandler: (OUT) pointer to a stream handler that will be
+ *                                 allocated and filled with stream description
+ *
+ * @return    M4NO_ERROR             there is no error
+ * @return    M4WAR_NO_MORE_STREAM   no more available stream in the media
+ * @return    M4ERR_PARAMETER        at least one parameter is not properly set
+ *******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_getNextStream(M4OSA_Context context,
+        M4READER_MediaFamily *pMediaFamily,
+        M4_StreamHandler **pStreamHandlerParam) {
+    VideoEditorMp3Reader_Context *pReaderContext =
+        (VideoEditorMp3Reader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4SYS_StreamID streamIdArray[2];
+    M4SYS_StreamDescription streamDesc;
+    M4_AudioStreamHandler* pAudioStreamHandler;
+    M4_StreamHandler* pStreamHandler;
+    M4OSA_UInt8 type, temp;
+    M4OSA_Bool haveAudio = M4OSA_FALSE;
+    sp<MetaData> meta = NULL;
+    int64_t Duration = 0;
+
+    LOGV("VideoEditorMp3Reader_getNextStream begin");
+    M4OSA_DEBUG_IF1((pReaderContext == 0),      M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_getNextStream: invalid context");
+    M4OSA_DEBUG_IF1((pMediaFamily == 0),        M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_getNextStream: invalid pointer to MediaFamily");
+    M4OSA_DEBUG_IF1((pStreamHandlerParam == 0), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_getNextStream: invalid pointer to StreamHandler");
+
+    LOGV("VideoEditorMp3Reader_getNextStream stream number = %d",
+        pReaderContext->mStreamNumber);
+    if (pReaderContext->mStreamNumber >= 1) {
+        LOGV("VideoEditorMp3Reader_getNextStream max number of stream reached");
+        return M4WAR_NO_MORE_STREAM;
+    }
+    pReaderContext->mStreamNumber = pReaderContext->mStreamNumber + 1;
+    LOGV("VideoEditorMp3Reader_getNextStream number of Tracks%d",
+        pReaderContext->mExtractor->countTracks());
+    for (temp = 0; temp < pReaderContext->mExtractor->countTracks(); temp++) {
+        meta = pReaderContext->mExtractor->getTrackMetaData(temp);
+        const char *mime;
+        CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+        if (!haveAudio && !strncasecmp(mime, "audio/", 6)) {
+            pReaderContext->mMediaSource =
+                pReaderContext->mExtractor->getTrack(temp);
+            pReaderContext->mMediaSource->start();
+            haveAudio = true;
+        }
+
+        if (haveAudio) {
+            break;
+        }
+    }
+
+    if (!haveAudio) {
+        LOGV("VideoEditorMp3Reader_getNextStream no more stream ");
+        pReaderContext->mDataSource.clear();
+        return M4WAR_NO_MORE_STREAM;
+    }
+
+    pReaderContext->mExtractorFlags = pReaderContext->mExtractor->flags();
+    *pMediaFamily = M4READER_kMediaFamilyAudio;
+
+    meta->findInt64(kKeyDuration, &Duration);
+    streamDesc.duration = (M4OSA_Time)(Duration/1000);
+
+    meta->findInt32(kKeyBitRate, (int32_t*)&streamDesc.averageBitrate);
+    meta->findInt32(kKeySampleRate, (int32_t*)&streamDesc.timeScale);
+    LOGV("Bitrate = %d, SampleRate = %d duration = %lld",
+        streamDesc.averageBitrate,streamDesc.timeScale,Duration/1000);
+
+    streamDesc.streamType = M4SYS_kMP3;
+    streamDesc.profileLevel = 0xFF ;
+    streamDesc.streamID = pReaderContext->mStreamNumber;
+    streamDesc.decoderSpecificInfo = M4OSA_NULL;
+    streamDesc.decoderSpecificInfoSize = 0;
+    streamDesc.maxBitrate = streamDesc.averageBitrate;
+
+    /*    Allocate the audio stream handler and set its parameters    */
+    pAudioStreamHandler = (M4_AudioStreamHandler*)M4OSA_malloc(
+        sizeof(M4_AudioStreamHandler), M4READER_MP3,
+        (M4OSA_Char*)"M4_AudioStreamHandler");
+
+    if (pAudioStreamHandler == M4OSA_NULL) {
+        LOGV("VideoEditorMp3Reader_getNextStream malloc failed");
+        pReaderContext->mMediaSource->stop();
+        pReaderContext->mMediaSource.clear();
+        pReaderContext->mDataSource.clear();
+
+        return M4ERR_ALLOC;
+    }
+    pStreamHandler =(M4_StreamHandler*)(pAudioStreamHandler);
+    *pStreamHandlerParam = pStreamHandler;
+    pReaderContext->mAudioStreamHandler = pAudioStreamHandler;
+
+    pAudioStreamHandler->m_structSize = sizeof(M4_AudioStreamHandler);
+
+    if (meta == NULL) {
+        LOGV("VideoEditorMp3Reader_getNextStream meta is NULL");
+    }
+
+    pAudioStreamHandler->m_samplingFrequency = streamDesc.timeScale;
+    pStreamHandler->m_pDecoderSpecificInfo =
+        (M4OSA_UInt8*)(streamDesc.decoderSpecificInfo);
+    pStreamHandler->m_decoderSpecificInfoSize =
+        streamDesc.decoderSpecificInfoSize;
+
+    meta->findInt32(kKeyChannelCount,
+        (int32_t*)&pAudioStreamHandler->m_nbChannels);
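+    // 1152 PCM samples per MPEG-1 Layer III frame, 16-bit (2-byte) samples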
+    pAudioStreamHandler->m_byteFrameLength = 1152;
+    pAudioStreamHandler->m_byteSampleSize = 2;
+
+    pStreamHandler->m_pUserData = NULL;
+    pStreamHandler->m_streamId = streamDesc.streamID;
+    pStreamHandler->m_duration = streamDesc.duration;
+    pReaderContext->mMaxDuration = streamDesc.duration;
+    pStreamHandler->m_averageBitRate = streamDesc.averageBitrate;
+
+    pStreamHandler->m_maxAUSize = 0;
+    pStreamHandler->m_streamType = M4DA_StreamTypeAudioMp3;
+
+    LOGV("VideoEditorMp3Reader_getNextStream end ");
+    return err;
+}
+
+/**
+ *******************************************************************************
+ * @brief    fill the access unit structure with initialization values
+ * @param    context:        (IN)     Context of the reader
+ * @param    pStreamHandler: (IN)     pointer to the stream handler to which
+ *                                    the access unit will be associated
+ * @param    pAccessUnit:    (IN/OUT) pointer to the access unit (allocated by
+ *                                    the caller) to initialize
+ * @return   M4NO_ERROR               there is no error
+ * @return   M4ERR_PARAMETER          at least one parameter is not properly set
+ *******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_fillAuStruct(M4OSA_Context context,
+        M4_StreamHandler *pStreamHandler, M4_AccessUnit *pAccessUnit) {
+    VideoEditorMp3Reader_Context *pReaderContext =
+        (VideoEditorMp3Reader_Context*)context;
+    M4SYS_AccessUnit *pAu;
+
+    M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_fillAuStruct: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_fillAuStruct invalid pointer to StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_fillAuStruct: invalid pointer to M4_AccessUnit");
+
+    LOGV("VideoEditorMp3Reader_fillAuStruct start ");
+    if(pStreamHandler == (M4_StreamHandler*)pReaderContext->\
+        mAudioStreamHandler){
+        pAu = &pReaderContext->mAudioAu;
+    } else {
+        LOGV("VideoEditorMp3Reader_fillAuStruct StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+
+    /* Initialize pAu structure */
+    pAu->dataAddress = M4OSA_NULL;
+    pAu->size        = 0;
+    pAu->CTS         = 0;
+    pAu->DTS         = 0;
+    pAu->attribute   = 0;
+    pAu->nbFrag      = 0;
+
+    /* Initialize pAccessUnit structure */
+    pAccessUnit->m_size         = 0;
+    pAccessUnit->m_CTS          = 0;
+    pAccessUnit->m_DTS          = 0;
+    pAccessUnit->m_attribute    = 0;
+    pAccessUnit->m_dataAddress  = M4OSA_NULL;
+    pAccessUnit->m_maxsize      = pStreamHandler->m_maxAUSize;
+    pAccessUnit->m_streamID     = pStreamHandler->m_streamId;
+    pAccessUnit->m_structSize   = sizeof(M4_AccessUnit);
+
+    LOGV("VideoEditorMp3Reader_fillAuStruct end");
+    return M4NO_ERROR;
+}
+
+/**
+ *******************************************************************************
+ * @brief    reset the stream, i.e seek it to the beginning
+ * @note
+ * @param     context:          (IN)  Context of the reader
+ * @param     pStreamHandler    (IN)  The stream handler of the stream to reset
+ * @return    M4NO_ERROR              there is no error
+ * @return    M4ERR_PARAMETER         at least one parameter is not properly set
+ *******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_reset(M4OSA_Context context,
+        M4_StreamHandler *pStreamHandler) {
+    VideoEditorMp3Reader_Context *pReaderContext =
+        (VideoEditorMp3Reader_Context*)context;
+
+    M4OSA_ERR err = M4NO_ERROR;
+    M4SYS_StreamID streamIdArray[2];
+    M4SYS_AccessUnit* pAu;
+    M4OSA_Time time64;
+
+    LOGV("VideoEditorMp3Reader_reset start");
+    M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_reset: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_reset: invalid pointer to M4_StreamHandler");
+
+    M4OSA_INT64_FROM_INT32(time64, 0);
+
+    if (pStreamHandler == (M4_StreamHandler*)pReaderContext->\
+        mAudioStreamHandler) {
+        pAu = &pReaderContext->mAudioAu;
+    } else {
+        LOGV("VideoEditorMp3Reader_reset StreamHandler is not known");
+        return M4ERR_PARAMETER;
+    }
+    streamIdArray[0] = pStreamHandler->m_streamId;
+    streamIdArray[1] = 0;
+    pAu->CTS = time64;
+    pAu->DTS = time64;
+
+    pReaderContext->mSeeking = M4OSA_TRUE;
+    pReaderContext->mSeekTime = time64;
+
+    LOGV("VideoEditorMp3Reader_reset end");
+    return err;
+}
+/**
+ *******************************************************************************
+ * @brief   Gets an access unit (AU) from the stream handler source.
+ * @note    AU is the smallest possible amount of data to be decoded by decoder
+ *
+ * @param   context:       (IN) Context of the reader
+ * @param   pStreamHandler (IN) The stream handler of the stream to make jump
+ * @param   pAccessUnit    (I/O)Pointer to an access unit to fill with read data
+ * @return    M4NO_ERROR        there is no error
+ * @return    M4ERR_PARAMETER   at least one parameter is not properly set
+ * @returns   M4ERR_ALLOC       memory allocation failed
+ * @returns   M4WAR_NO_MORE_AU  there are no more access unit in the stream
+ *******************************************************************************
+*/
+M4OSA_ERR VideoEditorMp3Reader_getNextAu(M4OSA_Context context,
+        M4_StreamHandler *pStreamHandler, M4_AccessUnit *pAccessUnit) {
+    VideoEditorMp3Reader_Context *pReaderContext =
+        (VideoEditorMp3Reader_Context*)context;
+    M4OSA_ERR err = M4NO_ERROR;
+    M4SYS_AccessUnit* pAu;
+    MediaBuffer *mAudioBuffer;
+    MediaSource::ReadOptions options;
+
+    LOGV("VideoEditorMp3Reader_getNextAu start");
+    M4OSA_DEBUG_IF1((pReaderContext == 0), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_getNextAu: invalid context");
+    M4OSA_DEBUG_IF1((pStreamHandler == 0), M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_getNextAu: invalid pointer to M4_StreamHandler");
+    M4OSA_DEBUG_IF1((pAccessUnit == 0),    M4ERR_PARAMETER,
+        "VideoEditorMp3Reader_getNextAu: invalid pointer to M4_AccessUnit");
+
+    if (pStreamHandler == (M4_StreamHandler*)pReaderContext->\
+        mAudioStreamHandler) {
+        pAu = &pReaderContext->mAudioAu;
+    } else {
+        LOGV("VideoEditorMp3Reader_getNextAu: StreamHandler is not known\n");
+        return M4ERR_PARAMETER;
+    }
+
+    if (pReaderContext->mSeeking) {
+        options.setSeekTo(pReaderContext->mSeekTime);
+    }
+
+    pReaderContext->mMediaSource->read(&mAudioBuffer, &options);
+
+    if (mAudioBuffer != NULL) {
+        if ((pAu->dataAddress == NULL) ||
+            (pAu->size < mAudioBuffer->range_length())) {
+            if (pAu->dataAddress != NULL) {
+                M4OSA_free((M4OSA_Int32*)pAu->dataAddress);
+                pAu->dataAddress = NULL;
+            }
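+            /* Round the allocation up to the next multiple of 4 bytes, since
+               the buffer is handled as an array of 32-bit words. */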
+            pAu->dataAddress = (M4OSA_Int32*)M4OSA_malloc(
+                (mAudioBuffer->range_length() + 3) & ~0x3,
+                M4READER_MP3, (M4OSA_Char*)"pAccessUnit->m_dataAddress" );
+
+            if (pAu->dataAddress == NULL) {
+                LOGV("VideoEditorMp3Reader_getNextAu malloc failed");
+                pReaderContext->mMediaSource->stop();
+                pReaderContext->mMediaSource.clear();
+                pReaderContext->mDataSource.clear();
+
+                return M4ERR_ALLOC;
+            }
+        }
+        pAu->size = mAudioBuffer->range_length();
+        memcpy((M4OSA_MemAddr8)pAu->dataAddress,
+            (const char *)mAudioBuffer->data() + mAudioBuffer->range_offset(),
+            mAudioBuffer->range_length());
+
+        mAudioBuffer->meta_data()->findInt64(kKeyTime, (int64_t*)&pAu->CTS);
+
+        pAu->CTS = pAu->CTS / 1000; /* convert microseconds to milliseconds */
+        pAu->DTS  = pAu->CTS;
+        pAu->attribute = M4SYS_kFragAttrOk;
+        mAudioBuffer->release();
+
+        LOGV("VideoEditorMp3Reader_getNextAu AU CTS = %ld",pAu->CTS);
+
+        pAccessUnit->m_dataAddress = (M4OSA_Int8*) pAu->dataAddress;
+        pAccessUnit->m_size = pAu->size;
+        pAccessUnit->m_CTS = pAu->CTS;
+        pAccessUnit->m_DTS = pAu->DTS;
+        pAccessUnit->m_attribute = pAu->attribute;
+    } else {
+        LOGV("VideoEditorMp3Reader_getNextAu EOS reached.");
+        pAccessUnit->m_size=0;
+        err = M4WAR_NO_MORE_AU;
+    }
+    pAu->nbFrag = 0;
+
+    options.clearSeekTo();
+    pReaderContext->mSeeking = M4OSA_FALSE;
+    mAudioBuffer = NULL;
+    LOGV("VideoEditorMp3Reader_getNextAu end");
+
+    return err;
+}
+
+extern "C" {
+
+M4OSA_ERR VideoEditorMp3Reader_getInterface(
+        M4READER_MediaType *pMediaType,
+        M4READER_GlobalInterface **pRdrGlobalInterface,
+        M4READER_DataInterface **pRdrDataInterface) {
+    M4OSA_ERR err = M4NO_ERROR;
+
+    LOGV("VideoEditorMp3Reader_getInterface: begin");
+    /* Input parameters check */
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pMediaType,      M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pRdrGlobalInterface, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pRdrDataInterface, M4ERR_PARAMETER);
+
+    SAFE_MALLOC(*pRdrGlobalInterface, M4READER_GlobalInterface, 1,
+        "VideoEditorMp3Reader_getInterface");
+    SAFE_MALLOC(*pRdrDataInterface, M4READER_DataInterface, 1,
+        "VideoEditorMp3Reader_getInterface");
+
+    *pMediaType = M4READER_kMediaTypeMP3;
+
+    (*pRdrGlobalInterface)->m_pFctCreate       = VideoEditorMp3Reader_create;
+    (*pRdrGlobalInterface)->m_pFctDestroy      = VideoEditorMp3Reader_destroy;
+    (*pRdrGlobalInterface)->m_pFctOpen         = VideoEditorMp3Reader_open;
+    (*pRdrGlobalInterface)->m_pFctClose        = VideoEditorMp3Reader_close;
+    (*pRdrGlobalInterface)->m_pFctGetOption    = VideoEditorMp3Reader_getOption;
+    (*pRdrGlobalInterface)->m_pFctSetOption    = VideoEditorMp3Reader_setOption;
+    (*pRdrGlobalInterface)->m_pFctGetNextStream =
+        VideoEditorMp3Reader_getNextStream;
+    (*pRdrGlobalInterface)->m_pFctFillAuStruct =
+        VideoEditorMp3Reader_fillAuStruct;
+    (*pRdrGlobalInterface)->m_pFctStart        = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctStop         = M4OSA_NULL;
+    (*pRdrGlobalInterface)->m_pFctJump         = VideoEditorMp3Reader_jump;
+    (*pRdrGlobalInterface)->m_pFctReset        = VideoEditorMp3Reader_reset;
+    (*pRdrGlobalInterface)->m_pFctGetPrevRapTime = M4OSA_NULL;
+
+    (*pRdrDataInterface)->m_pFctGetNextAu      = VideoEditorMp3Reader_getNextAu;
+    (*pRdrDataInterface)->m_readerContext      = M4OSA_NULL;
+
+cleanUp:
+    if( M4NO_ERROR == err )
+    {
+        LOGV("VideoEditorMp3Reader_getInterface no error");
+    }
+    else
+    {
+        SAFE_FREE(*pRdrGlobalInterface);
+        SAFE_FREE(*pRdrDataInterface);
+
+        LOGV("VideoEditorMp3Reader_getInterface ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorMp3Reader_getInterface: end");
+    return err;
+}
+}  /* extern "C" */
+}  /* namespace android */
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp
new file mode 100755
index 0000000..733c5d6
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorUtils.cpp
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+*************************************************************************
+* @file   VideoEditorUtils.cpp
+* @brief  StageFright shell Utilities
+*************************************************************************
+*/
+#define LOG_NDEBUG 0
+#define LOG_TAG "SF_utils"
+#include "utils/Log.h"
+
+#include "VideoEditorUtils.h"
+
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXCodec.h>
+
+/* Android includes*/
+#include <utils/Log.h>
+#include <memory.h>
+
+/*---------------------*/
+/*  DEBUG LEVEL SETUP  */
+/*---------------------*/
+#define LOG1 LOGE    /*ERRORS Logging*/
+#define LOG2 LOGI    /*WARNING Logging*/
+#define LOG3 //LOGV  /*COMMENTS Logging*/
+
+namespace android {
+
+void displayMetaData(const sp<MetaData> meta) {
+
+    const char* charData;
+    int32_t int32Data;
+    int64_t int64Data;
+    uint32_t type;
+    const void* data;
+    void* ptr;
+    size_t size;
+
+    if (meta->findCString(kKeyMIMEType, &charData)) {
+        LOG1("displayMetaData kKeyMIMEType %s", charData);
+    }
+    if (meta->findInt32(kKeyWidth, &int32Data)) {
+        LOG1("displayMetaData kKeyWidth %d", int32Data);
+    }
+    if (meta->findInt32(kKeyHeight, &int32Data)) {
+        LOG1("displayMetaData kKeyHeight %d", int32Data);
+    }
+    if (meta->findInt32(kKeyIFramesInterval, &int32Data)) {
+        LOG1("displayMetaData kKeyIFramesInterval %d", int32Data);
+    }
+    if (meta->findInt32(kKeyStride, &int32Data)) {
+        LOG1("displayMetaData kKeyStride %d", int32Data);
+    }
+    if (meta->findInt32(kKeySliceHeight, &int32Data)) {
+        LOG1("displayMetaData kKeySliceHeight %d", int32Data);
+    }
+    if (meta->findInt32(kKeyChannelCount, &int32Data)) {
+        LOG1("displayMetaData kKeyChannelCount %d", int32Data);
+    }
+    if (meta->findInt32(kKeySampleRate, &int32Data)) {
+        LOG1("displayMetaData kKeySampleRate %d", int32Data);
+    }
+    if (meta->findInt32(kKeyBitRate, &int32Data)) {
+        LOG1("displayMetaData kKeyBitRate %d", int32Data);
+    }
+    if (meta->findData(kKeyESDS, &type, &data, &size)) {
+        LOG1("displayMetaData kKeyESDS type=%d size=%d", type, size);
+    }
+    if (meta->findData(kKeyAVCC, &type, &data, &size)) {
+        LOG1("displayMetaData kKeyAVCC data=0x%X type=%d size=%d",
+            *((unsigned int*)data), type, size);
+    }
+    if (meta->findData(kKeyVorbisInfo, &type, &data, &size)) {
+        LOG1("displayMetaData kKeyVorbisInfo type=%d size=%d", type, size);
+    }
+    if (meta->findData(kKeyVorbisBooks, &type, &data, &size)) {
+        LOG1("displayMetaData kKeyVorbisBooks type=%d size=%d", type, size);
+    }
+    if (meta->findInt32(kKeyWantsNALFragments, &int32Data)) {
+        LOG1("displayMetaData kKeyWantsNALFragments %d", int32Data);
+    }
+    if (meta->findInt32(kKeyIsSyncFrame, &int32Data)) {
+        LOG1("displayMetaData kKeyIsSyncFrame %d", int32Data);
+    }
+    if (meta->findInt32(kKeyIsCodecConfig, &int32Data)) {
+        LOG1("displayMetaData kKeyIsCodecConfig %d", int32Data);
+    }
+    if (meta->findInt64(kKeyTime, &int64Data)) {
+        LOG1("displayMetaData kKeyTime %lld", int64Data);
+    }
+    if (meta->findInt32(kKeyDuration, &int32Data)) {
+        LOG1("displayMetaData kKeyDuration %d", int32Data);
+    }
+    if (meta->findInt32(kKeyColorFormat, &int32Data)) {
+        LOG1("displayMetaData kKeyColorFormat %d", int32Data);
+    }
+    if (meta->findPointer(kKeyPlatformPrivate, &ptr)) {
+        LOG1("displayMetaData kKeyPlatformPrivate pointer=0x%x", (int32_t) ptr);
+    }
+    if (meta->findCString(kKeyDecoderComponent, &charData)) {
+        LOG1("displayMetaData kKeyDecoderComponent %s", charData);
+    }
+    if (meta->findInt32(kKeyBufferID, &int32Data)) {
+        LOG1("displayMetaData kKeyBufferID %d", int32Data);
+    }
+    if (meta->findInt32(kKeyMaxInputSize, &int32Data)) {
+        LOG1("displayMetaData kKeyMaxInputSize %d", int32Data);
+    }
+    if (meta->findInt64(kKeyThumbnailTime, &int64Data)) {
+        LOG1("displayMetaData kKeyThumbnailTime %lld", int64Data);
+    }
+    if (meta->findCString(kKeyAlbum, &charData)) {
+        LOG1("displayMetaData kKeyAlbum %s", charData);
+    }
+    if (meta->findCString(kKeyArtist, &charData)) {
+        LOG1("displayMetaData kKeyArtist %s", charData);
+    }
+    if (meta->findCString(kKeyAlbumArtist, &charData)) {
+        LOG1("displayMetaData kKeyAlbumArtist %s", charData);
+    }
+    if (meta->findCString(kKeyComposer, &charData)) {
+        LOG1("displayMetaData kKeyComposer %s", charData);
+    }
+    if (meta->findCString(kKeyGenre, &charData)) {
+        LOG1("displayMetaData kKeyGenre %s", charData);
+    }
+    if (meta->findCString(kKeyTitle, &charData)) {
+        LOG1("displayMetaData kKeyTitle %s", charData);
+    }
+    if (meta->findCString(kKeyYear, &charData)) {
+        LOG1("displayMetaData kKeyYear %s", charData);
+    }
+    if (meta->findData(kKeyAlbumArt, &type, &data, &size)) {
+        LOG1("displayMetaData kKeyAlbumArt type=%d size=%d", type, size);
+    }
+    if (meta->findCString(kKeyAlbumArtMIME, &charData)) {
+        LOG1("displayMetaData kKeyAlbumArtMIME %s", charData);
+    }
+    if (meta->findCString(kKeyAuthor, &charData)) {
+        LOG1("displayMetaData kKeyAuthor %s", charData);
+    }
+    if (meta->findCString(kKeyCDTrackNumber, &charData)) {
+        LOG1("displayMetaData kKeyCDTrackNumber %s", charData);
+    }
+    if (meta->findCString(kKeyDiscNumber, &charData)) {
+        LOG1("displayMetaData kKeyDiscNumber %s", charData);
+    }
+    if (meta->findCString(kKeyDate, &charData)) {
+        LOG1("displayMetaData kKeyDate %s", charData);
+    }
+    if (meta->findCString(kKeyWriter, &charData)) {
+        LOG1("displayMetaData kKeyWriter %s", charData);
+    }
+    if (meta->findInt32(kKeyTimeScale, &int32Data)) {
+        LOG1("displayMetaData kKeyTimeScale %d", int32Data);
+    }
+    if (meta->findInt32(kKeyVideoProfile, &int32Data)) {
+        LOG1("displayMetaData kKeyVideoProfile %d", int32Data);
+    }
+    if (meta->findInt32(kKeyVideoLevel, &int32Data)) {
+        LOG1("displayMetaData kKeyVideoLevel %d", int32Data);
+    }
+    if (meta->findInt32(kKey64BitFileOffset, &int32Data)) {
+        LOG1("displayMetaData kKey64BitFileOffset %d", int32Data);
+    }
+    if (meta->findInt32(kKeyFileType, &int32Data)) {
+        LOG1("displayMetaData kKeyFileType %d", int32Data);
+    }
+    if (meta->findInt64(kKeyTrackTimeStatus, &int64Data)) {
+        LOG1("displayMetaData kKeyTrackTimeStatus %lld", int64Data);
+    }
+    if (meta->findInt32(kKeyNotRealTime, &int32Data)) {
+        LOG1("displayMetaData kKeyNotRealTime %d", int32Data);
+    }
+}
+
+/**
+ * This code was extracted from StageFright MPEG4 writer
+ * It is used to parse and format the AVC codec specific info received
+ * from StageFright encoders
+ */
+static const uint8_t kNalUnitTypeSeqParamSet = 0x07;
+static const uint8_t kNalUnitTypePicParamSet = 0x08;
+struct AVCParamSet {
+    AVCParamSet(uint16_t length, const uint8_t *data)
+        : mLength(length), mData(data) {}
+
+    uint16_t mLength;
+    const uint8_t *mData;
+};
+struct AVCCodecSpecificContext {
+    List<AVCParamSet> mSeqParamSets;
+    List<AVCParamSet> mPicParamSets;
+    uint8_t mProfileIdc;
+    uint8_t mProfileCompatible;
+    uint8_t mLevelIdc;
+};
+
+const uint8_t *parseParamSet(AVCCodecSpecificContext* pC,
+        const uint8_t *data, size_t length, int type, size_t *paramSetLen) {
+    CHECK(type == kNalUnitTypeSeqParamSet ||
+          type == kNalUnitTypePicParamSet);
+
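+    // Scan forward for the next 0x00000001 start code; the bytes before it
+    // form the current parameter set.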
+    size_t bytesLeft = length;
+    while (bytesLeft > 4  &&
+            memcmp("\x00\x00\x00\x01", &data[length - bytesLeft], 4)) {
+        --bytesLeft;
+    }
+    if (bytesLeft <= 4) {
+        bytesLeft = 0; // Last parameter set
+    }
+    const uint8_t *nextStartCode = &data[length - bytesLeft];
+    *paramSetLen = nextStartCode - data;
+    if (*paramSetLen == 0) {
+        LOGE("Param set is malformed, since its length is 0");
+        return NULL;
+    }
+
+    AVCParamSet paramSet(*paramSetLen, data);
+    if (type == kNalUnitTypeSeqParamSet) {
+        if (*paramSetLen < 4) {
+            LOGE("Seq parameter set malformed");
+            return NULL;
+        }
+        if (pC->mSeqParamSets.empty()) {
+            pC->mProfileIdc = data[1];
+            pC->mProfileCompatible = data[2];
+            pC->mLevelIdc = data[3];
+        } else {
+            if (pC->mProfileIdc != data[1] ||
+                pC->mProfileCompatible != data[2] ||
+                pC->mLevelIdc != data[3]) {
+                LOGV("Inconsistent profile/level found in seq parameter sets");
+                return NULL;
+            }
+        }
+        pC->mSeqParamSets.push_back(paramSet);
+    } else {
+        pC->mPicParamSets.push_back(paramSet);
+    }
+    return nextStartCode;
+}
+
+status_t buildAVCCodecSpecificData(uint8_t **pOutputData, size_t *pOutputSize,
+        const uint8_t *data, size_t size, MetaData *param)
+{
+    //LOGV("buildAVCCodecSpecificData");
+
+    if ( (pOutputData == NULL) || (pOutputSize == NULL) ) {
+        LOGE("output is invalid");
+        return ERROR_MALFORMED;
+    }
+
+    if (*pOutputData != NULL) {
+        LOGE("Already have codec specific data");
+        return ERROR_MALFORMED;
+    }
+
+    if (size < 4) {
+        LOGE("Codec specific data length too short: %d", size);
+        return ERROR_MALFORMED;
+    }
+
+    // No start code at the beginning: data is already in AVCCodecSpecificData form
+    if (memcmp("\x00\x00\x00\x01", data, 4)) {
+        // 2 bytes for each parameter set length field
+        // plus the 7 bytes for the header
+        if (size < 4 + 7) {
+            LOGE("Codec specific data length too short: %d", size);
+            return ERROR_MALFORMED;
+        }
+
+        *pOutputSize = size;
+        *pOutputData = (uint8_t*)malloc(size);
+        memcpy(*pOutputData, data, size);
+        return OK;
+    }
+
+    AVCCodecSpecificContext ctx;
+    uint8_t *outputData = NULL;
+    size_t outputSize = 0;
+
+    // Check if the data is valid
+    uint8_t type = kNalUnitTypeSeqParamSet;
+    bool gotSps = false;
+    bool gotPps = false;
+    const uint8_t *tmp = data;
+    const uint8_t *nextStartCode = data;
+    size_t bytesLeft = size;
+    size_t paramSetLen = 0;
+    outputSize = 0;
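+    // Walk the start-code delimited SPS/PPS NAL units: each parsed set is
+    // recorded in ctx, and outputSize accumulates 2 length bytes plus the
+    // payload of every set.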
+    while (bytesLeft > 4 && !memcmp("\x00\x00\x00\x01", tmp, 4)) {
+        type = (*(tmp + 4)) & 0x1F;
+        if (type == kNalUnitTypeSeqParamSet) {
+            if (gotPps) {
+                LOGE("SPS must come before PPS");
+                return ERROR_MALFORMED;
+            }
+            if (!gotSps) {
+                gotSps = true;
+            }
+            nextStartCode = parseParamSet(&ctx, tmp + 4, bytesLeft - 4, type,
+                &paramSetLen);
+        } else if (type == kNalUnitTypePicParamSet) {
+            if (!gotSps) {
+                LOGE("SPS must come before PPS");
+                return ERROR_MALFORMED;
+            }
+            if (!gotPps) {
+                gotPps = true;
+            }
+            nextStartCode = parseParamSet(&ctx, tmp + 4, bytesLeft - 4, type,
+                &paramSetLen);
+        } else {
+            LOGE("Only SPS and PPS Nal units are expected");
+            return ERROR_MALFORMED;
+        }
+
+        if (nextStartCode == NULL) {
+            return ERROR_MALFORMED;
+        }
+
+        // Move on to find the next parameter set
+        bytesLeft -= nextStartCode - tmp;
+        tmp = nextStartCode;
+        outputSize += (2 + paramSetLen);
+    }
+
+    {
+        // Check on the number of seq parameter sets
+        size_t nSeqParamSets = ctx.mSeqParamSets.size();
+        if (nSeqParamSets == 0) {
+            LOGE("Cound not find sequence parameter set");
+            return ERROR_MALFORMED;
+        }
+
+        if (nSeqParamSets > 0x1F) {
+            LOGE("Too many seq parameter sets (%d) found", nSeqParamSets);
+            return ERROR_MALFORMED;
+        }
+    }
+
+    {
+        // Check on the number of pic parameter sets
+        size_t nPicParamSets = ctx.mPicParamSets.size();
+        if (nPicParamSets == 0) {
+            LOGE("Cound not find picture parameter set");
+            return ERROR_MALFORMED;
+        }
+        if (nPicParamSets > 0xFF) {
+            LOGE("Too many pic parameter sets (%d) found", nPicParamSets);
+            return ERROR_MALFORMED;
+        }
+    }
+
+    {
+        // Check on the profiles
+        // These profiles requires additional parameter set extensions
+        if (ctx.mProfileIdc == 100 || ctx.mProfileIdc == 110 ||
+            ctx.mProfileIdc == 122 || ctx.mProfileIdc == 144) {
+            LOGE("Sorry, no support for profile_idc: %d!", ctx.mProfileIdc);
+            return BAD_VALUE;
+        }
+    }
+
+    // ISO 14496-15: AVC file format
+    outputSize += 7;  // 7 more bytes in the header
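+    // version, profile indication, profile compatibility, level indication,
+    // NAL length size, SPS count and PPS count: one byte each.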
+    outputData = (uint8_t *)malloc(outputSize);
+    uint8_t *header = outputData;
+    header[0] = 1;                     // version
+    header[1] = ctx.mProfileIdc;           // profile indication
+    header[2] = ctx.mProfileCompatible;    // profile compatibility
+    header[3] = ctx.mLevelIdc;
+
+    // 6-bit '111111' followed by 2-bit lengthSizeMinusOne
+    int32_t use2ByteNalLength = 0;
+    if (param &&
+        param->findInt32(kKey2ByteNalLength, &use2ByteNalLength) &&
+        use2ByteNalLength) {
+        header[4] = 0xfc | 1;  // length size == 2 bytes
+    } else {
+        header[4] = 0xfc | 3;  // length size == 4 bytes
+    }
+
+    // 3-bit '111' followed by 5-bit numSequenceParameterSets
+    int nSequenceParamSets = ctx.mSeqParamSets.size();
+    header[5] = 0xe0 | nSequenceParamSets;
+    header += 6;
+    for (List<AVCParamSet>::iterator it = ctx.mSeqParamSets.begin();
+         it != ctx.mSeqParamSets.end(); ++it) {
+        // 16-bit sequence parameter set length
+        uint16_t seqParamSetLength = it->mLength;
+        header[0] = seqParamSetLength >> 8;
+        header[1] = seqParamSetLength & 0xff;
+        //LOGE("### SPS %d %d %d", seqParamSetLength, header[0], header[1]);
+
+        // SPS NAL unit (sequence parameter length bytes)
+        memcpy(&header[2], it->mData, seqParamSetLength);
+        header += (2 + seqParamSetLength);
+    }
+
+    // 8-bit nPictureParameterSets
+    int nPictureParamSets = ctx.mPicParamSets.size();
+    header[0] = nPictureParamSets;
+    header += 1;
+    for (List<AVCParamSet>::iterator it = ctx.mPicParamSets.begin();
+         it != ctx.mPicParamSets.end(); ++it) {
+        // 16-bit picture parameter set length
+        uint16_t picParamSetLength = it->mLength;
+        header[0] = picParamSetLength >> 8;
+        header[1] = picParamSetLength & 0xff;
+//LOGE("### PPS %d %d %d", picParamSetLength, header[0], header[1]);
+
+        // PPS Nal unit (picture parameter set length bytes)
+        memcpy(&header[2], it->mData, picParamSetLength);
+        header += (2 + picParamSetLength);
+    }
+
+    *pOutputSize = outputSize;
+    *pOutputData = outputData;
+    return OK;
+}
+}// namespace android
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
new file mode 100755
index 0000000..b362197
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoDecoder.cpp
@@ -0,0 +1,1404 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditorVideoDecoder.cpp
+* @brief  StageFright shell video decoder
+*************************************************************************
+*/
+#define LOG_NDEBUG 1
+#define LOG_TAG "VIDEOEDITOR_VIDEODECODER"
+
+/*******************
+ *     HEADERS     *
+ *******************/
+
+#include "VideoEditorVideoDecoder_internal.h"
+#include "VideoEditorUtils.h"
+#include "M4VD_Tools.h"
+
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaDefs.h>
+
+/********************
+ *   DEFINITIONS    *
+ ********************/
+#define OMX_QCOM_COLOR_FormatYVU420SemiPlanar 0x7FA30C00
+#define MAX_DEC_BUFFERS 10
+
+/********************
+ *   SOURCE CLASS   *
+ ********************/
+using namespace android;
+
+class VideoEditorVideoDecoderSource : public MediaSource {
+    public:
+        VideoEditorVideoDecoderSource(const sp<MetaData> &format,
+            VIDEOEDITOR_CodecType codecType, void *decoderShellContext);
+        virtual status_t start(MetaData *params = NULL);
+        virtual status_t stop();
+        virtual sp<MetaData> getFormat();
+        virtual status_t read(
+            MediaBuffer **buffer, const ReadOptions *options = NULL);
+
+    protected :
+        virtual ~VideoEditorVideoDecoderSource();
+
+    private:
+        sp<MetaData> mFormat;
+        MediaBuffer* mBuffer;
+        MediaBufferGroup* mGroup;
+        Mutex mLock;
+        VideoEditorVideoDecoder_Context* mpDecShellContext;
+        int32_t mMaxAUSize;
+        bool mStarted;
+        VIDEOEDITOR_CodecType mCodecType;
+        VideoEditorVideoDecoderSource(const MediaSource &);
+        VideoEditorVideoDecoderSource &operator=(const MediaSource &);
+};
+
+VideoEditorVideoDecoderSource::VideoEditorVideoDecoderSource(
+        const sp<MetaData> &format, VIDEOEDITOR_CodecType codecType,
+        void *decoderShellContext) :
+        mFormat(format),
+        mBuffer(NULL),
+        mGroup(NULL),
+        mStarted(false),
+        mCodecType(codecType) {
+    mpDecShellContext = (VideoEditorVideoDecoder_Context*) decoderShellContext;
+}
+
+VideoEditorVideoDecoderSource::~VideoEditorVideoDecoderSource() {
+    if (mStarted == true) {
+        stop();
+    }
+}
+
+status_t VideoEditorVideoDecoderSource::start(
+        MetaData *params) {
+
+    LOGV("VideoEditorVideoDecoderSource::start() begin ");
+    if (!mStarted) {
+        if(mFormat->findInt32(kKeyMaxInputSize, &mMaxAUSize) == false) {
+            LOGW("FATAL: Should never happen ");
+            mMaxAUSize = 10000;
+        }
+
+        mGroup = new MediaBufferGroup;
+        if(mGroup == NULL) {
+            LOGE("FATAL: memory limitation ! ");
+            return NO_MEMORY;
+        }
+        LOGV("VideoEditorVideoDecoderSource:adding buffer to group MaxSize= %d",
+            mMaxAUSize);
+        mGroup->add_buffer(new MediaBuffer(mMaxAUSize));
+
+        mStarted = true;
+    }
+    LOGV("VideoEditorVideoDecoderSource::start() end OK");
+    return OK;
+}
+
+status_t VideoEditorVideoDecoderSource::stop() {
+    int ref_count = 0;
+    int i;
+
+    LOGV("VideoEditorVideoDecoderSource::stop() begin");
+    if (mStarted) {
+        if(mBuffer != NULL) {
+            ref_count = mBuffer->refcount();
+            LOGV("MediaBuffer refcount is %d",ref_count);
+            for (i=0; i< ref_count; i++) {
+                mBuffer->release();
+            }
+
+            mBuffer = NULL;
+        }
+        delete mGroup;
+        mGroup = NULL;
+        mStarted = false;
+    }
+    LOGV("VideoEditorVideoDecoderSource::stop() end");
+    return OK;
+}
+
+sp<MetaData> VideoEditorVideoDecoderSource::getFormat() {
+    Mutex::Autolock autolock(mLock);
+
+    return mFormat;
+}
+
+status_t VideoEditorVideoDecoderSource::read(MediaBuffer** buffer_out,
+        const ReadOptions *options) {
+
+    Mutex::Autolock autolock(mLock);
+    // We do not use read options in the decoder, so they are not acted upon here
+    M4_AccessUnit* pAccessUnit = mpDecShellContext->m_pNextAccessUnitToDecode;
+    M4OSA_UInt32 lSize = 0;
+    M4OSA_ERR lerr = M4NO_ERROR;
+    int64_t frameTime;
+
+    *buffer_out = NULL;
+
+    LOGV("VideoEditorVideoDecoderSource::read begin");
+
+    if (options) {
+        int64_t time = 0;
+        ReadOptions::SeekMode mode = ReadOptions::SEEK_CLOSEST_SYNC;
+        bool hasSeekRequest = false;
+        hasSeekRequest = options->getSeekTo(&time, &mode);
+        if (hasSeekRequest) {
+            LOGV("VideoEditorVideoDecoderSource: seek requested to %lld (mode %d)",
+                time, mode);
+        } else {
+            LOGV("VideoEditorVideoDecoderSource: options without seek request");
+        }
+    }
+    lerr = mGroup->acquire_buffer(&mBuffer);
+    if (lerr != OK) {
+        return lerr;
+    }
+    LOGV("VideoEditorVideoDecoderSource: got a buffer from group");
+
+    if (mStarted) {
+        //getNext AU from reader.
+        lerr = mpDecShellContext->m_pReader->m_pFctGetNextAu(
+                   mpDecShellContext->m_pReader->m_readerContext,
+                   (M4_StreamHandler*)mpDecShellContext->m_pVideoStreamhandler,
+                   pAccessUnit);
+        if (lerr == M4WAR_NO_DATA_YET) {
+            LOGV("VideoEditorVideoDecoderSource::read() M4WAR_NO_DATA_YET");
+            mBuffer->set_range(0, 0);
+            mBuffer->meta_data()->clear();
+
+            *buffer_out = mBuffer;
+        }
+        if (lerr == M4WAR_NO_MORE_AU) {
+            LOGV("VideoEditorVideoDecoderSource::read() returning err = "
+                "ERROR_END_OF_STREAM;");
+            *buffer_out = NULL;
+            return ERROR_END_OF_STREAM;
+        }
+        LOGV("VideoEditorVideoDecoderSource: getNextAU  succesful ts = %lf",
+            pAccessUnit->m_CTS);
+
+        //copy the reader AU buffer to mBuffer
+        lSize  = (pAccessUnit->m_size > (M4OSA_UInt32)mMaxAUSize)\
+            ? (M4OSA_UInt32)mMaxAUSize : pAccessUnit->m_size;
+        LOGV("VideoDecoderSource:Read() copying AU to i/p buffer of decoder,"
+            "Bufer Add = 0x%x, size = %d", mBuffer->data(), lSize);
+        M4OSA_memcpy((M4OSA_MemAddr8)mBuffer->data(),pAccessUnit->m_dataAddress,
+            lSize);
+
+        mBuffer->set_range(0, lSize);
+        mBuffer->meta_data()->clear();
+        frameTime = (int64_t)pAccessUnit->m_CTS;
+        mBuffer->meta_data()->setInt64(kKeyTime, (int64_t)frameTime*1000);
+
+        // Replace the AU start code for H264
+        if (VIDEOEDITOR_kH264VideoDec == mCodecType) {
+            uint8_t *data =(uint8_t *)mBuffer->data() + mBuffer->range_offset();
+            data[0]=0;
+            data[1]=0;
+            data[2]=0;
+            data[3]=1;
+        }
+        mBuffer->meta_data()->setInt32(kKeyIsSyncFrame,
+            (pAccessUnit->m_attribute == 0x04)? 1 : 0);
+        *buffer_out = mBuffer;
+    }
+    LOGV("VideoEditorVideoDecoderSource::read end");
+    return OK;
+}
+/********************
+ *      TOOLS       *
+ ********************/
+
+static M4OSA_UInt32 VideoEditorVideoDecoder_GetBitsFromMemory(
+        VIDEOEDITOR_VIDEO_Bitstream_ctxt* parsingCtxt, M4OSA_UInt32 nb_bits) {
+    return (M4VD_Tools_GetBitsFromMemory((M4VS_Bitstream_ctxt*) parsingCtxt,
+            nb_bits));
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_internalParseVideoDSI(M4OSA_UInt8* pVol,
+        M4OSA_Int32 aVolSize, M4DECODER_MPEG4_DecoderConfigInfo* pDci,
+        M4DECODER_VideoSize* pVideoSize) {
+
+    VIDEOEDITOR_VIDEO_Bitstream_ctxt parsingCtxt;
+    M4OSA_UInt32 code, j;
+    M4OSA_MemAddr8 start;
+    M4OSA_UInt8 i;
+    M4OSA_UInt32 time_incr_length;
+    M4OSA_UInt8 vol_verid=0, b_hierarchy_type;
+
+    /* Parsing variables */
+    M4OSA_UInt8 video_object_layer_shape = 0;
+    M4OSA_UInt8 sprite_enable = 0;
+    M4OSA_UInt8 reduced_resolution_vop_enable = 0;
+    M4OSA_UInt8 scalability = 0;
+    M4OSA_UInt8 enhancement_type = 0;
+    M4OSA_UInt8 complexity_estimation_disable = 0;
+    M4OSA_UInt8 interlaced = 0;
+    M4OSA_UInt8 sprite_warping_points = 0;
+    M4OSA_UInt8 sprite_brightness_change = 0;
+    M4OSA_UInt8 quant_precision = 0;
+
+    /* Fill the structure with default parameters */
+    pVideoSize->m_uiWidth      = 0;
+    pVideoSize->m_uiHeight     = 0;
+
+    pDci->uiTimeScale          = 0;
+    pDci->uiProfile            = 0;
+    pDci->uiUseOfResynchMarker = 0;
+    pDci->bDataPartition       = M4OSA_FALSE;
+    pDci->bUseOfRVLC           = M4OSA_FALSE;
+
+    /* Reset the bitstream context */
+    parsingCtxt.stream_byte = 0;
+    parsingCtxt.stream_index = 8;
+    parsingCtxt.in = (M4OSA_MemAddr8) pVol;
+
+    start = (M4OSA_MemAddr8) pVol;
+
+    /* Start parsing */
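+    /* Walk the DSI looking for 0x000001 start codes, then decode the VOL
+       header fields needed by the shell: frame width/height, time scale,
+       resync marker usage, data partitioning and RVLC. */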
+    while (parsingCtxt.in - start < aVolSize) {
+        code = VideoEditorVideoDecoder_GetBitsFromMemory(&parsingCtxt, 8);
+        if (code == 0) {
+            code = VideoEditorVideoDecoder_GetBitsFromMemory(&parsingCtxt, 8);
+            if (code == 0) {
+                code = VideoEditorVideoDecoder_GetBitsFromMemory(&parsingCtxt,8);
+                if (code == 1) {
+                    /* start code found */
+                    code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                        &parsingCtxt, 8);
+
+                    /* ----- 0x20..0x2F : video_object_layer_start_code ----- */
+
+                    if ((code > 0x1F) && (code < 0x30)) {
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 8);
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);
+                        if (code == 1) {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 4);
+                            vol_verid = (M4OSA_UInt8)code;
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 3);
+                        }
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 4);
+                        if (code == 15) {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 16);
+                        }
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);
+                        if (code == 1) {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 3);
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 1);
+                            if (code == 1) {
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 32);
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 31);
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 16);
+                            }
+                        }
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 2);
+                        /* Need to save it for vop parsing */
+                        video_object_layer_shape = (M4OSA_UInt8)code;
+
+                        if (code != 0) {
+                            return 0;    /* only rectangular case supported */
+                        }
+
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 16);
+                        pDci->uiTimeScale = code;
+
+                        /* Computes time increment length */
+                        j    = code - 1;
+                        for (i = 0; (i < 32) && (j != 0); j >>=1) {
+                            i++;
+                        }
+                        time_incr_length = (i == 0) ? 1 : i;
+
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);
+                        if (code == 1) {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, time_incr_length);
+                        }
+
+                        if(video_object_layer_shape != 1) { /* 1 = Binary */
+                            if(video_object_layer_shape == 0) {
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 1);/* Marker bit */
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 13);/* Width */
+                                pVideoSize->m_uiWidth = code;
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 1);/* Marker bit */
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 13);/* Height */
+                                pVideoSize->m_uiHeight = code;
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 1);/* Marker bit */
+                            }
+                        }
+
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);/* interlaced */
+                        interlaced = (M4OSA_UInt8)code;
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);/* OBMC disable */
+
+                        if(vol_verid == 1) {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 1);/* sprite enable */
+                            sprite_enable = (M4OSA_UInt8)code;
+                        } else {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 2);/* sprite enable */
+                            sprite_enable = (M4OSA_UInt8)code;
+                        }
+                        if ((sprite_enable == 1) || (sprite_enable == 2)) {
+                            if (sprite_enable != 2) {
+
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 13);/* sprite width */
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 1);/* Marker bit */
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 13);/* sprite height */
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 1);/* Marker bit */
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 13);/* sprite l coordinate */
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 1);/* Marker bit */
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 13);/* sprite top coordinate */
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 1);/* Marker bit */
+                            }
+
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 6);/* sprite warping points */
+                            sprite_warping_points = (M4OSA_UInt8)code;
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 2);/* sprite warping accuracy */
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 1);/* sprite brightness change */
+                            sprite_brightness_change = (M4OSA_UInt8)code;
+                            if (sprite_enable != 2) {
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 1);
+                            }
+                        }
+                        if ((vol_verid != 1) && (video_object_layer_shape != 0)){
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);/* sadct disable */
+                        }
+
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1); /* not 8 bits */
+                        if (code) {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 4);/* quant precision */
+                            quant_precision = (M4OSA_UInt8)code;
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 4);/* bits per pixel */
+                        }
+
+                        /* greyscale not supported */
+                        if(video_object_layer_shape == 3) {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 3);
+                        }
+
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);/* quant type */
+                        if (code) {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 1);/* load intra quant mat */
+                            if (code) {
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 8);/* */
+                                i    = 1;
+                                while (i < 64) {
+                                    code =
+                                        VideoEditorVideoDecoder_GetBitsFromMemory(
+                                            &parsingCtxt, 8);
+                                    if (code == 0) {
+                                        break;
+                                    }
+                                    i++;
+                                }
+                            }
+
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 1);/* load non intra quant mat */
+                            if (code) {
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 8);/* */
+                                i    = 1;
+                                while (i < 64) {
+                                    code =
+                                        VideoEditorVideoDecoder_GetBitsFromMemory(
+                                        &parsingCtxt, 8);
+                                    if (code == 0) {
+                                        break;
+                                    }
+                                    i++;
+                                }
+                            }
+                        }
+
+                        if (vol_verid != 1) {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 1);/* quarter sample */
+                        }
+
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);/* complexity estimation disable */
+                        complexity_estimation_disable = (M4OSA_UInt8)code;
+                        if (!code) {
+                            //return M4ERR_NOT_IMPLEMENTED;
+                        }
+
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);/* resync marker disable */
+                        pDci->uiUseOfResynchMarker = (code) ? 0 : 1;
+
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);/* data partitioned */
+                        pDci->bDataPartition = (code) ? M4OSA_TRUE : M4OSA_FALSE;
+                        if (code) {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 1);/* reversible VLC */
+                            pDci->bUseOfRVLC = (code) ? M4OSA_TRUE : M4OSA_FALSE;
+                        }
+
+                        if (vol_verid != 1) {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 1);/* newpred */
+                            if (code) {
+                                //return M4ERR_PARAMETER;
+                            }
+
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 1);
+                            reduced_resolution_vop_enable = (M4OSA_UInt8)code;
+                        }
+
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);/* scalability */
+                        scalability = (M4OSA_UInt8)code;
+                        if (code) {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 1);/* hierarchy type */
+                            b_hierarchy_type = (M4OSA_UInt8)code;
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 4);/* ref layer id */
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 1);/* ref sampling direct */
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 5);/* hor sampling factor N */
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 5);/* hor sampling factor M */
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 5);/* vert sampling factor N */
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 5);/* vert sampling factor M */
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 1);/* enhancement type */
+                            enhancement_type = (M4OSA_UInt8)code;
+                            if ((!b_hierarchy_type) &&
+                                    (video_object_layer_shape == 1)) {
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 1);/* use ref shape */
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 1);/* use ref texture */
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 5);
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 5);
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 5);
+                                code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                    &parsingCtxt, 5);
+                            }
+                        }
+                        break;
+                    }
+
+                    /* ----- 0xB0 : visual_object_sequence_start_code ----- */
+
+                    else if(code == 0xB0) {
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 8);/* profile_and_level_indication */
+                        pDci->uiProfile = (M4OSA_UInt8)code;
+                    }
+
+                    /* ----- 0xB5 : visual_object_start_code ----- */
+
+                    else if(code == 0xB5) {
+                        code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                            &parsingCtxt, 1);/* is object layer identifier */
+                        if (code == 1) {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 4); /* visual object verid */
+                            vol_verid = (M4OSA_UInt8)code;
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 3);
+                        } else {
+                            code = VideoEditorVideoDecoder_GetBitsFromMemory(
+                                &parsingCtxt, 7); /* Realign on byte */
+                            vol_verid = 1;
+                        }
+                    }
+
+                    /* ----- end ----- */
+                } else {
+                    if ((code >> 2) == 0x20) {
+                        /* H.263 short-header start code: not an MPEG-4 VOL */
+                        break;
+                    }
+                }
+            }
+        }
+    }
+    return M4NO_ERROR;
+}
+
+M4VIFI_UInt8 M4VIFI_SemiplanarYVU420toYUV420(void *user_data,
+        M4VIFI_UInt8 *inyuv, M4VIFI_ImagePlane *PlaneOut ) {
+    M4VIFI_UInt8 return_code = M4VIFI_OK;
+    M4VIFI_UInt8 *outyuv =
+        ((M4VIFI_UInt8*)&(PlaneOut[0].pac_data[PlaneOut[0].u_topleft]));
+    int32_t width = PlaneOut[0].u_width;
+    int32_t height = PlaneOut[0].u_height;
+
+    int32_t outYsize = width * height;
+    uint32_t *outy =  (uint32_t *) outyuv;
+    uint16_t *outcb =
+        (uint16_t *) &(PlaneOut[1].pac_data[PlaneOut[1].u_topleft]);
+    uint16_t *outcr =
+        (uint16_t *) &(PlaneOut[2].pac_data[PlaneOut[2].u_topleft]);
+
+    /* Y copying */
+    memcpy(outy, inyuv, outYsize);
+
+    /* U & V copying */
+    uint32_t *inyuv_4 = (uint32_t *) (inyuv + outYsize);
+    for (int32_t i = height >> 1; i > 0; --i) {
+        for (int32_t j = width >> 2; j > 0; --j) {
+            uint32_t temp = *inyuv_4++;
+            uint32_t tempU = temp & 0xFF;
+            tempU = tempU | ((temp >> 8) & 0xFF00);
+
+            uint32_t tempV = (temp >> 8) & 0xFF;
+            tempV = tempV | ((temp >> 16) & 0xFF00);
+
+            // Flip U and V
+            *outcb++ = tempV;
+            *outcr++ = tempU;
+        }
+    }
+    return return_code;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_ParseAVCDSI(M4OSA_UInt8* pDSI,
+        M4OSA_Int32 DSISize, M4DECODER_AVCProfileLevel *profile) {
+    M4OSA_ERR err = M4NO_ERROR;
+    M4OSA_Bool NALSPS_and_Profile0Found = M4OSA_FALSE;
+    M4OSA_UInt16 index;
+    M4OSA_Bool constraintSet3;
+
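+    /* Look for an SPS NAL unit (nal_unit_type 7) whose profile_idc byte is
+       0x42 (Baseline profile); level_idc then selects the returned value,
+       with constraint_set3_flag distinguishing level 1b from level 1.1. */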
+    for(index = 0; index < (DSISize-1); index++) {
+        if(((pDSI[index] & 0x1f) == 0x07) && (pDSI[index+1] == 0x42)) {
+            NALSPS_and_Profile0Found = M4OSA_TRUE;
+            break;
+        }
+    }
+    if(M4OSA_FALSE == NALSPS_and_Profile0Found) {
+        LOGV("VideoEditorVideoDecoder_ParseAVCDSI: index bad = %d", index);
+        *profile = M4DECODER_AVC_kProfile_and_Level_Out_Of_Range;
+    } else {
+        LOGV("VideoEditorVideoDecoder_ParseAVCDSI: index = %d", index);
+        constraintSet3 = (pDSI[index+2] & 0x10);
+        LOGV("VideoEditorVideoDecoder_ParseAVCDSI: level = %d", pDSI[index+3]);
+        switch(pDSI[index+3]) {
+            case 10:
+                *profile = M4DECODER_AVC_kProfile_0_Level_1;
+                break;
+            case 11:
+                if(constraintSet3) {
+                    *profile = M4DECODER_AVC_kProfile_0_Level_1b;
+                } else {
+                    *profile = M4DECODER_AVC_kProfile_0_Level_1_1;
+                }
+                break;
+            case 12:
+                *profile = M4DECODER_AVC_kProfile_0_Level_1_2;
+                break;
+            case 13:
+                *profile = M4DECODER_AVC_kProfile_0_Level_1_3;
+                break;
+            case 20:
+                *profile = M4DECODER_AVC_kProfile_0_Level_2;
+                break;
+            case 21:
+                *profile = M4DECODER_AVC_kProfile_0_Level_2_1;
+                break;
+            case 22:
+                *profile = M4DECODER_AVC_kProfile_0_Level_2_2;
+                break;
+            case 30:
+                *profile = M4DECODER_AVC_kProfile_0_Level_3;
+                break;
+            case 31:
+                *profile = M4DECODER_AVC_kProfile_0_Level_3_1;
+                break;
+            case 32:
+                *profile = M4DECODER_AVC_kProfile_0_Level_3_2;
+                break;
+            case 40:
+                *profile = M4DECODER_AVC_kProfile_0_Level_4;
+                break;
+            case 41:
+                *profile = M4DECODER_AVC_kProfile_0_Level_4_1;
+                break;
+            case 42:
+                *profile = M4DECODER_AVC_kProfile_0_Level_4_2;
+                break;
+            case 50:
+                *profile = M4DECODER_AVC_kProfile_0_Level_5;
+                break;
+            case 51:
+                *profile = M4DECODER_AVC_kProfile_0_Level_5_1;
+                break;
+            default:
+                *profile = M4DECODER_AVC_kProfile_and_Level_Out_Of_Range;
+        }
+    }
+    return err;
+}
+/********************
+ * ENGINE INTERFACE *
+ ********************/
+M4OSA_ERR VideoEditorVideoDecoder_configureFromMetadata(M4OSA_Context pContext,
+        MetaData* meta) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoDecoder_Context* pDecShellContext = M4OSA_NULL;
+    bool success = true;
+    int32_t width = 0;
+    int32_t height = 0;
+    int32_t frameSize = 0;
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != meta,     M4ERR_PARAMETER);
+
+    LOGV("VideoEditorVideoDecoder_configureFromMetadata begin");
+
+    pDecShellContext = (VideoEditorVideoDecoder_Context*)pContext;
+
+    // Get the parameters
+    success  = meta->findInt32(kKeyWidth,  &width);
+    success &= meta->findInt32(kKeyHeight, &height);
+    VIDEOEDITOR_CHECK(TRUE == success, M4ERR_PARAMETER);
+
+    LOGV("VideoDecoder_configureFromMetadata : W=%d H=%d", width, height);
+    VIDEOEDITOR_CHECK((0 != width) && (0 != height), M4ERR_PARAMETER);
+
+    LOGV("VideoDecoder_configureFromMetadata : W=%d H=%d", width, height);
+
+    if( (M4OSA_NULL != pDecShellContext->m_pDecBufferPool) &&
+        (pDecShellContext->m_pVideoStreamhandler->m_videoWidth  == \
+            (uint32_t)width) &&
+        (pDecShellContext->m_pVideoStreamhandler->m_videoHeight == \
+            (uint32_t)height) ) {
+        // No need to reconfigure
+        goto cleanUp;
+    }
+    LOGV("VideoDecoder_configureFromMetadata  reset: W=%d H=%d", width, height);
+    // Update the stream handler parameters
+    pDecShellContext->m_pVideoStreamhandler->m_videoWidth  = width;
+    pDecShellContext->m_pVideoStreamhandler->m_videoHeight = height;
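+    // One decoded frame in YUV420 planar format: Y (width*height) plus the
+    // two chroma planes (width*height/2 together).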
+    frameSize = (width * height * 3) / 2;
+
+    // Configure the buffer pool
+    if( M4OSA_NULL != pDecShellContext->m_pDecBufferPool ) {
+        LOGV("VideoDecoder_configureFromMetadata : reset the buffer pool");
+        VIDEOEDITOR_BUFFER_freePool(pDecShellContext->m_pDecBufferPool);
+        pDecShellContext->m_pDecBufferPool = M4OSA_NULL;
+    }
+    err =  VIDEOEDITOR_BUFFER_allocatePool(&pDecShellContext->m_pDecBufferPool,
+        MAX_DEC_BUFFERS, (M4OSA_Char*)"VIDEOEDITOR_DecodedBufferPool");
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+    err = VIDEOEDITOR_BUFFER_initPoolBuffers(pDecShellContext->m_pDecBufferPool,
+        frameSize + width * 2);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoDecoder_configureFromMetadata no error");
+    } else {
+        if( M4OSA_NULL != pDecShellContext->m_pDecBufferPool ) {
+            VIDEOEDITOR_BUFFER_freePool(pDecShellContext->m_pDecBufferPool);
+            pDecShellContext->m_pDecBufferPool = M4OSA_NULL;
+        }
+        LOGV("VideoEditorVideoDecoder_configureFromMetadata ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoDecoder_configureFromMetadata end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_destroy(M4OSA_Context pContext) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoDecoder_Context* pDecShellContext =
+        (VideoEditorVideoDecoder_Context*)pContext;
+
+    // Input parameters check
+    LOGV("VideoEditorVideoDecoder_destroy begin");
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    // Destroy the graph
+    if( pDecShellContext->mVideoDecoder != NULL ) {
+        LOGV("### VideoEditorVideoDecoder_destroy : releasing decoder");
+        pDecShellContext->mVideoDecoder->stop();
+        pDecShellContext->mVideoDecoder.clear();
+    }
+    pDecShellContext->mClient.disconnect();
+    pDecShellContext->mReaderSource.clear();
+
+    // Release memory
+    if( pDecShellContext->m_pDecBufferPool != M4OSA_NULL ) {
+        VIDEOEDITOR_BUFFER_freePool(pDecShellContext->m_pDecBufferPool);
+        pDecShellContext->m_pDecBufferPool = M4OSA_NULL;
+    }
+    SAFE_FREE(pDecShellContext);
+    pContext = NULL;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoDecoder_destroy no error");
+    } else {
+        LOGV("VideoEditorVideoDecoder_destroy ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoDecoder_destroy end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_create(M4OSA_Context *pContext,
+        M4_StreamHandler *pStreamHandler,
+        M4READER_DataInterface *pReaderDataInterface,
+        M4_AccessUnit *pAccessUnit, M4OSA_Void *pUserData) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoDecoder_Context* pDecShellContext = M4OSA_NULL;
+    status_t status = OK;
+    bool success = TRUE;
+    int32_t colorFormat = 0;
+    M4OSA_UInt32 size = 0;
+    sp<MetaData> decoderMetadata = NULL;
+
+    LOGV("VideoEditorVideoDecoder_create begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext,             M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pStreamHandler,       M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pReaderDataInterface, M4ERR_PARAMETER);
+
+    // Context allocation & initialization
+    SAFE_MALLOC(pDecShellContext, VideoEditorVideoDecoder_Context, 1,
+        "VideoEditorVideoDecoder");
+    pDecShellContext->m_pVideoStreamhandler =
+        (M4_VideoStreamHandler*)pStreamHandler;
+    pDecShellContext->m_pNextAccessUnitToDecode = pAccessUnit;
+    pDecShellContext->m_pReader = pReaderDataInterface;
+    pDecShellContext->m_lastDecodedCTS = -1;
+    pDecShellContext->m_lastRenderCts = -1;
+    switch( pStreamHandler->m_streamType ) {
+        case M4DA_StreamTypeVideoH263:
+            pDecShellContext->mDecoderType = VIDEOEDITOR_kH263VideoDec;
+            break;
+        case M4DA_StreamTypeVideoMpeg4:
+            pDecShellContext->mDecoderType = VIDEOEDITOR_kMpeg4VideoDec;
+            // Parse the VOL header
+            err = VideoEditorVideoDecoder_internalParseVideoDSI(
+                (M4OSA_UInt8*)pDecShellContext->m_pVideoStreamhandler->\
+                    m_basicProperties.m_pDecoderSpecificInfo,
+                pDecShellContext->m_pVideoStreamhandler->\
+                    m_basicProperties.m_decoderSpecificInfoSize,
+                &pDecShellContext->m_Dci, &pDecShellContext->m_VideoSize);
+            VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+            break;
+        case M4DA_StreamTypeVideoMpeg4Avc:
+            pDecShellContext->mDecoderType = VIDEOEDITOR_kH264VideoDec;
+            break;
+        default:
+            VIDEOEDITOR_CHECK(!"VideoDecoder_create : incorrect stream type",
+                M4ERR_PARAMETER);
+            break;
+    }
+
+    pDecShellContext->mNbInputFrames     = 0;
+    pDecShellContext->mFirstInputCts     = -1.0;
+    pDecShellContext->mLastInputCts      = -1.0;
+    pDecShellContext->mNbRenderedFrames  = 0;
+    pDecShellContext->mFirstRenderedCts  = -1.0;
+    pDecShellContext->mLastRenderedCts   = -1.0;
+    pDecShellContext->mNbOutputFrames    = 0;
+    pDecShellContext->mFirstOutputCts    = -1;
+    pDecShellContext->mLastOutputCts     = -1;
+
+    /**
+     * StageFright graph building
+     */
+    decoderMetadata = new MetaData;
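+    // Fill the decoder metadata: MIME type plus the codec specific data
+    // (ESDS for MPEG-4, AVCC for H.264) taken from the stream handler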
+    switch( pDecShellContext->mDecoderType ) {
+        case VIDEOEDITOR_kH263VideoDec:
+            decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_H263);
+            break;
+        case VIDEOEDITOR_kMpeg4VideoDec:
+            decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4);
+            decoderMetadata->setData(kKeyESDS, kTypeESDS,
+                pStreamHandler->m_pESDSInfo,
+                pStreamHandler->m_ESDSInfoSize);
+            break;
+        case VIDEOEDITOR_kH264VideoDec:
+            decoderMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
+            decoderMetadata->setData(kKeyAVCC, kTypeAVCC,
+                pStreamHandler->m_pH264DecoderSpecificInfo,
+                pStreamHandler->m_H264decoderSpecificInfoSize);
+            break;
+        default:
+            VIDEOEDITOR_CHECK(!"VideoDecoder_create : incorrect stream type",
+                M4ERR_PARAMETER);
+            break;
+    }
+
+    decoderMetadata->setInt32(kKeyMaxInputSize, pStreamHandler->m_maxAUSize);
+    decoderMetadata->setInt32(kKeyWidth,
+        pDecShellContext->m_pVideoStreamhandler->m_videoWidth);
+    decoderMetadata->setInt32(kKeyHeight,
+        pDecShellContext->m_pVideoStreamhandler->m_videoHeight);
+
+    // Create the decoder source
+    pDecShellContext->mReaderSource = new VideoEditorVideoDecoderSource(
+        decoderMetadata, pDecShellContext->mDecoderType,
+        (void *)pDecShellContext);
+    VIDEOEDITOR_CHECK(NULL != pDecShellContext->mReaderSource.get(),
+        M4ERR_SF_DECODER_RSRC_FAIL);
+
+    // Connect to the OMX client
+    status = pDecShellContext->mClient.connect();
+    VIDEOEDITOR_CHECK(OK == status, M4ERR_SF_DECODER_RSRC_FAIL);
+
+    // Create the decoder
+    pDecShellContext->mVideoDecoder = OMXCodec::Create(
+        pDecShellContext->mClient.interface(),
+        decoderMetadata, false, pDecShellContext->mReaderSource);
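+    // Note: the third argument (false) selects a decoder component; the
+    // encoder shell passes true at the same position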
+    VIDEOEDITOR_CHECK(NULL != pDecShellContext->mVideoDecoder.get(),
+        M4ERR_SF_DECODER_RSRC_FAIL);
+
+
+    // Get the output color format
+    success = pDecShellContext->mVideoDecoder->getFormat()->findInt32(
+        kKeyColorFormat, &colorFormat);
+    VIDEOEDITOR_CHECK(TRUE == success, M4ERR_PARAMETER);
+    pDecShellContext->decOuputColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
+
+    pDecShellContext->mVideoDecoder->getFormat()->setInt32(kKeyWidth,
+        pDecShellContext->m_pVideoStreamhandler->m_videoWidth);
+    pDecShellContext->mVideoDecoder->getFormat()->setInt32(kKeyHeight,
+        pDecShellContext->m_pVideoStreamhandler->m_videoHeight);
+
+    // Configure the buffer pool from the metadata
+    err = VideoEditorVideoDecoder_configureFromMetadata(pDecShellContext,
+        pDecShellContext->mVideoDecoder->getFormat().get());
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+    // Start the graph
+    status = pDecShellContext->mVideoDecoder->start();
+    VIDEOEDITOR_CHECK(OK == status, M4ERR_SF_DECODER_RSRC_FAIL);
+
+    *pContext = (M4OSA_Context)pDecShellContext;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoDecoder_create no error");
+    } else {
+        VideoEditorVideoDecoder_destroy(pDecShellContext);
+        *pContext = M4OSA_NULL;
+        LOGV("VideoEditorVideoDecoder_create ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoDecoder_create : DONE");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_getOption(M4OSA_Context context,
+        M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
+    M4OSA_ERR lerr = M4NO_ERROR;
+    VideoEditorVideoDecoder_Context* pDecShellContext =
+        (VideoEditorVideoDecoder_Context*) context;
+    M4_VersionInfo* pVersionInfo;
+    M4DECODER_VideoSize* pVideoSize;
+    M4OSA_UInt32* pNextFrameCts;
+    M4OSA_UInt32 *plastDecodedFrameCts;
+    M4DECODER_AVCProfileLevel* profile;
+    M4DECODER_MPEG4_DecoderConfigInfo* pDecConfInfo;
+
+    LOGV("VideoEditorVideoDecoder_getOption begin");
+
+    switch (optionId) {
+        case M4DECODER_kOptionID_AVCLastDecodedFrameCTS:
+             plastDecodedFrameCts = (M4OSA_UInt32 *) pValue;
+             *plastDecodedFrameCts = pDecShellContext->m_lastDecodedCTS;
+             break;
+
+        case M4DECODER_kOptionID_Version:
+            pVersionInfo = (M4_VersionInfo*)pValue;
+
+            pVersionInfo->m_major = VIDEOEDITOR_VIDEC_SHELL_VER_MAJOR;
+            pVersionInfo->m_minor= VIDEOEDITOR_VIDEC_SHELL_VER_MINOR;
+            pVersionInfo->m_revision = VIDEOEDITOR_VIDEC_SHELL_VER_REVISION;
+            pVersionInfo->m_structSize=sizeof(M4_VersionInfo);
+            break;
+
+        case M4DECODER_kOptionID_VideoSize:
+            /** Only VPS uses this Option ID. */
+            pVideoSize = (M4DECODER_VideoSize*)pValue;
+            pDecShellContext->mVideoDecoder->getFormat()->findInt32(kKeyWidth,
+                (int32_t*)(&pVideoSize->m_uiWidth));
+            pDecShellContext->mVideoDecoder->getFormat()->findInt32(kKeyHeight,
+                (int32_t*)(&pVideoSize->m_uiHeight));
+            LOGV("VideoEditorVideoDecoder_getOption : W=%d H=%d",
+                pVideoSize->m_uiWidth, pVideoSize->m_uiHeight);
+            break;
+
+        case M4DECODER_kOptionID_NextRenderedFrameCTS:
+            /** The StageFright decoder does not provide this information, *
+            ** so return the last decoded frame CTS for now.               *
+            ** Only VPS uses this Option ID. */
+            pNextFrameCts = (M4OSA_UInt32 *)pValue;
+            *pNextFrameCts = pDecShellContext->m_lastDecodedCTS;
+            break;
+        case M4DECODER_kOptionID_AVCProfileAndLevel:
+            profile = (M4DECODER_AVCProfileLevel *) pValue;
+            VideoEditorVideoDecoder_ParseAVCDSI (
+                pDecShellContext->m_pVideoStreamhandler->\
+                    m_basicProperties.m_pDecoderSpecificInfo,
+                pDecShellContext->m_pVideoStreamhandler->\
+                    m_basicProperties.m_decoderSpecificInfoSize,
+                profile);
+            break;
+        case M4DECODER_MPEG4_kOptionID_DecoderConfigInfo:
+            if(pDecShellContext->mDecoderType == VIDEOEDITOR_kMpeg4VideoDec) {
+                (*(M4DECODER_MPEG4_DecoderConfigInfo*)pValue) =
+                    pDecShellContext->m_Dci;
+            }
+            break;
+        default:
+            lerr = M4ERR_BAD_OPTION_ID;
+            break;
+
+    }
+
+    LOGV("VideoEditorVideoDecoder_getOption: end with err = 0x%x", lerr);
+    return lerr;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_setOption(M4OSA_Context context,
+        M4OSA_OptionID optionId, M4OSA_DataOption pValue) {
+    M4OSA_ERR lerr = M4NO_ERROR;
+    VideoEditorVideoDecoder_Context *pDecShellContext =
+        (VideoEditorVideoDecoder_Context*) context;
+
+    LOGV("VideoEditorVideoDecoder_setOption begin");
+
+    switch (optionId) {
+        case M4DECODER_kOptionID_OutputFilter: {
+                M4DECODER_OutputFilter* pOutputFilter =
+                    (M4DECODER_OutputFilter*) pValue;
+                pDecShellContext->m_pFilter =
+                    (M4VIFI_PlanConverterFunctionType*)pOutputFilter->\
+                    m_pFilterFunction;
+                pDecShellContext->m_pFilterUserData =
+                    pOutputFilter->m_pFilterUserData;
+            }
+            break;
+        case M4DECODER_kOptionID_DeblockingFilter:
+            break;
+        default:
+            lerr = M4ERR_BAD_CONTEXT;
+            break;
+    }
+
+    LOGV("VideoEditorVideoDecoder_setOption: end with err = 0x%x", lerr);
+    return lerr;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_decode(M4OSA_Context context,
+        M4_MediaTime* pTime, M4OSA_Bool bJump) {
+    M4OSA_ERR lerr = M4NO_ERROR;
+    VideoEditorVideoDecoder_Context* pDecShellContext =
+        (VideoEditorVideoDecoder_Context*) context;
+    int64_t lFrameTime;
+    VIDEOEDITOR_BUFFER_Buffer* tmpDecBuffer;
+    MediaSource::ReadOptions decShellOptions;
+    MediaBuffer* pDecoderBuffer = NULL;
+    status_t errStatus;
+
+
+    LOGV("VideoEditorVideoDecoder_decode begin");
+
+    if( M4OSA_TRUE == pDecShellContext->mReachedEOS ) {
+        // Do not call read(), it could lead to a freeze
+        LOGV("VideoEditorVideoDecoder_decode : EOS already reached");
+        lerr = M4WAR_NO_MORE_AU;
+        goto VIDEOEDITOR_VideoDecode_cleanUP;
+    }
+    if(pDecShellContext->m_lastDecodedCTS >= *pTime) {
+        LOGV("VideoDecoder_decode: Already decoded up to this time CTS = %lf.",
+            pDecShellContext->m_lastDecodedCTS);
+        goto VIDEOEDITOR_VideoDecode_cleanUP;
+    }
+    if(M4OSA_TRUE == bJump) {
+        LOGV("VideoEditorVideoDecoder_decode: Jump called");
+        pDecShellContext->m_lastDecodedCTS = -1;
+        pDecShellContext->m_lastRenderCts = -1;
+    }
+
+    pDecShellContext->mNbInputFrames++;
+    if (0 > pDecShellContext->mFirstInputCts){
+        pDecShellContext->mFirstInputCts = *pTime;
+    }
+    pDecShellContext->mLastInputCts = *pTime;
+
+    while (pDecShellContext->m_lastDecodedCTS < *pTime) {
+        LOGV("VideoEditorVideoDecoder_decode, frameCTS = %lf, DecodeUpTo = %lf",
+            pDecShellContext->m_lastDecodedCTS, *pTime);
+        lerr = VIDEOEDITOR_BUFFER_getBuffer(pDecShellContext->m_pDecBufferPool,
+            VIDEOEDITOR_BUFFER_kEmpty, &tmpDecBuffer);
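+        // If no empty buffer is left, recycle the oldest filled one: that
+        // decoded frame is dropped and its slot reused for the next decode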
+        if (lerr == (M4OSA_UInt32)M4ERR_NO_BUFFER_AVAILABLE) {
+            lerr = VIDEOEDITOR_BUFFER_getOldestBuffer(
+                pDecShellContext->m_pDecBufferPool,
+                VIDEOEDITOR_BUFFER_kFilled, &tmpDecBuffer);
+            tmpDecBuffer->state = VIDEOEDITOR_BUFFER_kEmpty;
+            lerr = M4NO_ERROR;
+        }
+
+        if (lerr != M4NO_ERROR) {
+            goto VIDEOEDITOR_VideoDecode_cleanUP;
+        }
+
+        if (pDecoderBuffer != NULL) {
+            pDecoderBuffer->release();
+            pDecoderBuffer = NULL;
+        }
+
+        decShellOptions.reset();
+        errStatus = pDecShellContext->mVideoDecoder->read(&pDecoderBuffer,
+            &decShellOptions);
+        if (errStatus == ERROR_END_OF_STREAM) {
+            LOGV("End of stream reached, returning M4WAR_NO_MORE_AU ");
+            pDecShellContext->mReachedEOS = M4OSA_TRUE;
+            lerr = M4WAR_NO_MORE_AU;
+            goto VIDEOEDITOR_VideoDecode_cleanUP;
+        } else if ( INFO_FORMAT_CHANGED == errStatus ) {
+            LOGV("VideoDecoder_decode : source returned INFO_FORMAT_CHANGED,"
+                " reconfiguring the buffer pool from the new output format");
+            lerr = VideoEditorVideoDecoder_configureFromMetadata(
+                pDecShellContext,
+                pDecShellContext->mVideoDecoder->getFormat().get());
+            if( M4NO_ERROR != lerr ) {
+                LOGV("!!! VideoEditorVideoDecoder_decode ERROR : "
+                    "VideoDecoder_configureFromMetadata returns 0x%X", lerr);
+                break;
+            }
+            continue;
+        }
+
+        if( 0 < pDecoderBuffer->range_length() ) {
+            LOGV("VIDEOEDITOR_VideoDecoder frame buffer size = %d",
+                pDecoderBuffer->range_length());
+
+            pDecoderBuffer->meta_data()->findInt64(kKeyTime, &lFrameTime);
+            pDecShellContext->m_lastDecodedCTS = (M4_MediaTime)(lFrameTime/1000);
+            LOGV("VideoEditorVideoDecoder_decode, decoded frame time = %lf, "
+                "size = %d", (M4_MediaTime)lFrameTime, pDecoderBuffer->size() );
+
+            switch ( pDecShellContext->decOuputColorFormat ) {
+                case OMX_QCOM_COLOR_FormatYVU420SemiPlanar: {
+                    M4VIFI_ImagePlane tmpPlane[3];
+                    // Check that the decoded frame matches the stream resolution
+                    if( pDecoderBuffer->range_length() != (
+                        pDecShellContext->m_pVideoStreamhandler->m_videoWidth *
+                        pDecShellContext->m_pVideoStreamhandler->m_videoHeight
+                         * 3)/2 ) {
+                        LOGV("VideoEditorVideoDecoder_decode invalid frame size "
+                            "S=%d W=%d H=%d", pDecoderBuffer->range_length(),
+                            pDecShellContext->m_pVideoStreamhandler->m_videoWidth,
+                            pDecShellContext->m_pVideoStreamhandler->m_videoHeight);
+                        lerr = M4ERR_PARAMETER;
+                        goto VIDEOEDITOR_VideoDecode_cleanUP;
+                    }
+                    // Prepare the output image and convert the semi-planar
+                    // YVU420 decoder output to planar YUV420
+                    tmpPlane[0].u_width   =
+                        pDecShellContext->m_pVideoStreamhandler->m_videoWidth;
+                    tmpPlane[0].u_height  =
+                        pDecShellContext->m_pVideoStreamhandler->m_videoHeight;
+                    tmpPlane[0].u_topleft = 0;
+                    tmpPlane[0].u_stride  = tmpPlane[0].u_width;
+                    tmpPlane[0].pac_data  = (M4VIFI_UInt8*)tmpDecBuffer->pData;
+                    tmpPlane[1].u_width   = tmpPlane[0].u_width/2;
+                    tmpPlane[1].u_height  = tmpPlane[0].u_height/2;
+                    tmpPlane[1].u_topleft = 0;
+                    tmpPlane[1].u_stride  = tmpPlane[0].u_stride/2;
+                    tmpPlane[1].pac_data  = tmpPlane[0].pac_data +
+                        (tmpPlane[0].u_stride * tmpPlane[0].u_height);
+                    tmpPlane[2].u_width   = tmpPlane[1].u_width;
+                    tmpPlane[2].u_height  = tmpPlane[1].u_height;
+                    tmpPlane[2].u_topleft = 0;
+                    tmpPlane[2].u_stride  = tmpPlane[1].u_stride;
+                    tmpPlane[2].pac_data  = tmpPlane[1].pac_data +
+                        (tmpPlane[1].u_stride * tmpPlane[1].u_height);
+                    M4VIFI_SemiplanarYVU420toYUV420(M4OSA_NULL,
+                        (M4VIFI_UInt8 *)pDecoderBuffer->data() +
+                        pDecoderBuffer->range_offset(), &tmpPlane[0]);
+                    break;
+                }
+                case OMX_COLOR_FormatYUV420Planar:
+                    // Already in the expected planar format, just copy
+                    M4OSA_memcpy((M4OSA_MemAddr8)tmpDecBuffer->pData,
+                        (M4OSA_MemAddr8) pDecoderBuffer->data() +
+                        pDecoderBuffer->range_offset(),
+                        (M4OSA_UInt32)pDecoderBuffer->range_length());
+                    break;
+                default:
+                    LOGV("VideoDecoder_decode: unexpected color format 0x%X",
+                        pDecShellContext->decOuputColorFormat);
+                    lerr = M4ERR_PARAMETER;
+                    goto VIDEOEDITOR_VideoDecode_cleanUP;
+            }
+
+            tmpDecBuffer->buffCTS = pDecShellContext->m_lastDecodedCTS;
+            tmpDecBuffer->state = VIDEOEDITOR_BUFFER_kFilled;
+            tmpDecBuffer->size = pDecoderBuffer->size();
+
+        } else {
+            LOGV("VideoEditorVideoDecoder_decode : empty buffer was returned");
+        }
+    }
+    pDecShellContext->mNbOutputFrames++;
+    if ( 0 > pDecShellContext->mFirstOutputCts ) {
+        pDecShellContext->mFirstOutputCts = *pTime;
+    }
+    pDecShellContext->mLastOutputCts = *pTime;
+
+VIDEOEDITOR_VideoDecode_cleanUP:
+    *pTime = pDecShellContext->m_lastDecodedCTS;
+    if (pDecoderBuffer != NULL) {
+        pDecoderBuffer->release();
+        pDecoderBuffer = NULL;
+    }
+
+    LOGV("VideoEditorVideoDecoder_decode: end with 0x%x", lerr);
+    return lerr;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_render(M4OSA_Context context,
+        M4_MediaTime* pTime, M4VIFI_ImagePlane* pOutputPlane,
+        M4OSA_Bool bForceRender) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoDecoder_Context* pDecShellContext =
+        (VideoEditorVideoDecoder_Context*) context;
+    M4OSA_UInt32 i;
+    VIDEOEDITOR_BUFFER_Buffer* pTmpVIDEOEDITORBuffer = M4OSA_NULL;
+    VIDEOEDITOR_BUFFER_Buffer* pRenderVIDEOEDITORBuffer = M4OSA_NULL;
+    M4_MediaTime candidateTimeStamp = -1;
+    M4OSA_Bool bFound = M4OSA_FALSE;
+
+    LOGV("VideoEditorVideoDecoder_render begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != context, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pTime, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pOutputPlane, M4ERR_PARAMETER);
+
+    // The output buffer is already allocated, just copy the data
+    if ( (*pTime <= pDecShellContext->m_lastRenderCts) &&
+            (M4OSA_FALSE == bForceRender) ) {
+        LOGV("VIDEOEDITOR_VIDEO_render Frame in the past");
+        err = M4WAR_VIDEORENDERER_NO_NEW_FRAME;
+        goto cleanUp;
+    }
+    LOGV("VideoDecoder_render: lastRendered time = %lf,requested render time = "
+        "%lf", pDecShellContext->m_lastRenderCts, *pTime);
+
+    /**
+     * Find the buffer appropriate for rendering.  */
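+    // Pick the newest filled buffer whose timestamp lies between the last
+    // rendered CTS and the requested time; filled buffers older than the
+    // last rendered frame are released along the way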
+    for (i=0; i < pDecShellContext->m_pDecBufferPool->NB; i++) {
+        pTmpVIDEOEDITORBuffer = &pDecShellContext->m_pDecBufferPool\
+            ->pNXPBuffer[i];
+        if (pTmpVIDEOEDITORBuffer->state == VIDEOEDITOR_BUFFER_kFilled) {
+            /** Free all those buffers older than last rendered frame. */
+            if (pTmpVIDEOEDITORBuffer->buffCTS < pDecShellContext->\
+                    m_lastRenderCts) {
+                pTmpVIDEOEDITORBuffer->state = VIDEOEDITOR_BUFFER_kEmpty;
+            }
+
+            /** Get the buffer with appropriate timestamp  */
+            if ( (pTmpVIDEOEDITORBuffer->buffCTS >= pDecShellContext->\
+                    m_lastRenderCts) &&
+                (pTmpVIDEOEDITORBuffer->buffCTS <= *pTime) &&
+                (pTmpVIDEOEDITORBuffer->buffCTS > candidateTimeStamp)) {
+                bFound = M4OSA_TRUE;
+                pRenderVIDEOEDITORBuffer = pTmpVIDEOEDITORBuffer;
+                candidateTimeStamp = pTmpVIDEOEDITORBuffer->buffCTS;
+                LOGV("VideoDecoder_render: found a buffer with timestamp = %lf",
+                    candidateTimeStamp);
+            }
+        }
+    }
+    if (M4OSA_FALSE == bFound) {
+        err = M4WAR_VIDEORENDERER_NO_NEW_FRAME;
+        goto cleanUp;
+    }
+
+    LOGV("VideoEditorVideoDecoder_render output plane %d %d %d %d",
+        pOutputPlane[0].u_width, pOutputPlane[0].u_height,
+        pOutputPlane[0].u_topleft, pOutputPlane[0].u_stride);
+
+    pDecShellContext->m_lastRenderCts = candidateTimeStamp;
+
+    if( M4OSA_NULL != pDecShellContext->m_pFilter ) {
+        // Filtering was requested
+        M4VIFI_ImagePlane tmpPlane[3];
+        // Prepare the output image for conversion
+        tmpPlane[0].u_width   =
+            pDecShellContext->m_pVideoStreamhandler->m_videoWidth;
+        tmpPlane[0].u_height  =
+            pDecShellContext->m_pVideoStreamhandler->m_videoHeight;
+        tmpPlane[0].u_topleft = 0;
+        tmpPlane[0].u_stride  = tmpPlane[0].u_width;
+        tmpPlane[0].pac_data  = (M4VIFI_UInt8*)pRenderVIDEOEDITORBuffer->pData;
+        tmpPlane[1].u_width   = tmpPlane[0].u_width/2;
+        tmpPlane[1].u_height  = tmpPlane[0].u_height/2;
+        tmpPlane[1].u_topleft = 0;
+        tmpPlane[1].u_stride  = tmpPlane[0].u_stride/2;
+        tmpPlane[1].pac_data  = tmpPlane[0].pac_data +
+            (tmpPlane[0].u_stride * tmpPlane[0].u_height);
+        tmpPlane[2].u_width   = tmpPlane[1].u_width;
+        tmpPlane[2].u_height  = tmpPlane[1].u_height;
+        tmpPlane[2].u_topleft = 0;
+        tmpPlane[2].u_stride  = tmpPlane[1].u_stride;
+        tmpPlane[2].pac_data  = tmpPlane[1].pac_data +
+            (tmpPlane[1].u_stride * tmpPlane[1].u_height);
+
+        LOGV("VideoEditorVideoDecoder_render w = %d H = %d",
+            tmpPlane[0].u_width,tmpPlane[0].u_height);
+        pDecShellContext->m_pFilter(M4OSA_NULL, &tmpPlane[0], pOutputPlane);
+    } else {
+        // Just copy the YUV420P buffer
+        M4OSA_MemAddr8 tempBuffPtr =
+            (M4OSA_MemAddr8)pRenderVIDEOEDITORBuffer->pData;
+        M4OSA_UInt32 tempWidth =
+            pDecShellContext->m_pVideoStreamhandler->m_videoWidth;
+        M4OSA_UInt32 tempHeight =
+            pDecShellContext->m_pVideoStreamhandler->m_videoHeight;
+
+        M4OSA_memcpy((M4OSA_MemAddr8) pOutputPlane[0].pac_data, tempBuffPtr,
+            tempWidth * tempHeight);
+        tempBuffPtr += (tempWidth * tempHeight);
+        M4OSA_memcpy((M4OSA_MemAddr8) pOutputPlane[1].pac_data, tempBuffPtr,
+            (tempWidth/2) * (tempHeight/2));
+        tempBuffPtr += ((tempWidth/2) * (tempHeight/2));
+        M4OSA_memcpy((M4OSA_MemAddr8) pOutputPlane[2].pac_data, tempBuffPtr,
+            (tempWidth/2) * (tempHeight/2));
+    }
+
+    pDecShellContext->mNbRenderedFrames++;
+    if ( 0 > pDecShellContext->mFirstRenderedCts ) {
+        pDecShellContext->mFirstRenderedCts = *pTime;
+    }
+    pDecShellContext->mLastRenderedCts = *pTime;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        *pTime = pDecShellContext->m_lastRenderCts;
+        LOGV("VideoEditorVideoDecoder_render no error");
+    } else {
+        LOGV("VideoEditorVideoDecoder_render ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoDecoder_render end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_getInterface(M4DECODER_VideoType decoderType,
+        M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
+    M4DECODER_VideoInterface* pDecoderInterface = M4OSA_NULL;
+
+    pDecoderInterface = (M4DECODER_VideoInterface*)M4OSA_malloc(
+        sizeof(M4DECODER_VideoInterface), M4DECODER_EXTERNAL,
+        (M4OSA_Char*)"VideoEditorVideoDecoder_getInterface" );
+    if (M4OSA_NULL == pDecoderInterface) {
+        return M4ERR_ALLOC;
+    }
+
+    *pDecoderType = decoderType;
+
+    pDecoderInterface->m_pFctCreate    = VideoEditorVideoDecoder_create;
+    pDecoderInterface->m_pFctDestroy   = VideoEditorVideoDecoder_destroy;
+    pDecoderInterface->m_pFctGetOption = VideoEditorVideoDecoder_getOption;
+    pDecoderInterface->m_pFctSetOption = VideoEditorVideoDecoder_setOption;
+    pDecoderInterface->m_pFctDecode    = VideoEditorVideoDecoder_decode;
+    pDecoderInterface->m_pFctRender    = VideoEditorVideoDecoder_render;
+
+    *pDecInterface = (M4OSA_Context)pDecoderInterface;
+    return M4NO_ERROR;
+}
+
+extern "C" {
+
+M4OSA_ERR VideoEditorVideoDecoder_getInterface_MPEG4(
+        M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
+    return VideoEditorVideoDecoder_getInterface(M4DECODER_kVideoTypeMPEG4,
+        pDecoderType, pDecInterface);
+}
+
+M4OSA_ERR VideoEditorVideoDecoder_getInterface_H264(
+        M4DECODER_VideoType *pDecoderType, M4OSA_Context *pDecInterface) {
+    return VideoEditorVideoDecoder_getInterface(M4DECODER_kVideoTypeAVC,
+        pDecoderType, pDecInterface);
+}
+
+}  // extern "C"
diff --git a/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoEncoder.cpp b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoEncoder.cpp
new file mode 100755
index 0000000..0813b5c
--- /dev/null
+++ b/libvideoeditor/vss/stagefrightshells/src/VideoEditorVideoEncoder.cpp
@@ -0,0 +1,1288 @@
+/*
+ * Copyright (C) 2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+*************************************************************************
+* @file   VideoEditorVideoEncoder.cpp
+* @brief  StageFright shell video encoder
+*************************************************************************
+*/
+#define LOG_NDEBUG 1
+#define LOG_TAG "VIDEOEDITOR_VIDEOENCODER"
+
+/*******************
+ *     HEADERS     *
+ *******************/
+#include "M4OSA_Debug.h"
+#include "M4SYS_AccessUnit.h"
+#include "VideoEditorVideoEncoder.h"
+#include "VideoEditorUtils.h"
+
+#include "utils/Log.h"
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/OMXCodec.h>
+#include "OMX_Video.h"
+
+/********************
+ *   DEFINITIONS    *
+ ********************/
+
+// Minimum number of buffers in the source required to allow encoding
+#define VIDEOEDITOR_MIN_BUFFER_NB 15
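+// (processInputBuffer reports M4WAR_SF_LOW_BUFFER until at least this many
+// input buffers are queued, so that the caller can keep feeding frames
+// before the encoder output is drained)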
+
+// Not enough source buffers available
+#define M4WAR_SF_LOW_BUFFER M4OSA_ERR_CREATE(M4_WAR, 0xFF, 0x00001)
+
+// Encoder color format
+#define VIDEOEDITOR_ENCODER_COLOR_FORMAT OMX_COLOR_FormatYUV420Planar
+
+// Force using hardware encoder
+#define VIDEOEDITOR_FORCECODEC kHardwareCodecsOnly
+
+// Force Encoder to produce a DSI by sending fake input frames upon creation
+#define VIDEOEDITOR_ENCODER_GET_DSI_AT_CREATION
+
+#if defined(VIDEOEDITOR_ENCODER_GET_DSI_AT_CREATION) && \
+    !defined(VIDEOEDITOR_FORCECODEC)
+    #error "Cannot force DSI retrieval if codec type is not fixed"
+#endif
+
+/********************
+ *   SOURCE CLASS   *
+ ********************/
+
+namespace android {
+
+struct VideoEditorVideoEncoderSource : public MediaSource {
+    public:
+        static sp<VideoEditorVideoEncoderSource> Create();
+        virtual status_t start(MetaData *params = NULL);
+        virtual status_t stop();
+        virtual sp<MetaData> getFormat();
+        virtual status_t read(MediaBuffer **buffer,
+            const ReadOptions *options = NULL);
+        virtual int32_t storeBuffer(MediaBuffer *buffer);
+
+    protected:
+        virtual ~VideoEditorVideoEncoderSource();
+
+    private:
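+        // Simple singly-linked FIFO used to pass input MediaBuffers from
+        // storeBuffer() to read()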
+        struct MediaBufferChain {
+            MediaBuffer* buffer;
+            MediaBufferChain* nextLink;
+        };
+        enum State {
+            CREATED,
+            STARTED,
+            ERROR
+        };
+        VideoEditorVideoEncoderSource();
+        MediaBufferChain* mFirstBufferLink;
+        MediaBufferChain* mLastBufferLink;
+        int32_t           mNbBuffer;
+        bool              mIsEOS;
+        State             mState;
+};
+
+sp<VideoEditorVideoEncoderSource> VideoEditorVideoEncoderSource::Create() {
+
+    sp<VideoEditorVideoEncoderSource> aSource =
+        new VideoEditorVideoEncoderSource();
+    return aSource;
+}
+
+VideoEditorVideoEncoderSource::VideoEditorVideoEncoderSource():
+        mFirstBufferLink(NULL),
+        mLastBufferLink(NULL),
+        mNbBuffer(0),
+        mIsEOS(false),
+        mState(CREATED) {
+    LOGV("VideoEditorVideoEncoderSource::VideoEditorVideoEncoderSource");
+}
+
+VideoEditorVideoEncoderSource::~VideoEditorVideoEncoderSource() {
+
+    // Safety clean up
+    if( STARTED == mState ) {
+        stop();
+    }
+}
+
+status_t VideoEditorVideoEncoderSource::start(MetaData *meta) {
+    status_t err = OK;
+
+    LOGV("VideoEditorVideoEncoderSource::start() begin");
+
+    if( CREATED != mState ) {
+        LOGV("VideoEditorVideoEncoderSource::start: invalid state %d", mState);
+        return UNKNOWN_ERROR;
+    }
+    mState = STARTED;
+
+    LOGV("VideoEditorVideoEncoderSource::start() END (0x%x)", err);
+    return err;
+}
+
+status_t VideoEditorVideoEncoderSource::stop() {
+    status_t err = OK;
+
+    LOGV("VideoEditorVideoEncoderSource::stop() begin");
+
+    if( STARTED != mState ) {
+        LOGV("VideoEditorVideoEncoderSource::stop: invalid state %d", mState);
+        return UNKNOWN_ERROR;
+    }
+
+    // Release the buffer chain
+    int32_t i = 0;
+    MediaBufferChain* tmpLink = NULL;
+    while( mFirstBufferLink ) {
+        i++;
+        tmpLink = mFirstBufferLink;
+        mFirstBufferLink = mFirstBufferLink->nextLink;
+        delete tmpLink;
+    }
+    LOGV("VideoEditorVideoEncoderSource::stop : %d buffers remained", i);
+    mFirstBufferLink = NULL;
+    mLastBufferLink = NULL;
+
+    mState = CREATED;
+
+    LOGV("VideoEditorVideoEncoderSource::stop() END (0x%x)", err);
+    return err;
+}
+
+sp<MetaData> VideoEditorVideoEncoderSource::getFormat() {
+
+    LOGW("VideoEditorVideoEncoderSource::getFormat:THIS IS NOT IMPLEMENTED");
+    return NULL;
+}
+
+status_t VideoEditorVideoEncoderSource::read(MediaBuffer **buffer,
+        const ReadOptions *options) {
+    MediaSource::ReadOptions readOptions;
+    status_t err = OK;
+    MediaBufferChain* tmpLink = NULL;
+
+    LOGV("VideoEditorVideoEncoderSource::read() begin");
+
+    if ( STARTED != mState ) {
+        LOGV("VideoEditorVideoEncoderSource::read: invalid state %d", mState);
+        return UNKNOWN_ERROR;
+    }
+
+    // Get a buffer from the chain
+    if ( NULL == mFirstBufferLink ) {
+        *buffer = NULL;
+        if( mIsEOS ) {
+            LOGV("VideoEditorVideoEncoderSource::read : EOS");
+            return ERROR_END_OF_STREAM;
+        } else {
+            LOGV("VideoEditorVideoEncoderSource::read: no buffer available");
+            return ERROR_END_OF_STREAM;
+        }
+    }
+    *buffer = mFirstBufferLink->buffer;
+    tmpLink = mFirstBufferLink;
+    mFirstBufferLink = mFirstBufferLink->nextLink;
+
+    if ( NULL == mFirstBufferLink ) {
+        mLastBufferLink = NULL;
+    }
+    delete tmpLink;
+    mNbBuffer--;
+
+    LOGV("VideoEditorVideoEncoderSource::read() END (0x%x)", err);
+    return err;
+}
+
+int32_t VideoEditorVideoEncoderSource::storeBuffer(MediaBuffer *buffer) {
+    status_t err = OK;
+
+    LOGV("VideoEditorVideoEncoderSource::storeBuffer() begin");
+
+    if( NULL == buffer ) {
+        LOGV("VideoEditorVideoEncoderSource::storeBuffer : reached EOS");
+        mIsEOS = true;
+    } else {
+        MediaBufferChain* newLink = new MediaBufferChain;
+        newLink->buffer = buffer;
+        newLink->nextLink = NULL;
+        if( NULL != mLastBufferLink ) {
+            mLastBufferLink->nextLink = newLink;
+        } else {
+            mFirstBufferLink = newLink;
+        }
+        mLastBufferLink = newLink;
+        mNbBuffer++;
+    }
+    LOGV("VideoEditorVideoEncoderSource::storeBuffer() end");
+    return mNbBuffer;
+}
+
+/**
+ ******************************************************************************
+ * structure VideoEditorVideoEncoder_Context
+ * @brief    This structure defines the context of the StageFright video encoder
+ *           shell
+ ******************************************************************************
+*/
+typedef enum {
+    CREATED   = 0x1,
+    OPENED    = 0x2,
+    STARTED   = 0x4,
+    BUFFERING = 0x8,
+    READING   = 0x10
+} VideoEditorVideoEncoder_State;
+
+typedef struct {
+    VideoEditorVideoEncoder_State     mState;
+    M4ENCODER_Format                  mFormat;
+    M4WRITER_DataInterface*           mWriterDataInterface;
+    M4VPP_apply_fct*                  mPreProcFunction;
+    M4VPP_Context                     mPreProcContext;
+    M4SYS_AccessUnit*                 mAccessUnit;
+    M4ENCODER_Params*                 mCodecParams;
+    M4ENCODER_Header                  mHeader;
+    H264MCS_ProcessEncodedNALU_fct*   mH264NALUPostProcessFct;
+    M4OSA_Context                     mH264NALUPostProcessCtx;
+    M4OSA_UInt32                      mLastCTS;
+    sp<VideoEditorVideoEncoderSource> mEncoderSource;
+    OMXClient                         mClient;
+    sp<MediaSource>                   mEncoder;
+    OMX_COLOR_FORMATTYPE              mEncoderColorFormat;
+
+    uint32_t                          mNbInputFrames;
+    double                            mFirstInputCts;
+    double                            mLastInputCts;
+    uint32_t                          mNbOutputFrames;
+    int64_t                           mFirstOutputCts;
+    int64_t                           mLastOutputCts;
+
+} VideoEditorVideoEncoder_Context;
+
+/********************
+ *      TOOLS       *
+ ********************/
+
+M4OSA_ERR VideoEditorVideoEncoder_getDSI(M4ENCODER_Context pContext,
+        sp<MetaData> metaData) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context*  pEncoderContext = M4OSA_NULL;
+    status_t result = OK;
+    int32_t nbBuffer = 0;
+    int32_t stride = 0;
+    int32_t height = 0;
+    int32_t framerate = 0;
+    int32_t isCodecConfig = 0;
+    size_t size = 0;
+    uint32_t codecFlags = 0;
+    MediaBuffer* inputBuffer = NULL;
+    MediaBuffer* outputBuffer = NULL;
+    sp<VideoEditorVideoEncoderSource> encoderSource = NULL;
+    sp<MediaSource> encoder = NULL;
+    OMXClient client;
+
+    LOGV("VideoEditorVideoEncoder_getDSI begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext,       M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != metaData.get(), M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+    VIDEOEDITOR_CHECK(CREATED == pEncoderContext->mState, M4ERR_STATE);
+
+    // Create the encoder source
+    encoderSource = VideoEditorVideoEncoderSource::Create();
+    VIDEOEDITOR_CHECK(NULL != encoderSource.get(), M4ERR_STATE);
+
+    // Connect to the OMX client
+    result = client.connect();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    // Create the OMX codec
+    // VIDEOEDITOR_FORCECODEC MUST be defined here
+    codecFlags |= OMXCodec::VIDEOEDITOR_FORCECODEC;
+    encoder = OMXCodec::Create(client.interface(), metaData, true,
+        encoderSource, NULL, codecFlags);
+    VIDEOEDITOR_CHECK(NULL != encoder.get(), M4ERR_STATE);
+
+    /**
+     * Send fake frames and retrieve the DSI
+     */
+    // Send a fake frame to the source
+    metaData->findInt32(kKeyStride,     &stride);
+    metaData->findInt32(kKeyHeight,     &height);
+    metaData->findInt32(kKeySampleRate, &framerate);
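+    // One input frame in YUV 4:2:0 occupies stride * height * 3/2 bytes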
+    size = (size_t)(stride*height*3)/2;
+    inputBuffer = new MediaBuffer(size);
+    inputBuffer->meta_data()->setInt64(kKeyTime, 0);
+    nbBuffer = encoderSource->storeBuffer(inputBuffer);
+    encoderSource->storeBuffer(NULL); // Signal EOS
+
+    // Call read once to get the DSI
+    result = encoder->start();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+    result = encoder->read(&outputBuffer, NULL);
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+    VIDEOEDITOR_CHECK(outputBuffer->meta_data()->findInt32(
+        kKeyIsCodecConfig, &isCodecConfig) && isCodecConfig, M4ERR_STATE);
+
+    VIDEOEDITOR_CHECK(M4OSA_NULL == pEncoderContext->mHeader.pBuf, M4ERR_STATE);
+    if ( M4ENCODER_kH264 == pEncoderContext->mFormat ) {
+        // For H264, format the DSI
+        result = buildAVCCodecSpecificData(
+            (uint8_t**)(&(pEncoderContext->mHeader.pBuf)),
+            (size_t*)(&(pEncoderContext->mHeader.Size)),
+            (const uint8_t*)outputBuffer->data() + outputBuffer->range_offset(),
+            outputBuffer->range_length(), encoder->getFormat().get());
+        outputBuffer->release();
+        VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+    } else {
+        // For MPEG4, just copy the DSI
+        pEncoderContext->mHeader.Size =
+            (M4OSA_UInt32)outputBuffer->range_length();
+        SAFE_MALLOC(pEncoderContext->mHeader.pBuf, M4OSA_Int8,
+            pEncoderContext->mHeader.Size, "Encoder header");
+        M4OSA_memcpy(pEncoderContext->mHeader.pBuf,
+            (M4OSA_MemAddr8)(outputBuffer->data())+outputBuffer->range_offset(),
+            pEncoderContext->mHeader.Size);
+        outputBuffer->release();
+    }
+
+    result = encoder->stop();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+cleanUp:
+    // Destroy the graph
+    if ( encoder != NULL ) { encoder.clear(); }
+    client.disconnect();
+    if ( encoderSource != NULL ) { encoderSource.clear(); }
+    if ( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoEncoder_getDSI no error");
+    } else {
+        LOGV("VideoEditorVideoEncoder_getDSI ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoEncoder_getDSI end");
+    return err;
+}
+/********************
+ * ENGINE INTERFACE *
+ ********************/
+
+M4OSA_ERR VideoEditorVideoEncoder_cleanup(M4ENCODER_Context pContext) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    LOGV("VideoEditorVideoEncoder_cleanup begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+    VIDEOEDITOR_CHECK(CREATED == pEncoderContext->mState, M4ERR_STATE);
+
+    // Release memory
+    SAFE_FREE(pEncoderContext->mHeader.pBuf);
+    SAFE_FREE(pEncoderContext);
+    pContext = M4OSA_NULL;
+
+cleanUp:
+    if ( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoEncoder_cleanup no error");
+    } else {
+        LOGV("VideoEditorVideoEncoder_cleanup ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoEncoder_cleanup end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_init(M4ENCODER_Format format,
+        M4ENCODER_Context* pContext,
+        M4WRITER_DataInterface* pWriterDataInterface,
+        M4VPP_apply_fct* pVPPfct, M4VPP_Context pVPPctxt,
+        M4OSA_Void* pExternalAPI, M4OSA_Void* pUserData) {
+
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    LOGV("VideoEditorVideoEncoder_init begin: format  %d", format);
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pWriterDataInterface, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pVPPfct, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pVPPctxt, M4ERR_PARAMETER);
+
+    // Context allocation & initialization
+    SAFE_MALLOC(pEncoderContext, VideoEditorVideoEncoder_Context, 1,
+        "VideoEditorVideoEncoder");
+    pEncoderContext->mState = CREATED;
+    pEncoderContext->mFormat = format;
+    pEncoderContext->mWriterDataInterface = pWriterDataInterface;
+    pEncoderContext->mPreProcFunction = pVPPfct;
+    pEncoderContext->mPreProcContext = pVPPctxt;
+
+    *pContext = pEncoderContext;
+
+cleanUp:
+    if ( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoEncoder_init no error");
+    } else {
+        VideoEditorVideoEncoder_cleanup(pEncoderContext);
+        *pContext = M4OSA_NULL;
+        LOGV("VideoEditorVideoEncoder_init ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoEncoder_init end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_init_H263(M4ENCODER_Context* pContext,
+        M4WRITER_DataInterface* pWriterDataInterface, M4VPP_apply_fct* pVPPfct,
+        M4VPP_Context pVPPctxt, M4OSA_Void* pExternalAPI, M4OSA_Void* pUserData)
+        {
+
+    return VideoEditorVideoEncoder_init(M4ENCODER_kH263, pContext,
+        pWriterDataInterface, pVPPfct, pVPPctxt, pExternalAPI, pUserData);
+}
+
+
+M4OSA_ERR VideoEditorVideoEncoder_init_MPEG4(M4ENCODER_Context* pContext,
+        M4WRITER_DataInterface* pWriterDataInterface, M4VPP_apply_fct* pVPPfct,
+        M4VPP_Context pVPPctxt, M4OSA_Void* pExternalAPI, M4OSA_Void* pUserData)
+        {
+
+    return VideoEditorVideoEncoder_init(M4ENCODER_kMPEG4, pContext,
+        pWriterDataInterface, pVPPfct, pVPPctxt, pExternalAPI, pUserData);
+}
+
+
+M4OSA_ERR VideoEditorVideoEncoder_init_H264(M4ENCODER_Context* pContext,
+        M4WRITER_DataInterface* pWriterDataInterface, M4VPP_apply_fct* pVPPfct,
+        M4VPP_Context pVPPctxt, M4OSA_Void* pExternalAPI, M4OSA_Void* pUserData)
+        {
+
+    return VideoEditorVideoEncoder_init(M4ENCODER_kH264, pContext,
+        pWriterDataInterface, pVPPfct, pVPPctxt, pExternalAPI, pUserData);
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_close(M4ENCODER_Context pContext) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    LOGV("VideoEditorVideoEncoder_close begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+    VIDEOEDITOR_CHECK(OPENED == pEncoderContext->mState, M4ERR_STATE);
+
+    // Release memory
+    SAFE_FREE(pEncoderContext->mCodecParams);
+
+    // Destroy the graph
+    pEncoderContext->mEncoder.clear();
+    pEncoderContext->mClient.disconnect();
+    pEncoderContext->mEncoderSource.clear();
+
+    // Set the new state
+    pEncoderContext->mState = CREATED;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoEncoder_close no error");
+    } else {
+        LOGV("VideoEditorVideoEncoder_close ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoEncoder_close end");
+    return err;
+}
+
+
+M4OSA_ERR VideoEditorVideoEncoder_open(M4ENCODER_Context pContext,
+        M4SYS_AccessUnit* pAU, M4OSA_Void* pParams) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+    M4ENCODER_Params* pCodecParams = M4OSA_NULL;
+    status_t result = OK;
+    sp<MetaData> encoderMetadata = NULL;
+    const char* mime = NULL;
+    int32_t iProfile = 0;
+    int32_t iFrameRate = 0;
+    uint32_t codecFlags = 0;
+
+    LOGV(">>> VideoEditorVideoEncoder_open begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pAU,      M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pParams,  M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+    pCodecParams = (M4ENCODER_Params*)pParams;
+    VIDEOEDITOR_CHECK(CREATED == pEncoderContext->mState, M4ERR_STATE);
+
+    // Context initialization
+    pEncoderContext->mAccessUnit = pAU;
+
+    // Allocate & initialize the encoding parameters
+    SAFE_MALLOC(pEncoderContext->mCodecParams, M4ENCODER_Params, 1,
+        "VideoEditorVideoEncoder");
+
+
+    pEncoderContext->mCodecParams->InputFormat = pCodecParams->InputFormat;
+    pEncoderContext->mCodecParams->InputFrameWidth =
+        pCodecParams->InputFrameWidth;
+    pEncoderContext->mCodecParams->InputFrameHeight =
+        pCodecParams->InputFrameHeight;
+    pEncoderContext->mCodecParams->FrameWidth = pCodecParams->FrameWidth;
+    pEncoderContext->mCodecParams->FrameHeight = pCodecParams->FrameHeight;
+    pEncoderContext->mCodecParams->Bitrate = pCodecParams->Bitrate;
+    pEncoderContext->mCodecParams->FrameRate = pCodecParams->FrameRate;
+    pEncoderContext->mCodecParams->Format = pCodecParams->Format;
+
+    // Check output format consistency and resolution
+    VIDEOEDITOR_CHECK(
+        pEncoderContext->mCodecParams->Format == pEncoderContext->mFormat,
+        M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(0 == pEncoderContext->mCodecParams->FrameWidth  % 16,
+        M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(0 == pEncoderContext->mCodecParams->FrameHeight % 16,
+        M4ERR_PARAMETER);
+
+    /**
+     * StageFright graph building
+     */
+
+    // Create the meta data for the encoder
+    encoderMetadata = new MetaData;
+    switch( pEncoderContext->mCodecParams->Format ) {
+        case M4ENCODER_kH263:
+            mime     = MEDIA_MIMETYPE_VIDEO_H263;
+            iProfile = OMX_VIDEO_H263ProfileBaseline;
+            break;
+        case M4ENCODER_kMPEG4:
+            mime     = MEDIA_MIMETYPE_VIDEO_MPEG4;
+            iProfile = OMX_VIDEO_MPEG4ProfileSimple;
+            break;
+        case M4ENCODER_kH264:
+            mime     = MEDIA_MIMETYPE_VIDEO_AVC;
+            iProfile = OMX_VIDEO_AVCProfileBaseline;
+            break;
+        default:
+            VIDEOEDITOR_CHECK(!"VideoEncoder_open : incorrect input format",
+                M4ERR_PARAMETER);
+            break;
+    }
+    encoderMetadata->setCString(kKeyMIMEType, mime);
+    encoderMetadata->setInt32(kKeyVideoProfile, iProfile);
+    encoderMetadata->setInt32(kKeyWidth,
+        (int32_t)pEncoderContext->mCodecParams->FrameWidth);
+    encoderMetadata->setInt32(kKeyStride,
+        (int32_t)pEncoderContext->mCodecParams->FrameWidth);
+    encoderMetadata->setInt32(kKeyHeight,
+        (int32_t)pEncoderContext->mCodecParams->FrameHeight);
+    encoderMetadata->setInt32(kKeySliceHeight,
+        (int32_t)pEncoderContext->mCodecParams->FrameHeight);
+
+    switch( pEncoderContext->mCodecParams->FrameRate ) {
+        case M4ENCODER_k5_FPS:    iFrameRate = 5;  break;
+        case M4ENCODER_k7_5_FPS:  iFrameRate = 8;  break;
+        case M4ENCODER_k10_FPS:   iFrameRate = 10; break;
+        case M4ENCODER_k12_5_FPS: iFrameRate = 13; break;
+        case M4ENCODER_k15_FPS:   iFrameRate = 15; break;
+        case M4ENCODER_k20_FPS:   iFrameRate = 20; break;
+        case M4ENCODER_k25_FPS:   iFrameRate = 25; break;
+        case M4ENCODER_k30_FPS:   iFrameRate = 30; break;
+        case M4ENCODER_kVARIABLE_FPS:
+            iFrameRate = 30;
+            LOGI("Frame rate set to M4ENCODER_kVARIABLE_FPS: set to 30");
+          break;
+        case M4ENCODER_kUSE_TIMESCALE:
+            iFrameRate = 30;
+            LOGI("Frame rate set to M4ENCODER_kUSE_TIMESCALE:  set to 30");
+            break;
+
+        default:
+            VIDEOEDITOR_CHECK(!"VideoEncoder_open:incorrect framerate",
+                M4ERR_STATE);
+            break;
+    }
+    encoderMetadata->setInt32(kKeyFrameRate, iFrameRate);
+    encoderMetadata->setInt32(kKeyBitRate,
+        (int32_t)pEncoderContext->mCodecParams->Bitrate);
+    encoderMetadata->setInt32(kKeyIFramesInterval, 1);
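+    // kKeyIFramesInterval is expressed in seconds: request roughly one
+    // I-frame per second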
+
+    pEncoderContext->mEncoderColorFormat = VIDEOEDITOR_ENCODER_COLOR_FORMAT;
+    encoderMetadata->setInt32(kKeyColorFormat,
+        pEncoderContext->mEncoderColorFormat);
+
+#ifdef VIDEOEDITOR_ENCODER_GET_DSI_AT_CREATION
+    // Get the encoder DSI
+    err = VideoEditorVideoEncoder_getDSI(pEncoderContext, encoderMetadata);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+#endif /* VIDEOEDITOR_ENCODER_GET_DSI_AT_CREATION */
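+    // When the DSI is fetched at creation time (above), the codec specific
+    // header is available before encoding starts; otherwise it is captured
+    // from the first codec-config buffer in processOutputBuffer()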
+
+    // Create the encoder source
+    pEncoderContext->mEncoderSource = VideoEditorVideoEncoderSource::Create();
+    VIDEOEDITOR_CHECK(
+        NULL != pEncoderContext->mEncoderSource.get(), M4ERR_STATE);
+
+    // Connect to the OMX client
+    result = pEncoderContext->mClient.connect();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    // Create the OMX codec
+#ifdef VIDEOEDITOR_FORCECODEC
+    codecFlags |= OMXCodec::VIDEOEDITOR_FORCECODEC;
+#endif /* VIDEOEDITOR_FORCECODEC */
+    pEncoderContext->mEncoder = OMXCodec::Create(
+        pEncoderContext->mClient.interface(), encoderMetadata, true,
+        pEncoderContext->mEncoderSource, NULL, codecFlags);
+    VIDEOEDITOR_CHECK(NULL != pEncoderContext->mEncoder.get(), M4ERR_STATE);
+    LOGV("VideoEditorVideoEncoder_open : DONE");
+
+    // Set the new state
+    pEncoderContext->mState = OPENED;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoEncoder_open no error");
+    } else {
+        VideoEditorVideoEncoder_close(pEncoderContext);
+        LOGV("VideoEditorVideoEncoder_open ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoEncoder_open end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_processOutputBuffer(
+        M4ENCODER_Context pContext, MediaBuffer* buffer);
+M4OSA_ERR VideoEditorVideoEncoder_processInputBuffer(
+        M4ENCODER_Context pContext, M4OSA_Double Cts,
+        M4OSA_Bool bReachedEOS) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+    M4VIFI_ImagePlane pOutPlane[3];
+    MediaBuffer* buffer = NULL;
+    int32_t nbBuffer = 0;
+
+    LOGV("VideoEditorVideoEncoder_processInputBuffer begin: cts  %f", Cts);
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+    pOutPlane[0].pac_data = M4OSA_NULL;
+    pOutPlane[1].pac_data = M4OSA_NULL;
+    pOutPlane[2].pac_data = M4OSA_NULL;
+
+    if ( M4OSA_FALSE == bReachedEOS ) {
+        M4OSA_UInt32 sizeY = pEncoderContext->mCodecParams->FrameWidth *
+            pEncoderContext->mCodecParams->FrameHeight;
+        M4OSA_UInt32 sizeU = sizeY >> 2;
+        M4OSA_UInt32 size  = sizeY + 2*sizeU;
+        M4OSA_UInt8* pData = M4OSA_NULL;
+        buffer = new MediaBuffer((size_t)size);
+        pData = (M4OSA_UInt8*)buffer->data() + buffer->range_offset();
+
+        // Prepare the output image for pre-processing
+        pOutPlane[0].u_width   = pEncoderContext->mCodecParams->FrameWidth;
+        pOutPlane[0].u_height  = pEncoderContext->mCodecParams->FrameHeight;
+        pOutPlane[0].u_topleft = 0;
+        pOutPlane[0].u_stride  = pOutPlane[0].u_width;
+        pOutPlane[1].u_width   = pOutPlane[0].u_width/2;
+        pOutPlane[1].u_height  = pOutPlane[0].u_height/2;
+        pOutPlane[1].u_topleft = 0;
+        pOutPlane[1].u_stride  = pOutPlane[0].u_stride/2;
+        pOutPlane[2].u_width   = pOutPlane[1].u_width;
+        pOutPlane[2].u_height  = pOutPlane[1].u_height;
+        pOutPlane[2].u_topleft = 0;
+        pOutPlane[2].u_stride  = pOutPlane[1].u_stride;
+
+        switch( pEncoderContext->mEncoderColorFormat ) {
+            case OMX_COLOR_FormatYUV420Planar:
+                pOutPlane[0].pac_data = pData;
+                pOutPlane[1].pac_data = pData + sizeY;
+                pOutPlane[2].pac_data = pData + sizeY + sizeU;
+            break;
+            case OMX_COLOR_FormatYUV420SemiPlanar:
+                pOutPlane[0].pac_data = pData;
+                SAFE_MALLOC(pOutPlane[1].pac_data, M4VIFI_UInt8,
+                    pOutPlane[1].u_height*pOutPlane[1].u_stride,"OutputPlaneU");
+                SAFE_MALLOC(pOutPlane[2].pac_data, M4VIFI_UInt8,
+                    pOutPlane[2].u_height*pOutPlane[2].u_stride,"OutputPlaneV");
+            break;
+            default:
+                LOGV("VideoEditorVideoEncoder_processInputBuffer : unsupported "
+                    "color format 0x%X", pEncoderContext->mEncoderColorFormat);
+                VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
+            break;
+        }
+
+        // Apply pre-processing
+        err = pEncoderContext->mPreProcFunction(
+            pEncoderContext->mPreProcContext, M4OSA_NULL, pOutPlane);
+        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+        // Convert to MediaBuffer format if necessary
+        if( OMX_COLOR_FormatYUV420SemiPlanar == \
+                pEncoderContext->mEncoderColorFormat ) {
+            M4OSA_UInt8* pTmpData = M4OSA_NULL;
+            pTmpData = pData + sizeY;
+            // Highly unoptimized copy...
+            for( M4OSA_UInt32 i=0; i<sizeU; i++ ) {
+                *pTmpData = pOutPlane[2].pac_data[i]; pTmpData++;
+                *pTmpData = pOutPlane[1].pac_data[i]; pTmpData++;
+            }
+        }
+
+        // Set the metadata
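+        // Cts is in milliseconds in the editor engine; kKeyTime expects
+        // microseconds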
+        buffer->meta_data()->setInt64(kKeyTime, (int64_t)(Cts*1000));
+    }
+
+    // Push the buffer to the source, a NULL buffer, notifies the source of EOS
+    nbBuffer = pEncoderContext->mEncoderSource->storeBuffer(buffer);
+    if ( VIDEOEDITOR_MIN_BUFFER_NB > nbBuffer ) {
+        LOGV("VideoEncoder_processInputBuffer: not enough source buffers: %d",
+            nbBuffer);
+        err = M4WAR_SF_LOW_BUFFER;
+    }
+
+cleanUp:
+    if ( OMX_COLOR_FormatYUV420SemiPlanar == \
+            pEncoderContext->mEncoderColorFormat ) {
+        // Only the U and V planes were allocated above; the Y plane points
+        // into the MediaBuffer and must not be freed here
+        if ( pOutPlane[1].pac_data ) {
+            SAFE_FREE(pOutPlane[1].pac_data);
+        }
+        if ( pOutPlane[2].pac_data ) {
+            SAFE_FREE(pOutPlane[2].pac_data);
+        }
+    }
+    if ( (M4NO_ERROR == err) || (M4WAR_SF_LOW_BUFFER == err) ) {
+        LOGV("VideoEditorVideoEncoder_processInputBuffer no error (0x%X)", err);
+    } else {
+        if( NULL != buffer ) {
+            buffer->release();
+        }
+        LOGV("VideoEditorVideoEncoder_processInputBuffer ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoEncoder_processInputBuffer end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_processOutputBuffer(
+        M4ENCODER_Context pContext, MediaBuffer* buffer) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+    M4OSA_UInt32 Cts = 0;
+    int32_t i32Tmp = 0;
+    int64_t i64Tmp = 0;
+    status_t result = OK;
+
+    LOGV("VideoEditorVideoEncoder_processOutputBuffer begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != buffer,   M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+
+    // Process the returned AU
+    if ( 0 == buffer->range_length() ) {
+        // Encoder has no data yet, nothing unusual
+        LOGV("VideoEditorVideoEncoder_processOutputBuffer : buffer is empty");
+        goto cleanUp;
+    }
+    VIDEOEDITOR_CHECK(0 == ((M4OSA_UInt32)buffer->data())%4, M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(buffer->meta_data().get(), M4ERR_PARAMETER);
+    if ( buffer->meta_data()->findInt32(kKeyIsCodecConfig, &i32Tmp) && i32Tmp ){
+#if 1
+        {   // Display the DSI
+            LOGV("VideoEditorVideoEncoder_processOutputBuffer DSI %d",
+                buffer->range_length());
+            uint8_t* tmp = (uint8_t*)(buffer->data());
+            for( uint32_t i=0; i<buffer->range_length(); i++ ) {
+                LOGV("DSI [%d] %.2X", i, tmp[i]);
+            }
+        }
+#endif
+
+#ifndef VIDEOEDITOR_ENCODER_GET_DSI_AT_CREATION
+        VIDEOEDITOR_CHECK(M4OSA_NULL == pEncoderContext->mHeader.pBuf,
+            M4ERR_STATE);
+        if ( M4ENCODER_kH264 == pEncoderContext->mFormat ) {
+            result = buildAVCCodecSpecificData(
+                (uint8_t**)(&(pEncoderContext->mHeader.pBuf)),
+                (size_t*)(&(pEncoderContext->mHeader.Size)),
+                (const uint8_t *)buffer->data() + buffer->range_offset(),
+                buffer->range_length(),
+                pEncoderContext->mEncoder->getFormat().get());
+        } else {
+            pEncoderContext->mHeader.Size =
+                (M4OSA_UInt32)buffer->range_length();
+            SAFE_MALLOC(pEncoderContext->mHeader.pBuf, M4OSA_Int8,
+                pEncoderContext->mHeader.Size, "Encoder header");
+            M4OSA_memcpy(pEncoderContext->mHeader.pBuf,
+                (M4OSA_MemAddr8)(buffer->data())+buffer->range_offset(),
+                pEncoderContext->mHeader.Size);
+        }
+#endif /* VIDEOEDITOR_ENCODER_GET_DSI_AT_CREATION */
+    } else {
+        // Check the CTS
+        VIDEOEDITOR_CHECK(buffer->meta_data()->findInt64(kKeyTime, &i64Tmp),
+            M4ERR_STATE);
+
+        pEncoderContext->mNbOutputFrames++;
+        if ( 0 > pEncoderContext->mFirstOutputCts ) {
+            pEncoderContext->mFirstOutputCts = i64Tmp;
+        }
+        pEncoderContext->mLastOutputCts = i64Tmp;
+
+        Cts = (M4OSA_Int32)(i64Tmp/1000);
+        LOGV("[TS_CHECK] VI/ENC WRITE frame %d @ %lld -> %d (last %d)",
+            pEncoderContext->mNbOutputFrames, i64Tmp, Cts,
+            pEncoderContext->mLastCTS);
+        if ( Cts < pEncoderContext->mLastCTS ) {
+            LOGV("VideoEncoder_processOutputBuffer WARNING : Cts is going "
+            "backwards %d < %d", Cts, pEncoderContext->mLastCTS);
+            goto cleanUp;
+        }
+        LOGV("VideoEditorVideoEncoder_processOutputBuffer : %d %d",
+            Cts, pEncoderContext->mLastCTS);
+
+        // Retrieve the AU container
+        err = pEncoderContext->mWriterDataInterface->pStartAU(
+            pEncoderContext->mWriterDataInterface->pWriterContext,
+            pEncoderContext->mAccessUnit->stream->streamID,
+            pEncoderContext->mAccessUnit);
+        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+        // Format the AU
+        VIDEOEDITOR_CHECK(
+            buffer->range_length() <= pEncoderContext->mAccessUnit->size,
+            M4ERR_PARAMETER);
+        // Remove H264 AU start code
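+        // (MP4/3GP containers store NAL units without Annex-B start codes,
+        // so the leading 0x00000001 prefix emitted by the encoder is dropped
+        // before the AU is copied or post-processed below)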
+        if ( M4ENCODER_kH264 == pEncoderContext->mFormat ) {
+            if (!memcmp((const uint8_t *)buffer->data() + \
+                    buffer->range_offset(), "\x00\x00\x00\x01", 4) ) {
+                buffer->set_range(buffer->range_offset() + 4,
+                    buffer->range_length() - 4);
+            }
+        }
+
+        if ( (M4ENCODER_kH264 == pEncoderContext->mFormat) &&
+            (M4OSA_NULL != pEncoderContext->mH264NALUPostProcessFct) ) {
+            // H264 trimming case, NALU post processing is needed
+            M4OSA_Int32 outputSize = pEncoderContext->mAccessUnit->size;
+            err = pEncoderContext->mH264NALUPostProcessFct(
+                pEncoderContext->mH264NALUPostProcessCtx,
+                (M4OSA_UInt8*)buffer->data()+buffer->range_offset(),
+                buffer->range_length(),
+                (M4OSA_UInt8*)pEncoderContext->mAccessUnit->dataAddress,
+                &outputSize);
+            VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+            pEncoderContext->mAccessUnit->size = (M4OSA_UInt32)outputSize;
+        } else {
+            // The AU can just be copied
+            M4OSA_memcpy((M4OSA_MemAddr8)pEncoderContext->mAccessUnit->\
+                dataAddress, (M4OSA_MemAddr8)(buffer->data())+buffer->\
+                range_offset(), buffer->range_length());
+            pEncoderContext->mAccessUnit->size =
+                (M4OSA_UInt32)buffer->range_length();
+        }
+
+        if ( buffer->meta_data()->findInt32(kKeyIsSyncFrame,&i32Tmp) && i32Tmp){
+            pEncoderContext->mAccessUnit->attribute = AU_RAP;
+        } else {
+            pEncoderContext->mAccessUnit->attribute = AU_B_Frame;
+        }
+        pEncoderContext->mLastCTS = Cts;
+        pEncoderContext->mAccessUnit->CTS = Cts;
+        pEncoderContext->mAccessUnit->DTS = Cts;
+
+        LOGV("VideoEditorVideoEncoder_processOutputBuffer: AU @ 0x%X 0x%X %d %d",

+            pEncoderContext->mAccessUnit->dataAddress,
+            *pEncoderContext->mAccessUnit->dataAddress,

+            pEncoderContext->mAccessUnit->size,
+            pEncoderContext->mAccessUnit->CTS);
+
+        // Write the AU
+        err = pEncoderContext->mWriterDataInterface->pProcessAU(
+            pEncoderContext->mWriterDataInterface->pWriterContext,
+            pEncoderContext->mAccessUnit->stream->streamID,
+            pEncoderContext->mAccessUnit);
+        VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+    }
+
+cleanUp:
+    buffer->release();
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoEncoder_processOutputBuffer no error");
+    } else {
+        SAFE_FREE(pEncoderContext->mHeader.pBuf);
+        pEncoderContext->mHeader.Size = 0;
+        LOGV("VideoEditorVideoEncoder_processOutputBuffer ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoEncoder_processOutputBuffer end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_encode(M4ENCODER_Context pContext,
+        M4VIFI_ImagePlane* pInPlane, M4OSA_Double Cts,
+        M4ENCODER_FrameMode FrameMode) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+    status_t result = OK;
+    MediaBuffer* outputBuffer = NULL;
+
+    LOGV("VideoEditorVideoEncoder_encode 0x%X %f %d", pInPlane, Cts, FrameMode);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+    if ( STARTED == pEncoderContext->mState ) {
+        pEncoderContext->mState = BUFFERING;
+    }
+    VIDEOEDITOR_CHECK(
+        (BUFFERING | READING) & pEncoderContext->mState, M4ERR_STATE);
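+    // Simple prefetch state machine: the context moves from STARTED to
+    // BUFFERING on the first encode() call, then from BUFFERING to READING
+    // once the source holds at least VIDEOEDITOR_MIN_BUFFER_NB buffers
+    // (i.e. processInputBuffer() no longer returns M4WAR_SF_LOW_BUFFER).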
+
+    pEncoderContext->mNbInputFrames++;
+    if ( 0 > pEncoderContext->mFirstInputCts ) {
+        pEncoderContext->mFirstInputCts = Cts;
+    }
+    pEncoderContext->mLastInputCts = Cts;
+
+    LOGV("VideoEditorVideoEncoder_encode 0x%X %d %f (%d)", pInPlane, FrameMode,
+        Cts, pEncoderContext->mLastCTS);
+
+    // Push the input buffer to the encoder source
+    err = VideoEditorVideoEncoder_processInputBuffer(pEncoderContext, Cts,
+        M4OSA_FALSE);
+    VIDEOEDITOR_CHECK((M4NO_ERROR == err) || (M4WAR_SF_LOW_BUFFER == err), err);
+
+    // Notify the source in case of EOS
+    if ( M4ENCODER_kLastFrame == FrameMode ) {
+        err = VideoEditorVideoEncoder_processInputBuffer(
+            pEncoderContext, 0, M4OSA_TRUE);
+        VIDEOEDITOR_CHECK((M4NO_ERROR == err) || (M4WAR_SF_LOW_BUFFER == err),
+            err);
+    }
+
+    if ( BUFFERING == pEncoderContext->mState ) {
+        if ( M4WAR_SF_LOW_BUFFER == err ) {
+            // Insufficient prefetch, do not encode
+            err = M4NO_ERROR;
+            goto cleanUp;
+        } else {
+            // Prefetch is complete, start reading
+            pEncoderContext->mState = READING;
+        }
+    }
+    // Read
+    result = pEncoderContext->mEncoder->read(&outputBuffer, NULL);
+    if( OK != result ) {
+        LOGV("VideoEditorVideoEncoder_encode: encoder returns 0x%X", result);
+    }
+
+    if( ERROR_END_OF_STREAM == result ) {
+        if( outputBuffer != NULL ) {
+            LOGV("VideoEditorVideoEncoder_encode : EOS w/ buffer");
+        }
+        VIDEOEDITOR_CHECK(0 == VIDEOEDITOR_MIN_BUFFER_NB, M4ERR_STATE);
+        // No output provided here, just exit
+        goto cleanUp;
+    }
+    VIDEOEDITOR_CHECK((OK == result) || (ERROR_END_OF_STREAM == result),
+        M4ERR_STATE);
+
+    // Provide the encoded AU to the writer
+    err = VideoEditorVideoEncoder_processOutputBuffer(pEncoderContext,
+        outputBuffer);
+    VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoEncoder_encode no error");
+    } else {
+        LOGV("VideoEditorVideoEncoder_encode ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoEncoder_encode end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_start(M4ENCODER_Context pContext) {
+    M4OSA_ERR                  err             = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+    status_t                   result          = OK;
+
+    LOGV("VideoEditorVideoEncoder_start begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+    VIDEOEDITOR_CHECK(OPENED == pEncoderContext->mState, M4ERR_STATE);
+
+    pEncoderContext->mNbInputFrames  = 0;
+    pEncoderContext->mFirstInputCts  = -1.0;
+    pEncoderContext->mLastInputCts   = -1.0;
+    pEncoderContext->mNbOutputFrames = 0;
+    pEncoderContext->mFirstOutputCts = -1;
+    pEncoderContext->mLastOutputCts  = -1;
+
+    result = pEncoderContext->mEncoder->start();
+    VIDEOEDITOR_CHECK(OK == result, M4ERR_STATE);
+
+    // Set the new state
+    pEncoderContext->mState = STARTED;
+
+cleanUp:
+    if ( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoEncoder_start no error");
+    } else {
+        LOGV("VideoEditorVideoEncoder_start ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoEncoder_start end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_stop(M4ENCODER_Context pContext) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+    MediaBuffer* outputBuffer = NULL;
+    status_t result = OK;
+
+    LOGV("VideoEditorVideoEncoder_stop begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+
+    // Process the remaining buffers if necessary
+    if ( (BUFFERING | READING) & pEncoderContext->mState ) {
+        // Send EOS again just in case
+        err = VideoEditorVideoEncoder_processInputBuffer(pEncoderContext, 0,
+            M4OSA_TRUE);
+        VIDEOEDITOR_CHECK((M4NO_ERROR == err) || (M4WAR_SF_LOW_BUFFER == err),
+            err);
+        while( OK == result ) {
+            result = pEncoderContext->mEncoder->read(&outputBuffer, NULL);
+            if ( OK == result ) {
+                err = VideoEditorVideoEncoder_processOutputBuffer(
+                    pEncoderContext, outputBuffer);
+                VIDEOEDITOR_CHECK(M4NO_ERROR == err, err);
+            }
+        }
+        pEncoderContext->mState = STARTED;
+    }
+
+    // Stop the graph module if necessary
+    if ( STARTED == pEncoderContext->mState ) {
+        pEncoderContext->mEncoder->stop();
+        pEncoderContext->mState = OPENED;
+    }
+
+    if ( pEncoderContext->mNbInputFrames != pEncoderContext->mNbOutputFrames ) {
+        LOGV("VideoEditorVideoEncoder_stop: some frames were not encoded %d %d",
+            pEncoderContext->mNbInputFrames, pEncoderContext->mNbOutputFrames);
+    }
+
+cleanUp:
+    if ( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoEncoder_stop no error");
+    } else {
+        LOGV("VideoEditorVideoEncoder_stop ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoEncoder_stop end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_regulBitRate(M4ENCODER_Context pContext) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    LOGV("VideoEditorVideoEncoder_regulBitRate begin");
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+
+    LOGV("VideoEditorVideoEncoder_regulBitRate : THIS IS NOT IMPLEMENTED");
+
+cleanUp:
+    if ( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoEncoder_regulBitRate no error");
+    } else {
+        LOGV("VideoEditorVideoEncoder_regulBitRate ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoEncoder_regulBitRate end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_setOption(M4ENCODER_Context pContext,
+        M4OSA_UInt32 optionID, M4OSA_DataOption optionValue) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    LOGV("VideoEditorVideoEncoder_setOption start optionID 0x%X", optionID);
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+
+    switch( optionID ) {
+        case M4ENCODER_kOptionID_SetH264ProcessNALUfctsPtr:
+            pEncoderContext->mH264NALUPostProcessFct =
+                (H264MCS_ProcessEncodedNALU_fct*)optionValue;
+            break;
+        case M4ENCODER_kOptionID_H264ProcessNALUContext:
+            pEncoderContext->mH264NALUPostProcessCtx =
+                (M4OSA_Context)optionValue;
+            break;
+        default:
+            LOGV("VideoEditorVideoEncoder_setOption: unsupported optionId 0x%X",
+                optionID);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
+            break;
+    }
+
+cleanUp:
+    if ( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoEncoder_setOption no error");
+    } else {
+        LOGV("VideoEditorVideoEncoder_setOption ERROR 0x%X", err);
+    }
+    LOGV("VideoEditorVideoEncoder_setOption end");
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_getOption(M4ENCODER_Context pContext,
+        M4OSA_UInt32 optionID, M4OSA_DataOption optionValue) {
+    M4OSA_ERR err = M4NO_ERROR;
+    VideoEditorVideoEncoder_Context* pEncoderContext = M4OSA_NULL;
+
+    LOGV("VideoEditorVideoEncoder_getOption begin optionId 0x%X", optionID);
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pContext, M4ERR_PARAMETER);
+    pEncoderContext = (VideoEditorVideoEncoder_Context*)pContext;
+
+    switch( optionID ) {
+        case M4ENCODER_kOptionID_EncoderHeader:
+            VIDEOEDITOR_CHECK(
+                    M4OSA_NULL != pEncoderContext->mHeader.pBuf, M4ERR_STATE);
+            *(M4ENCODER_Header**)optionValue = &(pEncoderContext->mHeader);
+            break;
+        default:
+            LOGV("VideoEditorVideoEncoder_getOption: unsupported optionId 0x%X",
+                optionID);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_BAD_OPTION_ID);
+            break;
+    }
+
+cleanUp:
+    if ( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoEncoder_getOption no error");
+    } else {
+        LOGV("VideoEditorVideoEncoder_getOption ERROR 0x%X", err);
+    }
+    return err;
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_getInterface(M4ENCODER_Format format,
+        M4ENCODER_Format* pFormat,
+        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode){
+    M4OSA_ERR err = M4NO_ERROR;
+
+    // Input parameters check
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pFormat,           M4ERR_PARAMETER);
+    VIDEOEDITOR_CHECK(M4OSA_NULL != pEncoderInterface, M4ERR_PARAMETER);
+
+    LOGV("VideoEditorVideoEncoder_getInterface begin 0x%x 0x%x %d", pFormat,
+        pEncoderInterface, mode);
+
+    SAFE_MALLOC(*pEncoderInterface, M4ENCODER_GlobalInterface, 1,
+        "VideoEditorVideoEncoder");
+
+    *pFormat = format;
+
+    switch( format ) {
+        case M4ENCODER_kH263:
+            (*pEncoderInterface)->pFctInit =
+                VideoEditorVideoEncoder_init_H263;
+            break;
+        case M4ENCODER_kMPEG4:
+            (*pEncoderInterface)->pFctInit =
+                VideoEditorVideoEncoder_init_MPEG4;
+            break;
+        case M4ENCODER_kH264:
+            (*pEncoderInterface)->pFctInit =
+                VideoEditorVideoEncoder_init_H264;
+            break;
+        default:
+            LOGV("VideoEditorVideoEncoder_getInterface : unsupported format %d",
+                format);
+            VIDEOEDITOR_CHECK(M4OSA_FALSE, M4ERR_PARAMETER);
+        break;
+    }
+    (*pEncoderInterface)->pFctOpen         = VideoEditorVideoEncoder_open;
+    (*pEncoderInterface)->pFctStart        = VideoEditorVideoEncoder_start;
+    (*pEncoderInterface)->pFctStop         = VideoEditorVideoEncoder_stop;
+    (*pEncoderInterface)->pFctPause        = M4OSA_NULL;
+    (*pEncoderInterface)->pFctResume       = M4OSA_NULL;
+    (*pEncoderInterface)->pFctClose        = VideoEditorVideoEncoder_close;
+    (*pEncoderInterface)->pFctCleanup      = VideoEditorVideoEncoder_cleanup;
+    (*pEncoderInterface)->pFctRegulBitRate =
+        VideoEditorVideoEncoder_regulBitRate;
+    (*pEncoderInterface)->pFctEncode       = VideoEditorVideoEncoder_encode;
+    (*pEncoderInterface)->pFctSetOption    = VideoEditorVideoEncoder_setOption;
+    (*pEncoderInterface)->pFctGetOption    = VideoEditorVideoEncoder_getOption;
+
+cleanUp:
+    if( M4NO_ERROR == err ) {
+        LOGV("VideoEditorVideoEncoder_getInterface no error");
+    } else {
+        *pEncoderInterface = M4OSA_NULL;
+        LOGV("VideoEditorVideoEncoder_getInterface ERROR 0x%X", err);
+    }
+    return err;
+}
+
+extern "C" {
+
+M4OSA_ERR VideoEditorVideoEncoder_getInterface_H263(M4ENCODER_Format* pFormat,
+        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode){
+    return VideoEditorVideoEncoder_getInterface(M4ENCODER_kH263, pFormat,
+            pEncoderInterface, mode);
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_getInterface_MPEG4(M4ENCODER_Format* pFormat,
+        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode){
+    return VideoEditorVideoEncoder_getInterface(M4ENCODER_kMPEG4, pFormat,
+           pEncoderInterface, mode);
+}
+
+M4OSA_ERR VideoEditorVideoEncoder_getInterface_H264(M4ENCODER_Format* pFormat,
+        M4ENCODER_GlobalInterface** pEncoderInterface, M4ENCODER_OpenMode mode){
+    return VideoEditorVideoEncoder_getInterface(M4ENCODER_kH264, pFormat,
+           pEncoderInterface, mode);
+
+}
+
+}  // extern "C"
+
+}  // namespace android
diff --git a/libvideoeditor/vss/video_filters/Android.mk b/libvideoeditor/vss/video_filters/Android.mk
new file mode 100755
index 0000000..e2d2111
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/Android.mk
@@ -0,0 +1,5 @@
+#LOCAL_PATH:= $(call my-dir)
+#include $(CLEAR_VARS)
+
+#include $(call all-makefiles-under,$(LOCAL_PATH))
+include $(call all-subdir-makefiles)
diff --git a/libvideoeditor/vss/video_filters/src/Android.mk b/libvideoeditor/vss/video_filters/src/Android.mk
new file mode 100755
index 0000000..3c1f4aa
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/Android.mk
@@ -0,0 +1,63 @@
+#
+# Copyright (C) 2011 NXP Software
+# Copyright (C) 2011 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+#
+# libvideofilters
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE:= libvideoeditor_videofilters
+
+LOCAL_SRC_FILES:=          \
+      M4VIFI_BGR565toYUV420.c \
+      M4VIFI_ResizeRGB888toRGB888.c \
+      M4VIFI_Clip.c \
+      M4VIFI_ResizeYUVtoBGR565.c \
+      M4VIFI_RGB888toYUV420.c \
+      M4VFL_transition.c
+
+LOCAL_MODULE_TAGS := development
+
+LOCAL_SHARED_LIBRARIES := libcutils libutils
+
+LOCAL_STATIC_LIBRARIES := \
+    libvideoeditor_osal
+
+LOCAL_C_INCLUDES += \
+    $(TOP)/frameworks/media/libvideoeditor/osal/inc \
+    $(TOP)/frameworks/media/libvideoeditor/vss/common/inc
+
+ifeq ($(TARGET_SIMULATOR),true)
+else
+    LOCAL_SHARED_LIBRARIES += libdl
+endif
+
+# All of the shared libraries we link against.
+LOCAL_LDLIBS := \
+    -lpthread -ldl
+
+LOCAL_CFLAGS += -Wno-multichar
+
+# Don't prelink this library.  For more efficient code, you may want
+# to add this library to the prelink map and set this to true.
+LOCAL_PRELINK_MODULE := false
+
+include $(BUILD_STATIC_LIBRARY)
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VFL_transition.c b/libvideoeditor/vss/video_filters/src/M4VFL_transition.c
new file mode 100755
index 0000000..7d7ca44
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VFL_transition.c
@@ -0,0 +1,900 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file        M4VFL_transition.c
+ * @brief       Video effect filters: luma modification, curtains and YUV420 blending
+ ******************************************************************************
+*/
+
+/**
+ * OSAL (memset and memcpy) ***/
+#include "M4OSA_Memory.h"
+
+#include "M4VFL_transition.h"
+
+#include <string.h>
+
+#ifdef LITTLE_ENDIAN
+#define M4VFL_SWAP_SHORT(a) a = ((a & 0xFF) << 8) | ((a & 0xFF00) >> 8)
+#else
+#define M4VFL_SWAP_SHORT(a)
+#endif
+
+#define LUM_FACTOR_MAX 10
+
+
+unsigned char M4VFL_modifyLumaByStep(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+                                     M4VFL_ModifLumParam *lum_param, void *user_data)
+{
+#if 1
+    unsigned short *p_src, *p_dest, *p_src_line, *p_dest_line;
+    unsigned long pix_src;
+    unsigned long u_outpx, u_outpx2;
+    unsigned long u_width, u_stride, u_stride_out,u_height, pix;
+    unsigned long lf1, lf2, lf3;
+    long i, j;
+
+    if (lum_param->copy_chroma != 0)
+    {
+        /* copy chroma plane */
+
+    }
+
+    /* apply luma factor */
+    u_width = plane_in[0].u_width;
+    u_height = plane_in[0].u_height;
+    u_stride = (plane_in[0].u_stride >> 1);
+    u_stride_out = (plane_out[0].u_stride >> 1);
+    p_dest = (unsigned short *) &plane_out[0].pac_data[plane_out[0].u_topleft];
+    p_src = (unsigned short *) &plane_in[0].pac_data[plane_in[0].u_topleft];
+    p_dest_line = p_dest;
+    p_src_line = p_src;
+
+    switch(lum_param->lum_factor)
+    {
+    case 0:
+        /* very specific case : set luma plane to 16 */
+        for (j = u_height; j != 0; j--)
+        {
+            M4OSA_memset((M4OSA_MemAddr8)p_dest, u_width, 16);
+            p_dest += u_stride_out;
+        }
+        return 0;
+
+    case 1:
+        /* 0.25 */
+        lf1 = 6; lf2 = 6; lf3 = 7;
+        break;
+    case 2:
+        /* 0.375 */
+        lf1 = 7; lf2 = 7; lf3 = 7;
+        break;
+    case 3:
+        /* 0.5 */
+        lf1 = 7; lf2 = 7; lf3 = 8;
+        break;
+    case 4:
+        /* 0.625 */
+        lf1 = 7; lf2 = 8; lf3 = 8;
+        break;
+    case 5:
+        /* 0.75 */
+        lf1 = 8; lf2 = 8; lf3 = 8;
+        break;
+    case 6:
+        /* 0.875 */
+        lf1 = 9; lf2 = 8; lf3 = 7;
+        break;
+    default:
+        lf1 = 8; lf2 = 8; lf3 = 9;
+        break;
+    }
+
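+    /* The three shifts implement a fixed point multiply:
+     * out = (pix * (2^lf1 + 2^lf2 + 2^lf3)) >> LUM_FACTOR_MAX.
+     * For example lum_factor 3 gives (128 + 128 + 256) / 1024 = 0.5,
+     * matching the 0.5 attenuation noted in the case above. */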
+    for (j = u_height; j != 0; j--)
+    {
+        p_dest = p_dest_line;
+        p_src = p_src_line;
+        for (i = (u_width >> 1); i != 0; i--)
+        {
+            pix_src = (unsigned long) *p_src++;
+            pix = pix_src & 0xFF;
+            u_outpx = (((pix << lf1) + (pix << lf2) + (pix << lf3) ) >> LUM_FACTOR_MAX);
+            pix = ((pix_src & 0xFF00) >> 8);
+            u_outpx2 = ((((pix << lf1) + (pix << lf2) + (pix << lf3) ) >> LUM_FACTOR_MAX)<< 8) ;
+            *p_dest++ = (unsigned short) (u_outpx2 | u_outpx);
+        }
+        p_dest_line += u_stride_out;
+        p_src_line += u_stride;
+    }
+#else /* if 0 */
+    unsigned char *p_src, *p_dest, *p_src_line, *p_dest_line;
+    unsigned long u_width, u_stride, u_stride_out,u_height, pix;
+    unsigned long lf1, lf2, lf3;
+    long i, j;
+
+    u_width = plane_in[0].u_width;
+    u_height = plane_in[0].u_height;
+    u_stride = (plane_in[0].u_stride);
+    u_stride_out = (plane_out[0].u_stride);
+    p_dest = (unsigned char *) &plane_out[0].pac_data[plane_out[0].u_topleft];
+    p_src = (unsigned char *) &plane_in[0].pac_data[plane_in[0].u_topleft];
+    p_dest_line = p_dest;
+    p_src_line = p_src;
+
+    switch(lum_param->lum_factor)
+    {
+    case 0:
+        /* very specific case : set luma plane to 16 */
+        for (j = u_height; j != 0; j--)
+        {
+            M4OSA_memset((M4OSA_MemAddr8)p_dest, u_width, 16);
+            p_dest += u_stride_out;
+        }
+        return 0;
+
+    case 1:
+        /* 0.25 */
+        lf1 = 6; lf2 = 6; lf3 = 7;
+        break;
+    case 2:
+        /* 0.375 */
+        lf1 = 7; lf2 = 7; lf3 = 7;
+        break;
+    case 3:
+        /* 0.5 */
+        lf1 = 7; lf2 = 7; lf3 = 8;
+        break;
+    case 4:
+        /* 0.625 */
+        lf1 = 7; lf2 = 8; lf3 = 8;
+        break;
+    case 5:
+        /* 0.75 */
+        lf1 = 8; lf2 = 8; lf3 = 8;
+        break;
+    case 6:
+        /* 0.875 */
+        lf1 = 9; lf2 = 8; lf3 = 7;
+        break;
+    default:
+        lf1 = 8; lf2 = 8; lf3 = 9;
+        break;
+    }
+
+    if (lum_param->copy_chroma != 0)
+    {
+        /* copy chroma plane */
+
+    }
+
+
+    for (j = u_height; j != 0; j--)
+    {
+        p_dest = p_dest_line;
+        p_src = p_src_line;
+        for (i = (u_width); i != 0; i--)
+        {
+            pix = (unsigned long) *p_src++;
+            *p_dest++ = (unsigned char) (((pix << lf1) + (pix << lf2) + (pix << lf3) ) >>\
+                 LUM_FACTOR_MAX);
+        }
+        p_dest_line += u_stride_out;
+        p_src_line += u_stride;
+    }
+#endif /* if 0 */
+    return 0;
+}
+
+
+unsigned char M4VFL_modifyLumaWithScale(M4ViComImagePlane *plane_in,
+                                         M4ViComImagePlane *plane_out,
+                                         unsigned long lum_factor,
+                                         void *user_data)
+{
+    unsigned short *p_src, *p_dest, *p_src_line, *p_dest_line;
+    unsigned char *p_csrc, *p_cdest, *p_csrc_line, *p_cdest_line;
+    unsigned long pix_src;
+    unsigned long u_outpx, u_outpx2;
+    unsigned long u_width, u_stride, u_stride_out,u_height, pix;
+    long i, j;
+
+    /* copy or filter chroma */
+    u_width = plane_in[1].u_width;
+    u_height = plane_in[1].u_height;
+    u_stride = plane_in[1].u_stride;
+    u_stride_out = plane_out[1].u_stride;
+    p_cdest_line = (unsigned char *) &plane_out[1].pac_data[plane_out[1].u_topleft];
+    p_csrc_line = (unsigned char *) &plane_in[1].pac_data[plane_in[1].u_topleft];
+
+    if (lum_factor > 256)
+    {
+        p_cdest = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
+        p_csrc = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft];
+        /* copy chroma */
+        for (j = u_height; j != 0; j--)
+        {
+            for (i = u_width; i != 0; i--)
+            {
+                M4OSA_memcpy((M4OSA_MemAddr8)p_cdest_line, (M4OSA_MemAddr8)p_csrc_line, u_width);
+                M4OSA_memcpy((M4OSA_MemAddr8)p_cdest,(M4OSA_MemAddr8) p_csrc, u_width);
+            }
+            p_cdest_line += u_stride_out;
+            p_cdest += u_stride_out;
+            p_csrc_line += u_stride;
+            p_csrc += u_stride;
+        }
+    }
+    else
+    {
+        /* filter chroma */
+        pix = (1024 - lum_factor) << 7;
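+        /* pix holds 128 * (1024 - lum_factor), so each chroma sample below is
+         * blended towards the neutral value 128:
+         * out = (128 * (1024 - lum_factor) + in * lum_factor) / 1024,
+         * which fades the colour out together with the luma. */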
+        for (j = u_height; j != 0; j--)
+        {
+            p_cdest = p_cdest_line;
+            p_csrc = p_csrc_line;
+            for (i = u_width; i != 0; i--)
+            {
+                *p_cdest++ = ((pix + (*p_csrc++ & 0xFF) * lum_factor) >> LUM_FACTOR_MAX);
+            }
+            p_cdest_line += u_stride_out;
+            p_csrc_line += u_stride;
+        }
+        p_cdest_line = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
+        p_csrc_line = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft];
+        for (j = u_height; j != 0; j--)
+        {
+            p_cdest = p_cdest_line;
+            p_csrc = p_csrc_line;
+            for (i = u_width; i != 0; i--)
+            {
+                *p_cdest++ = ((pix + (*p_csrc++ & 0xFF) * lum_factor) >> LUM_FACTOR_MAX);
+            }
+            p_cdest_line += u_stride_out;
+            p_csrc_line += u_stride;
+        }
+    }
+    /* apply luma factor */
+    u_width = plane_in[0].u_width;
+    u_height = plane_in[0].u_height;
+    u_stride = (plane_in[0].u_stride >> 1);
+    u_stride_out = (plane_out[0].u_stride >> 1);
+    p_dest = (unsigned short *) &plane_out[0].pac_data[plane_out[0].u_topleft];
+    p_src = (unsigned short *) &plane_in[0].pac_data[plane_in[0].u_topleft];
+    p_dest_line = p_dest;
+    p_src_line = p_src;
+
+    for (j = u_height; j != 0; j--)
+    {
+        p_dest = p_dest_line;
+        p_src = p_src_line;
+        for (i = (u_width >> 1); i != 0; i--)
+        {
+            pix_src = (unsigned long) *p_src++;
+            pix = pix_src & 0xFF;
+            u_outpx = ((pix * lum_factor) >> LUM_FACTOR_MAX);
+            pix = ((pix_src & 0xFF00) >> 8);
+            u_outpx2 = (((pix * lum_factor) >> LUM_FACTOR_MAX)<< 8) ;
+            *p_dest++ = (unsigned short) (u_outpx2 | u_outpx);
+        }
+        p_dest_line += u_stride_out;
+        p_src_line += u_stride;
+    }
+
+    return 0;
+}
+
+
+unsigned char M4VFL_applyClosingCurtain(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+                                         unsigned short curtain_factor, void *user_data)
+{
+    unsigned char *p_src, *p_srcu, *p_srcv,*p_dest, *p_destu, *p_destv;
+    unsigned long u_width, u_widthuv, u_stride_out, u_stride_out_uv,u_stride, u_stride_uv,u_height;
+    long j;
+    unsigned long nb_black_lines;
+
+    u_width = plane_in[0].u_width;
+    u_height = plane_in[0].u_height;
+    u_stride_out = plane_out[0].u_stride ;
+    u_stride_out_uv = plane_out[1].u_stride;
+    p_dest = (unsigned char *) &plane_out[0].pac_data[plane_out[0].u_topleft];
+    p_destu = (unsigned char *) &plane_out[1].pac_data[plane_out[1].u_topleft];
+    p_destv = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
+    u_widthuv = u_width >> 1;
+
+    /* nb_black_lines is even */
+    nb_black_lines = (unsigned long) ((curtain_factor >> 1) << 1);
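+    /* rounding down to an even count keeps the luma rows aligned with the
+     * half height chroma rows of the YUV420 planes (2 luma rows per chroma row) */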
+
+    for (j = (nb_black_lines >> 1); j != 0; j--)
+    { /* set black lines */
+        M4OSA_memset((M4OSA_MemAddr8)p_dest, u_width, 0);
+        p_dest += u_stride_out;
+        M4OSA_memset((M4OSA_MemAddr8)p_dest, u_width, 0);
+        p_dest += u_stride_out;
+        M4OSA_memset((M4OSA_MemAddr8)p_destu, u_widthuv, 128);
+        M4OSA_memset((M4OSA_MemAddr8)p_destv, u_widthuv, 128);
+        p_destu += u_stride_out_uv;
+        p_destv += u_stride_out_uv;
+    }
+
+    p_src = (unsigned char *) &plane_in[0].pac_data[plane_in[0].u_topleft +\
+         (nb_black_lines * plane_in[0].u_stride)];
+    p_srcu = (unsigned char *) &plane_in[1].pac_data[plane_in[1].u_topleft +\
+         ((nb_black_lines * plane_in[1].u_stride) >> 1)];
+    p_srcv = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft+\
+         ((nb_black_lines * plane_in[2].u_stride) >> 1)];
+    u_stride = plane_in[0].u_stride ;
+    u_stride_uv = plane_in[1].u_stride;
+
+    /* copy other lines from source */
+    for (j = (u_height - nb_black_lines) >> 1; j != 0; j--)
+    {
+        M4OSA_memcpy((M4OSA_MemAddr8)p_dest, (M4OSA_MemAddr8)p_src, u_width);
+        p_dest += u_stride_out;
+        p_src += u_stride;
+        M4OSA_memcpy((M4OSA_MemAddr8)p_dest,(M4OSA_MemAddr8) p_src, u_width);
+        p_dest += u_stride_out;
+        p_src += u_stride;
+        M4OSA_memcpy((M4OSA_MemAddr8)p_destu,(M4OSA_MemAddr8) p_srcu, u_widthuv);
+        M4OSA_memcpy((M4OSA_MemAddr8)p_destv, (M4OSA_MemAddr8)p_srcv, u_widthuv);
+        p_destu += u_stride_out_uv;
+        p_destv += u_stride_out_uv;
+        p_srcu += u_stride_uv;
+        p_srcv += u_stride_uv;
+    }
+
+    return 0;
+}
+
+
+unsigned char M4VFL_applyOpeningCurtain(M4ViComImagePlane *plane_in,
+                                         M4ViComImagePlane *plane_out,
+                                         unsigned short curtain_factor, void *user_data)
+{
+    unsigned char *p_src, *p_srcu, *p_srcv,*p_dest, *p_destu, *p_destv;
+    unsigned long u_width, u_widthuv, u_stride_out, u_stride_out_uv,u_stride, u_stride_uv,u_height;
+    long j;
+    unsigned long nb_black_lines;
+
+    u_width = plane_in[0].u_width;
+    u_height = plane_in[0].u_height;
+    u_stride_out = plane_out[0].u_stride ;
+    u_stride_out_uv = plane_out[1].u_stride;
+    p_dest = (unsigned char *) &plane_out[0].pac_data[plane_out[0].u_topleft];
+    p_destu = (unsigned char *) &plane_out[1].pac_data[plane_out[1].u_topleft];
+    p_destv = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
+    u_widthuv = u_width >> 1;
+
+    /* nb_black_lines is even */
+    nb_black_lines = (unsigned long) ((curtain_factor >> 1) << 1);
+
+    p_src = (unsigned char *) &plane_in[0].pac_data[plane_in[0].u_topleft +\
+         ((u_height - nb_black_lines) * plane_in[0].u_stride)];
+    p_srcu = (unsigned char *) &plane_in[1].pac_data[plane_in[1].u_topleft +\
+         (((u_height - nb_black_lines) * plane_in[1].u_stride) >> 1)];
+    p_srcv = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft+\
+         (((u_height - nb_black_lines) * plane_in[2].u_stride) >> 1)];
+    u_stride = plane_in[0].u_stride ;
+    u_stride_uv = plane_in[1].u_stride;
+
+    for (j = (u_height - nb_black_lines) >> 1; j != 0; j--)
+    {
+        M4OSA_memset((M4OSA_MemAddr8)p_dest, u_width, 0);
+        p_dest += u_stride_out;
+        M4OSA_memset((M4OSA_MemAddr8)p_dest, u_width, 0);
+        p_dest += u_stride_out;
+        M4OSA_memset((M4OSA_MemAddr8)p_destu, u_widthuv, 128);
+        M4OSA_memset((M4OSA_MemAddr8)p_destv, u_widthuv, 128);
+        p_destu += u_stride_out_uv;
+        p_destv += u_stride_out_uv;
+    }
+
+    for (j = (nb_black_lines >> 1); j != 0; j--)
+    {
+        M4OSA_memcpy((M4OSA_MemAddr8)p_dest,(M4OSA_MemAddr8) p_src, u_width);
+        p_dest += u_stride_out;
+        p_src += u_stride;
+        M4OSA_memcpy((M4OSA_MemAddr8)p_dest,(M4OSA_MemAddr8) p_src, u_width);
+        p_dest += u_stride_out;
+        p_src += u_stride;
+        M4OSA_memcpy((M4OSA_MemAddr8)p_destu,(M4OSA_MemAddr8) p_srcu, u_widthuv);
+        M4OSA_memcpy((M4OSA_MemAddr8)p_destv, (M4OSA_MemAddr8)p_srcv, u_widthuv);
+        p_destu += u_stride_out_uv;
+        p_destv += u_stride_out_uv;
+        p_srcu += u_stride_uv;
+        p_srcv += u_stride_uv;
+    }
+
+/*    p_destu = (unsigned char *) &plane_out[1].pac_data[plane_out[1].u_topleft];
+    p_destv = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
+    for (j = (u_height >> 1); j != 0; j--)
+    {
+        M4OSA_memset(p_destu, u_widthuv, 128);
+        M4OSA_memset(p_destv, u_widthuv, 128);
+        p_destu += u_stride_out_uv;
+        p_destv += u_stride_out_uv;
+    }
+*/
+    return 0;
+}
+
+unsigned char M4VFL_applyFallingCurtain(M4ViComImagePlane *plane_in,
+                                         M4ViComImagePlane *plane_out,
+                                         unsigned short curtain_factor, void *user_data)
+{
+    unsigned char *p_src, *p_srcu, *p_srcv,*p_dest, *p_destu, *p_destv;
+    unsigned long u_width, u_widthuv, u_stride_out, u_stride_out_uv,u_stride, u_stride_uv,u_height;
+    long j;
+    unsigned long nb_black_lines;
+
+    u_width = plane_in[0].u_width;
+    u_height = plane_in[0].u_height;
+    u_stride_out = plane_out[0].u_stride ;
+    u_stride_out_uv = plane_out[1].u_stride;
+    p_dest = (unsigned char *) &plane_out[0].pac_data[plane_out[0].u_topleft];
+    p_destu = (unsigned char *) &plane_out[1].pac_data[plane_out[1].u_topleft];
+    p_destv = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
+    u_widthuv = u_width >> 1;
+
+    /* nb_black_lines is even */
+    nb_black_lines = (unsigned long) ((curtain_factor >> 1) << 1);
+
+    p_src = (unsigned char *) &plane_in[0].pac_data[plane_in[0].u_topleft];
+    p_srcu = (unsigned char *) &plane_in[1].pac_data[plane_in[1].u_topleft];
+    p_srcv = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft];
+    u_stride = plane_in[0].u_stride ;
+    u_stride_uv = plane_in[1].u_stride;
+
+    for (j = (nb_black_lines >> 1); j != 0; j--)
+    {
+        M4OSA_memcpy((M4OSA_MemAddr8)p_dest,(M4OSA_MemAddr8) p_src, u_width);
+        p_dest += u_stride_out;
+        p_src += u_stride;
+        M4OSA_memcpy((M4OSA_MemAddr8)p_dest,(M4OSA_MemAddr8) p_src, u_width);
+        p_dest += u_stride_out;
+        p_src += u_stride;
+        M4OSA_memcpy((M4OSA_MemAddr8)p_destu,(M4OSA_MemAddr8) p_srcu, u_widthuv);
+        M4OSA_memcpy((M4OSA_MemAddr8)p_destv,(M4OSA_MemAddr8) p_srcv, u_widthuv);
+        p_destu += u_stride_out_uv;
+        p_destv += u_stride_out_uv;
+        p_srcu += u_stride_uv;
+        p_srcv += u_stride_uv;
+    }
+
+    for (j = (u_height - nb_black_lines) >> 1; j != 0; j--)
+    {
+        M4OSA_memset((M4OSA_MemAddr8)p_dest, u_width, 0);
+        p_dest += u_stride_out;
+        M4OSA_memset((M4OSA_MemAddr8)p_dest, u_width, 0);
+        p_dest += u_stride_out;
+        M4OSA_memset((M4OSA_MemAddr8)p_destu, u_widthuv, 128);
+        M4OSA_memset((M4OSA_MemAddr8)p_destv, u_widthuv, 128);
+        p_destu += u_stride_out_uv;
+        p_destv += u_stride_out_uv;
+    }
+    return 0;
+}
+
+/**
+ ******************************************************************************
+ * unsigned char M4VFL_applyCurtain(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+ *                                   M4VFL_CurtainParam *curtain_factor, void *user_data)
+ * @brief    This function applies a black curtain onto a YUV420 image.
+ * @note    This function writes black lines either at the top of the image or at
+ *            the bottom of the image. The other lines are copied from the source image.
+ *            First the number of black lines is computed and rounded down to an even integer.
+ * @param    plane_in: (IN) pointer to the 3 image planes of the source image
+ * @param    plane_out: (OUT) pointer to the 3 image planes of the destination image
+ * @param    user_data: (IN) pointer to some user_data
+ * @param    curtain_factor: (IN) structure with the parameters of the curtain
+ *           (nb of black lines and if at the top/bottom of the image)
+ * @return    0: there is no error
+ ******************************************************************************
+*/
+unsigned char M4VFL_applyCurtain(M4ViComImagePlane *plane_in, M4ViComImagePlane *plane_out,
+                                 M4VFL_CurtainParam *curtain_factor, void *user_data)
+{
+    unsigned char *p_src, *p_srcu, *p_srcv,*p_dest, *p_destu, *p_destv;
+    unsigned long u_width, u_widthuv, u_stride_out, u_stride_out_uv,u_stride, u_stride_uv,u_height;
+    long j;
+    unsigned long nb_black_lines;
+
+    u_width = plane_in[0].u_width;
+    u_height = plane_in[0].u_height;
+    u_stride_out = plane_out[0].u_stride ;
+    u_stride_out_uv = plane_out[1].u_stride;
+    p_dest = (unsigned char *) &plane_out[0].pac_data[plane_out[0].u_topleft];
+    p_destu = (unsigned char *) &plane_out[1].pac_data[plane_out[1].u_topleft];
+    p_destv = (unsigned char *) &plane_out[2].pac_data[plane_out[2].u_topleft];
+    u_widthuv = u_width >> 1;
+    u_stride = plane_in[0].u_stride ;
+    u_stride_uv = plane_in[1].u_stride;
+
+    /* nb_black_lines is even */
+    nb_black_lines = (unsigned long) ((curtain_factor->nb_black_lines >> 1) << 1);
+
+    if (curtain_factor->top_is_black)
+    {
+        /* black lines first */
+        /* compute index of the first source pixels (Y, U and V) to copy after the black lines */
+        p_src = (unsigned char *) &plane_in[0].pac_data[plane_in[0].u_topleft +\
+             ((nb_black_lines) * plane_in[0].u_stride)];
+        p_srcu = (unsigned char *) &plane_in[1].pac_data[plane_in[1].u_topleft +\
+             (((nb_black_lines) * plane_in[1].u_stride) >> 1)];
+        p_srcv = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft+\
+             (((nb_black_lines) * plane_in[2].u_stride) >> 1)];
+
+        /* write black lines */
+        for (j = (nb_black_lines >> 1); j != 0; j--)
+        {
+            M4OSA_memset((M4OSA_MemAddr8)p_dest, u_width, 0);
+            p_dest += u_stride_out;
+            M4OSA_memset((M4OSA_MemAddr8)p_dest, u_width, 0);
+            p_dest += u_stride_out;
+            M4OSA_memset((M4OSA_MemAddr8)p_destu, u_widthuv, 128);
+            M4OSA_memset((M4OSA_MemAddr8)p_destv, u_widthuv, 128);
+            p_destu += u_stride_out_uv;
+            p_destv += u_stride_out_uv;
+        }
+
+        /* copy from source image */
+        for (j = (u_height - nb_black_lines) >> 1; j != 0; j--)
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8)p_dest, (M4OSA_MemAddr8)p_src, u_width);
+            p_dest += u_stride_out;
+            p_src += u_stride;
+            M4OSA_memcpy((M4OSA_MemAddr8)p_dest,(M4OSA_MemAddr8) p_src, u_width);
+            p_dest += u_stride_out;
+            p_src += u_stride;
+            M4OSA_memcpy((M4OSA_MemAddr8)p_destu,(M4OSA_MemAddr8) p_srcu, u_widthuv);
+            M4OSA_memcpy((M4OSA_MemAddr8)p_destv, (M4OSA_MemAddr8)p_srcv, u_widthuv);
+            p_destu += u_stride_out_uv;
+            p_destv += u_stride_out_uv;
+            p_srcu += u_stride_uv;
+            p_srcv += u_stride_uv;
+        }
+    }
+    else
+    {
+        /* black lines at the bottom of the image */
+        p_src = (unsigned char *) &plane_in[0].pac_data[plane_in[0].u_topleft];
+        p_srcu = (unsigned char *) &plane_in[1].pac_data[plane_in[1].u_topleft];
+        p_srcv = (unsigned char *) &plane_in[2].pac_data[plane_in[2].u_topleft];
+
+        /* copy from source image */
+        for (j = (nb_black_lines >> 1); j != 0; j--)
+        {
+            M4OSA_memcpy((M4OSA_MemAddr8)p_dest, (M4OSA_MemAddr8)p_src, u_width);
+            p_dest += u_stride_out;
+            p_src += u_stride;
+            M4OSA_memcpy((M4OSA_MemAddr8)p_dest,(M4OSA_MemAddr8) p_src, u_width);
+            p_dest += u_stride_out;
+            p_src += u_stride;
+            M4OSA_memcpy((M4OSA_MemAddr8)p_destu,(M4OSA_MemAddr8) p_srcu, u_widthuv);
+            M4OSA_memcpy((M4OSA_MemAddr8)p_destv,(M4OSA_MemAddr8) p_srcv, u_widthuv);
+            p_destu += u_stride_out_uv;
+            p_destv += u_stride_out_uv;
+            p_srcu += u_stride_uv;
+            p_srcv += u_stride_uv;
+        }
+
+        /* write black lines*/
+        /* the pointers to p_dest, p_destu and p_destv are used through the two loops "for" */
+        for (j = (u_height - nb_black_lines) >> 1; j != 0; j--)
+        {
+            M4OSA_memset((M4OSA_MemAddr8)p_dest, u_width, 0);
+            p_dest += u_stride_out;
+            M4OSA_memset((M4OSA_MemAddr8)p_dest, u_width, 0);
+            p_dest += u_stride_out;
+            M4OSA_memset((M4OSA_MemAddr8)p_destu, u_widthuv, 128);
+            M4OSA_memset((M4OSA_MemAddr8)p_destv, u_widthuv, 128);
+            p_destu += u_stride_out_uv;
+            p_destv += u_stride_out_uv;
+        }
+    }
+
+    return 0;
+}
+
+/**
+ *************************************************************************************************
+ * M4OSA_ERR M4VIFI_ImageBlendingonYUV420 (void *pUserData,
+ *                                                  M4VIFI_ImagePlane *pPlaneIn1,
+ *                                                  M4VIFI_ImagePlane *pPlaneIn2,
+ *                                                  M4VIFI_ImagePlane *pPlaneOut,
+ *                                                  UInt32 Progress)
+ * @brief   Blends two YUV 4:2:0 Planar images.
+ * @note    Blends YUV420 planar images,
+ *          Map the value of progress from (0 - 1000) to (0 - 1024)
+ *          Set the range of blendingfactor,
+ *                  1. from 0 to (Progress << 1)            ;for Progress <= 512
+ *                  2. from (( Progress - 512)<< 1) to 1024 ;otherwise
+ *          Set the increment of blendingfactor for each element in the image row by the factor,
+ *                  =  (Range-1) / (image width-1)  ;for width >= range
+ *                  =  (Range) / (image width)      ;otherwise
+ *          Loop on each(= i) row of output Y plane (steps of 2)
+ *              Loop on each(= j) column of output Y plane (steps of 2)
+ *                  Get four Y samples and one U & V sample from two input YUV4:2:0 images and
+ *                  Compute four Y sample and one U & V sample for output YUV4:2:0 image
+ *                      using the following,
+ *                  Out(i,j) = blendingfactor(i,j) * In1(i,j) + (1 - blendingfactor(i,j)) * In2(i,j)
+ *              end loop column
+ *          end loop row.
+ * @param   pUserData: (IN)  User Specific Parameter
+ * @param   pPlaneIn1: (IN)  Pointer to an array of image plane structures maintained
+ *           for Y, U and V planes.
+ * @param   pPlaneIn2: (IN)  Pointer to an array of image plane structures maintained
+ *           for Y, U and V planes.
+ * @param   pPlaneOut: (OUT) Pointer to an array of image plane structures maintained
+ *           for Y, U and V planes.
+ * @param   Progress:  (IN)  Progress value (varies between 0 and 1000)
+ * @return  M4VIFI_OK: No error
+ * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
+ * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  Error in width
+ *************************************************************************************************
+*/
+
+/** Check for value is EVEN */
+#ifndef IS_EVEN
+#define IS_EVEN(a)  (!(a & 0x01))
+#endif
+
+/** Used for fixed point implementation */
+#ifndef MAX_SHORT
+#define MAX_SHORT   0x10000
+#endif
+
+#ifndef NULL
+#define NULL    0
+#endif
+
+#ifndef FALSE
+#define FALSE   0
+#define TRUE    !FALSE
+#endif
+
+unsigned char M4VIFI_ImageBlendingonYUV420 (void *pUserData,
+                                            M4ViComImagePlane *pPlaneIn1,
+                                            M4ViComImagePlane *pPlaneIn2,
+                                            M4ViComImagePlane *pPlaneOut,
+                                            UInt32 Progress)
+{
+    UInt8    *pu8_data_Y_start1,*pu8_data_U_start1,*pu8_data_V_start1;
+    UInt8    *pu8_data_Y_start2,*pu8_data_U_start2,*pu8_data_V_start2;
+    UInt8    *pu8_data_Y_start3,*pu8_data_U_start3,*pu8_data_V_start3;
+    UInt8    *pu8_data_Y_current1, *pu8_data_Y_next1, *pu8_data_U1, *pu8_data_V1;
+    UInt8    *pu8_data_Y_current2, *pu8_data_Y_next2, *pu8_data_U2, *pu8_data_V2;
+    UInt8    *pu8_data_Y_current3,*pu8_data_Y_next3, *pu8_data_U3, *pu8_data_V3;
+    UInt32   u32_stride_Y1, u32_stride2_Y1, u32_stride_U1, u32_stride_V1;
+    UInt32   u32_stride_Y2, u32_stride2_Y2, u32_stride_U2, u32_stride_V2;
+    UInt32   u32_stride_Y3, u32_stride2_Y3, u32_stride_U3, u32_stride_V3;
+    UInt32   u32_height,  u32_width;
+    UInt32   u32_blendfactor, u32_startA, u32_endA, u32_blend_inc, u32_x_accum;
+    UInt32   u32_col, u32_row, u32_rangeA, u32_progress;
+    UInt32   u32_U1,u32_V1,u32_U2,u32_V2, u32_Y1, u32_Y2;
+
+
+    /* Check the Y plane height is EVEN and image plane heights are same */
+    if( (IS_EVEN(pPlaneIn1[0].u_height) == FALSE)                ||
+        (IS_EVEN(pPlaneIn2[0].u_height) == FALSE)                ||
+        (IS_EVEN(pPlaneOut[0].u_height) == FALSE)                ||
+        (pPlaneIn1[0].u_height != pPlaneOut[0].u_height)         ||
+        (pPlaneIn2[0].u_height != pPlaneOut[0].u_height) )
+    {
+        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+    }
+
+    /* Check the Y plane width is EVEN and image plane widths are same */
+    if( (IS_EVEN(pPlaneIn1[0].u_width) == FALSE)                 ||
+        (IS_EVEN(pPlaneIn2[0].u_width) == FALSE)                 ||
+        (IS_EVEN(pPlaneOut[0].u_width) == FALSE)                 ||
+        (pPlaneIn1[0].u_width  != pPlaneOut[0].u_width)          ||
+        (pPlaneIn2[0].u_width  != pPlaneOut[0].u_width)  )
+    {
+        return M4VIFI_ILLEGAL_FRAME_WIDTH;
+    }
+
+    /* Set the pointer to the beginning of the input1 YUV420 image planes */
+    pu8_data_Y_start1 = pPlaneIn1[0].pac_data + pPlaneIn1[0].u_topleft;
+    pu8_data_U_start1 = pPlaneIn1[1].pac_data + pPlaneIn1[1].u_topleft;
+    pu8_data_V_start1 = pPlaneIn1[2].pac_data + pPlaneIn1[2].u_topleft;
+
+    /* Set the pointer to the beginning of the input2 YUV420 image planes */
+    pu8_data_Y_start2 = pPlaneIn2[0].pac_data + pPlaneIn2[0].u_topleft;
+    pu8_data_U_start2 = pPlaneIn2[1].pac_data + pPlaneIn2[1].u_topleft;
+    pu8_data_V_start2 = pPlaneIn2[2].pac_data + pPlaneIn2[2].u_topleft;
+
+    /* Set the pointer to the beginning of the output YUV420 image planes */
+    pu8_data_Y_start3 = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
+    pu8_data_U_start3 = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
+    pu8_data_V_start3 = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
+
+    /* Set the stride for the next row in each input1 YUV420 plane */
+    u32_stride_Y1 = pPlaneIn1[0].u_stride;
+    u32_stride_U1 = pPlaneIn1[1].u_stride;
+    u32_stride_V1 = pPlaneIn1[2].u_stride;
+
+    /* Set the stride for the next row in each input2 YUV420 plane */
+    u32_stride_Y2 = pPlaneIn2[0].u_stride;
+    u32_stride_U2 = pPlaneIn2[1].u_stride;
+    u32_stride_V2 = pPlaneIn2[2].u_stride;
+
+    /* Set the stride for the next row in each output YUV420 plane */
+    u32_stride_Y3 = pPlaneOut[0].u_stride;
+    u32_stride_U3 = pPlaneOut[1].u_stride;
+    u32_stride_V3 = pPlaneOut[2].u_stride;
+
+    u32_stride2_Y1   = u32_stride_Y1 << 1;
+    u32_stride2_Y2   = u32_stride_Y2 << 1;
+    u32_stride2_Y3   = u32_stride_Y3 << 1;
+
+    /* Get the size of the output image */
+    u32_height = pPlaneOut[0].u_height;
+    u32_width  = pPlaneOut[0].u_width;
+
+    /* User Specified Progress value */
+    u32_progress = Progress;
+
+    /* Map Progress value from (0 - 1000) to (0 - 1024) -> for optimisation */
+    if(u32_progress < 1000)
+        u32_progress = ((u32_progress << 10) / 1000);
+    else
+        u32_progress = 1024;
+
+    /* Set the range of blendingfactor */
+    if(u32_progress <= 512)
+    {
+        u32_startA = 0;
+        u32_endA   = (u32_progress << 1);
+    }
+    else /* u32_progress > 512 */
+    {
+        u32_startA = (u32_progress - 512) << 1;
+        u32_endA   =  1024;
+    }
+    u32_rangeA = u32_endA - u32_startA;
+
+    /* Set the increment of blendingfactor for each element in the image row */
+    if ((u32_width >= u32_rangeA) && (u32_rangeA > 0) )
+    {
+        u32_blend_inc   = ((u32_rangeA-1) * MAX_SHORT) / (u32_width - 1);
+    }
+    else /* (u32_width < u32_rangeA) || (u32_rangeA == 0) */
+    {
+        u32_blend_inc   = (u32_rangeA * MAX_SHORT) / (u32_width);
+    }
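+    /* u32_x_accum is a 16.16 fixed point accumulator: u32_blend_inc is the
+     * per pixel step scaled by MAX_SHORT (65536), and (u32_x_accum >> 16)
+     * recovers the integer part, so the blending factor ramps roughly
+     * linearly from u32_startA at the left edge of each row towards
+     * u32_endA at the right edge. */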
+
+    /* Two YUV420 rows are computed at each pass */
+    for (u32_row = u32_height; u32_row != 0; u32_row -=2)
+    {
+        /* Set pointers to the beginning of the row for each input image1 plane */
+        pu8_data_Y_current1 = pu8_data_Y_start1;
+        pu8_data_U1 = pu8_data_U_start1;
+        pu8_data_V1 = pu8_data_V_start1;
+
+        /* Set pointers to the beginning of the row for each input image2 plane */
+        pu8_data_Y_current2 = pu8_data_Y_start2;
+        pu8_data_U2 = pu8_data_U_start2;
+        pu8_data_V2 = pu8_data_V_start2;
+
+        /* Set pointers to the beginning of the row for each output image plane */
+        pu8_data_Y_current3 = pu8_data_Y_start3;
+        pu8_data_U3 = pu8_data_U_start3;
+        pu8_data_V3 = pu8_data_V_start3;
+
+        /* Set pointers to the beginning of the next row for image luma plane */
+        pu8_data_Y_next1 = pu8_data_Y_current1 + u32_stride_Y1;
+        pu8_data_Y_next2 = pu8_data_Y_current2 + u32_stride_Y2;
+        pu8_data_Y_next3 = pu8_data_Y_current3 + u32_stride_Y3;
+
+        /* Initialise blendfactor */
+        u32_blendfactor   = u32_startA;
+        /* Blendfactor Increment accumulator */
+        u32_x_accum = 0;
+
+        /* Loop on each column of the output image */
+        for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
+        {
+            /* Update the blending factor */
+            u32_blendfactor = u32_startA + (u32_x_accum >> 16);
+
+            /* Get Luma value (x,y) of input Image1 */
+            u32_Y1 = *pu8_data_Y_current1++;
+
+            /* Get chrominance2 value */
+            u32_U1 = *pu8_data_U1++;
+            u32_V1 = *pu8_data_V1++;
+
+            /* Get Luma value (x,y) of input Image2 */
+            u32_Y2 = *pu8_data_Y_current2++;
+
+            /* Get chrominance2 value */
+            u32_U2 = *pu8_data_U2++;
+            u32_V2 = *pu8_data_V2++;
+
+            /* Compute Luma value (x,y) of Output image */
+            *pu8_data_Y_current3++  = (UInt8)((u32_blendfactor * u32_Y2 +
+                                                     (1024 - u32_blendfactor)*u32_Y1) >> 10);
+            /* Compute chroma(U) value of Output image */
+            *pu8_data_U3++          = (UInt8)((u32_blendfactor * u32_U2 +
+                                                     (1024 - u32_blendfactor)*u32_U1) >> 10);
+            /* Compute chroma(V) value of Output image */
+            *pu8_data_V3++          = (UInt8)((u32_blendfactor * u32_V2 +
+                                                     (1024 - u32_blendfactor)*u32_V1) >> 10);
+
+            /* Get Luma value (x,y+1) of input Image1 */
+            u32_Y1 = *pu8_data_Y_next1++;
+
+             /* Get Luma value (x,y+1) of input Image2 */
+            u32_Y2 = *pu8_data_Y_next2++;
+
+            /* Compute Luma value (x,y+1) of Output image*/
+            *pu8_data_Y_next3++ = (UInt8)((u32_blendfactor * u32_Y2 +
+                                                    (1024 - u32_blendfactor)*u32_Y1) >> 10);
+            /* Update accumulator */
+            u32_x_accum += u32_blend_inc;
+
+            /* Update the blending factor */
+            u32_blendfactor = u32_startA + (u32_x_accum >> 16);
+
+            /* Get Luma value (x+1,y) of input Image1 */
+            u32_Y1 = *pu8_data_Y_current1++;
+
+            /* Get Luma value (x+1,y) of input Image2 */
+            u32_Y2 = *pu8_data_Y_current2++;
+
+            /* Compute Luma value (x+1,y) of Output image*/
+            *pu8_data_Y_current3++ = (UInt8)((u32_blendfactor * u32_Y2 +
+                                                 (1024 - u32_blendfactor)*u32_Y1) >> 10);
+
+            /* Get Luma value (x+1,y+1) of input Image1 */
+            u32_Y1 = *pu8_data_Y_next1++;
+
+            /* Get Luma value (x+1,y+1) of input Image2 */
+            u32_Y2 = *pu8_data_Y_next2++;
+
+            /* Compute Luma value (x+1,y+1) of Output image*/
+            *pu8_data_Y_next3++ = (UInt8)((u32_blendfactor * u32_Y2 +
+                                                 (1024 - u32_blendfactor)*u32_Y1) >> 10);
+            /* Update accumulator */
+            u32_x_accum += u32_blend_inc;
+
+            /* Working pointers are incremented just after each storage */
+
+        }/* End of row scanning */
+
+        /* Update working pointer of input image1 for next row */
+        pu8_data_Y_start1 += u32_stride2_Y1;
+        pu8_data_U_start1 += u32_stride_U1;
+        pu8_data_V_start1 += u32_stride_V1;
+
+        /* Update working pointer of input image2 for next row */
+        pu8_data_Y_start2 += u32_stride2_Y2;
+        pu8_data_U_start2 += u32_stride_U2;
+        pu8_data_V_start2 += u32_stride_V2;
+
+        /* Update working pointer of output image for next row */
+        pu8_data_Y_start3 += u32_stride2_Y3;
+        pu8_data_U_start3 += u32_stride_U3;
+        pu8_data_V_start3 += u32_stride_V3;
+
+    }/* End of column scanning */
+
+    return M4VIFI_OK;
+}
+/* End of file M4VFL_transition.c */
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_BGR565toYUV420.c b/libvideoeditor/vss/video_filters/src/M4VIFI_BGR565toYUV420.c
new file mode 100755
index 0000000..f972ba5
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_BGR565toYUV420.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ****************************************************************************************
+ * @file     M4VIFI_BGR565toYUV420.c
+ * @brief    Contain video library function
+ * @note     Color Conversion Filter
+ *           -# Contains the format conversion filters from BGR565 to YUV420
+ ****************************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include    "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include    "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include    "M4VIFI_Clip.h"
+
+/**
+ *****************************************************************************************
+ * M4VIFI_UInt8 M4VIFI_BGR565toYUV420 (void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+ *                                                     M4VIFI_ImagePlane *pPlaneOut)
+ * @brief   Transform BGR565 image to a YUV420 image.
+ * @note    Convert BGR565 to YUV420,
+ *          Loop on each row ( 2 rows by 2 rows )
+ *              Loop on each column ( 2 col by 2 col )
+ *                  Get 4 BGR samples from input data and build 4 output Y samples
+ *                  and each single U & V data
+ *              end loop on col
+ *          end loop on row
+ * @param   pUserData: (IN) User Specific Data
+ * @param   pPlaneIn: (IN) Pointer to BGR565 Plane
+ * @param   pPlaneOut: (OUT) Pointer to  YUV420 buffer Plane
+ * @return  M4VIFI_OK: there is no error
+ * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
+ * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  YUV Plane width is ODD
+ *****************************************************************************************
+*/
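+
+/*
+ * Illustrative sketch (compile-time disabled, not part of the conversion path):
+ * assuming the GET_BGR565 and Y16/U16/V16 macros from M4VIFI_Defines.h unpack a
+ * 5:6:5 pixel and apply the fixed-point RGB-to-YUV matrix, one 2x2 block could be
+ * processed as below: one Y sample per pixel, one rounded U/V average per block.
+ */
+#if 0
+static void M4VIFI_Example_BGR565BlockToYUV420(const M4VIFI_UInt16 pix[4],
+                                               M4VIFI_UInt8 y[4],
+                                               M4VIFI_UInt8 *pu8_u,
+                                               M4VIFI_UInt8 *pu8_v)
+{
+    M4VIFI_Int32 r, g, b, i, u_sum = 0, v_sum = 0;
+
+    for (i = 0; i < 4; i++)
+    {
+        GET_BGR565(b, g, r, pix[i]);            /* unpack BGR565 to 8-bit B, G, R */
+        y[i]   = (M4VIFI_UInt8)Y16(r, g, b);    /* one luma sample per pixel      */
+        u_sum += U16(r, g, b);                  /* accumulate chroma...           */
+        v_sum += V16(r, g, b);
+    }
+    /* ...and average over the 2x2 block with round-to-nearest (+2 before >>2) */
+    *pu8_u = (M4VIFI_UInt8)((u_sum + 2) >> 2);
+    *pu8_v = (M4VIFI_UInt8)((v_sum + 2) >> 2);
+}
+#endif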
+
+M4VIFI_UInt8    M4VIFI_BGR565toYUV420(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+                                                      M4VIFI_ImagePlane *pPlaneOut)
+{
+    M4VIFI_UInt32   u32_width, u32_height;
+    M4VIFI_UInt32   u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V;
+    M4VIFI_UInt32   u32_stride_bgr, u32_stride_2bgr;
+    M4VIFI_UInt32   u32_col, u32_row;
+
+    M4VIFI_Int32    i32_r00, i32_r01, i32_r10, i32_r11;
+    M4VIFI_Int32    i32_g00, i32_g01, i32_g10, i32_g11;
+    M4VIFI_Int32    i32_b00, i32_b01, i32_b10, i32_b11;
+    M4VIFI_Int32    i32_y00, i32_y01, i32_y10, i32_y11;
+    M4VIFI_Int32    i32_u00, i32_u01, i32_u10, i32_u11;
+    M4VIFI_Int32    i32_v00, i32_v01, i32_v10, i32_v11;
+    M4VIFI_UInt8    *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
+    M4VIFI_UInt8    *pu8_y_data, *pu8_u_data, *pu8_v_data;
+    M4VIFI_UInt8    *pu8_bgrn_data, *pu8_bgrn;
+    M4VIFI_UInt16   u16_pix1, u16_pix2, u16_pix3, u16_pix4;
+
+    /* Check that the plane heights are appropriate */
+    if( (pPlaneIn->u_height != pPlaneOut[0].u_height)           ||
+        (pPlaneOut[0].u_height != (pPlaneOut[1].u_height<<1))   ||
+        (pPlaneOut[0].u_height != (pPlaneOut[2].u_height<<1)))
+    {
+        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+    }
+
+    /* Check that the plane widths are appropriate */
+    if( (pPlaneIn->u_width != pPlaneOut[0].u_width)         ||
+        (pPlaneOut[0].u_width != (pPlaneOut[1].u_width<<1)) ||
+        (pPlaneOut[0].u_width != (pPlaneOut[2].u_width<<1)))
+    {
+        return M4VIFI_ILLEGAL_FRAME_WIDTH;
+    }
+
+    /* Set the pointer to the beginning of the output data buffers */
+    pu8_y_data  = pPlaneOut[0].pac_data + pPlaneOut[0].u_topleft;
+    pu8_u_data  = pPlaneOut[1].pac_data + pPlaneOut[1].u_topleft;
+    pu8_v_data  = pPlaneOut[2].pac_data + pPlaneOut[2].u_topleft;
+
+    /* Set the pointer to the beginning of the input data buffers */
+    pu8_bgrn_data   = pPlaneIn->pac_data + pPlaneIn->u_topleft;
+
+    /* Get the size of the output image */
+    u32_width   = pPlaneOut[0].u_width;
+    u32_height  = pPlaneOut[0].u_height;
+
+    /* Set the size of the memory jumps corresponding to row jump in each output plane */
+    u32_stride_Y = pPlaneOut[0].u_stride;
+    u32_stride2_Y = u32_stride_Y << 1;
+    u32_stride_U = pPlaneOut[1].u_stride;
+    u32_stride_V = pPlaneOut[2].u_stride;
+
+    /* Set the size of the memory jumps corresponding to row jump in input plane */
+    u32_stride_bgr = pPlaneIn->u_stride;
+    u32_stride_2bgr = u32_stride_bgr << 1;
+
+    /* Loop on each row of the output image, input coordinates are estimated from output ones */
+    /* Two YUV rows are computed at each pass */
+    for (u32_row = u32_height ;u32_row != 0; u32_row -=2)
+    {
+        /* Current Y plane row pointers */
+        pu8_yn = pu8_y_data;
+        /* Next Y plane row pointers */
+        pu8_ys = pu8_yn + u32_stride_Y;
+        /* Current U plane row pointer */
+        pu8_u = pu8_u_data;
+        /* Current V plane row pointer */
+        pu8_v = pu8_v_data;
+
+        pu8_bgrn = pu8_bgrn_data;
+
+        /* Loop on each column of the output image */
+        for (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
+        {
+            /* Get four BGR 565 samples from input data */
+            u16_pix1 = *( (M4VIFI_UInt16 *) pu8_bgrn);
+            u16_pix2 = *( (M4VIFI_UInt16 *) (pu8_bgrn + CST_RGB_16_SIZE));
+            u16_pix3 = *( (M4VIFI_UInt16 *) (pu8_bgrn + u32_stride_bgr));
+            u16_pix4 = *( (M4VIFI_UInt16 *) (pu8_bgrn + u32_stride_bgr + CST_RGB_16_SIZE));
+            /* Unpack RGB565 to 8bit R, G, B */
+            /* (x,y) */
+            GET_BGR565(i32_b00, i32_g00, i32_r00, u16_pix1);
+            /* (x+1,y) */
+            GET_BGR565(i32_b10, i32_g10, i32_r10, u16_pix2);
+            /* (x,y+1) */
+            GET_BGR565(i32_b01, i32_g01, i32_r01, u16_pix3);
+            /* (x+1,y+1) */
+            GET_BGR565(i32_b11, i32_g11, i32_r11, u16_pix4);
+
+            /* Convert BGR value to YUV */
+            i32_u00 = U16(i32_r00, i32_g00, i32_b00);
+            i32_v00 = V16(i32_r00, i32_g00, i32_b00);
+            /* luminance value */
+            i32_y00 = Y16(i32_r00, i32_g00, i32_b00);
+
+            i32_u10 = U16(i32_r10, i32_g10, i32_b10);
+            i32_v10 = V16(i32_r10, i32_g10, i32_b10);
+            /* luminance value */
+            i32_y10 = Y16(i32_r10, i32_g10, i32_b10);
+
+            i32_u01 = U16(i32_r01, i32_g01, i32_b01);
+            i32_v01 = V16(i32_r01, i32_g01, i32_b01);
+            /* luminance value */
+            i32_y01 = Y16(i32_r01, i32_g01, i32_b01);
+
+            i32_u11 = U16(i32_r11, i32_g11, i32_b11);
+            i32_v11 = V16(i32_r11, i32_g11, i32_b11);
+            /* luminance value */
+            i32_y11 = Y16(i32_r11, i32_g11, i32_b11);
+
+            /* Store luminance data */
+            pu8_yn[0] = (M4VIFI_UInt8)i32_y00;
+            pu8_yn[1] = (M4VIFI_UInt8)i32_y10;
+            pu8_ys[0] = (M4VIFI_UInt8)i32_y01;
+            pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
+
+            /* Store chroma data */
+            *pu8_u = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
+            *pu8_v = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
+
+            /* Prepare for next column */
+            pu8_bgrn += (CST_RGB_16_SIZE<<1);
+            /* Update current Y plane line pointer*/
+            pu8_yn += 2;
+            /* Update next Y plane line pointer*/
+            pu8_ys += 2;
+            /* Update U plane line pointer*/
+            pu8_u ++;
+            /* Update V plane line pointer*/
+            pu8_v ++;
+        } /* End of horizontal scanning */
+
+        /* Prepare pointers for the next row */
+        pu8_y_data      += u32_stride2_Y;
+        pu8_u_data      += u32_stride_U;
+        pu8_v_data      += u32_stride_V;
+        pu8_bgrn_data   += u32_stride_2bgr;
+
+    } /* End of vertical scanning */
+
+    return M4VIFI_OK;
+}
+/* End of file M4VIFI_BGR565toYUV420.c */
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_Clip.c b/libvideoeditor/vss/video_filters/src/M4VIFI_Clip.c
new file mode 100755
index 0000000..800ee1e
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_Clip.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file     M4VIFI_Clip.c
+ * @brief    Management of the RGB Clipping matrix inclusion and Division Table
+ * @note     -# Clipping Matrix is used in order to properly manage the inclusion of
+ *           the external RGB Clipping matrix used for color conversion.
+ *           This file HAS TO BE compiled with all color conversion filter projects
+ *           -# Division table is used in RGB to HLS color conversion
+ *           Important: This file must be compiled during the assembly library building
+ ******************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include    "M4VIFI_FiltersAPI.h"
+
+CNST M4VIFI_UInt8   M4VIFI_ClipTable[1256]
+= {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03,
+0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
+0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13,
+0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b,
+0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
+0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
+0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33,
+0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
+0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43,
+0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
+0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53,
+0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b,
+0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63,
+0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b,
+0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73,
+0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b,
+0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83,
+0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b,
+0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93,
+0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b,
+0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3,
+0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab,
+0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3,
+0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb,
+0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3,
+0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb,
+0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3,
+0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb,
+0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3,
+0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb,
+0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3,
+0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb,
+0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+};
+
+/* Division table for ( 65535/x ); x = 0 to 511 (entry 0 is a guard value) */
+CNST M4VIFI_UInt16  M4VIFI_DivTable[512]
+= {
+0, 65535, 32768, 21845, 16384, 13107, 10922, 9362,
+8192, 7281, 6553, 5957, 5461, 5041, 4681, 4369,
+4096, 3855, 3640, 3449, 3276, 3120, 2978, 2849,
+2730, 2621, 2520, 2427, 2340, 2259, 2184, 2114,
+2048, 1985, 1927, 1872, 1820, 1771, 1724, 1680,
+1638, 1598, 1560, 1524, 1489, 1456, 1424, 1394,
+1365, 1337, 1310, 1285, 1260, 1236, 1213, 1191,
+1170, 1149, 1129, 1110, 1092, 1074, 1057, 1040,
+1024, 1008, 992, 978, 963, 949, 936, 923,
+910, 897, 885, 873, 862, 851, 840, 829,
+819, 809, 799, 789, 780, 771, 762, 753,
+744, 736, 728, 720, 712, 704, 697, 689,
+682, 675, 668, 661, 655, 648, 642, 636,
+630, 624, 618, 612, 606, 601, 595, 590,
+585, 579, 574, 569, 564, 560, 555, 550,
+546, 541, 537, 532, 528, 524, 520, 516,
+512, 508, 504, 500, 496, 492, 489, 485,
+481, 478, 474, 471, 468, 464, 461, 458,
+455, 451, 448, 445, 442, 439, 436, 434,
+431, 428, 425, 422, 420, 417, 414, 412,
+409, 407, 404, 402, 399, 397, 394, 392,
+390, 387, 385, 383, 381, 378, 376, 374,
+372, 370, 368, 366, 364, 362, 360, 358,
+356, 354, 352, 350, 348, 346, 344, 343,
+341, 339, 337, 336, 334, 332, 330, 329,
+327, 326, 324, 322, 321, 319, 318, 316,
+315, 313, 312, 310, 309, 307, 306, 304,
+303, 302, 300, 299, 297, 296, 295, 293,
+292, 291, 289, 288, 287, 286, 284, 283,
+282, 281, 280, 278, 277, 276, 275, 274,
+273, 271, 270, 269, 268, 267, 266, 265,
+264, 263, 262, 261, 260, 259, 258, 257,
+256, 255, 254, 253, 252, 251, 250, 249,
+248, 247, 246, 245, 244, 243, 242, 241,
+240, 240, 239, 238, 237, 236, 235, 234,
+234, 233, 232, 231, 230, 229, 229, 228,
+227, 226, 225, 225, 224, 223, 222, 222,
+221, 220, 219, 219, 218, 217, 217, 216,
+215, 214, 214, 213, 212, 212, 211, 210,
+210, 209, 208, 208, 207, 206, 206, 205,
+204, 204, 203, 202, 202, 201, 201, 200,
+199, 199, 198, 197, 197, 196, 196, 195,
+195, 194, 193, 193, 192, 192, 191, 191,
+190, 189, 189, 188, 188, 187, 187, 186,
+186, 185, 185, 184, 184, 183, 183, 182,
+182, 181, 181, 180, 180, 179, 179, 178,
+178, 177, 177, 176, 176, 175, 175, 174,
+174, 173, 173, 172, 172, 172, 171, 171,
+170, 170, 169, 169, 168, 168, 168, 167,
+167, 166, 166, 165, 165, 165, 164, 164,
+163, 163, 163, 162, 162, 161, 161, 161,
+160, 160, 159, 159, 159, 158, 158, 157,
+157, 157, 156, 156, 156, 155, 155, 154,
+154, 154, 153, 153, 153, 152, 152, 152,
+151, 151, 151, 150, 150, 149, 149, 149,
+148, 148, 148, 147, 147, 147, 146, 146,
+146, 145, 145, 145, 144, 144, 144, 144,
+143, 143, 143, 142, 142, 142, 141, 141,
+141, 140, 140, 140, 140, 139, 139, 139,
+138, 138, 138, 137, 137, 137, 137, 136,
+136, 136, 135, 135, 135, 135, 134, 134,
+134, 134, 133, 133, 133, 132, 132, 132,
+132, 131, 131, 131, 131, 130, 130, 130,
+130, 129, 129, 129, 129, 128, 128, 128
+};
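+
+/*
+ * Usage note (assumption about the intended use of the table above): since
+ * M4VIFI_DivTable[x] approximates 65535/x (entry 0 is a guard value), an integer
+ * division a/b with 1 <= b <= 511 can be replaced by a reciprocal multiplication:
+ *     quotient ~= (a * M4VIFI_DivTable[b]) >> 16
+ * e.g. 300/7 -> (300 * 9362) >> 16 = 2808600 >> 16 = 42, matching the exact result.
+ */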
+
+CNST M4VIFI_Int32  const_storage1[8]
+= {
+0x00002568, 0x00003343,0x00000649,0x00000d0f, 0x0000D86C, 0x0000D83B, 0x00010000, 0x00010000
+};
+
+CNST M4VIFI_Int32  const_storage[8]
+= {
+0x00002568, 0x00003343, 0x1BF800, 0x00000649, 0x00000d0f, 0x110180, 0x40cf, 0x22BE00
+};
+
+
+CNST M4VIFI_UInt16  *M4VIFI_DivTable_zero
+ = &M4VIFI_DivTable[0];
+
+CNST M4VIFI_UInt8   *M4VIFI_ClipTable_zero
+ = &M4VIFI_ClipTable[500];
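+
+/*
+ * Usage note (derived from the table layout above): M4VIFI_ClipTable_zero points
+ * 500 entries into the table, so indexing it with a signed intermediate value
+ * saturates that value to [0, 255] for offsets in roughly [-500, 755], e.g.
+ * M4VIFI_ClipTable_zero[-10] == 0x00 and M4VIFI_ClipTable_zero[300] == 0xff.
+ */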
+
+
+/* End of file M4VIFI_Clip.c */
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_RGB888toYUV420.c b/libvideoeditor/vss/video_filters/src/M4VIFI_RGB888toYUV420.c
new file mode 100755
index 0000000..d40a488
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_RGB888toYUV420.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include    "M4VIFI_FiltersAPI.h"
+
+#include    "M4VIFI_Defines.h"
+
+#include    "M4VIFI_Clip.h"
+
+/***************************************************************************
+Proto:
+M4VIFI_UInt8    M4VIFI_RGB888toYUV420(void *pUserData, M4VIFI_ImagePlane *PlaneIn,
+                                     M4VIFI_ImagePlane PlaneOut[3]);
+Purpose:    Fill the YUV420 planes from a BGR24 plane
+Abstract:    Loop on each row ( 2 rows by 2 rows )
+                Loop on each column ( 2 col by 2 col )
+                    Get 4 BGR samples from input data and build 4 output Y samples and
+                    each single U & V data
+                end loop on col
+            end loop on row
+
+In:            RGB24 plane
+InOut:        none
+Out:        array of 3 M4VIFI_ImagePlane structures
+Modified:    ML: RGB function modified to BGR.
+***************************************************************************/
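+/*
+ * Note on the implementation below: each 2x2 block of input pixels produces four
+ * Y samples but a single U and V sample; the four chroma contributions are summed
+ * and the "+ 2" bias before ">> 2" rounds the average to the nearest integer
+ * rather than truncating it.
+ */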
+M4VIFI_UInt8 M4VIFI_RGB888toYUV420(void *pUserData, M4VIFI_ImagePlane *PlaneIn,
+                                    M4VIFI_ImagePlane PlaneOut[3])
+{
+    M4VIFI_UInt32    u32_width, u32_height;
+    M4VIFI_UInt32    u32_stride_Y, u32_stride2_Y, u32_stride_U, u32_stride_V, u32_stride_rgb,\
+                     u32_stride_2rgb;
+    M4VIFI_UInt32    u32_col, u32_row;
+
+    M4VIFI_Int32    i32_r00, i32_r01, i32_r10, i32_r11;
+    M4VIFI_Int32    i32_g00, i32_g01, i32_g10, i32_g11;
+    M4VIFI_Int32    i32_b00, i32_b01, i32_b10, i32_b11;
+    M4VIFI_Int32    i32_y00, i32_y01, i32_y10, i32_y11;
+    M4VIFI_Int32    i32_u00, i32_u01, i32_u10, i32_u11;
+    M4VIFI_Int32    i32_v00, i32_v01, i32_v10, i32_v11;
+    M4VIFI_UInt8    *pu8_yn, *pu8_ys, *pu8_u, *pu8_v;
+    M4VIFI_UInt8    *pu8_y_data, *pu8_u_data, *pu8_v_data;
+    M4VIFI_UInt8    *pu8_rgbn_data, *pu8_rgbn;
+
+    /* check sizes */
+    if( (PlaneIn->u_height != PlaneOut[0].u_height)            ||
+        (PlaneOut[0].u_height != (PlaneOut[1].u_height<<1))    ||
+        (PlaneOut[0].u_height != (PlaneOut[2].u_height<<1)))
+        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+
+    if( (PlaneIn->u_width != PlaneOut[0].u_width)        ||
+        (PlaneOut[0].u_width != (PlaneOut[1].u_width<<1))    ||
+        (PlaneOut[0].u_width != (PlaneOut[2].u_width<<1)))
+        return M4VIFI_ILLEGAL_FRAME_WIDTH;
+
+
+    /* set the pointer to the beginning of the output data buffers */
+    pu8_y_data    = PlaneOut[0].pac_data + PlaneOut[0].u_topleft;
+    pu8_u_data    = PlaneOut[1].pac_data + PlaneOut[1].u_topleft;
+    pu8_v_data    = PlaneOut[2].pac_data + PlaneOut[2].u_topleft;
+
+    /* idem for input buffer */
+    pu8_rgbn_data    = PlaneIn->pac_data + PlaneIn->u_topleft;
+
+    /* get the size of the output image */
+    u32_width    = PlaneOut[0].u_width;
+    u32_height    = PlaneOut[0].u_height;
+
+    /* set the size of the memory jumps corresponding to row jump in each output plane */
+    u32_stride_Y = PlaneOut[0].u_stride;
+    u32_stride2_Y= u32_stride_Y << 1;
+    u32_stride_U = PlaneOut[1].u_stride;
+    u32_stride_V = PlaneOut[2].u_stride;
+
+    /* idem for input plane */
+    u32_stride_rgb = PlaneIn->u_stride;
+    u32_stride_2rgb = u32_stride_rgb << 1;
+
+    /* loop on each row of the output image, input coordinates are estimated from output ones */
+    /* two YUV rows are computed at each pass */
+    for    (u32_row = u32_height ;u32_row != 0; u32_row -=2)
+    {
+        /* update working pointers */
+        pu8_yn    = pu8_y_data;
+        pu8_ys    = pu8_yn + u32_stride_Y;
+
+        pu8_u    = pu8_u_data;
+        pu8_v    = pu8_v_data;
+
+        pu8_rgbn= pu8_rgbn_data;
+
+        /* loop on each column of the output image*/
+        for    (u32_col = u32_width; u32_col != 0 ; u32_col -=2)
+        {
+            /* get RGB samples of 4 pixels */
+            GET_RGB24(i32_r00, i32_g00, i32_b00, pu8_rgbn, 0);
+            GET_RGB24(i32_r10, i32_g10, i32_b10, pu8_rgbn, CST_RGB_24_SIZE);
+            GET_RGB24(i32_r01, i32_g01, i32_b01, pu8_rgbn, u32_stride_rgb);
+            GET_RGB24(i32_r11, i32_g11, i32_b11, pu8_rgbn, u32_stride_rgb + CST_RGB_24_SIZE);
+
+            i32_u00    = U24(i32_r00, i32_g00, i32_b00);
+            i32_v00    = V24(i32_r00, i32_g00, i32_b00);
+            i32_y00    = Y24(i32_r00, i32_g00, i32_b00);        /* matrix luminance */
+            pu8_yn[0]= (M4VIFI_UInt8)i32_y00;
+
+            i32_u10    = U24(i32_r10, i32_g10, i32_b10);
+            i32_v10    = V24(i32_r10, i32_g10, i32_b10);
+            i32_y10    = Y24(i32_r10, i32_g10, i32_b10);
+            pu8_yn[1]= (M4VIFI_UInt8)i32_y10;
+
+            i32_u01    = U24(i32_r01, i32_g01, i32_b01);
+            i32_v01    = V24(i32_r01, i32_g01, i32_b01);
+            i32_y01    = Y24(i32_r01, i32_g01, i32_b01);
+            pu8_ys[0]= (M4VIFI_UInt8)i32_y01;
+
+            i32_u11    = U24(i32_r11, i32_g11, i32_b11);
+            i32_v11    = V24(i32_r11, i32_g11, i32_b11);
+            i32_y11    = Y24(i32_r11, i32_g11, i32_b11);
+            pu8_ys[1] = (M4VIFI_UInt8)i32_y11;
+
+            *pu8_u    = (M4VIFI_UInt8)((i32_u00 + i32_u01 + i32_u10 + i32_u11 + 2) >> 2);
+            *pu8_v    = (M4VIFI_UInt8)((i32_v00 + i32_v01 + i32_v10 + i32_v11 + 2) >> 2);
+
+            pu8_rgbn    +=  (CST_RGB_24_SIZE<<1);
+            pu8_yn        += 2;
+            pu8_ys        += 2;
+
+            pu8_u ++;
+            pu8_v ++;
+        } /* end of horizontal scanning */
+
+        pu8_y_data        += u32_stride2_Y;
+        pu8_u_data        += u32_stride_U;
+        pu8_v_data        += u32_stride_V;
+        pu8_rgbn_data    += u32_stride_2rgb;
+
+
+    } /* End of vertical scanning */
+
+    return M4VIFI_OK;
+}
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB888toRGB888.c b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB888toRGB888.c
new file mode 100755
index 0000000..a21e1d0
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeRGB888toRGB888.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file     M4VIFI_ResizeRGB888toRGB888.c
+ * @brief    Contain video library function
+ * @note     This file has a Resize filter function
+ *           -# Generic resizing of RGB888 (Packed) image
+ ******************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include    "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include    "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include    "M4VIFI_Clip.h"
+
+/**
+ ***********************************************************************************************
+ * M4VIFI_UInt8 M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData, M4VIFI_ImagePlane *pPlaneIn,
+ *                                                                  M4VIFI_ImagePlane *pPlaneOut)
+ * @brief   Resizes an RGB888 (Packed) plane.
+ * @note    Basic structure of the function
+ *          Loop on each row
+ *              Loop on each column
+ *                  Get the four neighbouring RGB888 samples
+ *                  Compute the weighted combination of each colour component
+ *                  Place the resulting RGB888 sample in the output plane
+ *              end loop column
+ *          end loop row
+ *          For resizing, bilinear interpolation linearly interpolates along
+ *          each row, and then uses that result in a linear interpolation down each column.
+ *          Each estimated pixel in the output image is a weighted
+ *          combination of its four neighbours. The ratio of compression
+ *          or dilatation is estimated using input and output sizes.
+ * @param   pUserData: (IN) User Data
+ * @param   pPlaneIn: (IN) Pointer to RGB888 (Packed) plane buffer
+ * @param   pPlaneOut: (OUT) Pointer to RGB888 (Packed) plane
+ * @return  M4VIFI_OK: there is no error
+ * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: Error in height
+ * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  Error in width
+ ***********************************************************************************************
+*/
+M4VIFI_UInt8    M4VIFI_ResizeBilinearRGB888toRGB888(void *pUserData,
+                                                                M4VIFI_ImagePlane *pPlaneIn,
+                                                                M4VIFI_ImagePlane *pPlaneOut)
+{
+    M4VIFI_UInt8    *pu8_data_in;
+    M4VIFI_UInt8    *pu8_data_out;
+    M4VIFI_UInt32   u32_width_in, u32_width_out, u32_height_in, u32_height_out;
+    M4VIFI_UInt32   u32_stride_in, u32_stride_out;
+    M4VIFI_UInt32   u32_x_inc, u32_y_inc;
+    M4VIFI_UInt32   u32_x_accum, u32_y_accum, u32_x_accum_start;
+    M4VIFI_UInt32   u32_width, u32_height;
+    M4VIFI_UInt32   u32_y_frac;
+    M4VIFI_UInt32   u32_x_frac;
+    M4VIFI_UInt32   u32_Rtemp_value,u32_Gtemp_value,u32_Btemp_value;
+    M4VIFI_UInt8    *pu8_src_top;
+    M4VIFI_UInt8    *pu8_src_bottom;
+    M4VIFI_UInt32    i32_b00, i32_g00, i32_r00;
+    M4VIFI_UInt32    i32_b01, i32_g01, i32_r01;
+    M4VIFI_UInt32    i32_b02, i32_g02, i32_r02;
+    M4VIFI_UInt32    i32_b03, i32_g03, i32_r03;
+
+    /* Check that the input and output width and height are even */
+    if( (IS_EVEN(pPlaneIn->u_height) == FALSE)    ||
+        (IS_EVEN(pPlaneOut->u_height) == FALSE))
+    {
+        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+    }
+
+    if( (IS_EVEN(pPlaneIn->u_width) == FALSE) ||
+        (IS_EVEN(pPlaneOut->u_width) == FALSE))
+    {
+        return M4VIFI_ILLEGAL_FRAME_WIDTH;
+    }
+
+
+        /* Set the working pointers at the beginning of the input/output data field */
+        pu8_data_in     = (M4VIFI_UInt8*)(pPlaneIn->pac_data + pPlaneIn->u_topleft);
+        pu8_data_out    = (M4VIFI_UInt8*)(pPlaneOut->pac_data + pPlaneOut->u_topleft);
+
+        /* Get the memory jump corresponding to a row jump */
+        u32_stride_in   = pPlaneIn->u_stride;
+        u32_stride_out  = pPlaneOut->u_stride;
+
+        /* Set the bounds of the active image */
+        u32_width_in    = pPlaneIn->u_width;
+        u32_height_in   = pPlaneIn->u_height;
+
+        u32_width_out   = pPlaneOut->u_width;
+        u32_height_out  = pPlaneOut->u_height;
+
+        /* Compute horizontal ratio between src and destination width.*/
+        if (u32_width_out >= u32_width_in)
+        {
+            u32_x_inc   = ((u32_width_in-1) * MAX_SHORT) / (u32_width_out-1);
+        }
+        else
+        {
+            u32_x_inc   = (u32_width_in * MAX_SHORT) / (u32_width_out);
+        }
+
+        /* Compute vertical ratio between src and destination height.*/
+        if (u32_height_out >= u32_height_in)
+        {
+            u32_y_inc   = ((u32_height_in - 1) * MAX_SHORT) / (u32_height_out-1);
+        }
+        else
+        {
+            u32_y_inc = (u32_height_in * MAX_SHORT) / (u32_height_out);
+        }
+
+        /*
+        Calculate initial accumulator value : u32_y_accum_start.
+        u32_y_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+        */
+        if (u32_y_inc >= MAX_SHORT)
+        {
+            /*
+                Keep the fractional part, assuming that the integer part is coded
+                on the 16 high bits and the fractional part on the 15 low bits
+            */
+            u32_y_accum = u32_y_inc & 0xffff;
+
+            if (!u32_y_accum)
+            {
+                u32_y_accum = MAX_SHORT;
+            }
+
+            u32_y_accum >>= 1;
+        }
+        else
+        {
+            u32_y_accum = 0;
+        }
+
+
+        /*
+            Calculate initial accumulator value : u32_x_accum_start.
+            u32_x_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+        */
+        if (u32_x_inc >= MAX_SHORT)
+        {
+            u32_x_accum_start = u32_x_inc & 0xffff;
+
+            if (!u32_x_accum_start)
+            {
+                u32_x_accum_start = MAX_SHORT;
+            }
+
+            u32_x_accum_start >>= 1;
+        }
+        else
+        {
+            u32_x_accum_start = 0;
+        }
+
+        u32_height = u32_height_out;
+
+        /*
+        Bilinear interpolation linearly interpolates along each row, and then uses that
+        result in a linear interpolation down each column. Each estimated pixel in the
+        output image is a weighted combination of its four neighbours according to the formula:
+        F(p',q') = f(p,q)R(-a)R(b) + f(p,q+1)R(-a)R(b-1) + f(p+1,q)R(1-a)R(b) + f(p+1,q+1)R(1-a)R(b-1)
+        with R(x) = x+1 for -1 <= x <= 0 and R(x) = 1-x for 0 <= x <= 1, where the weighting
+        coefficient a (resp. b) is the distance from the nearest neighbour in the p (resp. q) direction
+        */
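+        /*
+         * Worked example (illustrative only): the fractional parts are reduced to
+         * 4-bit weights, so every output component is a sum of four products whose
+         * weights total 16*16 = 256, normalised by ">> 8". With u32_x_frac = 4 and
+         * u32_y_frac = 8, and red neighbours 100, 120, 140, 160:
+         *     ((100*12 + 120*4)*8 + (140*12 + 160*4)*8) >> 8
+         *   = (1680*8 + 2320*8) >> 8 = 32000 >> 8 = 125
+         */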
+
+        do { /* Scan all the rows */
+
+            /* Vertical weight factor */
+            u32_y_frac = (u32_y_accum>>12)&15;
+
+            /* Reinit accumulator */
+            u32_x_accum = u32_x_accum_start;
+
+            u32_width = u32_width_out;
+
+            do { /* Scan along each row */
+                pu8_src_top = pu8_data_in + (u32_x_accum >> 16)*3;
+                pu8_src_bottom = pu8_src_top + (u32_stride_in);
+                u32_x_frac = (u32_x_accum >> 12)&15; /* Horizontal weight factor */
+
+                /* Weighted combination */
+                GET_RGB24(i32_b00,i32_g00,i32_r00,pu8_src_top,0);
+                GET_RGB24(i32_b01,i32_g01,i32_r01,pu8_src_top,3);
+                GET_RGB24(i32_b02,i32_g02,i32_r02,pu8_src_bottom,0);
+                GET_RGB24(i32_b03,i32_g03,i32_r03,pu8_src_bottom,3);
+
+                u32_Rtemp_value = (M4VIFI_UInt8)(((i32_r00*(16-u32_x_frac) +
+                                 i32_r01*u32_x_frac)*(16-u32_y_frac) +
+                                (i32_r02*(16-u32_x_frac) +
+                                 i32_r03*u32_x_frac)*u32_y_frac )>>8);
+
+                u32_Gtemp_value = (M4VIFI_UInt8)(((i32_g00*(16-u32_x_frac) +
+                                 i32_g01*u32_x_frac)*(16-u32_y_frac) +
+                                (i32_g02*(16-u32_x_frac) +
+                                 i32_g03*u32_x_frac)*u32_y_frac )>>8);
+
+                u32_Btemp_value =  (M4VIFI_UInt8)(((i32_b00*(16-u32_x_frac) +
+                                 i32_b01*u32_x_frac)*(16-u32_y_frac) +
+                                (i32_b02*(16-u32_x_frac) +
+                                 i32_b03*u32_x_frac)*u32_y_frac )>>8);
+
+                *pu8_data_out++ = u32_Btemp_value ;
+                *pu8_data_out++ = u32_Gtemp_value ;
+                *pu8_data_out++ = u32_Rtemp_value ;
+
+                /* Update horizontal accumulator */
+                u32_x_accum += u32_x_inc;
+
+            } while(--u32_width);
+
+            //pu16_data_out = pu16_data_out + (u32_stride_out>>1) - (u32_width_out);
+
+            /* Update vertical accumulator */
+            u32_y_accum += u32_y_inc;
+            if (u32_y_accum>>16)
+            {
+                pu8_data_in = pu8_data_in + (u32_y_accum >> 16) * (u32_stride_in) ;
+                u32_y_accum &= 0xffff;
+            }
+        } while(--u32_height);
+
+    return M4VIFI_OK;
+}
+/* End of file M4VIFI_ResizeRGB888toRGB888.c */
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoBGR565.c b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoBGR565.c
new file mode 100755
index 0000000..10c0a43
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoBGR565.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file     M4VIFI_ResizeYUVtoBGR565.c
+ * @brief    Contain video library function
+ * @note     This file has a Combo filter function
+ *           -# Resizes YUV420 and converts to BGR565
+ ******************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include    "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include    "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include    "M4VIFI_Clip.h"
+
+/**
+ *********************************************************************************************
+ * M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toBGR565(void *pContext, M4VIFI_ImagePlane *pPlaneIn,
+ *                                                                  M4VIFI_ImagePlane *pPlaneOut)
+ * @brief   Resizes a YUV420 plane and converts it to BGR565.
+ * @note    Basic structure of the function
+ *          Loop on each row (step 2)
+ *              Loop on each column (step 2)
+ *                  Get four Y samples and 1 U & V sample
+ *                  Resize the Y with the corresponding U and V samples
+ *                  Compute the four corresponding R G B values
+ *                  Place the R G B values in the output plane
+ *              end loop column
+ *          end loop row
+ *          For resizing bilinear interpolation linearly interpolates along
+ *          each row, and then uses that result in a linear interpolation down each column.
+ *          Each estimated pixel in the output image is a weighted
+ *          combination of its four neighbours. The ratio of compression
+ *          or dilatation is estimated using input and output sizes.
+ * @param   pPlaneIn: (IN) Pointer to YUV plane buffer
+ * @param   pContext: (IN) Context Pointer
+ * @param   pPlaneOut: (OUT) Pointer to BGR565 Plane
+ * @return  M4VIFI_OK: there is no error
+ * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
+ * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  YUV Plane width is ODD
+ *********************************************************************************************
+*/
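+/*
+ * Note on the implementation below (DEMATRIX and PACK_BGR565 are assumed to come
+ * from M4VIFI_Defines.h): DEMATRIX converts one fixed-point Y'UV triplet back to
+ * 8-bit R, G, B, and PACK_BGR565 packs them into a 16-bit 5:6:5 value at bit
+ * offset 0 or 16. Two adjacent output pixels are therefore OR'ed together and
+ * stored as a single 32-bit word, which is why each inner-loop iteration emits
+ * one word on the current output row and one on the next output row.
+ */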
+M4VIFI_UInt8    M4VIFI_ResizeBilinearYUV420toBGR565(void* pContext,
+                                                                 M4VIFI_ImagePlane *pPlaneIn,
+                                                                 M4VIFI_ImagePlane *pPlaneOut)
+{
+    M4VIFI_UInt8    *pu8_data_in[PLANES], *pu8_data_in1[PLANES],*pu8_data_out;
+    M4VIFI_UInt32   *pu32_rgb_data_current, *pu32_rgb_data_next, *pu32_rgb_data_start;
+
+    M4VIFI_UInt32   u32_width_in[PLANES], u32_width_out, u32_height_in[PLANES], u32_height_out;
+    M4VIFI_UInt32   u32_stride_in[PLANES];
+    M4VIFI_UInt32   u32_stride_out, u32_stride2_out, u32_width2_RGB, u32_height2_RGB;
+    M4VIFI_UInt32   u32_x_inc[PLANES], u32_y_inc[PLANES];
+    M4VIFI_UInt32   u32_x_accum_Y, u32_x_accum_U, u32_x_accum_start;
+    M4VIFI_UInt32   u32_y_accum_Y, u32_y_accum_U;
+    M4VIFI_UInt32   u32_x_frac_Y, u32_x_frac_U, u32_y_frac_Y,u32_y_frac_U;
+    M4VIFI_Int32    U_32, V_32, Y_32, Yval_32;
+    M4VIFI_UInt8    u8_Red, u8_Green, u8_Blue;
+    M4VIFI_UInt32   u32_row, u32_col;
+
+    M4VIFI_UInt32   u32_plane;
+    M4VIFI_UInt32   u32_rgb_temp1, u32_rgb_temp2;
+    M4VIFI_UInt32   u32_rgb_temp3,u32_rgb_temp4;
+    M4VIFI_UInt32   u32_check_size;
+
+    M4VIFI_UInt8    *pu8_src_top_Y,*pu8_src_top_U,*pu8_src_top_V ;
+    M4VIFI_UInt8    *pu8_src_bottom_Y, *pu8_src_bottom_U, *pu8_src_bottom_V;
+
+    /* Check that the YUV width and height are even */
+    u32_check_size = IS_EVEN(pPlaneIn[0].u_height);
+    if( u32_check_size == FALSE )
+    {
+        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+    }
+    u32_check_size = IS_EVEN(pPlaneIn[0].u_width);
+    if (u32_check_size == FALSE )
+    {
+        return M4VIFI_ILLEGAL_FRAME_WIDTH;
+
+    }
+    /* Make the output width and height even */
+    pPlaneOut->u_height = pPlaneOut->u_height & 0xFFFFFFFE;
+    pPlaneOut->u_width = pPlaneOut->u_width & 0xFFFFFFFE;
+    pPlaneOut->u_stride = pPlaneOut->u_stride & 0xFFFFFFFC;
+
+    /* Assignment of output pointer */
+    pu8_data_out    = pPlaneOut->pac_data + pPlaneOut->u_topleft;
+    /* Assignment of output width(rotated) */
+    u32_width_out   = pPlaneOut->u_width;
+    /* Assignment of output height(rotated) */
+    u32_height_out  = pPlaneOut->u_height;
+
+    u32_width2_RGB  = pPlaneOut->u_width >> 1;
+    u32_height2_RGB = pPlaneOut->u_height >> 1;
+
+    u32_stride_out = pPlaneOut->u_stride >> 1;
+    u32_stride2_out = pPlaneOut->u_stride >> 2;
+
+    for(u32_plane = 0; u32_plane < PLANES; u32_plane++)
+    {
+        /* Set the working pointers at the beginning of the input/output data field */
+        pu8_data_in[u32_plane] = pPlaneIn[u32_plane].pac_data + pPlaneIn[u32_plane].u_topleft;
+
+        /* Get the memory jump corresponding to a row jump */
+        u32_stride_in[u32_plane] = pPlaneIn[u32_plane].u_stride;
+
+        /* Set the bounds of the active image */
+        u32_width_in[u32_plane] = pPlaneIn[u32_plane].u_width;
+        u32_height_in[u32_plane] = pPlaneIn[u32_plane].u_height;
+    }
+    /* Compute horizontal ratio between src and destination width for Y Plane. */
+    if (u32_width_out >= u32_width_in[YPlane])
+    {
+        u32_x_inc[YPlane]   = ((u32_width_in[YPlane]-1) * MAX_SHORT) / (u32_width_out-1);
+    }
+    else
+    {
+        u32_x_inc[YPlane]   = (u32_width_in[YPlane] * MAX_SHORT) / (u32_width_out);
+    }
+
+    /* Compute vertical ratio between src and destination height for Y Plane.*/
+    if (u32_height_out >= u32_height_in[YPlane])
+    {
+        u32_y_inc[YPlane]   = ((u32_height_in[YPlane]-1) * MAX_SHORT) / (u32_height_out-1);
+    }
+    else
+    {
+        u32_y_inc[YPlane] = (u32_height_in[YPlane] * MAX_SHORT) / (u32_height_out);
+    }
+
+    /* Compute horizontal ratio between src and destination width for U and V Planes. */
+    if (u32_width2_RGB >= u32_width_in[UPlane])
+    {
+        u32_x_inc[UPlane]   = ((u32_width_in[UPlane]-1) * MAX_SHORT) / (u32_width2_RGB-1);
+    }
+    else
+    {
+        u32_x_inc[UPlane]   = (u32_width_in[UPlane] * MAX_SHORT) / (u32_width2_RGB);
+    }
+
+    /* Compute vertical ratio between src and destination height for U and V Planes. */
+
+    if (u32_height2_RGB >= u32_height_in[UPlane])
+    {
+        u32_y_inc[UPlane]   = ((u32_height_in[UPlane]-1) * MAX_SHORT) / (u32_height2_RGB-1);
+    }
+    else
+    {
+        u32_y_inc[UPlane]  = (u32_height_in[UPlane] * MAX_SHORT) / (u32_height2_RGB);
+    }
+
+    u32_y_inc[VPlane] = u32_y_inc[UPlane];
+    u32_x_inc[VPlane] = u32_x_inc[UPlane];
+
+    /*
+        Calculate initial accumulator value : u32_y_accum_start.
+        u32_y_accum_start is coded on 15 bits,and represents a value between 0 and 0.5
+    */
+    if (u32_y_inc[YPlane] > MAX_SHORT)
+    {
+        /*
+            Keep the fractional part, assuming that the integer part is coded on the 16 high bits,
+            and the fractional part on the 15 low bits
+        */
+        u32_y_accum_Y = u32_y_inc[YPlane] & 0xffff;
+        u32_y_accum_U = u32_y_inc[UPlane] & 0xffff;
+
+        if (!u32_y_accum_Y)
+        {
+            u32_y_accum_Y = MAX_SHORT;
+            u32_y_accum_U = MAX_SHORT;
+        }
+        u32_y_accum_Y >>= 1;
+        u32_y_accum_U >>= 1;
+    }
+    else
+    {
+        u32_y_accum_Y = 0;
+        u32_y_accum_U = 0;
+
+    }
+
+    /*
+        Calculate initial accumulator value : u32_x_accum_start.
+        u32_x_accum_start is coded on 15 bits, and represents a value between 0 and 0.5
+    */
+    if (u32_x_inc[YPlane] > MAX_SHORT)
+    {
+        u32_x_accum_start = u32_x_inc[YPlane] & 0xffff;
+
+        if (!u32_x_accum_start)
+        {
+            u32_x_accum_start = MAX_SHORT;
+        }
+
+        u32_x_accum_start >>= 1;
+    }
+    else
+    {
+        u32_x_accum_start = 0;
+    }
+
+    pu32_rgb_data_start = (M4VIFI_UInt32*)pu8_data_out;
+
+    /*
+        Bilinear interpolation linearly interpolates along each row, and then uses that
+        result in a linear interpolation down each column. Each estimated pixel in the
+        output image is a weighted combination of its four neighbours according to the formula:
+        F(p',q') = f(p,q)R(-a)R(b) + f(p,q+1)R(-a)R(b-1) + f(p+1,q)R(1-a)R(b) + f(p+1,q+1)R(1-a)R(b-1)
+        with R(x) = x+1 for -1 <= x <= 0 and R(x) = 1-x for 0 <= x <= 1, where the weighting
+        coefficient a (resp. b) is the distance from the nearest neighbour in the p (resp. q) direction
+    */
+    for (u32_row = u32_height_out; u32_row != 0; u32_row -= 2)
+    {
+        u32_x_accum_Y = u32_x_accum_start;
+        u32_x_accum_U = u32_x_accum_start;
+
+        /* Vertical weight factor */
+        u32_y_frac_Y = (u32_y_accum_Y >> 12) & 15;
+        u32_y_frac_U = (u32_y_accum_U >> 12) & 15;
+
+        /* RGB current line position pointer */
+        pu32_rgb_data_current = pu32_rgb_data_start ;
+
+        /* RGB next line position pointer */
+        pu32_rgb_data_next    = pu32_rgb_data_current + (u32_stride2_out);
+
+        /* Y Plane next row pointer */
+        pu8_data_in1[YPlane] = pu8_data_in[YPlane];
+
+        u32_rgb_temp3 = u32_y_accum_Y + (u32_y_inc[YPlane]);
+        if (u32_rgb_temp3 >> 16)
+        {
+            pu8_data_in1[YPlane] =  pu8_data_in[YPlane] +
+                                                (u32_rgb_temp3 >> 16) * (u32_stride_in[YPlane]);
+            u32_rgb_temp3 &= 0xffff;
+        }
+        u32_rgb_temp4 = (u32_rgb_temp3 >> 12) & 15;
+
+        for (u32_col = u32_width_out; u32_col != 0; u32_col -= 2)
+        {
+
+            /* Input Y plane elements */
+            pu8_src_top_Y = pu8_data_in[YPlane] + (u32_x_accum_Y >> 16);
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Input U Plane elements */
+            pu8_src_top_U = pu8_data_in[UPlane] + (u32_x_accum_U >> 16);
+            pu8_src_bottom_U = pu8_src_top_U + u32_stride_in[UPlane];
+
+            pu8_src_top_V = pu8_data_in[VPlane] + (u32_x_accum_U >> 16);
+            pu8_src_bottom_V = pu8_src_top_V + u32_stride_in[VPlane];
+
+            /* Horizontal weight factor for Y plane */
+            u32_x_frac_Y = (u32_x_accum_Y >> 12)&15;
+            /* Horizontal weight factor for U and V planes */
+            u32_x_frac_U = (u32_x_accum_U >> 12)&15;
+
+            /* Weighted combination */
+            U_32 = (((pu8_src_top_U[0]*(16-u32_x_frac_U) + pu8_src_top_U[1]*u32_x_frac_U)
+                    *(16-u32_y_frac_U) + (pu8_src_bottom_U[0]*(16-u32_x_frac_U)
+                    + pu8_src_bottom_U[1]*u32_x_frac_U)*u32_y_frac_U ) >> 8);
+
+            V_32 = (((pu8_src_top_V[0]*(16-u32_x_frac_U) + pu8_src_top_V[1]*u32_x_frac_U)
+                    *(16-u32_y_frac_U)+ (pu8_src_bottom_V[0]*(16-u32_x_frac_U)
+                    + pu8_src_bottom_V[1]*u32_x_frac_U)*u32_y_frac_U ) >> 8);
+
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_y_frac_Y) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_y_frac_Y ) >> 8);
+
+            u32_x_accum_U += (u32_x_inc[UPlane]);
+
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                    Yval_32 = Y_32*37;
+            #else   /* __RGB_V1__v */
+                    Yval_32 = Y_32*0x2568;
+            #endif /* __RGB_V1__v */
+
+                    DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                    u32_rgb_temp1 = PACK_BGR565(0,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                    u32_rgb_temp1 = PACK_BGR565(16,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+
+            pu8_src_top_Y = pu8_data_in1[YPlane]+(u32_x_accum_Y >> 16);
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Weighted combination */
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_rgb_temp4) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_rgb_temp4 ) >> 8);
+
+            u32_x_accum_Y += u32_x_inc[YPlane];
+
+            /* Horizontal weight factor */
+            u32_x_frac_Y = (u32_x_accum_Y >> 12)&15;
+
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                    Yval_32 = Y_32*37;
+            #else   /* __RGB_V1__v */
+                    Yval_32 = Y_32*0x2568;
+            #endif  /* __RGB_V1__v */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                    u32_rgb_temp2 = PACK_BGR565(0,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                    u32_rgb_temp2 = PACK_BGR565(16,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+
+            pu8_src_top_Y = pu8_data_in[YPlane] + (u32_x_accum_Y >> 16) ;
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Weighted combination */
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_y_frac_Y) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_y_frac_Y ) >> 8);
+
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                    Yval_32 = Y_32*37;
+            #else   /* __RGB_V1__v */
+                    Yval_32 = Y_32*0x2568;
+            #endif  /* __RGB_V1__v */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                    *(pu32_rgb_data_current)++ = u32_rgb_temp1 |
+                                                        PACK_BGR565(16,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                    *(pu32_rgb_data_current)++ = u32_rgb_temp1 |
+                                                        PACK_BGR565(0,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+
+            pu8_src_top_Y = pu8_data_in1[YPlane]+ (u32_x_accum_Y >> 16);
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Weighted combination */
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_rgb_temp4) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_rgb_temp4 )>>8);
+
+            u32_x_accum_Y += u32_x_inc[YPlane];
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                    Yval_32=Y_32*37;
+            #else   /* __RGB_V1__v */
+                    Yval_32=Y_32*0x2568;
+            #endif  /* __RGB_V1__v */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                    *(pu32_rgb_data_next)++ = u32_rgb_temp2 |
+                                                        PACK_BGR565(16,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                    *(pu32_rgb_data_next)++ = u32_rgb_temp2 |
+                                                        PACK_BGR565(0,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+        }   /* End of horizontal scanning */
+
+        u32_y_accum_Y  =  u32_rgb_temp3 + (u32_y_inc[YPlane]);
+        u32_y_accum_U += (u32_y_inc[UPlane]);
+
+        /* Y plane row update */
+        if (u32_y_accum_Y >> 16)
+        {
+            pu8_data_in[YPlane] =  pu8_data_in1[YPlane] +
+                                                ((u32_y_accum_Y >> 16) * (u32_stride_in[YPlane]));
+            u32_y_accum_Y &= 0xffff;
+        }
+        else
+        {
+            pu8_data_in[YPlane] = pu8_data_in1[YPlane];
+        }
+        /* U and V planes row update */
+        if (u32_y_accum_U >> 16)
+        {
+            pu8_data_in[UPlane] =  pu8_data_in[UPlane] +
+                                                (u32_y_accum_U >> 16) * (u32_stride_in[UPlane]);
+            pu8_data_in[VPlane] =  pu8_data_in[VPlane] +
+                                                (u32_y_accum_U >> 16) * (u32_stride_in[VPlane]);
+            u32_y_accum_U &= 0xffff;
+        }
+        /* BGR pointer Update */
+        pu32_rgb_data_start += u32_stride_out;
+
+    }   /* End of vertical scanning */
+    return M4VIFI_OK;
+}
+
diff --git a/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoRGB565.c b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoRGB565.c
new file mode 100755
index 0000000..c3df5ea
--- /dev/null
+++ b/libvideoeditor/vss/video_filters/src/M4VIFI_ResizeYUVtoRGB565.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright (C) 2004-2011 NXP Software
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ ******************************************************************************
+ * @file     M4VIFI_ResizeYUVtoRGB565.c
+ * @brief    Contain video library function
+ * @note     This file has a Combo filter function
+ *           -# Resizes YUV420 and converts to RGB565
+ * @date
+ *           - 2004/08/11: Creation
+ ******************************************************************************
+*/
+
+/* Prototypes of functions, and type definitions */
+#include    "M4VIFI_FiltersAPI.h"
+/* Macro definitions */
+#include    "M4VIFI_Defines.h"
+/* Clip table declaration */
+#include    "M4VIFI_Clip.h"
+
+/**
+ ********************************************************************************************
+ * M4VIFI_UInt8 M4VIFI_ResizeBilinearYUV420toRGB565(void *pContext,
+ *                                                  M4VIFI_ImagePlane *pPlaneIn,
+ *                                                  M4VIFI_ImagePlane *pPlaneOut)
+ * @brief   Resizes the YUV420 plane and converts it to RGB565.
+ * @note    Basic structure of the function:
+ *          Loop on each row (step 2)
+ *              Loop on each column (step 2)
+ *                  Get four Y samples and one U & V sample
+ *                  Resize the Y with the corresponding U and V samples
+ *                  Compute the four corresponding R G B values
+ *                  Place the R G B in the output plane
+ *              end loop column
+ *          end loop row
+ *          For resizing bilinear interpolation linearly interpolates along
+ *          each row, and then uses that result in a linear interpolation down each column.
+ *          Each estimated pixel in the output image is a weighted
+ *          combination of its four neighbours. The ratio of compression
+ *          or dilatation is estimated using input and output sizes.
+ * @param   pPlaneIn: (IN) Pointer to YUV plane buffer
+ * @param   pContext: (IN) Context Pointer
+ * @param   pPlaneOut: (OUT) Pointer to RGB565 plane
+ * @return  M4VIFI_OK: there is no error
+ * @return  M4VIFI_ILLEGAL_FRAME_HEIGHT: YUV Plane height is ODD
+ * @return  M4VIFI_ILLEGAL_FRAME_WIDTH:  YUV Plane width is ODD
+ ********************************************************************************************
+*/
+M4VIFI_UInt8    M4VIFI_ResizeBilinearYUV420toRGB565(void* pContext,
+                                                    M4VIFI_ImagePlane *pPlaneIn,
+                                                    M4VIFI_ImagePlane *pPlaneOut)
+{
+    M4VIFI_UInt8    *pu8_data_in[PLANES], *pu8_data_in1[PLANES],*pu8_data_out;
+    M4VIFI_UInt32   *pu32_rgb_data_current, *pu32_rgb_data_next, *pu32_rgb_data_start;
+
+    M4VIFI_UInt32   u32_width_in[PLANES], u32_width_out, u32_height_in[PLANES], u32_height_out;
+    M4VIFI_UInt32   u32_stride_in[PLANES];
+    M4VIFI_UInt32   u32_stride_out, u32_stride2_out, u32_width2_RGB, u32_height2_RGB;
+    M4VIFI_UInt32   u32_x_inc[PLANES], u32_y_inc[PLANES];
+    M4VIFI_UInt32   u32_x_accum_Y, u32_x_accum_U, u32_x_accum_start;
+    M4VIFI_UInt32   u32_y_accum_Y, u32_y_accum_U;
+    M4VIFI_UInt32   u32_x_frac_Y, u32_x_frac_U, u32_y_frac_Y,u32_y_frac_U;
+    M4VIFI_Int32    U_32, V_32, Y_32, Yval_32;
+    M4VIFI_UInt8    u8_Red, u8_Green, u8_Blue;
+    M4VIFI_UInt32   u32_row, u32_col;
+
+    M4VIFI_UInt32   u32_plane;
+    M4VIFI_UInt32   u32_rgb_temp1, u32_rgb_temp2;
+    M4VIFI_UInt32   u32_rgb_temp3,u32_rgb_temp4;
+    M4VIFI_UInt32   u32_check_size;
+
+    M4VIFI_UInt8    *pu8_src_top_Y,*pu8_src_top_U,*pu8_src_top_V ;
+    M4VIFI_UInt8    *pu8_src_bottom_Y, *pu8_src_bottom_U, *pu8_src_bottom_V;
+
+    /* Check that the width and height are even */
+    u32_check_size = IS_EVEN(pPlaneIn[0].u_height);
+    if (u32_check_size == FALSE)
+    {
+        return M4VIFI_ILLEGAL_FRAME_HEIGHT;
+    }
+    u32_check_size = IS_EVEN(pPlaneIn[0].u_width);
+    if (u32_check_size == FALSE)
+    {
+        return M4VIFI_ILLEGAL_FRAME_WIDTH;
+    }
+    /* Make the output width and height even */
+    pPlaneOut->u_height = pPlaneOut->u_height & 0xFFFFFFFE;
+    pPlaneOut->u_width = pPlaneOut->u_width & 0xFFFFFFFE;
+    pPlaneOut->u_stride = pPlaneOut->u_stride & 0xFFFFFFFC;
+
+    /* Assignment of output pointer */
+    pu8_data_out    = pPlaneOut->pac_data + pPlaneOut->u_topleft;
+    /* Assignment of output width */
+    u32_width_out   = pPlaneOut->u_width;
+    /* Assignment of output height */
+    u32_height_out  = pPlaneOut->u_height;
+
+    /* Set the bounds of the active image */
+    u32_width2_RGB  = pPlaneOut->u_width >> 1;
+    u32_height2_RGB = pPlaneOut->u_height >> 1;
+    /* Get the memory jump corresponding to a row jump */
+    u32_stride_out = pPlaneOut->u_stride >> 1;
+    u32_stride2_out = pPlaneOut->u_stride >> 2;
+
+    for(u32_plane = 0; u32_plane < PLANES; u32_plane++)
+    {
+        /* Set the working pointers at the beginning of the input/output data field */
+        pu8_data_in[u32_plane] = pPlaneIn[u32_plane].pac_data + pPlaneIn[u32_plane].u_topleft;
+
+        /* Get the memory jump corresponding to a row jump */
+        u32_stride_in[u32_plane] = pPlaneIn[u32_plane].u_stride;
+
+        /* Set the bounds of the active image */
+        u32_width_in[u32_plane] = pPlaneIn[u32_plane].u_width;
+        u32_height_in[u32_plane] = pPlaneIn[u32_plane].u_height;
+    }
+    /* Compute horizontal ratio between src and destination width for Y Plane.*/
+    if (u32_width_out >= u32_width_in[YPlane])
+    {
+        u32_x_inc[YPlane]   = ((u32_width_in[YPlane]-1) * MAX_SHORT) / (u32_width_out-1);
+    }
+    else
+    {
+        u32_x_inc[YPlane]   = (u32_width_in[YPlane] * MAX_SHORT) / (u32_width_out);
+    }
+
+    /* Compute vertical ratio between src and destination height for Y Plane.*/
+    if (u32_height_out >= u32_height_in[YPlane])
+    {
+        u32_y_inc[YPlane]   = ((u32_height_in[YPlane]-1) * MAX_SHORT) / (u32_height_out-1);
+    }
+    else
+    {
+        u32_y_inc[YPlane] = (u32_height_in[YPlane] * MAX_SHORT) / (u32_height_out);
+    }
+
+    /* Compute horizontal ratio between src and destination width for U and V Planes.*/
+    if (u32_width2_RGB >= u32_width_in[UPlane])
+    {
+        u32_x_inc[UPlane]   = ((u32_width_in[UPlane]-1) * MAX_SHORT) / (u32_width2_RGB-1);
+    }
+    else
+    {
+        u32_x_inc[UPlane]   = (u32_width_in[UPlane] * MAX_SHORT) / (u32_width2_RGB);
+    }
+
+    /* Compute vertical ratio between src and destination height for U and V Planes.*/
+
+    if (u32_height2_RGB >= u32_height_in[UPlane])
+    {
+        u32_y_inc[UPlane]   = ((u32_height_in[UPlane]-1) * MAX_SHORT) / (u32_height2_RGB-1);
+    }
+    else
+    {
+        u32_y_inc[UPlane]  = (u32_height_in[UPlane] * MAX_SHORT) / (u32_height2_RGB);
+    }
+
+    u32_y_inc[VPlane] = u32_y_inc[UPlane];
+    u32_x_inc[VPlane] = u32_x_inc[UPlane];
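+
+    /*
+    Worked example (illustrative only, assuming MAX_SHORT is 0x10000, i.e. 16.16 fixed point):
+    downscaling a 320-pixel-wide Y plane to a 176-pixel-wide output would give
+        u32_x_inc[YPlane] = (320 * 0x10000) / 176 = 0x1D174,
+    i.e. roughly 1.818 source pixels are stepped for each output pixel.
+    */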
+
+    /*
+    Calculate the initial accumulator values u32_y_accum_Y and u32_y_accum_U.
+    They are coded on 15 bits and represent a value between 0 and 0.5
+    */
+    if (u32_y_inc[YPlane] > MAX_SHORT)
+    {
+        /*
+        Keep the fractional part, assuming that the integer part is coded on the 16 high bits
+        and the fractional part on the 16 low bits
+        */
+        u32_y_accum_Y = u32_y_inc[YPlane] & 0xffff;
+        u32_y_accum_U = u32_y_inc[UPlane] & 0xffff;
+
+        if (!u32_y_accum_Y)
+        {
+            u32_y_accum_Y = MAX_SHORT;
+            u32_y_accum_U = MAX_SHORT;
+        }
+        u32_y_accum_Y >>= 1;
+        u32_y_accum_U >>= 1;
+    }
+    else
+    {
+        u32_y_accum_Y = 0;
+        u32_y_accum_U = 0;
+
+    }
+
+    /*
+    Calculate the initial accumulator value u32_x_accum_start.
+    It is coded on 15 bits and represents a value between 0 and 0.5
+    */
+    if (u32_x_inc[YPlane] > MAX_SHORT)
+    {
+        u32_x_accum_start = u32_x_inc[YPlane] & 0xffff;
+
+        if (!u32_x_accum_start)
+        {
+            u32_x_accum_start = MAX_SHORT;
+        }
+
+        u32_x_accum_start >>= 1;
+    }
+    else
+    {
+        u32_x_accum_start = 0;
+    }
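+
+    /*
+    Worked example (illustrative only, continuing the 0x1D174 increment above): since the
+    increment exceeds MAX_SHORT (a downscale), the accumulator starts at half of the
+    fractional part of the increment:
+        u32_x_accum_start = (0x1D174 & 0xffff) >> 1 = 0x68BA   (about 0.41),
+    so the first output pixel is interpolated part-way between the first two source pixels
+    instead of exactly at pixel 0.
+    */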
+    /* Initialise the RGB pointer */
+    pu32_rgb_data_start = (M4VIFI_UInt32*)pu8_data_out;
+
+    /*
+        Bilinear interpolation linearly interpolates along each row, and then uses that
+        result in a linear interpolation down each column. Each estimated pixel in the
+        output image is a weighted combination of its four neighbours according to the formula:
+        F(p',q') = f(p,q)R(-a)R(b) + f(p,q-1)R(-a)R(b-1)
+                 + f(p+1,q)R(1-a)R(b) + f(p+1,q+1)R(1-a)R(b-1)
+        with R(x) = x+1 for -1 <= x <= 0 and R(x) = 1-x for 0 <= x <= 1, where a (resp. b) is
+        the distance from the nearest neighbour in the p (resp. q) direction.
+        (An illustrative fixed-point version of this 2x2 kernel is sketched after the function.)
+    */
+    for (u32_row = u32_height_out; u32_row != 0; u32_row -= 2)
+    {
+        u32_x_accum_Y = u32_x_accum_start;
+        u32_x_accum_U = u32_x_accum_start;
+
+        /* Vertical weight factor */
+        u32_y_frac_Y = (u32_y_accum_Y >> 12) & 15;
+        u32_y_frac_U = (u32_y_accum_U >> 12) & 15;
+
+        /* RGB current line Position Pointer */
+        pu32_rgb_data_current = pu32_rgb_data_start ;
+
+        /* RGB next line position pointer */
+        pu32_rgb_data_next    = pu32_rgb_data_current + (u32_stride2_out);
+
+        /* Y Plane next row pointer */
+        pu8_data_in1[YPlane] = pu8_data_in[YPlane];
+
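+        /* u32_rgb_temp3 and u32_rgb_temp4 are reused here as the vertical accumulator and
+           4-bit vertical weight for the next (odd) output row of the pair */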
+        u32_rgb_temp3 = u32_y_accum_Y + (u32_y_inc[YPlane]);
+        if (u32_rgb_temp3 >> 16)
+        {
+            pu8_data_in1[YPlane] =  pu8_data_in[YPlane] +
+                                                (u32_rgb_temp3 >> 16) * (u32_stride_in[YPlane]);
+            u32_rgb_temp3 &= 0xffff;
+        }
+        u32_rgb_temp4 = (u32_rgb_temp3 >> 12) & 15;
+
+        for (u32_col = u32_width_out; u32_col != 0; u32_col -= 2)
+        {
+
+            /* Input Y plane elements */
+            pu8_src_top_Y = pu8_data_in[YPlane] + (u32_x_accum_Y >> 16);
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Input U Plane elements */
+            pu8_src_top_U = pu8_data_in[UPlane] + (u32_x_accum_U >> 16);
+            pu8_src_bottom_U = pu8_src_top_U + u32_stride_in[UPlane];
+
+            pu8_src_top_V = pu8_data_in[VPlane] + (u32_x_accum_U >> 16);
+            pu8_src_bottom_V = pu8_src_top_V + u32_stride_in[VPlane];
+
+            /* Horizontal weight factor for Y Plane */
+            u32_x_frac_Y = (u32_x_accum_Y >> 12)&15;
+            /* Horizontal weight factor for U and V Planes */
+            u32_x_frac_U = (u32_x_accum_U >> 12)&15;
+
+            /* Weighted combination */
+            U_32 = (((pu8_src_top_U[0]*(16-u32_x_frac_U) + pu8_src_top_U[1]*u32_x_frac_U)
+                    *(16-u32_y_frac_U) + (pu8_src_bottom_U[0]*(16-u32_x_frac_U)
+                    + pu8_src_bottom_U[1]*u32_x_frac_U)*u32_y_frac_U ) >> 8);
+
+            V_32 = (((pu8_src_top_V[0]*(16-u32_x_frac_U) + pu8_src_top_V[1]*u32_x_frac_U)
+                    *(16-u32_y_frac_U) + (pu8_src_bottom_V[0]*(16-u32_x_frac_U)
+                    + pu8_src_bottom_V[1]*u32_x_frac_U)*u32_y_frac_U ) >> 8);
+
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_y_frac_Y) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_y_frac_Y ) >> 8);
+
+            u32_x_accum_U += (u32_x_inc[UPlane]);
+
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                    Yval_32 = Y_32*37;
+            #else   /* __RGB_V1__ */
+                    Yval_32 = Y_32*0x2568;
+            #endif  /* __RGB_V1__ */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                    u32_rgb_temp1 = PACK_RGB565(0,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                    u32_rgb_temp1 = PACK_RGB565(16,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+
+            pu8_src_top_Y = pu8_data_in1[YPlane]+(u32_x_accum_Y >> 16);
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Weighted combination */
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_rgb_temp4) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_rgb_temp4 ) >> 8);
+
+            u32_x_accum_Y += u32_x_inc[YPlane];
+            /* Horizontal weight factor */
+            u32_x_frac_Y = (u32_x_accum_Y >> 12)&15;
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                    Yval_32 = Y_32*37;
+            #else   /* __RGB_V1__ */
+                    Yval_32 = Y_32*0x2568;
+            #endif  /* __RGB_V1__ */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                    u32_rgb_temp2 = PACK_RGB565(0,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                    u32_rgb_temp2 = PACK_RGB565(16,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+
+            pu8_src_top_Y = pu8_data_in[YPlane] + (u32_x_accum_Y >> 16) ;
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Weighted combination */
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_y_frac_Y) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_y_frac_Y ) >> 8);
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                    Yval_32 = Y_32*37;
+            #else   /* __RGB_V1__ */
+                    Yval_32 = Y_32*0x2568;
+            #endif  /* __RGB_V1__ */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                    *(pu32_rgb_data_current)++ = u32_rgb_temp1 |
+                                                        PACK_RGB565(16,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                    *(pu32_rgb_data_current)++ = u32_rgb_temp1 |
+                                                        PACK_RGB565(0,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+
+            pu8_src_top_Y = pu8_data_in1[YPlane]+ (u32_x_accum_Y >> 16);
+            pu8_src_bottom_Y = pu8_src_top_Y + u32_stride_in[YPlane];
+
+            /* Weighted combination */
+            Y_32 = (((pu8_src_top_Y[0]*(16-u32_x_frac_Y) + pu8_src_top_Y[1]*u32_x_frac_Y)
+                    *(16-u32_rgb_temp4) + (pu8_src_bottom_Y[0]*(16-u32_x_frac_Y)
+                    + pu8_src_bottom_Y[1]*u32_x_frac_Y)*u32_rgb_temp4 )>>8);
+
+            u32_x_accum_Y += u32_x_inc[YPlane];
+            /* YUV to RGB */
+            #ifdef __RGB_V1__
+                    Yval_32 = Y_32*37;
+            #else   /* __RGB_V1__ */
+                    Yval_32 = Y_32*0x2568;
+            #endif  /* __RGB_V1__ */
+
+            DEMATRIX(u8_Red,u8_Green,u8_Blue,Yval_32,U_32,V_32);
+
+            /* Pack 8 bit R,G,B to RGB565 */
+            #ifdef  LITTLE_ENDIAN
+                    *(pu32_rgb_data_next)++ = u32_rgb_temp2 |
+                                                        PACK_RGB565(16,u8_Red,u8_Green,u8_Blue);
+            #else   /* LITTLE_ENDIAN */
+                    *(pu32_rgb_data_next)++ = u32_rgb_temp2 |
+                                                        PACK_RGB565(0,u8_Red,u8_Green,u8_Blue);
+            #endif  /* LITTLE_ENDIAN */
+
+        }   /* End of horizontal scanning */
+
+        u32_y_accum_Y  =  u32_rgb_temp3 + (u32_y_inc[YPlane]);
+        u32_y_accum_U += (u32_y_inc[UPlane]);
+
+        /* Y plane row update */
+        if (u32_y_accum_Y >> 16)
+        {
+            pu8_data_in[YPlane] =  pu8_data_in1[YPlane] +
+                                                ((u32_y_accum_Y >> 16) * (u32_stride_in[YPlane]));
+            u32_y_accum_Y &= 0xffff;
+        }
+        else
+        {
+            pu8_data_in[YPlane] = pu8_data_in1[YPlane];
+        }
+        /* U and V planes row update */
+        if (u32_y_accum_U >> 16)
+        {
+            pu8_data_in[UPlane] =  pu8_data_in[UPlane] +
+                                                (u32_y_accum_U >> 16) * (u32_stride_in[UPlane]);
+            pu8_data_in[VPlane] =  pu8_data_in[VPlane] +
+                                                (u32_y_accum_U >> 16) * (u32_stride_in[VPlane]);
+            u32_y_accum_U &= 0xffff;
+        }
+
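+        /* RGB pointer update */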
+        pu32_rgb_data_start += u32_stride_out;
+
+    }   /* End of vertical scanning */
+    return M4VIFI_OK;
+}
+
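+/*
+ Illustrative sketch only (not part of the original code, wrapped in #if 0 so it is never
+ compiled): the inner loop above repeats the same 2x2 weighted combination for the Y, U and
+ V planes, using 4-bit horizontal and vertical weights (0..15) taken from bits 12..15 of the
+ 16.16 accumulators. A minimal, self-contained form of that kernel, with hypothetical names,
+ could look like this:
+*/
+#if 0
+static M4VIFI_UInt32 M4VIFI_BilinearSample4(M4VIFI_UInt8 u8_top_left, M4VIFI_UInt8 u8_top_right,
+                                            M4VIFI_UInt8 u8_bot_left, M4VIFI_UInt8 u8_bot_right,
+                                            M4VIFI_UInt32 u32_x_frac, M4VIFI_UInt32 u32_y_frac)
+{
+    /* Horizontal blend of the top and bottom pixel pairs (weights sum to 16) */
+    M4VIFI_UInt32 u32_top = u8_top_left * (16 - u32_x_frac) + u8_top_right * u32_x_frac;
+    M4VIFI_UInt32 u32_bot = u8_bot_left * (16 - u32_x_frac) + u8_bot_right * u32_x_frac;
+
+    /* Vertical blend, then >> 8 to remove the two 4-bit weight scalings (16 * 16 = 256) */
+    return (u32_top * (16 - u32_y_frac) + u32_bot * u32_y_frac) >> 8;
+}
+#endif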