Merge "Mark the logging heap read-only to media.log service"
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 94aac5c..b8a9711 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -49,6 +49,9 @@
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libmedia_native.so)
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libaudioflinger_intermediates)
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libaudioflinger.so)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/SHARED_LIBRARIES/libaudiopolicy_intermediates)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libaudiopolicy.so)
+
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index 6b726e0..1567cd1 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -273,7 +273,8 @@
     if ( (res = checkType(tag, TYPE_BYTE)) != OK) {
         return res;
     }
-    return updateImpl(tag, (const void*)string.string(), string.size());
+    // string.size() doesn't count the null termination character.
+    return updateImpl(tag, (const void*)string.string(), string.size() + 1);
 }
 
 status_t CameraMetadata::updateImpl(uint32_t tag, const void *data,
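For context, a minimal caller-side sketch of what the null-terminator fix above changes (tag choice and values are illustrative only, not part of this patch):

    CameraMetadata meta;
    String8 method("GPS");                 // size() == 3, excludes the trailing '\0'
    // With the fix, updateImpl() receives size() + 1 == 4 bytes, so readers of
    // this TYPE_BYTE tag get back a properly NUL-terminated C string.
    meta.update(ANDROID_JPEG_GPS_PROCESSING_METHOD, method);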
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index b2abc0f..0566d14 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -374,7 +374,7 @@
     }
 
     sp<IMediaPlayer> player =
-        service->create(client, 0);
+        service->create(client, AUDIO_SESSION_ALLOCATE);
 
     if (player != NULL && player->setDataSource(source) == NO_ERROR) {
         player->setVideoSurfaceTexture(surface->getIGraphicBufferProducer());
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index f3024b7..f98002d 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -217,8 +217,9 @@
      *      higher priorities, 0 being the normal priority.
      * cbf:         optional callback function (see effect_callback_t)
      * user:        pointer to context for use by the callback receiver.
-     * sessionID:   audio session this effect is associated to. If 0, the effect will be global to
-     *      the output mix. If not 0, the effect will be applied to all players
+     * sessionID:   audio session this effect is associated to.
+     *      If equal to AUDIO_SESSION_OUTPUT_MIX, the effect will be global to
+     *      the output mix.  Otherwise, the effect will be applied to all players
      *      (AudioTrack or MediaPLayer) within the same audio session.
      * io:  HAL audio output or input stream to which this effect must be attached. Leave at 0 for
      *      automatic output selection by AudioFlinger.
@@ -229,8 +230,8 @@
                   int32_t priority = 0,
                   effect_callback_t cbf = NULL,
                   void* user = NULL,
-                  int sessionId = 0,
-                  audio_io_handle_t io = 0
+                  int sessionId = AUDIO_SESSION_OUTPUT_MIX,
+                  audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
                   );
 
     /* Constructor.
@@ -241,8 +242,8 @@
                     int32_t priority = 0,
                     effect_callback_t cbf = NULL,
                     void* user = NULL,
-                    int sessionId = 0,
-                    audio_io_handle_t io = 0
+                    int sessionId = AUDIO_SESSION_OUTPUT_MIX,
+                    audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
                     );
 
     /* Terminates the AudioEffect and unregisters it from AudioFlinger.
@@ -263,8 +264,8 @@
                             int32_t priority = 0,
                             effect_callback_t cbf = NULL,
                             void* user = NULL,
-                            int sessionId = 0,
-                            audio_io_handle_t io = 0
+                            int sessionId = AUDIO_SESSION_OUTPUT_MIX,
+                            audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
                             );
 
     /* Result of constructing the AudioEffect. This must be checked
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 28fdfd4..402b479 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -113,7 +113,6 @@
     // returns the audio output stream latency in ms. Corresponds to
     // audio_stream_out->get_latency()
     static status_t getLatency(audio_io_handle_t output,
-                               audio_stream_type_t stream,
                                uint32_t* latency);
 
     static bool routedToA2dpOutput(audio_stream_type_t streamType);
@@ -125,8 +124,7 @@
     static status_t setVoiceVolume(float volume);
 
     // return the number of audio frames written by AudioFlinger to audio HAL and
-    // audio dsp to DAC since the output on which the specified stream is playing
-    // has exited standby.
+    // audio dsp to DAC since the specified output I/O handle has exited standby.
     // returned status (from utils/Errors.h) can be:
     // - NO_ERROR: successful operation, halFrames and dspFrames point to valid data
     // - INVALID_OPERATION: Not supported on current hardware platform
@@ -135,13 +133,18 @@
     // necessary to check returned status before using the returned values.
     static status_t getRenderPosition(audio_io_handle_t output,
                                       uint32_t *halFrames,
-                                      uint32_t *dspFrames,
-                                      audio_stream_type_t stream = AUDIO_STREAM_DEFAULT);
+                                      uint32_t *dspFrames);
 
     // return the number of input frames lost by HAL implementation, or 0 if the handle is invalid
     static uint32_t getInputFramesLost(audio_io_handle_t ioHandle);
 
+    // Allocate a new audio session ID and return that new ID.
+    // If unable to contact AudioFlinger, returns AUDIO_SESSION_ALLOCATE instead.
+    // FIXME If AudioFlinger were to ever exhaust the session ID namespace,
+    //       this method could fail by returning either AUDIO_SESSION_ALLOCATE
+    //       or an unspecified existing session ID.
     static int newAudioSessionId();
+
     static void acquireAudioSessionId(int audioSession, pid_t pid);
     static void releaseAudioSessionId(int audioSession, pid_t pid);
 
@@ -318,8 +321,6 @@
 
     static sp<IAudioPolicyService> gAudioPolicyService;
 
-    // mapping between stream types and outputs
-    static DefaultKeyedVector<audio_stream_type_t, audio_io_handle_t> gStreamOutputMap;
     // list of output descriptors containing cached parameters
     // (sampling rate, framecount, channel count...)
     static DefaultKeyedVector<audio_io_handle_t, OutputDescriptor *> gOutputs;
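For context, a minimal caller-side sketch of the newAudioSessionId() semantics documented above (illustrative only, not part of this patch):

    // AUDIO_SESSION_ALLOCATE doubles as the failure value when AudioFlinger
    // cannot be reached, so treat it as "no session was allocated".
    int sessionId = AudioSystem::newAudioSessionId();
    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        ALOGW("could not allocate an audio session id");
    } else {
        AudioSystem::acquireAudioSessionId(sessionId, getpid());
        // ... hand sessionId to an AudioTrack or AudioEffect ...
        AudioSystem::releaseAudioSessionId(sessionId, getpid());
    }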
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 647748b..2c48bbf 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -36,11 +36,6 @@
 class AudioTrack : public RefBase
 {
 public:
-    enum channel_index {
-        MONO   = 0,
-        LEFT   = 0,
-        RIGHT  = 1
-    };
 
     /* Events used by AudioTrack callback function (callback_t).
      * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*.
@@ -82,6 +77,7 @@
                                   // (currently ignored, but will make the primary field in future)
 
         size_t      size;         // input/output in bytes == frameCount * frameSize
+                                  // on input it is unused
                                   // on output is the number of bytes actually filled
                                   // FIXME this is redundant with respect to frameCount,
                                   // and TRANSFER_OBTAIN mode is broken for 8-bit data
@@ -91,7 +87,7 @@
             void*       raw;
             short*      i16;      // signed 16-bit
             int8_t*     i8;       // unsigned 8-bit, offset by 0x80
-        };
+        };                        // input: unused, output: pointer to buffer
     };
 
     /* As a convenience, if a callback is supplied, a handler thread
@@ -455,7 +451,8 @@
      *  none.
      *
      * Returned value:
-     *  handle on audio hardware output
+     *  handle on audio hardware output, or AUDIO_IO_HANDLE_NONE if the
+     *  track needed to be re-created but that failed
      */
             audio_io_handle_t    getOutput() const;
 
@@ -533,15 +530,6 @@
                                      struct timespec *elapsed = NULL, size_t *nonContig = NULL);
 public:
 
-//EL_FIXME to be reconciled with new obtainBuffer() return codes and control block proxy
-//            enum {
-//            NO_MORE_BUFFERS = 0x80000001,   // same name in AudioFlinger.h, ok to be different value
-//            TEAR_DOWN       = 0x80000002,
-//            STOPPED = 1,
-//            STREAM_END_WAIT,
-//            STREAM_END
-//        };
-
     /* Release a filled buffer of "audioBuffer->frameCount" frames for AudioFlinger to process. */
     // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed
             void        releaseBuffer(Buffer* audioBuffer);
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index f8e4e3b..3ca3095 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -223,6 +223,7 @@
             status_t        getDuration(int *msec);
             status_t        reset();
             status_t        setAudioStreamType(audio_stream_type_t type);
+            status_t        getAudioStreamType(audio_stream_type_t *type);
             status_t        setLooping(int loop);
             bool            isLooping();
             status_t        setVolume(float leftVolume, float rightVolume);
diff --git a/include/media/stagefright/FileSource.h b/include/media/stagefright/FileSource.h
index d994cb3..9838ed2 100644
--- a/include/media/stagefright/FileSource.h
+++ b/include/media/stagefright/FileSource.h
@@ -30,6 +30,7 @@
 class FileSource : public DataSource {
 public:
     FileSource(const char *filename);
+    // FileSource takes ownership and will close the fd
     FileSource(int fd, int64_t offset, int64_t length);
 
     virtual status_t initCheck() const;
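A short sketch of the fd-ownership rule documented above (hypothetical path, illustrative only):

    struct stat st;
    int fd = open("/sdcard/clip.mp4", O_RDONLY);
    if (fd >= 0 && fstat(fd, &st) == 0) {
        // FileSource takes ownership of fd and closes it on destruction;
        // the caller must not call close(fd) itself after this point.
        sp<DataSource> source = new FileSource(fd, 0 /*offset*/, st.st_size /*length*/);
    }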
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index 590623b..01a5daf 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -60,6 +60,7 @@
         SECTION_DECODER,
         SECTION_ENCODERS,
         SECTION_ENCODER,
+        SECTION_INCLUDE,
     };
 
     struct CodecInfo {
@@ -73,7 +74,9 @@
 
     status_t mInitCheck;
     Section mCurrentSection;
+    Vector<Section> mPastSections;
     int32_t mDepth;
+    AString mHrefBase;
 
     Vector<CodecInfo> mCodecInfos;
     KeyedVector<AString, size_t> mCodecQuirks;
@@ -83,7 +86,8 @@
     ~MediaCodecList();
 
     status_t initCheck() const;
-    void parseXMLFile(FILE *file);
+    void parseXMLFile(const char *path);
+    void parseTopLevelXMLFile(const char *path);
 
     static void StartElementHandlerWrapper(
             void *me, const char *name, const char **attrs);
@@ -93,6 +97,7 @@
     void startElementHandler(const char *name, const char **attrs);
     void endElementHandler(const char *name);
 
+    status_t includeXMLFile(const char **attrs);
     status_t addMediaCodecFromAttributes(bool encoder, const char **attrs);
     void addMediaCodec(bool encoder, const char *name, const char *type = NULL);
 
diff --git a/libvideoeditor/lvpp/Android.mk b/libvideoeditor/lvpp/Android.mk
index 77a21ac..8318d28 100755
--- a/libvideoeditor/lvpp/Android.mk
+++ b/libvideoeditor/lvpp/Android.mk
@@ -46,7 +46,7 @@
 
 
 LOCAL_SHARED_LIBRARIES :=     \
-    libaudioflinger           \
+    libaudioresampler         \
     libaudioutils             \
     libbinder                 \
     libcutils                 \
@@ -80,7 +80,6 @@
     $(TOP)/frameworks/av/services/audioflinger \
     $(TOP)/frameworks/native/include/media/editor \
     $(TOP)/frameworks/native/include/media/openmax \
-    $(TOP)/frameworks/native/services/audioflinger
 
 
 LOCAL_SHARED_LIBRARIES += libdl
@@ -99,8 +98,6 @@
     -DUSE_STAGEFRIGHT_READERS \
     -DUSE_STAGEFRIGHT_3GPP_READER
 
-LOCAL_32_BIT_ONLY := true
-
 include $(BUILD_SHARED_LIBRARY)
 
 #include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp b/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
index cb4b23e..e60030e 100755
--- a/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
+++ b/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
@@ -534,7 +534,8 @@
         mAudioTrack = new AudioTrack(
                 AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT,
                 audio_channel_out_mask_from_count(numChannels),
-                0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
+                0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this,
+                0 /*notificationFrames*/);
 
         if ((err = mAudioTrack->initCheck()) != OK) {
             mAudioTrack.clear();
diff --git a/libvideoeditor/vss/src/Android.mk b/libvideoeditor/vss/src/Android.mk
index 8856c41..47627ec 100755
--- a/libvideoeditor/vss/src/Android.mk
+++ b/libvideoeditor/vss/src/Android.mk
@@ -53,7 +53,7 @@
 LOCAL_MODULE_TAGS := optional
 
 LOCAL_SHARED_LIBRARIES :=       \
-    libaudioflinger             \
+    libaudioresampler           \
     libaudioutils               \
     libbinder                   \
     libcutils                   \
@@ -81,7 +81,6 @@
     $(TOP)/frameworks/av/libvideoeditor/vss/stagefrightshells/inc \
     $(TOP)/frameworks/av/services/audioflinger \
     $(TOP)/frameworks/native/include/media/openmax \
-    $(TOP)/frameworks/native/services/audioflinger \
     $(TOP)/system/media/audio_effects/include \
     $(TOP)/system/media/audio_utils/include
 
@@ -96,6 +95,4 @@
     -DM4xVSS_RESERVED_MOOV_DISK_SPACEno \
     -DDECODE_GIF_ON_SAVING
 
-LOCAL_32_BIT_ONLY := true
-
 include $(BUILD_SHARED_LIBRARY)
diff --git a/libvideoeditor/vss/src/VideoEditorResampler.cpp b/libvideoeditor/vss/src/VideoEditorResampler.cpp
index 1129c3c..53537f0 100755
--- a/libvideoeditor/vss/src/VideoEditorResampler.cpp
+++ b/libvideoeditor/vss/src/VideoEditorResampler.cpp
@@ -17,7 +17,7 @@
 #define LOG_NDEBUG 1
 #include <audio_utils/primitives.h>
 #include <utils/Log.h>
-#include "AudioMixer.h"
+#include "AudioResampler.h"
 #include "VideoEditorResampler.h"
 
 namespace android {
diff --git a/libvideoeditor/vss/stagefrightshells/src/Android.mk b/libvideoeditor/vss/stagefrightshells/src/Android.mk
index a060c0d..9188942 100755
--- a/libvideoeditor/vss/stagefrightshells/src/Android.mk
+++ b/libvideoeditor/vss/stagefrightshells/src/Android.mk
@@ -64,6 +64,4 @@
 
 LOCAL_MODULE_TAGS := optional
 
-LOCAL_32_BIT_ONLY := true
-
 include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 961b0a2..a7bf380 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -471,7 +471,7 @@
 
     audio_io_handle_t input = AudioSystem::getInput(mInputSource, mSampleRate, mFormat,
             mChannelMask, mSessionId);
-    if (input == 0) {
+    if (input == AUDIO_IO_HANDLE_NONE) {
         ALOGE("Could not get audio input for record source %d, sample rate %u, format %#x, "
               "channel mask %#x, session %d",
               mInputSource, mSampleRate, mFormat, mChannelMask, mSessionId);
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 140fb66..2f16444 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -35,9 +35,9 @@
 sp<IAudioFlinger> AudioSystem::gAudioFlinger;
 sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
 audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
-// Cached values
 
-DefaultKeyedVector<audio_io_handle_t, AudioSystem::OutputDescriptor *> AudioSystem::gOutputs(0);
+// Cached values for output handles
+DefaultKeyedVector<audio_io_handle_t, AudioSystem::OutputDescriptor *> AudioSystem::gOutputs(NULL);
 
 // Cached values for recording queries, all protected by gLock
 uint32_t AudioSystem::gPrevInSamplingRate;
@@ -196,12 +196,12 @@
 
 status_t AudioSystem::setParameters(const String8& keyValuePairs)
 {
-    return setParameters((audio_io_handle_t) 0, keyValuePairs);
+    return setParameters(AUDIO_IO_HANDLE_NONE, keyValuePairs);
 }
 
 String8 AudioSystem::getParameters(const String8& keys)
 {
-    return getParameters((audio_io_handle_t) 0, keys);
+    return getParameters(AUDIO_IO_HANDLE_NONE, keys);
 }
 
 // convert volume steps to natural log scale
@@ -284,7 +284,7 @@
     }
 
     output = getOutput(streamType);
-    if (output == 0) {
+    if (output == AUDIO_IO_HANDLE_NONE) {
         return PERMISSION_DENIED;
     }
 
@@ -329,15 +329,14 @@
     }
 
     output = getOutput(streamType);
-    if (output == 0) {
+    if (output == AUDIO_IO_HANDLE_NONE) {
         return PERMISSION_DENIED;
     }
 
-    return getLatency(output, streamType, latency);
+    return getLatency(output, latency);
 }
 
 status_t AudioSystem::getLatency(audio_io_handle_t output,
-                                 audio_stream_type_t streamType __unused,
                                  uint32_t* latency)
 {
     OutputDescriptor *outputDesc;
@@ -354,7 +353,7 @@
         gLock.unlock();
     }
 
-    ALOGV("getLatency() streamType %d, output %d, latency %d", streamType, output, *latency);
+    ALOGV("getLatency() output %d, latency %d", output, *latency);
 
     return NO_ERROR;
 }
@@ -401,19 +400,11 @@
 }
 
 status_t AudioSystem::getRenderPosition(audio_io_handle_t output, uint32_t *halFrames,
-                                        uint32_t *dspFrames, audio_stream_type_t stream)
+                                        uint32_t *dspFrames)
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
 
-    if (stream == AUDIO_STREAM_DEFAULT) {
-        stream = AUDIO_STREAM_MUSIC;
-    }
-
-    if (output == 0) {
-        output = getOutput(stream);
-    }
-
     return af->getRenderPosition(halFrames, dspFrames, output);
 }
 
@@ -422,7 +413,7 @@
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     uint32_t result = 0;
     if (af == 0) return result;
-    if (ioHandle == 0) return result;
+    if (ioHandle == AUDIO_IO_HANDLE_NONE) return result;
 
     result = af->getInputFramesLost(ioHandle);
     return result;
@@ -431,7 +422,7 @@
 int AudioSystem::newAudioSessionId()
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
-    if (af == 0) return 0;
+    if (af == 0) return AUDIO_SESSION_ALLOCATE;
     return af->newAudioSessionId();
 }
 
@@ -473,7 +464,7 @@
     const OutputDescriptor *desc;
     audio_stream_type_t stream;
 
-    if (ioHandle == 0) return;
+    if (ioHandle == AUDIO_IO_HANDLE_NONE) return;
 
     Mutex::Autolock _l(AudioSystem::gLock);
 
@@ -739,7 +730,7 @@
 audio_devices_t AudioSystem::getDevicesForStream(audio_stream_type_t stream)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
-    if (aps == 0) return (audio_devices_t)0;
+    if (aps == 0) return AUDIO_DEVICE_NONE;
     return aps->getDevicesForStream(stream);
 }
 
@@ -747,7 +738,7 @@
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     // FIXME change return type to status_t, and return PERMISSION_DENIED here
-    if (aps == 0) return 0;
+    if (aps == 0) return AUDIO_IO_HANDLE_NONE;
     return aps->getOutputForEffect(desc);
 }
 
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 20c1cdb..fbfd3da 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -333,8 +333,8 @@
         mOffloadInfo = NULL;
     }
 
-    mVolume[LEFT] = 1.0f;
-    mVolume[RIGHT] = 1.0f;
+    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
+    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
     mSendLevel = 0.0f;
     // mFrameCount is initialized in createTrack_l
     mReqFrameCount = frameCount;
@@ -371,16 +371,6 @@
             mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
         }
-        // Use of direct and offloaded output streams is ref counted by audio policy manager.
-#if 0   // FIXME This should no longer be needed
-        //Use of direct and offloaded output streams is ref counted by audio policy manager.
-        // As getOutput was called above and resulted in an output stream to be opened,
-        // we need to release it.
-        if (mOutput != 0) {
-            AudioSystem::releaseOutput(mOutput);
-            mOutput = 0;
-        }
-#endif
         return status;
     }
 
@@ -556,7 +546,7 @@
     mAudioTrack->pause();
 
     if (isOffloaded_l()) {
-        if (mOutput != 0) {
+        if (mOutput != AUDIO_IO_HANDLE_NONE) {
             uint32_t halFrames;
             // OffloadThread sends HAL pause in its threadLoop.. time saved
             // here can be slightly off
@@ -573,8 +563,8 @@
     }
 
     AutoMutex lock(mLock);
-    mVolume[LEFT] = left;
-    mVolume[RIGHT] = right;
+    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
+    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
 
     mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
 
@@ -643,7 +633,7 @@
     // query the HAL and update if needed.
 // FIXME use Proxy return channel to update the rate from server and avoid polling here
     if (isOffloaded_l()) {
-        if (mOutput != 0) {
+        if (mOutput != AUDIO_IO_HANDLE_NONE) {
             uint32_t sampleRate = 0;
             status_t status = AudioSystem::getSamplingRate(mOutput, mStreamType, &sampleRate);
             if (status == NO_ERROR) {
@@ -789,7 +779,7 @@
             return NO_ERROR;
         }
 
-        if (mOutput != 0) {
+        if (mOutput != AUDIO_IO_HANDLE_NONE) {
             uint32_t halFrames;
             AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
         }
@@ -865,7 +855,7 @@
 
     audio_io_handle_t output = AudioSystem::getOutput(mStreamType, mSampleRate, mFormat,
             mChannelMask, mFlags, mOffloadInfo);
-    if (output == 0) {
+    if (output == AUDIO_IO_HANDLE_NONE) {
         ALOGE("Could not get audio output for stream type %d, sample rate %u, format %#x, "
               "channel mask %#x, flags %#x",
               mStreamType, mSampleRate, mFormat, mChannelMask, mFlags);
@@ -878,7 +868,7 @@
     // Not all of these values are needed under all conditions, but it is easier to get them all
 
     uint32_t afLatency;
-    status = AudioSystem::getLatency(output, mStreamType, &afLatency);
+    status = AudioSystem::getLatency(output, &afLatency);
     if (status != NO_ERROR) {
         ALOGE("getLatency(%d) failed status %d", output, status);
         goto release;
@@ -1134,8 +1124,8 @@
         mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
         mProxy = mStaticProxy;
     }
-    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
-            uint16_t(mVolume[LEFT] * 0x1000));
+    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[AUDIO_INTERLEAVE_RIGHT] * 0x1000)) << 16) |
+            uint16_t(mVolume[AUDIO_INTERLEAVE_LEFT] * 0x1000));
     mProxy->setSendLevel(mSendLevel);
     mProxy->setSampleRate(mSampleRate);
     mProxy->setEpoch(epoch);
@@ -1775,16 +1765,6 @@
         }
     }
     if (result != NO_ERROR) {
-        // Use of direct and offloaded output streams is ref counted by audio policy manager.
-#if 0   // FIXME This should no longer be needed
-        //Use of direct and offloaded output streams is ref counted by audio policy manager.
-        // As getOutput was called above and resulted in an output stream to be opened,
-        // we need to release it.
-        if (mOutput != 0) {
-            AudioSystem::releaseOutput(mOutput);
-            mOutput = 0;
-        }
-#endif
         ALOGW("restoreTrack_l() failed status %d", result);
         mState = STATE_STOPPED;
     }
@@ -1818,7 +1798,7 @@
 String8 AudioTrack::getParameters(const String8& keys)
 {
     audio_io_handle_t output = getOutput();
-    if (output != 0) {
+    if (output != AUDIO_IO_HANDLE_NONE) {
         return AudioSystem::getParameters(output, keys);
     } else {
         return String8::empty();
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index fdd1a12..58c9fc1 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -200,7 +200,7 @@
             ts = &remaining;
             break;
         default:
-            LOG_FATAL("obtainBuffer() timeout=%d", timeout);
+            LOG_ALWAYS_FATAL("obtainBuffer() timeout=%d", timeout);
             ts = NULL;
             break;
         }
@@ -429,7 +429,7 @@
             ts = &remaining;
             break;
         default:
-            LOG_FATAL("waitStreamEndDone() timeout=%d", timeout);
+            LOG_ALWAYS_FATAL("waitStreamEndDone() timeout=%d", timeout);
             ts = NULL;
             break;
         }
@@ -470,7 +470,7 @@
 
 void StaticAudioTrackClientProxy::flush()
 {
-    LOG_FATAL("static flush");
+    LOG_ALWAYS_FATAL("static flush");
 }
 
 void StaticAudioTrackClientProxy::setLoop(size_t loopStart, size_t loopEnd, int loopCount)
diff --git a/media/libmedia/CharacterEncodingDetector.cpp b/media/libmedia/CharacterEncodingDetector.cpp
index eb091ac..4992798 100644
--- a/media/libmedia/CharacterEncodingDetector.cpp
+++ b/media/libmedia/CharacterEncodingDetector.cpp
@@ -90,6 +90,7 @@
         char buf[1024];
         buf[0] = 0;
         int idx;
+        bool allprintable = true;
         for (int i = 0; i < size; i++) {
             const char *name = mNames.getEntry(i);
             const char *value = mValues.getEntry(i);
@@ -103,18 +104,60 @@
                 strlcat(buf, value, sizeof(buf));
                 // separate tags by space so ICU's ngram detector can do its job
                 strlcat(buf, " ", sizeof(buf));
+                allprintable = false;
             }
         }
-        ucsdet_setText(csd, buf, strlen(buf), &status);
 
-        int32_t matches;
-        const UCharsetMatch** ucma = ucsdet_detectAll(csd, &matches, &status);
-        const char *combinedenc = "???";
+        const char *combinedenc = "UTF-8";
+        if (allprintable) {
+            // since 'buf' is empty, ICU would return a UTF-8 matcher with low confidence, so
+            // no need to even call it
+            ALOGV("all tags are printable, assuming ascii (%d)", strlen(buf));
+        } else {
+            ucsdet_setText(csd, buf, strlen(buf), &status);
+            int32_t matches;
+            const UCharsetMatch** ucma = ucsdet_detectAll(csd, &matches, &status);
+            bool goodmatch = true;
+            const UCharsetMatch* bestCombinedMatch = getPreferred(buf, strlen(buf),
+                    ucma, matches, &goodmatch);
 
-        const UCharsetMatch* bestCombinedMatch = getPreferred(buf, strlen(buf), ucma, matches);
+            if (!goodmatch && strlen(buf) < 20) {
+                ALOGV("not a good match, trying with more data");
+                // This string might be too short for ICU to do anything useful with.
+                // (real world example: "Björk" in ISO-8859-1 might be detected as GB18030, because
+                //  the ISO detector reports a confidence of 0, while the GB18030 detector reports
+                //  a confidence of 10 with no invalid characters)
+                // Append artist, album and title if they were previously omitted because they
+                // were printable ascii.
+                bool added = false;
+                for (int i = 0; i < size; i++) {
+                    const char *name = mNames.getEntry(i);
+                    const char *value = mValues.getEntry(i);
+                    if (isPrintableAscii(value, strlen(value)) && (
+                                !strcmp(name, "artist") ||
+                                !strcmp(name, "album") ||
+                                !strcmp(name, "title"))) {
+                        strlcat(buf, value, sizeof(buf));
+                        strlcat(buf, " ", sizeof(buf));
+                        added = true;
+                    }
+                }
+                if (added) {
+                    ucsdet_setText(csd, buf, strlen(buf), &status);
+                    ucma = ucsdet_detectAll(csd, &matches, &status);
+                    bestCombinedMatch = getPreferred(buf, strlen(buf),
+                            ucma, matches, &goodmatch);
+                    if (!goodmatch) {
+                        ALOGV("still not a good match after adding printable tags");
+                    }
+                } else {
+                    ALOGV("no printable tags to add");
+                }
+            }
 
-        if (bestCombinedMatch != NULL) {
-            combinedenc = ucsdet_getName(bestCombinedMatch, &status);
+            if (bestCombinedMatch != NULL) {
+                combinedenc = ucsdet_getName(bestCombinedMatch, &status);
+            }
         }
 
         for (int i = 0; i < size; i++) {
@@ -128,24 +171,29 @@
             int32_t inputLength = strlen(s);
             const char *enc;
 
-            if (!strcmp(name, "artist") ||
+            if (!allprintable && (!strcmp(name, "artist") ||
                     !strcmp(name, "albumartist") ||
                     !strcmp(name, "composer") ||
                     !strcmp(name, "genre") ||
                     !strcmp(name, "album") ||
-                    !strcmp(name, "title")) {
+                    !strcmp(name, "title"))) {
                 // use encoding determined from the combination of artist/album/title etc.
                 enc = combinedenc;
             } else {
-                ucsdet_setText(csd, s, inputLength, &status);
-                ucm = ucsdet_detect(csd, &status);
-                if (!ucm) {
-                    mValues.setEntry(i, "???");
-                    continue;
+                if (isPrintableAscii(s, inputLength)) {
+                    enc = "UTF-8";
+                    ALOGV("@@@@ %s is ascii", mNames.getEntry(i));
+                } else {
+                    ucsdet_setText(csd, s, inputLength, &status);
+                    ucm = ucsdet_detect(csd, &status);
+                    if (!ucm) {
+                        mValues.setEntry(i, "???");
+                        continue;
+                    }
+                    enc = ucsdet_getName(ucm, &status);
+                    ALOGV("@@@@ recognized charset: %s for %s confidence %d",
+                            enc, mNames.getEntry(i), ucsdet_getConfidence(ucm, &status));
                 }
-                enc = ucsdet_getName(ucm, &status);
-                ALOGV("@@@@ recognized charset: %s for %s confidence %d",
-                        enc, mNames.getEntry(i), ucsdet_getConfidence(ucm, &status));
             }
 
             if (strcmp(enc,"UTF-8") != 0) {
@@ -207,10 +255,15 @@
  *   algorithm and larger frequent character lists than ICU
  * - devalue encoding where the conversion contains unlikely characters (symbols, reserved, etc)
  * - pick the highest match
+ * - signal to the caller whether this match is considered good: confidence > 15, and confidence
+ *   delta with the next runner up > 15
  */
 const UCharsetMatch *CharacterEncodingDetector::getPreferred(
-        const char *input, size_t len, const UCharsetMatch** ucma, size_t nummatches) {
+        const char *input, size_t len,
+        const UCharsetMatch** ucma, size_t nummatches,
+        bool *goodmatch) {
 
+    *goodmatch = false;
     Vector<const UCharsetMatch*> matches;
     UErrorCode status = U_ZERO_ERROR;
 
@@ -227,6 +280,10 @@
         return NULL;
     }
     if (num == 1) {
+        int confidence = ucsdet_getConfidence(matches[0], &status);
+        if (confidence > 15) {
+            *goodmatch = true;
+        }
         return matches[0];
     }
 
@@ -326,15 +383,35 @@
     // find match with highest confidence after adjusting for unlikely characters
     int highest = newconfidence[0];
     size_t highestidx = 0;
+    int runnerup = -10000;
+    int runnerupidx = -10000;
     num = newconfidence.size();
     for (size_t i = 1; i < num; i++) {
         if (newconfidence[i] > highest) {
+            runnerup = highest;
+            runnerupidx = highestidx;
             highest = newconfidence[i];
             highestidx = i;
+        } else if (newconfidence[i] > runnerup) {
+            runnerup = newconfidence[i];
+            runnerupidx = i;
         }
     }
     status = U_ZERO_ERROR;
-    ALOGV("selecting '%s' w/ %d confidence", ucsdet_getName(matches[highestidx], &status), highest);
+    ALOGV("selecting: '%s' w/ %d confidence",
+            ucsdet_getName(matches[highestidx], &status), highest);
+    if (runnerupidx < 0) {
+        ALOGV("no runner up");
+        if (highest > 15) {
+            *goodmatch = true;
+        }
+    } else {
+        ALOGV("runner up: '%s' w/ %d confidence",
+                ucsdet_getName(matches[runnerupidx], &status), runnerup);
+        if ((highest - runnerup) > 15) {
+            *goodmatch = true;
+        }
+    }
     return matches[highestidx];
 }
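A worked instance of the goodmatch heuristic above (numbers are illustrative, not from the patch):

    // best match: ISO-8859-1, adjusted confidence 40
    // runner-up:  GB18030,    adjusted confidence 20
    // delta = 40 - 20 = 20 > 15           -> *goodmatch = true
    // With best 18 vs runner-up 10 the delta is only 8, so the caller retries
    // detection after appending the printable-ASCII artist/album/title tags.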
 
diff --git a/media/libmedia/CharacterEncodingDetector.h b/media/libmedia/CharacterEncodingDetector.h
index 3655a91..7b5ed86 100644
--- a/media/libmedia/CharacterEncodingDetector.h
+++ b/media/libmedia/CharacterEncodingDetector.h
@@ -41,7 +41,9 @@
 
     private:
         const UCharsetMatch *getPreferred(
-                const char *input, size_t len, const UCharsetMatch** ucma, size_t matches);
+                const char *input, size_t len,
+                const UCharsetMatch** ucma, size_t matches,
+                bool *goodmatch);
 
         bool isFrequent(const uint16_t *values, uint32_t c);
 
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 762681e..eb813bd 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -109,6 +109,7 @@
         data.writeInt32(frameCount);
         track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT;
         data.writeInt32(lFlags);
+        // haveSharedBuffer
         if (sharedBuffer != 0) {
             data.writeInt32(true);
             data.writeStrongBinder(sharedBuffer->asBinder());
@@ -410,7 +411,7 @@
                                          const audio_offload_info_t *offloadInfo)
     {
         Parcel data, reply;
-        audio_devices_t devices = pDevices != NULL ? *pDevices : (audio_devices_t)0;
+        audio_devices_t devices = pDevices != NULL ? *pDevices : AUDIO_DEVICE_NONE;
         uint32_t samplingRate = pSamplingRate != NULL ? *pSamplingRate : 0;
         audio_format_t format = pFormat != NULL ? *pFormat : AUDIO_FORMAT_DEFAULT;
         audio_channel_mask_t channelMask = pChannelMask != NULL ?
@@ -424,6 +425,7 @@
         data.writeInt32(channelMask);
         data.writeInt32(latency);
         data.writeInt32((int32_t) flags);
+        // hasOffloadInfo
         if (offloadInfo == NULL) {
             data.writeInt32(0);
         } else {
@@ -501,7 +503,7 @@
                                         audio_channel_mask_t *pChannelMask)
     {
         Parcel data, reply;
-        audio_devices_t devices = pDevices != NULL ? *pDevices : (audio_devices_t)0;
+        audio_devices_t devices = pDevices != NULL ? *pDevices : AUDIO_DEVICE_NONE;
         uint32_t samplingRate = pSamplingRate != NULL ? *pSamplingRate : 0;
         audio_format_t format = pFormat != NULL ? *pFormat : AUDIO_FORMAT_DEFAULT;
         audio_channel_mask_t channelMask = pChannelMask != NULL ?
@@ -599,7 +601,7 @@
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         status_t status = remote()->transact(NEW_AUDIO_SESSION_ID, data, &reply);
-        int id = 0;
+        int id = AUDIO_SESSION_ALLOCATE;
         if (status == NO_ERROR) {
             id = reply.readInt32();
         }
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 1a027a6..9bb4a49 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -137,6 +137,7 @@
         data.writeInt32(static_cast <uint32_t>(format));
         data.writeInt32(channelMask);
         data.writeInt32(static_cast <uint32_t>(flags));
+        // hasOffloadInfo
         if (offloadInfo == NULL) {
             data.writeInt32(0);
         } else {
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index adef3be..61b6d36 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -1057,7 +1057,7 @@
                       0,    // notificationFrames
                       0,    // sharedBuffer
                       mThreadCanCallJava,
-                      0,    // sessionId
+                      AUDIO_SESSION_ALLOCATE,
                       AudioTrack::TRANSFER_CALLBACK);
 
     if (mpAudioTrack->initCheck() != NO_ERROR) {
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index d94c7c5..0be01a9 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -531,6 +531,14 @@
     return OK;
 }
 
+status_t MediaPlayer::getAudioStreamType(audio_stream_type_t *type)
+{
+    ALOGV("getAudioStreamType");
+    Mutex::Autolock _l(mLock);
+    *type = mStreamType;
+    return OK;
+}
+
 status_t MediaPlayer::setLooping(int loop)
 {
     ALOGV("MediaPlayer::setLooping");
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 200c561..778eb9a 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1679,7 +1679,7 @@
 
 ssize_t MediaPlayerService::AudioOutput::write(const void* buffer, size_t size)
 {
-    LOG_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
+    LOG_ALWAYS_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
 
     //ALOGV("write(%p, %u)", buffer, size);
     if (mTrack != 0) {
diff --git a/media/libnbaio/MonoPipe.cpp b/media/libnbaio/MonoPipe.cpp
index 9c8461c..4adf018 100644
--- a/media/libnbaio/MonoPipe.cpp
+++ b/media/libnbaio/MonoPipe.cpp
@@ -30,6 +30,23 @@
 
 namespace android {
 
+static uint64_t cacheN; // output of CCHelper::getLocalFreq()
+static bool cacheValid; // whether cacheN is valid
+static pthread_once_t cacheOnceControl = PTHREAD_ONCE_INIT;
+
+static void cacheOnceInit()
+{
+    CCHelper tmpHelper;
+    status_t res;
+    if (OK != (res = tmpHelper.getLocalFreq(&cacheN))) {
+        ALOGE("Failed to fetch local time frequency when constructing a"
+              " MonoPipe (res = %d).  getNextWriteTimestamp calls will be"
+              " non-functional", res);
+        return;
+    }
+    cacheValid = true;
+}
+
 MonoPipe::MonoPipe(size_t reqFrames, const NBAIO_Format& format, bool writeCanBlock) :
         NBAIO_Sink(format),
         mUpdateSeq(0),
@@ -47,8 +64,6 @@
         mTimestampMutator(&mTimestampShared),
         mTimestampObserver(&mTimestampShared)
 {
-    CCHelper tmpHelper;
-    status_t res;
     uint64_t N, D;
 
     mNextRdPTS = AudioBufferProvider::kInvalidPTS;
@@ -59,12 +74,13 @@
     mSamplesToLocalTime.a_to_b_denom = 0;
 
     D = Format_sampleRate(format);
-    if (OK != (res = tmpHelper.getLocalFreq(&N))) {
-        ALOGE("Failed to fetch local time frequency when constructing a"
-              " MonoPipe (res = %d).  getNextWriteTimestamp calls will be"
-              " non-functional", res);
+
+    (void) pthread_once(&cacheOnceControl, cacheOnceInit);
+    if (!cacheValid) {
+        // log has already been done
         return;
     }
+    N = cacheN;
 
     LinearTransform::reduce(&N, &D);
     static const uint64_t kSignedHiBitsMask   = ~(0x7FFFFFFFull);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index e9e96d1..9164e5c 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -644,8 +644,8 @@
 
     // FIXME: assume that surface is controlled by app (native window
     // returns the number for the case when surface is not controlled by app)
-    (*minUndequeuedBuffers)++;
-
+    // FIXME2: This means that minUndequeuedBufs can be 1 larger than reported
+    // For now, try to allocate 1 more buffer, but don't fail if unsuccessful
 
     // Use conservative allocation while also trying to reduce starvation
     //
@@ -653,7 +653,8 @@
     //    minimum needed for the consumer to be able to work
     // 2. try to allocate two (2) additional buffers to reduce starvation from
     //    the consumer
-    for (OMX_U32 extraBuffers = 2; /* condition inside loop */; extraBuffers--) {
+    //    plus an extra buffer to account for incorrect minUndequeuedBufs
+    for (OMX_U32 extraBuffers = 2 + 1; /* condition inside loop */; extraBuffers--) {
         OMX_U32 newBufferCount =
             def.nBufferCountMin + *minUndequeuedBuffers + extraBuffers;
         def.nBufferCountActual = newBufferCount;
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index a9b0c73..714b5e0 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -118,8 +118,6 @@
 
 LOCAL_MODULE_TAGS := optional
 
-LOCAL_32_BIT_ONLY := true
-
 include $(BUILD_SHARED_LIBRARY)
 
 include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index 8623100..2669849 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -221,7 +221,8 @@
 
         mAudioTrack = new AudioTrack(
                 AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
-                0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
+                0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this,
+                0 /*notificationFrames*/);
 
         if ((err = mAudioTrack->initCheck()) != OK) {
             mAudioTrack.clear();
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 6248e90..8a451c8 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -48,22 +48,43 @@
 
 MediaCodecList::MediaCodecList()
     : mInitCheck(NO_INIT) {
-    FILE *file = fopen("/etc/media_codecs.xml", "r");
+    parseTopLevelXMLFile("/etc/media_codecs.xml");
+}
 
-    if (file == NULL) {
-        ALOGW("unable to open media codecs configuration xml file.");
+void MediaCodecList::parseTopLevelXMLFile(const char *codecs_xml) {
+    // get href_base
+    char *href_base_end = strrchr(codecs_xml, '/');
+    if (href_base_end != NULL) {
+        mHrefBase = AString(codecs_xml, href_base_end - codecs_xml + 1);
+    }
+
+    mInitCheck = OK;
+    mCurrentSection = SECTION_TOPLEVEL;
+    mDepth = 0;
+
+    parseXMLFile(codecs_xml);
+
+    if (mInitCheck != OK) {
+        mCodecInfos.clear();
+        mCodecQuirks.clear();
         return;
     }
 
-    parseXMLFile(file);
+    // These are currently still used by the video editing suite.
+    addMediaCodec(true /* encoder */, "AACEncoder", "audio/mp4a-latm");
+    addMediaCodec(
+            false /* encoder */, "OMX.google.raw.decoder", "audio/raw");
 
-    if (mInitCheck == OK) {
-        // These are currently still used by the video editing suite.
+    for (size_t i = mCodecInfos.size(); i-- > 0;) {
+        CodecInfo *info = &mCodecInfos.editItemAt(i);
 
-        addMediaCodec(true /* encoder */, "AACEncoder", "audio/mp4a-latm");
+        if (info->mTypes == 0) {
+            // No types supported by this component???
+            ALOGW("Component %s does not support any type of media?",
+                  info->mName.c_str());
 
-        addMediaCodec(
-                false /* encoder */, "OMX.google.raw.decoder", "audio/raw");
+            mCodecInfos.removeAt(i);
+        }
     }
 
 #if 0
@@ -84,9 +105,6 @@
         ALOGI("%s", line.c_str());
     }
 #endif
-
-    fclose(file);
-    file = NULL;
 }
 
 MediaCodecList::~MediaCodecList() {
@@ -96,10 +114,14 @@
     return mInitCheck;
 }
 
-void MediaCodecList::parseXMLFile(FILE *file) {
-    mInitCheck = OK;
-    mCurrentSection = SECTION_TOPLEVEL;
-    mDepth = 0;
+void MediaCodecList::parseXMLFile(const char *path) {
+    FILE *file = fopen(path, "r");
+
+    if (file == NULL) {
+        ALOGW("unable to open media codecs configuration xml file: %s", path);
+        mInitCheck = NAME_NOT_FOUND;
+        return;
+    }
 
     XML_Parser parser = ::XML_ParserCreate(NULL);
     CHECK(parser != NULL);
@@ -112,7 +134,7 @@
     while (mInitCheck == OK) {
         void *buff = ::XML_GetBuffer(parser, BUFF_SIZE);
         if (buff == NULL) {
-            ALOGE("failed to in call to XML_GetBuffer()");
+            ALOGE("failed in call to XML_GetBuffer()");
             mInitCheck = UNKNOWN_ERROR;
             break;
         }
@@ -124,8 +146,9 @@
             break;
         }
 
-        if (::XML_ParseBuffer(parser, bytes_read, bytes_read == 0)
-                != XML_STATUS_OK) {
+        XML_Status status = ::XML_ParseBuffer(parser, bytes_read, bytes_read == 0);
+        if (status != XML_STATUS_OK) {
+            ALOGE("malformed (%s)", ::XML_ErrorString(::XML_GetErrorCode(parser)));
             mInitCheck = ERROR_MALFORMED;
             break;
         }
@@ -137,25 +160,8 @@
 
     ::XML_ParserFree(parser);
 
-    if (mInitCheck == OK) {
-        for (size_t i = mCodecInfos.size(); i-- > 0;) {
-            CodecInfo *info = &mCodecInfos.editItemAt(i);
-
-            if (info->mTypes == 0) {
-                // No types supported by this component???
-
-                ALOGW("Component %s does not support any type of media?",
-                      info->mName.c_str());
-
-                mCodecInfos.removeAt(i);
-            }
-        }
-    }
-
-    if (mInitCheck != OK) {
-        mCodecInfos.clear();
-        mCodecQuirks.clear();
-    }
+    fclose(file);
+    file = NULL;
 }
 
 // static
@@ -169,12 +175,63 @@
     static_cast<MediaCodecList *>(me)->endElementHandler(name);
 }
 
+status_t MediaCodecList::includeXMLFile(const char **attrs) {
+    const char *href = NULL;
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (!strcmp(attrs[i], "href")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            href = attrs[i + 1];
+            ++i;
+        } else {
+            return -EINVAL;
+        }
+        ++i;
+    }
+
+    // For security reasons and for simplicity, file names can only contain
+    // [a-zA-Z0-9_.] and must start with media_codecs_ and end with .xml
+    for (i = 0; href[i] != '\0'; i++) {
+        if (href[i] == '.' || href[i] == '_' ||
+                (href[i] >= '0' && href[i] <= '9') ||
+                (href[i] >= 'A' && href[i] <= 'Z') ||
+                (href[i] >= 'a' && href[i] <= 'z')) {
+            continue;
+        }
+        ALOGE("invalid include file name: %s", href);
+        return -EINVAL;
+    }
+
+    AString filename = href;
+    if (!filename.startsWith("media_codecs_") ||
+        !filename.endsWith(".xml")) {
+        ALOGE("invalid include file name: %s", href);
+        return -EINVAL;
+    }
+    filename.insert(mHrefBase, 0);
+
+    parseXMLFile(filename.c_str());
+    return mInitCheck;
+}
+
 void MediaCodecList::startElementHandler(
         const char *name, const char **attrs) {
     if (mInitCheck != OK) {
         return;
     }
 
+    if (!strcmp(name, "Include")) {
+        mInitCheck = includeXMLFile(attrs);
+        if (mInitCheck == OK) {
+            mPastSections.push(mCurrentSection);
+            mCurrentSection = SECTION_INCLUDE;
+        }
+        ++mDepth;
+        return;
+    }
+
     switch (mCurrentSection) {
         case SECTION_TOPLEVEL:
         {
@@ -264,6 +321,15 @@
             break;
         }
 
+        case SECTION_INCLUDE:
+        {
+            if (!strcmp(name, "Include") && mPastSections.size() > 0) {
+                mCurrentSection = mPastSections.top();
+                mPastSections.pop();
+            }
+            break;
+        }
+
         default:
             break;
     }
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 545ca9d..1cfe6c0 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -1806,7 +1806,8 @@
     }
     // FIXME: assume that surface is controlled by app (native window
     // returns the number for the case when surface is not controlled by app)
-    minUndequeuedBufs++;
+    // FIXME2: This means that minUndequeuedBufs can be 1 larger than reported
+    // For now, try to allocate 1 more buffer, but don't fail if unsuccessful
 
     // Use conservative allocation while also trying to reduce starvation
     //
@@ -1814,10 +1815,11 @@
     //    minimum needed for the consumer to be able to work
     // 2. try to allocate two (2) additional buffers to reduce starvation from
     //    the consumer
-    CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d",
+    //    plus an extra buffer to account for incorrect minUndequeuedBufs
+    CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d+1",
             def.nBufferCountMin, def.nBufferCountActual, minUndequeuedBufs);
 
-    for (OMX_U32 extraBuffers = 2; /* condition inside loop */; extraBuffers--) {
+    for (OMX_U32 extraBuffers = 2 + 1; /* condition inside loop */; extraBuffers--) {
         OMX_U32 newBufferCount =
             def.nBufferCountMin + minUndequeuedBufs + extraBuffers;
         def.nBufferCountActual = newBufferCount;
@@ -1836,7 +1838,7 @@
             return err;
         }
     }
-    CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d",
+    CODEC_LOGI("OMX-buffers: min=%u actual=%u undeq=%d+1",
             def.nBufferCountMin, def.nBufferCountActual, minUndequeuedBufs);
 
     err = native_window_set_buffer_count(
diff --git a/media/libstagefright/codecs/aacenc/Android.mk b/media/libstagefright/codecs/aacenc/Android.mk
index 04dc487..58ec3ba 100644
--- a/media/libstagefright/codecs/aacenc/Android.mk
+++ b/media/libstagefright/codecs/aacenc/Android.mk
@@ -117,7 +117,6 @@
 
   LOCAL_MODULE := libstagefright_soft_aacenc
   LOCAL_MODULE_TAGS := optional
-  LOCAL_32_BIT_ONLY := true
 
   include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/libstagefright/codecs/avc/enc/Android.mk b/media/libstagefright/codecs/avc/enc/Android.mk
index c2e7b81..537ba42 100644
--- a/media/libstagefright/codecs/avc/enc/Android.mk
+++ b/media/libstagefright/codecs/avc/enc/Android.mk
@@ -20,7 +20,6 @@
 
 
 LOCAL_MODULE := libstagefright_avcenc
-LOCAL_32_BIT_ONLY := true
 
 LOCAL_C_INCLUDES := \
     $(LOCAL_PATH)/src \
@@ -71,7 +70,6 @@
 
 LOCAL_MODULE := libstagefright_soft_h264enc
 LOCAL_MODULE_TAGS := optional
-LOCAL_32_BIT_ONLY := true
 
 LOCAL_CFLAGS += -Werror
 
diff --git a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
index 40661e7..0c62ec0 100644
--- a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
+++ b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
@@ -247,7 +247,7 @@
 
             if (defParams->nPortIndex == 0) {
                 if (defParams->nBufferSize > kMaxInputBufferSize) {
-                    ALOGE("Input buffer size must be at most %zu bytes",
+                    ALOGE("Input buffer size must be at most %d bytes",
                         kMaxInputBufferSize);
                     return OMX_ErrorUnsupportedSetting;
                 }
@@ -354,12 +354,12 @@
             size_t bytes, unsigned samples,
             unsigned current_frame) {
     UNUSED_UNLESS_VERBOSE(current_frame);
-    ALOGV("SoftFlacEncoder::onEncodedFlacAvailable(bytes=%d, samples=%d, curr_frame=%d)",
+    ALOGV("SoftFlacEncoder::onEncodedFlacAvailable(bytes=%zu, samples=%u, curr_frame=%u)",
             bytes, samples, current_frame);
 
 #ifdef WRITE_FLAC_HEADER_IN_FIRST_BUFFER
     if (samples == 0) {
-        ALOGI(" saving %d bytes of header", bytes);
+        ALOGI(" saving %zu bytes of header", bytes);
         memcpy(mHeader + mHeaderOffset, buffer, bytes);
         mHeaderOffset += bytes;// will contain header size when finished receiving header
         return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
@@ -370,7 +370,7 @@
     if ((samples == 0) || !mEncoderWriteData) {
         // called by the encoder because there's header data to save, but it's not the role
         // of this component (unless WRITE_FLAC_HEADER_IN_FIRST_BUFFER is defined)
-        ALOGV("ignoring %d bytes of header data (samples=%d)", bytes, samples);
+        ALOGV("ignoring %zu bytes of header data (samples=%d)", bytes, samples);
         return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
     }
 
@@ -391,9 +391,9 @@
 #endif
 
     // write encoded data
-    ALOGV(" writing %d bytes of encoded data on output port", bytes);
+    ALOGV(" writing %zu bytes of encoded data on output port", bytes);
     if (bytes > outHeader->nAllocLen - outHeader->nOffset - outHeader->nFilledLen) {
-        ALOGE(" not enough space left to write encoded data, dropping %u bytes", bytes);
+        ALOGE(" not enough space left to write encoded data, dropping %zu bytes", bytes);
         // a fatal error would stop the encoding
         return FLAC__STREAM_ENCODER_WRITE_STATUS_OK;
     }
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index 2c73e57..ee8dcf2 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -33,6 +33,8 @@
 
 #include "SoftMPEG4Encoder.h"
 
+#include <inttypes.h>
+
 namespace android {
 
 template<class T>
@@ -725,7 +727,7 @@
             if (!PVEncodeVideoFrame(mHandle, &vin, &vout,
                     &modTimeMs, outPtr, &dataLength, &nLayer) ||
                 !PVGetHintTrack(mHandle, &hintTrack)) {
-                ALOGE("Failed to encode frame or get hink track at frame %lld",
+                ALOGE("Failed to encode frame or get hink track at frame %" PRId64,
                     mNumInputFrames);
                 mSignalledError = true;
                 notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 5efe022..b3a6bcc 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -141,9 +141,9 @@
       mWidth(176),
       mHeight(144),
       mBitrate(192000),  // in bps
+      mFramerate(30 << 16), // in Q16 format
       mBitrateUpdated(false),
       mBitrateControlMode(VPX_VBR),  // variable bitrate
-      mFrameDurationUs(33333),  // Defaults to 30 fps
       mDCTPartitions(0),
       mErrorResilience(OMX_FALSE),
       mColorFormat(OMX_COLOR_FormatYUV420Planar),
@@ -180,9 +180,8 @@
     inputPort.format.video.nStride = inputPort.format.video.nFrameWidth;
     inputPort.format.video.nSliceHeight = inputPort.format.video.nFrameHeight;
     inputPort.format.video.nBitrate = 0;
-    // frameRate is reciprocal of frameDuration, which is
-    // in microseconds. It is also in Q16 format.
-    inputPort.format.video.xFramerate = (1000000/mFrameDurationUs) << 16;
+    // frameRate is in Q16 format.
+    inputPort.format.video.xFramerate = mFramerate;
     inputPort.format.video.bFlagErrorConcealment = OMX_FALSE;
     inputPort.nPortIndex = kInputPortIndex;
     inputPort.eDir = OMX_DirInput;
@@ -220,7 +219,7 @@
     outputPort.format.video.eCompressionFormat = OMX_VIDEO_CodingVP8;
     outputPort.format.video.eColorFormat = OMX_COLOR_FormatUnused;
     outputPort.format.video.pNativeWindow = NULL;
-    outputPort.nBufferSize = 256 * 1024;  // arbitrary
+    outputPort.nBufferSize = 1024 * 1024; // arbitrary
 
     addPort(outputPort);
 }
@@ -277,8 +276,39 @@
     mCodecConfiguration->g_timebase.num = 1;
     mCodecConfiguration->g_timebase.den = 1000000;
     // rc_target_bitrate is in kbps, mBitrate in bps
-    mCodecConfiguration->rc_target_bitrate = mBitrate/1000;
+    mCodecConfiguration->rc_target_bitrate = mBitrate / 1000;
     mCodecConfiguration->rc_end_usage = mBitrateControlMode;
+    // Disable frame drop - not allowed in MediaCodec now.
+    mCodecConfiguration->rc_dropframe_thresh = 0;
+    if (mBitrateControlMode == VPX_CBR) {
+        // Disable spatial resizing.
+        mCodecConfiguration->rc_resize_allowed = 0;
+        // Single-pass mode.
+        mCodecConfiguration->g_pass = VPX_RC_ONE_PASS;
+        // Minimum quantization level.
+        mCodecConfiguration->rc_min_quantizer = 2;
+        // Maximum quantization level.
+        mCodecConfiguration->rc_max_quantizer = 63;
+        // Maximum amount of bits that can be subtracted from the target
+        // bitrate - expressed as percentage of the target bitrate.
+        mCodecConfiguration->rc_undershoot_pct = 100;
+        // Maximum amount of bits that can be added to the target
+        // bitrate - expressed as percentage of the target bitrate.
+        mCodecConfiguration->rc_overshoot_pct = 15;
+        // Initial value of the buffer level in ms.
+        mCodecConfiguration->rc_buf_initial_sz = 500;
+        // Amount of data that the encoder should try to maintain in ms.
+        mCodecConfiguration->rc_buf_optimal_sz = 600;
+        // The amount of data that may be buffered by the decoding
+        // application in ms.
+        mCodecConfiguration->rc_buf_sz = 1000;
+        // Enable error resilience - needed for packet loss.
+        mCodecConfiguration->g_error_resilient = 1;
+        // Disable lagged encoding.
+        mCodecConfiguration->g_lag_in_frames = 0;
+        // Encoder determines optimal key frame placement automatically.
+        mCodecConfiguration->kf_mode = VPX_KF_AUTO;
+    }
 
     codec_return = vpx_codec_enc_init(mCodecContext,
                                       mCodecInterface,
@@ -298,6 +328,33 @@
         return UNKNOWN_ERROR;
     }
 
+    // Extra CBR settings
+    if (mBitrateControlMode == VPX_CBR) {
+        codec_return = vpx_codec_control(mCodecContext,
+                                         VP8E_SET_STATIC_THRESHOLD,
+                                         1);
+        if (codec_return == VPX_CODEC_OK) {
+            uint32_t rc_max_intra_target =
+                mCodecConfiguration->rc_buf_optimal_sz * (mFramerate >> 17) / 10;
+            // Don't go below 3 times the per-frame bandwidth.
+            if (rc_max_intra_target < 300) {
+                rc_max_intra_target = 300;
+            }
+            codec_return = vpx_codec_control(mCodecContext,
+                                             VP8E_SET_MAX_INTRA_BITRATE_PCT,
+                                             rc_max_intra_target);
+        }
+        if (codec_return == VPX_CODEC_OK) {
+            codec_return = vpx_codec_control(mCodecContext,
+                                             VP8E_SET_CPUUSED,
+                                             -8);
+        }
+        if (codec_return != VPX_CODEC_OK) {
+            ALOGE("Error setting cbr parameters for vpx encoder.");
+            return UNKNOWN_ERROR;
+        }
+    }
+
     if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar || mInputDataIsMeta) {
         if (mConversionBuffer == NULL) {
             mConversionBuffer = (uint8_t *)malloc(mWidth * mHeight * 3 / 2);
@@ -361,9 +418,7 @@
                 }
 
                 formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused;
-                // Converting from microseconds
-                // Also converting to Q16 format
-                formatParams->xFramerate = (1000000/mFrameDurationUs) << 16;
+                formatParams->xFramerate = mFramerate;
                 return OMX_ErrorNone;
             } else if (formatParams->nPortIndex == kOutputPortIndex) {
                 formatParams->eCompressionFormat = OMX_VIDEO_CodingVP8;
@@ -660,9 +715,7 @@
         mHeight = port->format.video.nFrameHeight;
 
         // xFramerate comes in Q16 format, in frames per second unit
-        const uint32_t framerate = port->format.video.xFramerate >> 16;
-        // frame duration is in microseconds
-        mFrameDurationUs = (1000000/framerate);
+        mFramerate = port->format.video.xFramerate;
 
         if (port->format.video.eColorFormat == OMX_COLOR_FormatYUV420Planar ||
             port->format.video.eColorFormat == OMX_COLOR_FormatYUV420SemiPlanar ||
@@ -684,6 +737,13 @@
         return OMX_ErrorNone;
     } else if (port->nPortIndex == kOutputPortIndex) {
         mBitrate = port->format.video.nBitrate;
+        mWidth = port->format.video.nFrameWidth;
+        mHeight = port->format.video.nFrameHeight;
+
+        OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kOutputPortIndex)->mDef;
+        def->format.video.nFrameWidth = mWidth;
+        def->format.video.nFrameHeight = mHeight;
+        def->format.video.nBitrate = mBitrate;
         return OMX_ErrorNone;
     } else {
         return OMX_ErrorBadPortIndex;
@@ -814,11 +874,12 @@
             mBitrateUpdated = false;
         }
 
+        uint32_t frameDuration = (uint32_t)(((uint64_t)1000000 << 16) / mFramerate);
         codec_return = vpx_codec_encode(
                 mCodecContext,
                 &raw_frame,
                 inputBufferHeader->nTimeStamp,  // in timebase units
-                mFrameDurationUs,  // frame duration in timebase units
+                frameDuration,  // frame duration in timebase units
                 flags,  // frame flags
                 VPX_DL_REALTIME);  // encoding deadline
         if (codec_return != VPX_CODEC_OK) {
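
The SoftVPXEncoder hunks above replace the stored frame duration in microseconds with a framerate kept in Q16 fixed point, which is the representation OMX's xFramerate field carries. A standalone sketch of the fixed-point arithmetic involved (values illustrative, not part of the patch):

#include <cstdint>
#include <cstdio>

int main() {
    // 30 fps in Q16 fixed point, the representation OMX's xFramerate uses.
    uint32_t framerateQ16 = 30 << 16;                       // 1966080

    // Per-frame duration in microseconds, computed the same way as the
    // frameDuration local above: (1e6 << 16) / framerateQ16 == 1e6 / fps.
    uint32_t frameDurationUs =
            (uint32_t)(((uint64_t)1000000 << 16) / framerateQ16);

    // Integer fps recovered from Q16; (framerateQ16 >> 17) is fps / 2, which
    // is the factor used in the rc_max_intra_target computation above.
    uint32_t fps = framerateQ16 >> 16;

    printf("fps=%u frameDurationUs=%u\n", fps, frameDurationUs);  // 30, 33333
    return 0;
}
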
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
index 076830f..1c983ab 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.h
@@ -130,16 +130,15 @@
     // Target bitrate set for the encoder, in bits per second.
     uint32_t mBitrate;
 
+    // Target framerate set for the encoder, in Q16 format.
+    uint32_t mFramerate;
+
     // If a request for a change it bitrate has been received.
     bool mBitrateUpdated;
 
     // Bitrate control mode, either constant or variable
     vpx_rc_mode mBitrateControlMode;
 
-    // Frame duration is the reciprocal of framerate, denoted
-    // in microseconds
-    uint64_t mFrameDurationUs;
-
     // vp8 specific configuration parameter
     // that enables token partitioning of
     // the stream into substreams
diff --git a/media/libstagefright/data/media_codecs_google_audio.xml b/media/libstagefright/data/media_codecs_google_audio.xml
new file mode 100644
index 0000000..b1f93de
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_audio.xml
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<Included>
+    <Decoders>
+        <MediaCodec name="OMX.google.mp3.decoder" type="audio/mpeg" />
+        <MediaCodec name="OMX.google.amrnb.decoder" type="audio/3gpp" />
+        <MediaCodec name="OMX.google.amrwb.decoder" type="audio/amr-wb" />
+        <MediaCodec name="OMX.google.aac.decoder" type="audio/mp4a-latm" />
+        <MediaCodec name="OMX.google.g711.alaw.decoder" type="audio/g711-alaw" />
+        <MediaCodec name="OMX.google.g711.mlaw.decoder" type="audio/g711-mlaw" />
+        <MediaCodec name="OMX.google.vorbis.decoder" type="audio/vorbis" />
+        <MediaCodec name="OMX.google.opus.decoder" type="audio/opus" />
+    </Decoders>
+
+    <Encoders>
+        <MediaCodec name="OMX.google.aac.encoder" type="audio/mp4a-latm" />
+        <MediaCodec name="OMX.google.amrnb.encoder" type="audio/3gpp" />
+        <MediaCodec name="OMX.google.amrwb.encoder" type="audio/amr-wb" />
+        <MediaCodec name="OMX.google.flac.encoder" type="audio/flac" />
+    </Encoders>
+</Included>
diff --git a/media/libstagefright/data/media_codecs_google_telephony.xml b/media/libstagefright/data/media_codecs_google_telephony.xml
new file mode 100644
index 0000000..28f5ffc
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_telephony.xml
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<Included>
+    <Decoders>
+        <MediaCodec name="OMX.google.gsm.decoder" type="audio/gsm" />
+    </Decoders>
+</Included>
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
new file mode 100644
index 0000000..41e0efb
--- /dev/null
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<Included>
+    <Decoders>
+        <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es" />
+        <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp" />
+        <MediaCodec name="OMX.google.h264.decoder" type="video/avc" />
+        <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8" />
+        <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9" />
+    </Decoders>
+
+    <Encoders>
+        <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp" />
+        <MediaCodec name="OMX.google.h264.encoder" type="video/avc" />
+        <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es" />
+        <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8" />
+    </Encoders>
+</Included>
diff --git a/media/libstagefright/foundation/ANetworkSession.cpp b/media/libstagefright/foundation/ANetworkSession.cpp
index 08c4a87..af5be70 100644
--- a/media/libstagefright/foundation/ANetworkSession.cpp
+++ b/media/libstagefright/foundation/ANetworkSession.cpp
@@ -579,7 +579,7 @@
 
         if (err == -EAGAIN) {
             if (!mOutFragments.empty()) {
-                ALOGI("%d datagrams remain queued.", mOutFragments.size());
+                ALOGI("%zu datagrams remain queued.", mOutFragments.size());
             }
             err = OK;
         }
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 19db6eb..fd42e77 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -43,6 +43,7 @@
 #include <utils/Mutex.h>
 
 #include <ctype.h>
+#include <inttypes.h>
 #include <openssl/aes.h>
 #include <openssl/md5.h>
 
@@ -168,7 +169,7 @@
         if (stream == STREAMTYPE_AUDIO || stream == STREAMTYPE_VIDEO) {
             int64_t timeUs;
             CHECK((*accessUnit)->meta()->findInt64("timeUs",  &timeUs));
-            ALOGV("[%s] read buffer at time %lld us", streamStr, timeUs);
+            ALOGV("[%s] read buffer at time %" PRId64 " us", streamStr, timeUs);
 
             mLastDequeuedTimeUs = timeUs;
             mRealTimeBaseUs = ALooper::GetNowUs() - timeUs;
@@ -622,7 +623,7 @@
  * - block_size == 0 means entire range
  *
  */
-status_t LiveSession::fetchFile(
+ssize_t LiveSession::fetchFile(
         const char *url, sp<ABuffer> *out,
         int64_t range_offset, int64_t range_length,
         uint32_t block_size, /* download block size */
@@ -673,8 +674,9 @@
         buffer->setRange(0, 0);
     }
 
+    ssize_t bytesRead = 0;
     // adjust range_length if only reading partial block
-    if (block_size > 0 && (range_length == -1 || buffer->size() + block_size < range_length)) {
+    if (block_size > 0 && (range_length == -1 || (int64_t)(buffer->size() + block_size) < range_length)) {
         range_length = buffer->size() + block_size;
     }
     for (;;) {
@@ -683,7 +685,7 @@
         if (bufferRemaining == 0 && getSizeErr != OK) {
             bufferRemaining = 32768;
 
-            ALOGV("increasing download buffer to %d bytes",
+            ALOGV("increasing download buffer to %zu bytes",
                  buffer->size() + bufferRemaining);
 
             sp<ABuffer> copy = new ABuffer(buffer->size() + bufferRemaining);
@@ -696,7 +698,7 @@
         size_t maxBytesToRead = bufferRemaining;
         if (range_length >= 0) {
             int64_t bytesLeftInRange = range_length - buffer->size();
-            if (bytesLeftInRange < maxBytesToRead) {
+            if (bytesLeftInRange < (int64_t)maxBytesToRead) {
                 maxBytesToRead = bytesLeftInRange;
 
                 if (bytesLeftInRange == 0) {
@@ -720,6 +722,7 @@
         }
 
         buffer->setRange(0, buffer->size() + (size_t)n);
+        bytesRead += n;
     }
 
     *out = buffer;
@@ -730,7 +733,7 @@
         }
     }
 
-    return OK;
+    return bytesRead;
 }
 
 sp<M3UParser> LiveSession::fetchPlaylist(
@@ -741,9 +744,9 @@
 
     sp<ABuffer> buffer;
     String8 actualUrl;
-    status_t err = fetchFile(url, &buffer, 0, -1, 0, NULL, &actualUrl);
+    ssize_t  err = fetchFile(url, &buffer, 0, -1, 0, NULL, &actualUrl);
 
-    if (err != OK) {
+    if (err <= 0) {
         return NULL;
     }
 
@@ -962,7 +965,7 @@
 
     mPrevBandwidthIndex = bandwidthIndex;
 
-    ALOGV("changeConfiguration => timeUs:%lld us, bwIndex:%d, pickTrack:%d",
+    ALOGV("changeConfiguration => timeUs:%" PRId64 " us, bwIndex:%zu, pickTrack:%d",
           timeUs, bandwidthIndex, pickTrack);
 
     if (pickTrack) {
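
With fetchFile() now returning ssize_t, a negative return value carries a status_t error code, and callers doing whole-file fetches (such as fetchPlaylist() above) treat zero bytes as a failure as well. A minimal standalone sketch of that calling convention (stubbed fetch; not the actual LiveSession API surface):

#include <cstdio>
#include <sys/types.h>   // ssize_t

// Stand-in for the new contract: >= 0 is a byte count, < 0 is a status_t error.
static ssize_t fetchFileStub(const char *url) {
    (void)url;
    return 1024;   // pretend we downloaded 1024 bytes
}

int main() {
    ssize_t bytesRead = fetchFileStub("http://example.com/playlist.m3u8");
    if (bytesRead < 0) {
        // Error path: the negative value is the status_t code (e.g. ERROR_IO).
        fprintf(stderr, "fetch failed: %zd\n", bytesRead);
        return 1;
    }
    if (bytesRead == 0) {
        // fetchPlaylist() treats an empty fetch as a failure too (err <= 0).
        fprintf(stderr, "empty fetch\n");
        return 1;
    }
    printf("downloaded %zd bytes\n", bytesRead);
    return 0;
}
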
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index f489ec4..d7ed56f 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -203,7 +203,7 @@
     //
     // For reused HTTP sources, the caller must download a file sequentially without
     // any overlaps or gaps to prevent reconnection.
-    status_t fetchFile(
+    ssize_t fetchFile(
             const char *url, sp<ABuffer> *out,
             /* request/open a file starting at range_offset for range_length bytes */
             int64_t range_offset = 0, int64_t range_length = -1,
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index dacdd40..f22d650 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -163,21 +163,21 @@
 
     if (select) {
         if (index >= mMediaItems.size()) {
-            ALOGE("track %d does not exist", index);
+            ALOGE("track %zu does not exist", index);
             return INVALID_OPERATION;
         }
         if (mSelectedIndex == (ssize_t)index) {
-            ALOGE("track %d already selected", index);
+            ALOGE("track %zu already selected", index);
             return BAD_VALUE;
         }
-        ALOGV("selected track %d", index);
+        ALOGV("selected track %zu", index);
         mSelectedIndex = index;
     } else {
         if (mSelectedIndex != (ssize_t)index) {
-            ALOGE("track %d is not selected", index);
+            ALOGE("track %zu is not selected", index);
             return BAD_VALUE;
         }
-        ALOGV("unselected track %d", index);
+        ALOGV("unselected track %zu", index);
         mSelectedIndex = -1;
     }
 
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 9d7cb99..5011bc1 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -40,6 +40,7 @@
 #include <media/stagefright/Utils.h>
 
 #include <ctype.h>
+#include <inttypes.h>
 #include <openssl/aes.h>
 #include <openssl/md5.h>
 
@@ -48,6 +49,7 @@
 // static
 const int64_t PlaylistFetcher::kMinBufferedDurationUs = 10000000ll;
 const int64_t PlaylistFetcher::kMaxMonitorDelayUs = 3000000ll;
+const int32_t PlaylistFetcher::kDownloadBlockSize = 192;
 const int32_t PlaylistFetcher::kNumSkipFrames = 10;
 
 PlaylistFetcher::PlaylistFetcher(
@@ -216,9 +218,9 @@
     if (index >= 0) {
         key = mAESKeyForURI.valueAt(index);
     } else {
-        status_t err = mSession->fetchFile(keyURI.c_str(), &key);
+        ssize_t err = mSession->fetchFile(keyURI.c_str(), &key);
 
-        if (err != OK) {
+        if (err < 0) {
             ALOGE("failed to fetch cipher key from '%s'.", keyURI.c_str());
             return ERROR_IO;
         } else if (key->size() != 16) {
@@ -315,7 +317,7 @@
         maxDelayUs = minDelayUs;
     }
     if (delayUs > maxDelayUs) {
-        ALOGV("Need to refresh playlist in %lld", maxDelayUs);
+        ALOGV("Need to refresh playlist in %" PRId64 , maxDelayUs);
         delayUs = maxDelayUs;
     }
     sp<AMessage> msg = new AMessage(kWhatMonitorQueue, id());
@@ -626,7 +628,7 @@
 
             int64_t bufferedStreamDurationUs =
                 mPacketSources.valueAt(i)->getBufferedDurationUs(&finalResult);
-            ALOGV("buffered %lld for stream %d",
+            ALOGV("buffered %" PRId64 " for stream %d",
                     bufferedStreamDurationUs, mPacketSources.keyAt(i));
             if (bufferedStreamDurationUs > bufferedDurationUs) {
                 bufferedDurationUs = bufferedStreamDurationUs;
@@ -639,7 +641,7 @@
     if (!mPrepared && bufferedDurationUs > targetDurationUs && downloadMore) {
         mPrepared = true;
 
-        ALOGV("prepared, buffered=%lld > %lld",
+        ALOGV("prepared, buffered=%" PRId64 " > %" PRId64 "",
                 bufferedDurationUs, targetDurationUs);
         sp<AMessage> msg = mNotify->dup();
         msg->setInt32("what", kWhatTemporarilyDoneFetching);
@@ -647,7 +649,7 @@
     }
 
     if (finalResult == OK && downloadMore) {
-        ALOGV("monitoring, buffered=%lld < %lld",
+        ALOGV("monitoring, buffered=%" PRId64 " < %" PRId64 "",
                 bufferedDurationUs, durationToBufferUs);
         // delay the next download slightly; hopefully this gives other concurrent fetchers
         // a better chance to run.
@@ -663,7 +665,7 @@
         msg->post();
 
         int64_t delayUs = mPrepared ? kMaxMonitorDelayUs : targetDurationUs / 2;
-        ALOGV("pausing for %lld, buffered=%lld > %lld",
+        ALOGV("pausing for %" PRId64 ", buffered=%" PRId64 " > %" PRId64 "",
                 delayUs, bufferedDurationUs, durationToBufferUs);
         // :TRICKY: need to enforce minimum delay because the delay to
         // refresh the playlist will become 0
@@ -704,6 +706,11 @@
     return OK;
 }
 
+// static
+bool PlaylistFetcher::bufferStartsWithTsSyncByte(const sp<ABuffer>& buffer) {
+    return buffer->size() > 0 && buffer->data()[0] == 0x47;
+}
+
 void PlaylistFetcher::onDownloadNext() {
     if (refreshPlaylist() != OK) {
         return;
@@ -732,7 +739,7 @@
 
         if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
             mSeqNumber = getSeqNumberForTime(mStartTimeUs);
-            ALOGV("Initial sequence number for time %lld is %d from (%d .. %d)",
+            ALOGV("Initial sequence number for time %" PRId64 " is %d from (%d .. %d)",
                     mStartTimeUs, mSeqNumber, firstSeqNumberInPlaylist,
                     lastSeqNumberInPlaylist);
         } else {
@@ -766,7 +773,7 @@
                     delayUs = kMaxMonitorDelayUs;
                 }
                 ALOGV("sequence number high: %d from (%d .. %d), "
-                      "monitor in %lld (retry=%d)",
+                      "monitor in %" PRId64 " (retry=%d)",
                         mSeqNumber, firstSeqNumberInPlaylist,
                         lastSeqNumberInPlaylist, delayUs, mNumRetries);
                 postMonitorQueue(delayUs);
@@ -791,7 +798,7 @@
             ALOGE("Cannot find sequence number %d in playlist "
                  "(contains %d - %d)",
                  mSeqNumber, firstSeqNumberInPlaylist,
-                 firstSeqNumberInPlaylist + mPlaylist->size() - 1);
+                  firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1);
 
             notifyError(ERROR_END_OF_STREAM);
             return;
@@ -824,64 +831,159 @@
 
     ALOGV("fetching '%s'", uri.c_str());
 
-    sp<ABuffer> buffer;
-    status_t err = mSession->fetchFile(
-            uri.c_str(), &buffer, range_offset, range_length);
-
-    if (err != OK) {
-        ALOGE("failed to fetch .ts segment at url '%s'", uri.c_str());
-        notifyError(err);
-        return;
-    }
-
-    CHECK(buffer != NULL);
-
-    err = decryptBuffer(mSeqNumber - firstSeqNumberInPlaylist, buffer);
-    if (err == OK) {
-        err = checkDecryptPadding(buffer);
-    }
-
-    if (err != OK) {
-        ALOGE("decryptBuffer failed w/ error %d", err);
-
-        notifyError(err);
-        return;
-    }
-
-    if (mStartup || seekDiscontinuity || explicitDiscontinuity) {
-        // Signal discontinuity.
-
-        if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
-            // If this was a live event this made no sense since
-            // we don't have access to all the segment before the current
-            // one.
-            mNextPTSTimeUs = getSegmentStartTimeUs(mSeqNumber);
-        }
-
-        if (seekDiscontinuity || explicitDiscontinuity) {
-            ALOGI("queueing discontinuity (seek=%d, explicit=%d)",
-                 seekDiscontinuity, explicitDiscontinuity);
-
-            queueDiscontinuity(
-                    explicitDiscontinuity
-                        ? ATSParser::DISCONTINUITY_FORMATCHANGE
-                        : ATSParser::DISCONTINUITY_SEEK,
-                    NULL /* extra */);
+    sp<DataSource> source;
+    sp<ABuffer> buffer, tsBuffer;
+    // Decrypt a junk buffer to prefetch the key; since a session uses only one http connection,
+    // this avoids interleaved connections to the key and segment file.
+    {
+        sp<ABuffer> junk = new ABuffer(16);
+        junk->setRange(0, 16);
+        status_t err = decryptBuffer(mSeqNumber - firstSeqNumberInPlaylist, junk,
+                true /* first */);
+        if (err != OK) {
+            notifyError(err);
+            return;
         }
     }
 
-    err = extractAndQueueAccessUnits(buffer, itemMeta);
+    // block-wise download
+    ssize_t bytesRead;
+    do {
+        bytesRead = mSession->fetchFile(
+                uri.c_str(), &buffer, range_offset, range_length, kDownloadBlockSize, &source);
 
-    if (err == -EAGAIN) {
-        // bad starting sequence number hint
-        postMonitorQueue();
+        if (bytesRead < 0) {
+            status_t err = bytesRead;
+            ALOGE("failed to fetch .ts segment at url '%s'", uri.c_str());
+            notifyError(err);
+            return;
+        }
+
+        CHECK(buffer != NULL);
+
+        size_t size = buffer->size();
+        // Set decryption range.
+        buffer->setRange(size - bytesRead, bytesRead);
+        status_t err = decryptBuffer(mSeqNumber - firstSeqNumberInPlaylist, buffer,
+                buffer->offset() == 0 /* first */);
+        // Unset decryption range.
+        buffer->setRange(0, size);
+
+        if (err != OK) {
+            ALOGE("decryptBuffer failed w/ error %d", err);
+
+            notifyError(err);
+            return;
+        }
+
+        if (mStartup || seekDiscontinuity || explicitDiscontinuity) {
+            // Signal discontinuity.
+
+            if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
+                // If this was a live event this made no sense since
+                // we don't have access to all the segment before the current
+                // one.
+                mNextPTSTimeUs = getSegmentStartTimeUs(mSeqNumber);
+            }
+
+            if (seekDiscontinuity || explicitDiscontinuity) {
+                ALOGI("queueing discontinuity (seek=%d, explicit=%d)",
+                     seekDiscontinuity, explicitDiscontinuity);
+
+                queueDiscontinuity(
+                        explicitDiscontinuity
+                            ? ATSParser::DISCONTINUITY_FORMATCHANGE
+                            : ATSParser::DISCONTINUITY_SEEK,
+                        NULL /* extra */);
+            }
+        }
+
+        err = OK;
+        if (bufferStartsWithTsSyncByte(buffer)) {
+            // Incremental extraction is only supported for MPEG2 transport streams.
+            if (tsBuffer == NULL) {
+                tsBuffer = new ABuffer(buffer->data(), buffer->capacity());
+                tsBuffer->setRange(0, 0);
+            } else if (tsBuffer->capacity() != buffer->capacity()) {
+                size_t tsOff = tsBuffer->offset(), tsSize = tsBuffer->size();
+                tsBuffer = new ABuffer(buffer->data(), buffer->capacity());
+                tsBuffer->setRange(tsOff, tsSize);
+            }
+            tsBuffer->setRange(tsBuffer->offset(), tsBuffer->size() + bytesRead);
+
+            err = extractAndQueueAccessUnitsFromTs(tsBuffer);
+        }
+
+        if (err == -EAGAIN) {
+            // bad starting sequence number hint
+            postMonitorQueue();
+            return;
+        }
+
+        if (err == ERROR_OUT_OF_RANGE) {
+            // reached stopping point
+            stopAsync(/* selfTriggered = */ true);
+            return;
+        }
+
+        if (err != OK) {
+            notifyError(err);
+            return;
+        }
+
+        mStartup = false;
+    } while (bytesRead != 0);
+
+    if (bufferStartsWithTsSyncByte(buffer)) {
+        // If we still don't see a stream after fetching a full ts segment, mark it
+        // as nonexistent.
+        const size_t kNumTypes = ATSParser::NUM_SOURCE_TYPES;
+        ATSParser::SourceType srcTypes[kNumTypes] =
+                { ATSParser::VIDEO, ATSParser::AUDIO };
+        LiveSession::StreamType streamTypes[kNumTypes] =
+                { LiveSession::STREAMTYPE_VIDEO, LiveSession::STREAMTYPE_AUDIO };
+
+        for (size_t i = 0; i < kNumTypes; i++) {
+            ATSParser::SourceType srcType = srcTypes[i];
+            LiveSession::StreamType streamType = streamTypes[i];
+
+            sp<AnotherPacketSource> source =
+                static_cast<AnotherPacketSource *>(
+                    mTSParser->getSource(srcType).get());
+
+            if (source == NULL) {
+                ALOGW("MPEG2 Transport stream does not contain %s data.",
+                      srcType == ATSParser::VIDEO ? "video" : "audio");
+
+                mStreamTypeMask &= ~streamType;
+                mPacketSources.removeItem(streamType);
+            }
+        }
+
+    }
+
+    if (checkDecryptPadding(buffer) != OK) {
+        ALOGE("Incorrect padding bytes after decryption.");
+        notifyError(ERROR_MALFORMED);
         return;
     }
 
-    if (err == ERROR_OUT_OF_RANGE) {
-        // reached stopping point
-        stopAsync(/* selfTriggered = */ true);
-        return;
+    status_t err = OK;
+    if (tsBuffer != NULL) {
+        AString method;
+        CHECK(buffer->meta()->findString("cipher-method", &method));
+        if ((tsBuffer->size() > 0 && method == "NONE")
+                || tsBuffer->size() > 16) {
+            ALOGE("MPEG2 transport stream is not an even multiple of 188 "
+                    "bytes in length.");
+            notifyError(ERROR_MALFORMED);
+            return;
+        }
+    }
+
+    // bulk extract non-ts files
+    if (tsBuffer == NULL) {
+        err = extractAndQueueAccessUnits(buffer, itemMeta);
     }
 
     if (err != OK) {
@@ -892,8 +994,6 @@
     ++mSeqNumber;
 
     postMonitorQueue();
-
-    mStartup = false;
 }
 
 int32_t PlaylistFetcher::getSeqNumberForTime(int64_t timeUs) const {
@@ -928,173 +1028,163 @@
     return firstSeqNumberInPlaylist + index;
 }
 
-status_t PlaylistFetcher::extractAndQueueAccessUnits(
-        const sp<ABuffer> &buffer, const sp<AMessage> &itemMeta) {
-    if (buffer->size() > 0 && buffer->data()[0] == 0x47) {
-        // Let's assume this is an MPEG2 transport stream.
+status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &buffer) {
+    if (mTSParser == NULL) {
+        // Use TS_TIMESTAMPS_ARE_ABSOLUTE so pts carry over between fetchers.
+        mTSParser = new ATSParser(ATSParser::TS_TIMESTAMPS_ARE_ABSOLUTE);
+    }
 
-        if ((buffer->size() % 188) != 0) {
-            ALOGE("MPEG2 transport stream is not an even multiple of 188 "
-                  "bytes in length.");
-            return ERROR_MALFORMED;
-        }
+    if (mNextPTSTimeUs >= 0ll) {
+        sp<AMessage> extra = new AMessage;
+        // Since we are using absolute timestamps, signal an offset of 0 to prevent
+        // ATSParser from skewing the timestamps of access units.
+        extra->setInt64(IStreamListener::kKeyMediaTimeUs, 0);
 
-        if (mTSParser == NULL) {
-            // Use TS_TIMESTAMPS_ARE_ABSOLUTE so pts carry over between fetchers.
-            mTSParser = new ATSParser(ATSParser::TS_TIMESTAMPS_ARE_ABSOLUTE);
-        }
+        mTSParser->signalDiscontinuity(
+                ATSParser::DISCONTINUITY_SEEK, extra);
 
-        if (mNextPTSTimeUs >= 0ll) {
-            sp<AMessage> extra = new AMessage;
-            // Since we are using absolute timestamps, signal an offset of 0 to prevent
-            // ATSParser from skewing the timestamps of access units.
-            extra->setInt64(IStreamListener::kKeyMediaTimeUs, 0);
+        mNextPTSTimeUs = -1ll;
+    }
 
-            mTSParser->signalDiscontinuity(
-                    ATSParser::DISCONTINUITY_SEEK, extra);
-
-            mNextPTSTimeUs = -1ll;
-        }
-
-        size_t offset = 0;
-        while (offset < buffer->size()) {
-            status_t err = mTSParser->feedTSPacket(buffer->data() + offset, 188);
-
-            if (err != OK) {
-                return err;
-            }
-
-            offset += 188;
-        }
-
-        status_t err = OK;
-        for (size_t i = mPacketSources.size(); i-- > 0;) {
-            sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
-
-            const char *key;
-            ATSParser::SourceType type;
-            const LiveSession::StreamType stream = mPacketSources.keyAt(i);
-            switch (stream) {
-
-                case LiveSession::STREAMTYPE_VIDEO:
-                    type = ATSParser::VIDEO;
-                    key = "timeUsVideo";
-                    break;
-
-                case LiveSession::STREAMTYPE_AUDIO:
-                    type = ATSParser::AUDIO;
-                    key = "timeUsAudio";
-                    break;
-
-                case LiveSession::STREAMTYPE_SUBTITLES:
-                {
-                    ALOGE("MPEG2 Transport streams do not contain subtitles.");
-                    return ERROR_MALFORMED;
-                    break;
-                }
-
-                default:
-                    TRESPASS();
-            }
-
-            sp<AnotherPacketSource> source =
-                static_cast<AnotherPacketSource *>(
-                        mTSParser->getSource(type).get());
-
-            if (source == NULL) {
-                ALOGW("MPEG2 Transport stream does not contain %s data.",
-                      type == ATSParser::VIDEO ? "video" : "audio");
-
-                mStreamTypeMask &= ~mPacketSources.keyAt(i);
-                mPacketSources.removeItemsAt(i);
-                continue;
-            }
-
-            int64_t timeUs;
-            sp<ABuffer> accessUnit;
-            status_t finalResult;
-            while (source->hasBufferAvailable(&finalResult)
-                    && source->dequeueAccessUnit(&accessUnit) == OK) {
-
-                CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
-                if (mMinStartTimeUs > 0) {
-                    if (timeUs < mMinStartTimeUs) {
-                        // TODO untested path
-                        // try a later ts
-                        int32_t targetDuration;
-                        mPlaylist->meta()->findInt32("target-duration", &targetDuration);
-                        int32_t incr = (mMinStartTimeUs - timeUs) / 1000000 / targetDuration;
-                        if (incr == 0) {
-                            // increment mSeqNumber by at least one
-                            incr = 1;
-                        }
-                        mSeqNumber += incr;
-                        err = -EAGAIN;
-                        break;
-                    } else {
-                        int64_t startTimeUs;
-                        if (mStartTimeUsNotify != NULL
-                                && !mStartTimeUsNotify->findInt64(key, &startTimeUs)) {
-                            mStartTimeUsNotify->setInt64(key, timeUs);
-
-                            uint32_t streamMask = 0;
-                            mStartTimeUsNotify->findInt32("streamMask", (int32_t *) &streamMask);
-                            streamMask |= mPacketSources.keyAt(i);
-                            mStartTimeUsNotify->setInt32("streamMask", streamMask);
-
-                            if (streamMask == mStreamTypeMask) {
-                                mStartTimeUsNotify->post();
-                                mStartTimeUsNotify.clear();
-                            }
-                        }
-                    }
-                }
-
-                if (mStopParams != NULL) {
-                    // Queue discontinuity in original stream.
-                    int64_t stopTimeUs;
-                    if (!mStopParams->findInt64(key, &stopTimeUs) || timeUs >= stopTimeUs) {
-                        packetSource->queueAccessUnit(mSession->createFormatChangeBuffer());
-                        mStreamTypeMask &= ~stream;
-                        mPacketSources.removeItemsAt(i);
-                        break;
-                    }
-                }
-
-                // Note that we do NOT dequeue any discontinuities except for format change.
-
-                // for simplicity, store a reference to the format in each unit
-                sp<MetaData> format = source->getFormat();
-                if (format != NULL) {
-                    accessUnit->meta()->setObject("format", format);
-                }
-
-                // Stash the sequence number so we can hint future fetchers where to start at.
-                accessUnit->meta()->setInt32("seq", mSeqNumber);
-                packetSource->queueAccessUnit(accessUnit);
-            }
-
-            if (err != OK) {
-                break;
-            }
-        }
+    size_t offset = 0;
+    while (offset + 188 <= buffer->size()) {
+        status_t err = mTSParser->feedTSPacket(buffer->data() + offset, 188);
 
         if (err != OK) {
-            for (size_t i = mPacketSources.size(); i-- > 0;) {
-                sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
-                packetSource->clear();
-            }
             return err;
         }
 
-        if (!mStreamTypeMask) {
-            // Signal gap is filled between original and new stream.
-            ALOGV("ERROR OUT OF RANGE");
-            return ERROR_OUT_OF_RANGE;
+        offset += 188;
+    }
+    // setRange to indicate consumed bytes.
+    buffer->setRange(buffer->offset() + offset, buffer->size() - offset);
+
+    status_t err = OK;
+    for (size_t i = mPacketSources.size(); i-- > 0;) {
+        sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
+
+        const char *key;
+        ATSParser::SourceType type;
+        const LiveSession::StreamType stream = mPacketSources.keyAt(i);
+        switch (stream) {
+            case LiveSession::STREAMTYPE_VIDEO:
+                type = ATSParser::VIDEO;
+                key = "timeUsVideo";
+                break;
+
+            case LiveSession::STREAMTYPE_AUDIO:
+                type = ATSParser::AUDIO;
+                key = "timeUsAudio";
+                break;
+
+            case LiveSession::STREAMTYPE_SUBTITLES:
+            {
+                ALOGE("MPEG2 Transport streams do not contain subtitles.");
+                return ERROR_MALFORMED;
+                break;
+            }
+
+            default:
+                TRESPASS();
         }
 
-        return OK;
-    } else if (buffer->size() >= 7 && !memcmp("WEBVTT\n", buffer->data(), 7)) {
+        sp<AnotherPacketSource> source =
+            static_cast<AnotherPacketSource *>(
+                    mTSParser->getSource(type).get());
+
+        if (source == NULL) {
+            continue;
+        }
+
+        int64_t timeUs;
+        sp<ABuffer> accessUnit;
+        status_t finalResult;
+        while (source->hasBufferAvailable(&finalResult)
+                && source->dequeueAccessUnit(&accessUnit) == OK) {
+
+            CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+            if (mMinStartTimeUs > 0) {
+                if (timeUs < mMinStartTimeUs) {
+                    // TODO untested path
+                    // try a later ts
+                    int32_t targetDuration;
+                    mPlaylist->meta()->findInt32("target-duration", &targetDuration);
+                    int32_t incr = (mMinStartTimeUs - timeUs) / 1000000 / targetDuration;
+                    if (incr == 0) {
+                        // increment mSeqNumber by at least one
+                        incr = 1;
+                    }
+                    mSeqNumber += incr;
+                    err = -EAGAIN;
+                    break;
+                } else {
+                    int64_t startTimeUs;
+                    if (mStartTimeUsNotify != NULL
+                            && !mStartTimeUsNotify->findInt64(key, &startTimeUs)) {
+                        mStartTimeUsNotify->setInt64(key, timeUs);
+
+                        uint32_t streamMask = 0;
+                        mStartTimeUsNotify->findInt32("streamMask", (int32_t *) &streamMask);
+                        streamMask |= mPacketSources.keyAt(i);
+                        mStartTimeUsNotify->setInt32("streamMask", streamMask);
+
+                        if (streamMask == mStreamTypeMask) {
+                            mStartTimeUsNotify->post();
+                            mStartTimeUsNotify.clear();
+                        }
+                    }
+                }
+            }
+
+            if (mStopParams != NULL) {
+                // Queue discontinuity in original stream.
+                int64_t stopTimeUs;
+                if (!mStopParams->findInt64(key, &stopTimeUs) || timeUs >= stopTimeUs) {
+                    packetSource->queueAccessUnit(mSession->createFormatChangeBuffer());
+                    mStreamTypeMask &= ~stream;
+                    mPacketSources.removeItemsAt(i);
+                    break;
+                }
+            }
+
+            // Note that we do NOT dequeue any discontinuities except for format change.
+
+            // for simplicity, store a reference to the format in each unit
+            sp<MetaData> format = source->getFormat();
+            if (format != NULL) {
+                accessUnit->meta()->setObject("format", format);
+            }
+
+            // Stash the sequence number so we can hint future playlist fetchers where to start.
+            accessUnit->meta()->setInt32("seq", mSeqNumber);
+            packetSource->queueAccessUnit(accessUnit);
+        }
+
+        if (err != OK) {
+            break;
+        }
+    }
+
+    if (err != OK) {
+        for (size_t i = mPacketSources.size(); i-- > 0;) {
+            sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
+            packetSource->clear();
+        }
+        return err;
+    }
+
+    if (!mStreamTypeMask) {
+        // Signal gap is filled between original and new stream.
+        ALOGV("ERROR OUT OF RANGE");
+        return ERROR_OUT_OF_RANGE;
+    }
+
+    return OK;
+}
+
+status_t PlaylistFetcher::extractAndQueueAccessUnits(
+        const sp<ABuffer> &buffer, const sp<AMessage> &itemMeta) {
+    if (buffer->size() >= 7 && !memcmp("WEBVTT\n", buffer->data(), 7)) {
         if (mStreamTypeMask != LiveSession::STREAMTYPE_SUBTITLES) {
             ALOGE("This stream only contains subtitles.");
             return ERROR_MALFORMED;
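
onDownloadNext() now downloads each segment in kDownloadBlockSize chunks and incrementally feeds whole 188-byte TS packets to the parser as they arrive, keeping any partial packet buffered until the next block. A simplified, self-contained sketch of that consumption loop (stubbed parser; not the actual PlaylistFetcher code):

#include <cstdint>
#include <cstdio>
#include <vector>

static bool startsWithTsSyncByte(const std::vector<uint8_t> &buf) {
    return !buf.empty() && buf[0] == 0x47;   // MPEG-2 TS sync byte
}

// Feed whole 188-byte packets to a (stubbed) parser and return how many bytes
// were consumed; any trailing partial packet stays buffered for the next block.
static size_t feedCompletePackets(const std::vector<uint8_t> &buf, size_t start) {
    size_t offset = start;
    while (offset + 188 <= buf.size()) {
        // mTSParser->feedTSPacket(buf.data() + offset, 188) in the real code.
        offset += 188;
    }
    return offset - start;
}

int main() {
    std::vector<uint8_t> segment;
    size_t consumed = 0;
    // Simulate three 192-byte download blocks of a TS segment.
    for (int block = 0; block < 3; ++block) {
        segment.insert(segment.end(), 192, block == 0 ? 0x47 : 0x00);
        if (startsWithTsSyncByte(segment)) {
            consumed += feedCompletePackets(segment, consumed);
        }
    }
    // Bytes left over at end of stream that are not a multiple of 188 indicate
    // a malformed transport stream.
    printf("consumed %zu of %zu bytes, %zu left over\n",
           consumed, segment.size(), segment.size() - consumed);
    return 0;
}
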
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index 8404b8d..7e21523 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -87,8 +87,11 @@
 
     static const int64_t kMinBufferedDurationUs;
     static const int64_t kMaxMonitorDelayUs;
+    static const int32_t kDownloadBlockSize;
     static const int32_t kNumSkipFrames;
 
+    static bool bufferStartsWithTsSyncByte(const sp<ABuffer>& buffer);
+
     // notifications to mSession
     sp<AMessage> mNotify;
     sp<AMessage> mStartTimeUsNotify;
@@ -169,6 +172,8 @@
     // Resume a fetcher to continue until the stopping point stored in msg.
     status_t onResumeUntil(const sp<AMessage> &msg);
 
+    status_t extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &buffer);
+
     status_t extractAndQueueAccessUnits(
             const sp<ABuffer> &buffer, const sp<AMessage> &itemMeta);
 
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index f0f203c..7f221a0 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -41,9 +41,9 @@
     }
 
     virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
-        off64_t available = (offset >= mSize) ? 0ll : mSize - offset;
+        off64_t available = (offset >= (off64_t)mSize) ? 0ll : mSize - offset;
 
-        size_t copy = (available > size) ? size : available;
+        size_t copy = (available > (off64_t)size) ? size : available;
         memcpy(data, mData + offset, copy);
 
         return copy;
@@ -172,7 +172,7 @@
     }
 
     if (size > kMaxMetadataSize) {
-        ALOGE("skipping huge ID3 metadata of size %d", size);
+        ALOGE("skipping huge ID3 metadata of size %zu", size);
         return false;
     }
 
@@ -633,8 +633,8 @@
             mFrameSize += 6;
 
             if (mOffset + mFrameSize > mParent.mSize) {
-                ALOGV("partial frame at offset %d (size = %d, bytes-remaining = %d)",
-                     mOffset, mFrameSize, mParent.mSize - mOffset - 6);
+                ALOGV("partial frame at offset %zu (size = %zu, bytes-remaining = %zu)",
+                    mOffset, mFrameSize, mParent.mSize - mOffset - (size_t)6);
                 return;
             }
 
@@ -674,8 +674,8 @@
             mFrameSize = 10 + baseSize;
 
             if (mOffset + mFrameSize > mParent.mSize) {
-                ALOGV("partial frame at offset %d (size = %d, bytes-remaining = %d)",
-                     mOffset, mFrameSize, mParent.mSize - mOffset - 10);
+                ALOGV("partial frame at offset %zu (size = %zu, bytes-remaining = %zu)",
+                    mOffset, mFrameSize, mParent.mSize - mOffset - (size_t)10);
                 return;
             }
 
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index 6ec9263..d4a7c7f 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -33,6 +33,8 @@
 #include <media/stagefright/Utils.h>
 #include <utils/String8.h>
 
+#include <inttypes.h>
+
 namespace android {
 
 struct DataSourceReader : public mkvparser::IMkvReader {
@@ -103,7 +105,7 @@
 
 private:
     MatroskaExtractor *mExtractor;
-    unsigned long mTrackNum;
+    long long mTrackNum;
 
     const mkvparser::Cluster *mCluster;
     const mkvparser::BlockEntry *mBlockEntry;
@@ -183,7 +185,7 @@
         CHECK_GE(avccSize, 5u);
 
         mNALSizeLen = 1 + (avcc[4] & 3);
-        ALOGV("mNALSizeLen = %d", mNALSizeLen);
+        ALOGV("mNALSizeLen = %zu", mNALSizeLen);
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
         mType = AAC;
     }
@@ -320,7 +322,7 @@
     // Special case the 0 seek to avoid loading Cues when the application
     // extraneously seeks to 0 before playing.
     if (seekTimeNs <= 0) {
-        ALOGV("Seek to beginning: %lld", seekTimeUs);
+        ALOGV("Seek to beginning: %" PRId64, seekTimeUs);
         mCluster = pSegment->GetFirst();
         mBlockEntryIndex = 0;
         do {
@@ -329,7 +331,7 @@
         return;
     }
 
-    ALOGV("Seeking to: %lld", seekTimeUs);
+    ALOGV("Seeking to: %" PRId64, seekTimeUs);
 
     // If the Cues have not been located then find them.
     const mkvparser::Cues* pCues = pSegment->GetCues();
@@ -378,7 +380,7 @@
     for (size_t index = 0; index < pTracks->GetTracksCount(); ++index) {
         pTrack = pTracks->GetTrackByIndex(index);
         if (pTrack && pTrack->GetType() == 1) { // VIDEO_TRACK
-            ALOGV("Video track located at %d", index);
+            ALOGV("Video track located at %zu", index);
             break;
         }
     }
@@ -409,7 +411,7 @@
         if (isAudio || block()->IsKey()) {
             // Accept the first key frame
             *actualFrameTimeUs = (block()->GetTime(mCluster) + 500LL) / 1000LL;
-            ALOGV("Requested seek point: %lld actual: %lld",
+            ALOGV("Requested seek point: %" PRId64 " actual: %" PRId64,
                   seekTimeUs, *actualFrameTimeUs);
             break;
         }
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index d039f7d..d1afd8b 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -36,6 +36,8 @@
 #include <media/IStreamSource.h>
 #include <utils/KeyedVector.h>
 
+#include <inttypes.h>
+
 namespace android {
 
 // I want the expression "y" evaluated even if verbose logging is off.
@@ -586,7 +588,7 @@
         // Increment in multiples of 64K.
         neededSize = (neededSize + 65535) & ~65535;
 
-        ALOGI("resizing buffer to %d bytes", neededSize);
+        ALOGI("resizing buffer to %zu bytes", neededSize);
 
         sp<ABuffer> newBuffer = new ABuffer(neededSize);
         memcpy(newBuffer->data(), mBuffer->data(), mBuffer->size());
@@ -748,7 +750,7 @@
             PTS |= br->getBits(15);
             CHECK_EQ(br->getBits(1), 1u);
 
-            ALOGV("PTS = 0x%016llx (%.2f)", PTS, PTS / 90000.0);
+            ALOGV("PTS = 0x%016" PRIx64 " (%.2f)", PTS, PTS / 90000.0);
 
             optional_bytes_remaining -= 5;
 
@@ -764,7 +766,7 @@
                 DTS |= br->getBits(15);
                 CHECK_EQ(br->getBits(1), 1u);
 
-                ALOGV("DTS = %llu", DTS);
+                ALOGV("DTS = %" PRIu64, DTS);
 
                 optional_bytes_remaining -= 5;
             }
@@ -782,7 +784,7 @@
             ESCR |= br->getBits(15);
             CHECK_EQ(br->getBits(1), 1u);
 
-            ALOGV("ESCR = %llu", ESCR);
+            ALOGV("ESCR = %" PRIu64, ESCR);
             MY_LOGV("ESCR_extension = %u", br->getBits(9));
 
             CHECK_EQ(br->getBits(1), 1u);
@@ -812,7 +814,7 @@
 
             if (br->numBitsLeft() < dataLength * 8) {
                 ALOGE("PES packet does not carry enough data to contain "
-                     "payload. (numBitsLeft = %d, required = %d)",
+                     "payload. (numBitsLeft = %zu, required = %u)",
                      br->numBitsLeft(), dataLength * 8);
 
                 return ERROR_MALFORMED;
@@ -832,7 +834,7 @@
             size_t payloadSizeBits = br->numBitsLeft();
             CHECK_EQ(payloadSizeBits % 8, 0u);
 
-            ALOGV("There's %d bytes of payload.", payloadSizeBits / 8);
+            ALOGV("There's %zu bytes of payload.", payloadSizeBits / 8);
         }
     } else if (stream_id == 0xbe) {  // padding_stream
         CHECK_NE(PES_packet_length, 0u);
@@ -850,7 +852,7 @@
         return OK;
     }
 
-    ALOGV("flushing stream 0x%04x size = %d", mElementaryPID, mBuffer->size());
+    ALOGV("flushing stream 0x%04x size = %zu", mElementaryPID, mBuffer->size());
 
     ABitReader br(mBuffer->data(), mBuffer->size());
 
@@ -1172,7 +1174,7 @@
 
             uint64_t PCR = PCR_base * 300 + PCR_ext;
 
-            ALOGV("PID 0x%04x: PCR = 0x%016llx (%.2f)",
+            ALOGV("PID 0x%04x: PCR = 0x%016" PRIx64 " (%.2f)",
                   PID, PCR, PCR / 27E6);
 
             // The number of bytes received by this parser up to and
@@ -1268,7 +1270,7 @@
 
 void ATSParser::updatePCR(
         unsigned /* PID */, uint64_t PCR, size_t byteOffsetFromStart) {
-    ALOGV("PCR 0x%016llx @ %d", PCR, byteOffsetFromStart);
+    ALOGV("PCR 0x%016" PRIx64 " @ %zu", PCR, byteOffsetFromStart);
 
     if (mNumPCRs == 2) {
         mPCR[0] = mPCR[1];
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index d4e30b4..86b025f 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -71,8 +71,9 @@
     void signalEOS(status_t finalResult);
 
     enum SourceType {
-        VIDEO,
-        AUDIO
+        VIDEO = 0,
+        AUDIO = 1,
+        NUM_SOURCE_TYPES = 2
     };
     sp<MediaSource> getSource(SourceType type);
 
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 6dfaa94..021b640 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -26,6 +26,8 @@
 #include <media/stagefright/MetaData.h>
 #include <utils/Vector.h>
 
+#include <inttypes.h>
+
 namespace android {
 
 const int64_t kNearEOSMarkUs = 2000000ll; // 2 secs
@@ -186,7 +188,7 @@
     int64_t lastQueuedTimeUs;
     CHECK(buffer->meta()->findInt64("timeUs", &lastQueuedTimeUs));
     mLastQueuedTimeUs = lastQueuedTimeUs;
-    ALOGV("queueAccessUnit timeUs=%lld us (%.2f secs)", mLastQueuedTimeUs, mLastQueuedTimeUs / 1E6);
+    ALOGV("queueAccessUnit timeUs=%" PRIi64 " us (%.2f secs)", mLastQueuedTimeUs, mLastQueuedTimeUs / 1E6);
 
     Mutex::Autolock autoLock(mLock);
     mBuffers.push_back(buffer);
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index c0c9717..f7abf01 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -31,6 +31,7 @@
 
 #include "include/avc_utils.h"
 
+#include <inttypes.h>
 #include <netinet/in.h>
 
 namespace android {
@@ -264,7 +265,7 @@
 
                 if (startOffset > 0) {
                     ALOGI("found something resembling an H.264/MPEG syncword "
-                          "at offset %d",
+                          "at offset %zd",
                           startOffset);
                 }
 
@@ -297,7 +298,7 @@
 
                 if (startOffset > 0) {
                     ALOGI("found something resembling an H.264/MPEG syncword "
-                          "at offset %d",
+                          "at offset %zd",
                           startOffset);
                 }
 
@@ -330,7 +331,7 @@
 
                 if (startOffset > 0) {
                     ALOGI("found something resembling an AAC syncword at "
-                          "offset %d",
+                          "offset %zd",
                           startOffset);
                 }
 
@@ -358,7 +359,7 @@
 
                 if (startOffset > 0) {
                     ALOGI("found something resembling an AC3 syncword at "
-                          "offset %d",
+                          "offset %zd",
                           startOffset);
                 }
 
@@ -385,7 +386,7 @@
 
                 if (startOffset > 0) {
                     ALOGI("found something resembling an MPEG audio "
-                          "syncword at offset %d",
+                          "syncword at offset %zd",
                           startOffset);
                 }
 
@@ -409,7 +410,7 @@
     if (mBuffer == NULL || neededSize > mBuffer->capacity()) {
         neededSize = (neededSize + 65535) & ~65535;
 
-        ALOGV("resizing buffer to size %d", neededSize);
+        ALOGV("resizing buffer to size %zu", neededSize);
 
         sp<ABuffer> buffer = new ABuffer(neededSize);
         if (mBuffer != NULL) {
@@ -432,7 +433,7 @@
 
 #if 0
     if (mMode == AAC) {
-        ALOGI("size = %d, timeUs = %.2f secs", size, timeUs / 1E6);
+        ALOGI("size = %zu, timeUs = %.2f secs", size, timeUs / 1E6);
         hexdump(data, size);
     }
 #endif
@@ -1027,7 +1028,7 @@
 
                 accessUnit->meta()->setInt64("timeUs", timeUs);
 
-                ALOGV("returning MPEG video access unit at time %lld us",
+                ALOGV("returning MPEG video access unit at time %" PRId64 " us",
                       timeUs);
 
                 // hexdump(accessUnit->data(), accessUnit->size());
@@ -1186,7 +1187,7 @@
 
                     accessUnit->meta()->setInt64("timeUs", timeUs);
 
-                    ALOGV("returning MPEG4 video access unit at time %lld us",
+                    ALOGV("returning MPEG4 video access unit at time %" PRId64 " us",
                          timeUs);
 
                     // hexdump(accessUnit->data(), accessUnit->size());
diff --git a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
index bc2a16d..85859f7 100644
--- a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
@@ -36,6 +36,8 @@
 #include <media/stagefright/Utils.h>
 #include <utils/String8.h>
 
+#include <inttypes.h>
+
 namespace android {
 
 struct MPEG2PSExtractor::Track : public MediaSource {
@@ -409,7 +411,7 @@
             PTS |= br.getBits(15);
             CHECK_EQ(br.getBits(1), 1u);
 
-            ALOGV("PTS = %llu", PTS);
+            ALOGV("PTS = %" PRIu64, PTS);
             // ALOGI("PTS = %.2f secs", PTS / 90000.0f);
 
             optional_bytes_remaining -= 5;
@@ -426,7 +428,7 @@
                 DTS |= br.getBits(15);
                 CHECK_EQ(br.getBits(1), 1u);
 
-                ALOGV("DTS = %llu", DTS);
+                ALOGV("DTS = %" PRIu64, DTS);
 
                 optional_bytes_remaining -= 5;
             }
@@ -444,7 +446,7 @@
             ESCR |= br.getBits(15);
             CHECK_EQ(br.getBits(1), 1u);
 
-            ALOGV("ESCR = %llu", ESCR);
+            ALOGV("ESCR = %" PRIu64, ESCR);
             /* unsigned ESCR_extension = */br.getBits(9);
 
             CHECK_EQ(br.getBits(1), 1u);
@@ -473,7 +475,7 @@
 
         if (br.numBitsLeft() < dataLength * 8) {
             ALOGE("PES packet does not carry enough data to contain "
-                 "payload. (numBitsLeft = %d, required = %d)",
+                 "payload. (numBitsLeft = %zu, required = %u)",
                  br.numBitsLeft(), dataLength * 8);
 
             return ERROR_MALFORMED;
diff --git a/media/libstagefright/omx/tests/Android.mk b/media/libstagefright/omx/tests/Android.mk
index 8b79af4..447b29e 100644
--- a/media/libstagefright/omx/tests/Android.mk
+++ b/media/libstagefright/omx/tests/Android.mk
@@ -17,4 +17,6 @@
 
 LOCAL_MODULE_TAGS := tests
 
+LOCAL_32_BIT_ONLY := true
+
 include $(BUILD_EXECUTABLE)
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index a6825eb..4bc67e8 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -124,7 +124,7 @@
 }
 
 void AAVCAssembler::addSingleNALUnit(const sp<ABuffer> &buffer) {
-    ALOGV("addSingleNALUnit of size %d", buffer->size());
+    ALOGV("addSingleNALUnit of size %zu", buffer->size());
 #if !LOG_NDEBUG
     hexdump(buffer->data(), buffer->size());
 #endif
@@ -191,7 +191,7 @@
     CHECK((indicator & 0x1f) == 28);
 
     if (size < 2) {
-        ALOGV("Ignoring malformed FU buffer (size = %d)", size);
+        ALOGV("Ignoring malformed FU buffer (size = %zu)", size);
 
         queue->erase(queue->begin());
         ++mNextExpectedSeqNo;
@@ -225,7 +225,7 @@
     } else {
         List<sp<ABuffer> >::iterator it = ++queue->begin();
         while (it != queue->end()) {
-            ALOGV("sequence length %d", totalCount);
+            ALOGV("sequence length %zu", totalCount);
 
             const sp<ABuffer> &buffer = *it;
 
@@ -294,7 +294,7 @@
     for (size_t i = 0; i < totalCount; ++i) {
         const sp<ABuffer> &buffer = *it;
 
-        ALOGV("piece #%d/%d", i + 1, totalCount);
+        ALOGV("piece #%zu/%zu", i + 1, totalCount);
 #if !LOG_NDEBUG
         hexdump(buffer->data(), buffer->size());
 #endif
@@ -317,7 +317,7 @@
 void AAVCAssembler::submitAccessUnit() {
     CHECK(!mNALUnits.empty());
 
-    ALOGV("Access unit complete (%d nal units)", mNALUnits.size());
+    ALOGV("Access unit complete (%zu nal units)", mNALUnits.size());
 
     size_t totalSize = 0;
     for (List<sp<ABuffer> >::iterator it = mNALUnits.begin();
diff --git a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
index eefceba..98b50dd 100644
--- a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
@@ -365,7 +365,7 @@
 void AMPEG4ElementaryAssembler::submitAccessUnit() {
     CHECK(!mPackets.empty());
 
-    ALOGV("Access unit complete (%d nal units)", mPackets.size());
+    ALOGV("Access unit complete (%zu nal units)", mPackets.size());
 
     sp<ABuffer> accessUnit;
 
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index af369b5..372fbe9 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -563,7 +563,7 @@
 
             default:
             {
-                ALOGW("Unknown RTCP packet type %u of size %d",
+                ALOGW("Unknown RTCP packet type %u of size %zu",
                      (unsigned)data[1], headerLength);
                 break;
             }
diff --git a/media/libstagefright/rtsp/ARTPWriter.cpp b/media/libstagefright/rtsp/ARTPWriter.cpp
index c46d16f..793d116 100644
--- a/media/libstagefright/rtsp/ARTPWriter.cpp
+++ b/media/libstagefright/rtsp/ARTPWriter.cpp
@@ -277,7 +277,7 @@
     }
 
     if (mediaBuf->range_length() > 0) {
-        ALOGV("read buffer of size %d", mediaBuf->range_length());
+        ALOGV("read buffer of size %zu", mediaBuf->range_length());
 
         if (mMode == H264) {
             StripStartcode(mediaBuf);
diff --git a/media/libstagefright/rtsp/SDPLoader.cpp b/media/libstagefright/rtsp/SDPLoader.cpp
index 13e8da3..09f7eee 100644
--- a/media/libstagefright/rtsp/SDPLoader.cpp
+++ b/media/libstagefright/rtsp/SDPLoader.cpp
@@ -125,7 +125,7 @@
         ssize_t readSize = mHTTPDataSource->readAt(0, buffer->data(), sdpSize);
 
         if (readSize < 0) {
-            ALOGE("Failed to read SDP, error code = %d", readSize);
+            ALOGE("Failed to read SDP, error code = %zu", readSize);
             err = UNKNOWN_ERROR;
         } else {
             desc = new ASessionDescription;
diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk
index 06ce16b..903af49 100644
--- a/media/libstagefright/tests/Android.mk
+++ b/media/libstagefright/tests/Android.mk
@@ -41,6 +41,8 @@
 	frameworks/av/media/libstagefright/include \
 	$(TOP)/frameworks/native/include/media/openmax \
 
+LOCAL_32_BIT_ONLY := true
+
 include $(BUILD_EXECUTABLE)
 
 endif
diff --git a/media/libstagefright/timedtext/TimedTextPlayer.cpp b/media/libstagefright/timedtext/TimedTextPlayer.cpp
index 9fb0afe..a070487 100644
--- a/media/libstagefright/timedtext/TimedTextPlayer.cpp
+++ b/media/libstagefright/timedtext/TimedTextPlayer.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "TimedTextPlayer"
 #include <utils/Log.h>
 
+#include <inttypes.h>
 #include <limits.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -271,7 +272,7 @@
     sp<MediaPlayerBase> listener = mListener.promote();
     if (listener == NULL) {
         // TODO: it may be better to return kInvalidTimeUs
-        ALOGE("%s: Listener is NULL. (fireTimeUs = %lld)",
+        ALOGE("%s: Listener is NULL. (fireTimeUs = %" PRId64" )",
               __FUNCTION__, fireTimeUs);
         return 0;
     }
diff --git a/media/libstagefright/webm/WebmElement.cpp b/media/libstagefright/webm/WebmElement.cpp
index c978966..a008cab 100644
--- a/media/libstagefright/webm/WebmElement.cpp
+++ b/media/libstagefright/webm/WebmElement.cpp
@@ -119,7 +119,7 @@
     off64_t mapSize = curOff - alignedOff;
     off64_t pageOff = off - alignedOff;
     void *dst = ::mmap64(NULL, mapSize, PROT_WRITE, MAP_SHARED, fd, alignedOff);
-    if ((int) dst == -1) {
+    if (dst == MAP_FAILED) {
         ALOGE("mmap64 failed; errno = %d", errno);
         ALOGE("fd %d; flags: %o", fd, ::fcntl(fd, F_GETFL, 0));
         return errno;
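
mmap()/mmap64() signal failure by returning MAP_FAILED, a (void *)-1 sentinel, so the old (int) cast both truncates the pointer and misses the documented constant; the new check compares against MAP_FAILED directly. A small sketch of the checked pattern, assuming only an already-open descriptor fd (the helper name is illustrative):

    #include <sys/mman.h>
    #include <cerrno>
    #include <cstdio>

    // Illustrative helper: map len bytes of fd for writing, or return nullptr.
    static void *mapForWrite(int fd, size_t len, off_t offset) {
        void *dst = ::mmap(nullptr, len, PROT_WRITE, MAP_SHARED, fd, offset);
        if (dst == MAP_FAILED) {  // compare against MAP_FAILED, never a casted -1
            fprintf(stderr, "mmap failed: errno = %d\n", errno);
            return nullptr;
        }
        return dst;
    }
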
diff --git a/media/libstagefright/webm/WebmFrameThread.cpp b/media/libstagefright/webm/WebmFrameThread.cpp
index 5addd3c..a4b8a42 100644
--- a/media/libstagefright/webm/WebmFrameThread.cpp
+++ b/media/libstagefright/webm/WebmFrameThread.cpp
@@ -48,7 +48,7 @@
 status_t WebmFrameThread::stop() {
     void *status;
     pthread_join(mThread, &status);
-    return (status_t) status;
+    return (status_t)(intptr_t)status;
 }
 
 //=================================================================================================
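
pthread_join() hands back the thread's exit value as a void *, and on LP64 a direct cast from that pointer to a 32-bit status_t is narrowing; going through intptr_t, as the change above does, keeps the conversion well defined. A self-contained sketch using a plain int status:

    #include <pthread.h>
    #include <cstdint>
    #include <cstdio>

    static void *worker(void *) {
        int result = 7;  // some exit status
        return reinterpret_cast<void *>(static_cast<intptr_t>(result));
    }

    int main() {
        pthread_t t;
        pthread_create(&t, nullptr, worker, nullptr);

        void *status = nullptr;
        pthread_join(t, &status);
        // Narrow through intptr_t rather than casting void * straight to int.
        int result = static_cast<int>(reinterpret_cast<intptr_t>(status));
        printf("worker returned %d\n", result);
        return 0;
    }
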
diff --git a/media/mtp/MtpProperty.cpp b/media/mtp/MtpProperty.cpp
index 3838ce8..c500901 100644
--- a/media/mtp/MtpProperty.cpp
+++ b/media/mtp/MtpProperty.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "MtpProperty"
 
 #include <inttypes.h>
+#include <cutils/compiler.h>
 #include "MtpDataPacket.h"
 #include "MtpDebug.h"
 #include "MtpProperty.h"
@@ -518,8 +519,14 @@
 
 MtpPropertyValue* MtpProperty::readArrayValues(MtpDataPacket& packet, int& length) {
     length = packet.getUInt32();
-    if (length == 0)
+    // Fail if the resulting array would be over 2GB, since the maximum
+    // allocation size may be less than SIZE_MAX on some platforms.
+    if (CC_UNLIKELY(
+            length == 0 ||
+            length >= INT32_MAX / sizeof(MtpPropertyValue))) {
+        length = 0;
         return NULL;
+    }
     MtpPropertyValue* result = new MtpPropertyValue[length];
     for (int i = 0; i < length; i++)
         readValue(packet, result[i]);
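
The added guard rejects element counts whose array allocation could overflow or exceed roughly 2GB. The same pattern, sketched independently of the MTP types (the struct and function names here are illustrative, not from the source):

    #include <cstdint>

    struct Value { uint64_t data[4]; };  // stand-in element type

    // Returns nullptr unless 0 < count and count * sizeof(Value) stays below
    // INT32_MAX, mirroring the readArrayValues() check above.
    static Value *allocateValues(uint32_t count) {
        if (count == 0 || count >= INT32_MAX / sizeof(Value)) {
            return nullptr;
        }
        return new Value[count];
    }
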
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index de7e3c3..6cb0299 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -29,10 +29,6 @@
     Tracks.cpp                  \
     Effects.cpp                 \
     AudioMixer.cpp.arm          \
-    AudioResampler.cpp.arm      \
-    AudioResamplerCubic.cpp.arm \
-    AudioResamplerSinc.cpp.arm  \
-    AudioResamplerDyn.cpp.arm
 
 LOCAL_SRC_FILES += StateQueue.cpp
 
@@ -42,6 +38,7 @@
     $(call include-path-for, audio-utils)
 
 LOCAL_SHARED_LIBRARIES := \
+    libaudioresampler \
     libaudioutils \
     libcommon_time_client \
     libcutils \
@@ -53,7 +50,6 @@
     libhardware \
     libhardware_legacy \
     libeffects \
-    libdl \
     libpowermanager
 
 LOCAL_STATIC_LIBRARIES := \
@@ -87,10 +83,6 @@
 
 LOCAL_SRC_FILES:=               \
     test-resample.cpp           \
-    AudioResampler.cpp.arm      \
-    AudioResamplerCubic.cpp.arm \
-    AudioResamplerSinc.cpp.arm  \
-    AudioResamplerDyn.cpp.arm
 
 LOCAL_C_INCLUDES := \
     $(call include-path-for, audio-utils)
@@ -99,6 +91,7 @@
     libsndfile
 
 LOCAL_SHARED_LIBRARIES := \
+    libaudioresampler \
     libaudioutils \
     libdl \
     libcutils \
@@ -111,4 +104,21 @@
 
 include $(BUILD_EXECUTABLE)
 
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+    AudioResampler.cpp.arm \
+    AudioResamplerCubic.cpp.arm \
+    AudioResamplerSinc.cpp.arm \
+    AudioResamplerDyn.cpp.arm
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libdl \
+    liblog
+
+LOCAL_MODULE := libaudioresampler
+
+include $(BUILD_SHARED_LIBRARY)
+
 include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index c62c627..bb8c15e 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -159,6 +159,7 @@
 AudioFlinger::AudioFlinger()
     : BnAudioFlinger(),
       mPrimaryHardwareDev(NULL),
+      mAudioHwDevs(NULL),
       mHardwareStatus(AUDIO_HW_IDLE),
       mMasterVolume(1.0f),
       mMasterMute(false),
@@ -876,7 +877,7 @@
 
     AutoMutex lock(mLock);
     PlaybackThread *thread = NULL;
-    if (output) {
+    if (output != AUDIO_IO_HANDLE_NONE) {
         thread = checkPlaybackThread_l(output);
         if (thread == NULL) {
             return BAD_VALUE;
@@ -925,7 +926,7 @@
 
     AutoMutex lock(mLock);
     float volume;
-    if (output) {
+    if (output != AUDIO_IO_HANDLE_NONE) {
         PlaybackThread *thread = checkPlaybackThread_l(output);
         if (thread == NULL) {
             return 0.0f;
@@ -958,8 +959,8 @@
         return PERMISSION_DENIED;
     }
 
-    // ioHandle == 0 means the parameters are global to the audio hardware interface
-    if (ioHandle == 0) {
+    // AUDIO_IO_HANDLE_NONE means the parameters are global to the audio hardware interface
+    if (ioHandle == AUDIO_IO_HANDLE_NONE) {
         Mutex::Autolock _l(mLock);
         status_t final_result = NO_ERROR;
         {
@@ -1041,7 +1042,7 @@
 
     Mutex::Autolock _l(mLock);
 
-    if (ioHandle == 0) {
+    if (ioHandle == AUDIO_IO_HANDLE_NONE) {
         String8 out_s8;
 
         for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
@@ -1561,15 +1562,15 @@
     ALOGV("openOutput(), offloadInfo %p version 0x%04x",
           offloadInfo, offloadInfo == NULL ? -1 : offloadInfo->version);
 
-    if (pDevices == NULL || *pDevices == 0) {
-        return 0;
+    if (pDevices == NULL || *pDevices == AUDIO_DEVICE_NONE) {
+        return AUDIO_IO_HANDLE_NONE;
     }
 
     Mutex::Autolock _l(mLock);
 
     AudioHwDevice *outHwDev = findSuitableHwDev_l(module, *pDevices);
     if (outHwDev == NULL) {
-        return 0;
+        return AUDIO_IO_HANDLE_NONE;
     }
 
     audio_hw_device_t *hwDevHal = outHwDev->hwDevice();
@@ -1641,7 +1642,7 @@
         return id;
     }
 
-    return 0;
+    return AUDIO_IO_HANDLE_NONE;
 }
 
 audio_io_handle_t AudioFlinger::openDuplicateOutput(audio_io_handle_t output1,
@@ -1654,7 +1655,7 @@
     if (thread1 == NULL || thread2 == NULL) {
         ALOGW("openDuplicateOutput() wrong output mixer type for output %d or %d", output1,
                 output2);
-        return 0;
+        return AUDIO_IO_HANDLE_NONE;
     }
 
     audio_io_handle_t id = nextUniqueId();
@@ -1775,7 +1776,7 @@
     audio_format_t reqFormat = config.format;
     audio_channel_mask_t reqChannelMask = config.channel_mask;
 
-    if (pDevices == NULL || *pDevices == 0) {
+    if (pDevices == NULL || *pDevices == AUDIO_DEVICE_NONE) {
         return 0;
     }
 
@@ -2089,7 +2090,7 @@
 
 uint32_t AudioFlinger::nextUniqueId()
 {
-    return android_atomic_inc(&mNextUniqueId);
+    return (uint32_t) android_atomic_inc(&mNextUniqueId);
 }
 
 AudioFlinger::PlaybackThread *AudioFlinger::primaryPlaybackThread_l() const
@@ -2281,7 +2282,7 @@
 
         // return effect descriptor
         *pDesc = desc;
-        if (io == 0 && sessionId == AUDIO_SESSION_OUTPUT_MIX) {
+        if (io == AUDIO_IO_HANDLE_NONE && sessionId == AUDIO_SESSION_OUTPUT_MIX) {
             // if the output returned by getOutputForEffect() is removed before we lock the
             // mutex below, the call to checkPlaybackThread_l(io) below will detect it
             // and we will exit safely
@@ -2296,7 +2297,7 @@
         // If output is 0 here, sessionId is neither SESSION_OUTPUT_STAGE nor SESSION_OUTPUT_MIX
         // because of code checking output when entering the function.
         // Note: io is never 0 when creating an effect on an input
-        if (io == 0) {
+        if (io == AUDIO_IO_HANDLE_NONE) {
             if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
                 // output must be specified by AudioPolicyManager when using session
                 // AUDIO_SESSION_OUTPUT_STAGE
@@ -2321,7 +2322,7 @@
             // If no output thread contains the requested session ID, default to
             // first output. The effect chain will be moved to the correct output
             // thread when a track with the same session ID is created
-            if (io == 0 && mPlaybackThreads.size()) {
+            if (io == AUDIO_IO_HANDLE_NONE && mPlaybackThreads.size() > 0) {
                 io = mPlaybackThreads.keyAt(0);
             }
             ALOGV("createEffect() got io %d for effect %s", io, desc.name);
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 2367d7d..ec32edd 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -454,7 +454,14 @@
                                 { return mStreamTypes[stream].volume; }
               void audioConfigChanged_l(int event, audio_io_handle_t ioHandle, const void *param2);
 
-              // allocate an audio_io_handle_t, session ID, or effect ID
+              // Allocate an audio_io_handle_t, session ID, effect ID, or audio_module_handle_t.
+              // They all share the same ID space, but the namespaces are actually independent
+              // because there are separate KeyedVectors for each kind of ID.
+              // The return value is uint32_t, but is cast to signed for some IDs.
+              // FIXME This API does not handle rollover to zero (for unsigned IDs),
+              //       or from positive to negative (for signed IDs).
+              //       Thus it may fail by returning an ID of the wrong sign,
+              //       or by returning a non-unique ID.
               uint32_t nextUniqueId();
 
               status_t moveEffectChain_l(int sessionId,
@@ -590,7 +597,11 @@
                 DefaultKeyedVector< audio_io_handle_t, sp<RecordThread> >    mRecordThreads;
 
                 DefaultKeyedVector< pid_t, sp<NotificationClient> >    mNotificationClients;
+
                 volatile int32_t                    mNextUniqueId;  // updated by android_atomic_inc
+                // nextUniqueId() returns uint32_t, but this is declared int32_t
+                // because the atomic operations require an int32_t
+
                 audio_mode_t                        mMode;
                 bool                                mBtNrecIsOff;
 
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 3ac5da9..a1783fe 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -449,7 +449,7 @@
             }
             } break;
         default:
-            LOG_FATAL("bad param");
+            LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
         }
         break;
 
@@ -474,7 +474,7 @@
             invalidateState(1 << name);
             break;
         default:
-            LOG_FATAL("bad param");
+            LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param);
         }
         break;
 
@@ -522,12 +522,12 @@
             }
             break;
         default:
-            LOG_FATAL("bad param");
+            LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
         }
         break;
 
     default:
-        LOG_FATAL("bad target");
+        LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
     }
 }
 
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 90122e0..adb4aca 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -212,7 +212,7 @@
         case FastMixerState::MIX_WRITE:
             break;
         default:
-            LOG_FATAL("bad command %d", command);
+            LOG_ALWAYS_FATAL("bad command %d", command);
         }
 
         // there is a non-idle state available to us; did the state change?
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index b5e763d..e9c6834 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -96,6 +96,8 @@
     void reset();
     bool isFlushPending() const { return mFlushHwPending; }
     void flushAck();
+    bool isResumePending();
+    void resumeAck();
 
     bool isOutputTrack() const {
         return (mStreamType == AUDIO_STREAM_CNT);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 12d453e..7700780 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1133,7 +1133,7 @@
 
     // mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default constructor
-    // There is no AUDIO_STREAM_MIN, and ++ operator does not compile
-    for (audio_stream_type_t stream = (audio_stream_type_t) 0; stream < AUDIO_STREAM_CNT;
+    // The ++ operator does not compile for audio_stream_type_t
+    for (audio_stream_type_t stream = AUDIO_STREAM_MIN; stream < AUDIO_STREAM_CNT;
             stream = (audio_stream_type_t) (stream + 1)) {
         mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
         mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
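
audio_stream_type_t is a plain C enum with no ++ operator, so the loop advances by an explicit cast; AUDIO_STREAM_MIN simply names the first valid value. The same idiom with a generic stand-in enum:

    #include <cstdio>

    // Generic stand-in for audio_stream_type_t, which cannot be incremented with ++.
    enum Stream { STREAM_MIN = 0, STREAM_VOICE = 0, STREAM_SYSTEM, STREAM_MUSIC, STREAM_CNT };

    int main() {
        for (Stream s = STREAM_MIN; s < STREAM_CNT; s = static_cast<Stream>(s + 1)) {
            printf("initializing stream %d\n", s);
        }
        return 0;
    }
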
@@ -1711,24 +1711,24 @@
 
 void AudioFlinger::PlaybackThread::readOutputParameters_l()
 {
-    // unfortunately we have no way of recovering from errors here, hence the LOG_FATAL
+    // unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL
     mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common);
     mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common);
     if (!audio_is_output_channel(mChannelMask)) {
-        LOG_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
+        LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
     }
     if ((mType == MIXER || mType == DUPLICATING) && mChannelMask != AUDIO_CHANNEL_OUT_STEREO) {
-        LOG_FATAL("HAL channel mask %#x not supported for mixed output; "
+        LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output; "
                 "must be AUDIO_CHANNEL_OUT_STEREO", mChannelMask);
     }
     mChannelCount = popcount(mChannelMask);
     mFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
     if (!audio_is_valid_format(mFormat)) {
-        LOG_FATAL("HAL format %#x not valid for output", mFormat);
+        LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
     }
     if ((mType == MIXER || mType == DUPLICATING) && mFormat != AUDIO_FORMAT_PCM_16_BIT) {
-        LOG_FATAL("HAL format %#x not supported for mixed output; must be AUDIO_FORMAT_PCM_16_BIT",
-                mFormat);
+        LOG_ALWAYS_FATAL("HAL format %#x not supported for mixed output; "
+                "must be AUDIO_FORMAT_PCM_16_BIT", mFormat);
     }
     mFrameSize = audio_stream_frame_size(&mOutput->stream->common);
     mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common);
@@ -3118,7 +3118,7 @@
                 break;
             case TrackBase::IDLE:
             default:
-                LOG_FATAL("unexpected track state %d", track->mState);
+                LOG_ALWAYS_FATAL("unexpected track state %d", track->mState);
             }
 
             if (isActive) {
@@ -3149,7 +3149,7 @@
                     // because we're about to decrement the last sp<> on those tracks.
                     block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
                 } else {
-                    LOG_FATAL("fast track %d should have been active", j);
+                    LOG_ALWAYS_FATAL("fast track %d should have been active", j);
                 }
                 tracksToRemove->add(track);
                 // Avoids a misleading display in dumpsys
@@ -4223,32 +4223,34 @@
             if (last) {
                 mFlushPending = true;
             }
-        } else if (track->framesReady() && track->isReady() &&
+        } else if (track->isResumePending()) {
+            track->resumeAck();
+            if (last) {
+                if (mPausedBytesRemaining) {
+                    // Need to continue write that was interrupted
+                    mCurrentWriteLength = mPausedWriteLength;
+                    mBytesRemaining = mPausedBytesRemaining;
+                    mPausedBytesRemaining = 0;
+                }
+                if (mHwPaused) {
+                    doHwResume = true;
+                    mHwPaused = false;
+                    // threadLoop_mix() will handle the case that we need to
+                    // resume an interrupted write
+                }
+                // enable write to audio HAL
+                sleepTime = 0;
+
+                // Do not handle new data in this iteration even if track->framesReady()
+                mixerStatus = MIXER_TRACKS_ENABLED;
+            }
+        } else if (track->framesReady() && track->isReady() &&
                 !track->isPaused() && !track->isTerminated() && !track->isStopping_2()) {
             ALOGVV("OffloadThread: track %d s=%08x [OK]", track->name(), cblk->mServer);
             if (track->mFillingUpStatus == Track::FS_FILLED) {
                 track->mFillingUpStatus = Track::FS_ACTIVE;
                 // make sure processVolume_l() will apply new volume even if 0
                 mLeftVolFloat = mRightVolFloat = -1.0;
-                if (track->mState == TrackBase::RESUMING) {
-                    track->mState = TrackBase::ACTIVE;
-                    if (last) {
-                        if (mPausedBytesRemaining) {
-                            // Need to continue write that was interrupted
-                            mCurrentWriteLength = mPausedWriteLength;
-                            mBytesRemaining = mPausedBytesRemaining;
-                            mPausedBytesRemaining = 0;
-                        }
-                        if (mHwPaused) {
-                            doHwResume = true;
-                            mHwPaused = false;
-                            // threadLoop_mix() will handle the case that we need to
-                            // resume an interrupted write
-                        }
-                        // enable write to audio HAL
-                        sleepTime = 0;
-                    }
-                }
             }
 
             if (last) {
@@ -4763,7 +4765,7 @@
                     continue;
 
                 default:
-                    LOG_FATAL("Unexpected activeTrackState %d", activeTrackState);
+                    LOG_ALWAYS_FATAL("Unexpected activeTrackState %d", activeTrackState);
                 }
 
                 activeTracks.add(activeTrack);
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index f19cd88..2cf10e2 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -567,7 +567,14 @@
 
 // Don't call for fast tracks; the framesReady() could result in priority inversion
 bool AudioFlinger::PlaybackThread::Track::isReady() const {
-    if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing() || isStopping()) {
+    if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
+        return true;
+    }
+
+    if (isStopping()) {
+        if (framesReady() > 0) {
+            mFillingUpStatus = FS_FILLED;
+        }
         return true;
     }
 
@@ -604,7 +611,10 @@
         // here the track could be either new, or restarted
         // in both cases "unstop" the track
 
-        if (state == PAUSED) {
+        // A track that was stopping may have been paused (PAUSING) before
+        // start() is called again, so treat PAUSING the same as PAUSED here.
+
+        if (state == PAUSED || state == PAUSING) {
             if (mResumeToStopping) {
                 // happened we need to resume to STOPPING_1
                 mState = TrackBase::STOPPING_1;
@@ -991,6 +1001,33 @@
     }
 }
 
+// To be called with thread lock held
+bool AudioFlinger::PlaybackThread::Track::isResumePending() {
+    if (mState == RESUMING) {
+        return true;
+    }
+    // Resume is also pending if the track was stopping when pause() was called
+    if (mState == STOPPING_1 && mResumeToStopping) {
+        return true;
+    }
+
+    return false;
+}
+
+// To be called with thread lock held
+void AudioFlinger::PlaybackThread::Track::resumeAck() {
+    if (mState == RESUMING) {
+        mState = ACTIVE;
+    }
+
+    // The other state in which a resume can be pending is STOPPING_1.
+    // Do not change the state in that case, so that the pending drain
+    // still takes place; just clear the flag recorded by pause().
+    if (mState == STOPPING_1) {
+        mResumeToStopping = false;
+    }
+}
+
 // ----------------------------------------------------------------------------
 
 sp<AudioFlinger::PlaybackThread::TimedTrack>
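
The isResumePending()/resumeAck() pair added above lets the offload thread treat a track that was stopping, then paused, then resumed the same way as a plain RESUMING track without losing the pending drain. A condensed, self-contained sketch of that state logic (simplified enum and flag names, not the real TrackBase types):

    #include <cstdio>

    enum State { ACTIVE, RESUMING, STOPPING_1 };  // simplified track states

    struct Track {
        State state = ACTIVE;
        bool resumeToStopping = false;  // pause() arrived while stopping

        bool isResumePending() const {
            return state == RESUMING || (state == STOPPING_1 && resumeToStopping);
        }

        void resumeAck() {
            if (state == RESUMING) {
                state = ACTIVE;
            }
            // Stay in STOPPING_1 so the pending drain still happens; only the
            // flag recorded by pause() is cleared.
            if (state == STOPPING_1) {
                resumeToStopping = false;
            }
        }
    };

    int main() {
        Track t;
        t.state = STOPPING_1;
        t.resumeToStopping = true;  // paused while stopping, then resumed
        printf("resume pending: %d\n", t.isResumePending());  // prints 1
        t.resumeAck();
        printf("resume pending: %d\n", t.isResumePending());  // prints 0
        return 0;
    }
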
@@ -1488,7 +1525,7 @@
             mTrimQueueHeadOnRelease = false;
         }
     } else {
-        LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
+        LOG_ALWAYS_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
                   " buffers in the timed buffer queue");
     }
 
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 84565bb..f270bfc 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -3,9 +3,21 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-    AudioPolicyService.cpp \
+    AudioPolicyService.cpp
+
+USE_LEGACY_AUDIO_POLICY = 1
+ifeq ($(USE_LEGACY_AUDIO_POLICY), 1)
+LOCAL_SRC_FILES += \
+    AudioPolicyInterfaceImplLegacy.cpp \
+    AudioPolicyClientImplLegacy.cpp
+
+LOCAL_CFLAGS += -DUSE_LEGACY_AUDIO_POLICY
+else
+LOCAL_SRC_FILES += \
     AudioPolicyInterfaceImpl.cpp \
-    AudioPolicyClientImpl.cpp
+    AudioPolicyClientImpl.cpp \
+    AudioPolicyManager.cpp
+endif
 
 LOCAL_C_INCLUDES := \
     $(TOPDIR)frameworks/av/services/audioflinger \
diff --git a/services/audiopolicy/AudioPolicyClientImpl.cpp b/services/audiopolicy/AudioPolicyClientImpl.cpp
index 53f3e2d..44c47c3 100644
--- a/services/audiopolicy/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/AudioPolicyClientImpl.cpp
@@ -14,44 +14,17 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "AudioPolicyService"
+#define LOG_TAG "AudioPolicyClientImpl"
 //#define LOG_NDEBUG 0
 
-#include "Configuration.h"
-#undef __STRICT_ANSI__
-#define __STDINT_LIMITS
-#define __STDC_LIMIT_MACROS
-#include <stdint.h>
-
-#include <sys/time.h>
-#include <binder/IServiceManager.h>
 #include <utils/Log.h>
-#include <cutils/properties.h>
-#include <binder/IPCThreadState.h>
-#include <utils/String16.h>
-#include <utils/threads.h>
 #include "AudioPolicyService.h"
-#include "ServiceUtilities.h"
-#include <hardware_legacy/power.h>
-#include <media/AudioEffect.h>
-#include <media/EffectsFactoryApi.h>
-//#include <media/IAudioFlinger.h>
-
-#include <hardware/hardware.h>
-#include <system/audio.h>
-#include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
-#include <audio_effects/audio_effects_conf.h>
-#include <media/AudioParameter.h>
-
 
 namespace android {
 
-/* implementation of the interface to the policy manager */
-extern "C" {
+/* implementation of the client interface from the policy manager */
 
-audio_module_handle_t aps_load_hw_module(void *service __unused,
-                                             const char *name)
+audio_module_handle_t AudioPolicyService::AudioPolicyClient::loadHwModule(const char *name)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -62,34 +35,14 @@
     return af->loadHwModule(name);
 }
 
-// deprecated: replaced by aps_open_output_on_module()
-audio_io_handle_t aps_open_output(void *service __unused,
-                                         audio_devices_t *pDevices,
-                                         uint32_t *pSamplingRate,
-                                         audio_format_t *pFormat,
-                                         audio_channel_mask_t *pChannelMask,
-                                         uint32_t *pLatencyMs,
-                                         audio_output_flags_t flags)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
-    }
-
-    return af->openOutput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask,
-                          pLatencyMs, flags);
-}
-
-audio_io_handle_t aps_open_output_on_module(void *service __unused,
-                                                   audio_module_handle_t module,
-                                                   audio_devices_t *pDevices,
-                                                   uint32_t *pSamplingRate,
-                                                   audio_format_t *pFormat,
-                                                   audio_channel_mask_t *pChannelMask,
-                                                   uint32_t *pLatencyMs,
-                                                   audio_output_flags_t flags,
-                                                   const audio_offload_info_t *offloadInfo)
+audio_io_handle_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
+                               audio_devices_t *pDevices,
+                               uint32_t *pSamplingRate,
+                               audio_format_t *pFormat,
+                               audio_channel_mask_t *pChannelMask,
+                               uint32_t *pLatencyMs,
+                               audio_output_flags_t flags,
+                               const audio_offload_info_t *offloadInfo)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -100,9 +53,9 @@
                           pLatencyMs, flags, offloadInfo);
 }
 
-audio_io_handle_t aps_open_dup_output(void *service __unused,
-                                                 audio_io_handle_t output1,
-                                                 audio_io_handle_t output2)
+audio_io_handle_t AudioPolicyService::AudioPolicyClient::openDuplicateOutput(
+                                                                audio_io_handle_t output1,
+                                                                audio_io_handle_t output2)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -112,7 +65,7 @@
     return af->openDuplicateOutput(output1, output2);
 }
 
-int aps_close_output(void *service __unused, audio_io_handle_t output)
+status_t AudioPolicyService::AudioPolicyClient::closeOutput(audio_io_handle_t output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -122,7 +75,7 @@
     return af->closeOutput(output);
 }
 
-int aps_suspend_output(void *service __unused, audio_io_handle_t output)
+status_t AudioPolicyService::AudioPolicyClient::suspendOutput(audio_io_handle_t output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -133,7 +86,7 @@
     return af->suspendOutput(output);
 }
 
-int aps_restore_output(void *service __unused, audio_io_handle_t output)
+status_t AudioPolicyService::AudioPolicyClient::restoreOutput(audio_io_handle_t output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -144,29 +97,11 @@
     return af->restoreOutput(output);
 }
 
-// deprecated: replaced by aps_open_input_on_module(), and acoustics parameter is ignored
-audio_io_handle_t aps_open_input(void *service __unused,
-                                        audio_devices_t *pDevices,
-                                        uint32_t *pSamplingRate,
-                                        audio_format_t *pFormat,
-                                        audio_channel_mask_t *pChannelMask,
-                                        audio_in_acoustics_t acoustics __unused)
-{
-    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        ALOGW("%s: could not get AudioFlinger", __func__);
-        return 0;
-    }
-
-    return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask);
-}
-
-audio_io_handle_t aps_open_input_on_module(void *service __unused,
-                                                  audio_module_handle_t module,
-                                                  audio_devices_t *pDevices,
-                                                  uint32_t *pSamplingRate,
-                                                  audio_format_t *pFormat,
-                                                  audio_channel_mask_t *pChannelMask)
+audio_io_handle_t AudioPolicyService::AudioPolicyClient::openInput(audio_module_handle_t module,
+                              audio_devices_t *pDevices,
+                              uint32_t *pSamplingRate,
+                              audio_format_t *pFormat,
+                              audio_channel_mask_t *pChannelMask)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -177,7 +112,7 @@
     return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
 }
 
-int aps_close_input(void *service __unused, audio_io_handle_t input)
+status_t AudioPolicyService::AudioPolicyClient::closeInput(audio_io_handle_t input)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -187,7 +122,15 @@
     return af->closeInput(input);
 }
 
-int aps_invalidate_stream(void *service __unused, audio_stream_type_t stream)
+status_t AudioPolicyService::AudioPolicyClient::setStreamVolume(audio_stream_type_t stream,
+                     float volume, audio_io_handle_t output,
+                     int delay_ms)
+{
+    return mAudioPolicyService->setStreamVolume(stream, volume, output,
+                                               delay_ms);
+}
+
+status_t AudioPolicyService::AudioPolicyClient::invalidateStream(audio_stream_type_t stream)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -197,9 +140,39 @@
     return af->invalidateStream(stream);
 }
 
-int aps_move_effects(void *service __unused, int session,
-                                audio_io_handle_t src_output,
-                                audio_io_handle_t dst_output)
+void AudioPolicyService::AudioPolicyClient::setParameters(audio_io_handle_t io_handle,
+                   const String8& keyValuePairs,
+                   int delay_ms)
+{
+    mAudioPolicyService->setParameters(io_handle, keyValuePairs.string(), delay_ms);
+}
+
+String8 AudioPolicyService::AudioPolicyClient::getParameters(audio_io_handle_t io_handle,
+                      const String8& keys)
+{
+    String8 result = AudioSystem::getParameters(io_handle, keys);
+    return result;
+}
+
+status_t AudioPolicyService::AudioPolicyClient::startTone(audio_policy_tone_t tone,
+              audio_stream_type_t stream)
+{
+    return mAudioPolicyService->startTone(tone, stream);
+}
+
+status_t AudioPolicyService::AudioPolicyClient::stopTone()
+{
+    return mAudioPolicyService->stopTone();
+}
+
+status_t AudioPolicyService::AudioPolicyClient::setVoiceVolume(float volume, int delay_ms)
+{
+    return mAudioPolicyService->setVoiceVolume(volume, delay_ms);
+}
+
+status_t AudioPolicyService::AudioPolicyClient::moveEffects(int session,
+                        audio_io_handle_t src_output,
+                        audio_io_handle_t dst_output)
 {
     sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
     if (af == 0) {
@@ -209,53 +182,6 @@
     return af->moveEffects(session, src_output, dst_output);
 }
 
-char * aps_get_parameters(void *service __unused, audio_io_handle_t io_handle,
-                                     const char *keys)
-{
-    String8 result = AudioSystem::getParameters(io_handle, String8(keys));
-    return strdup(result.string());
-}
 
-void aps_set_parameters(void *service, audio_io_handle_t io_handle,
-                                   const char *kv_pairs, int delay_ms)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    audioPolicyService->setParameters(io_handle, kv_pairs, delay_ms);
-}
-
-int aps_set_stream_volume(void *service, audio_stream_type_t stream,
-                                     float volume, audio_io_handle_t output,
-                                     int delay_ms)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    return audioPolicyService->setStreamVolume(stream, volume, output,
-                                               delay_ms);
-}
-
-int aps_start_tone(void *service, audio_policy_tone_t tone,
-                              audio_stream_type_t stream)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    return audioPolicyService->startTone(tone, stream);
-}
-
-int aps_stop_tone(void *service)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    return audioPolicyService->stopTone();
-}
-
-int aps_set_voice_volume(void *service, float volume, int delay_ms)
-{
-    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
-
-    return audioPolicyService->setVoiceVolume(volume, delay_ms);
-}
-
-}; // extern "C"
 
 }; // namespace android
diff --git a/services/audiopolicy/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
new file mode 100644
index 0000000..53f3e2d
--- /dev/null
+++ b/services/audiopolicy/AudioPolicyClientImplLegacy.cpp
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioPolicyService"
+//#define LOG_NDEBUG 0
+
+#include "Configuration.h"
+#undef __STRICT_ANSI__
+#define __STDINT_LIMITS
+#define __STDC_LIMIT_MACROS
+#include <stdint.h>
+
+#include <sys/time.h>
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+#include <cutils/properties.h>
+#include <binder/IPCThreadState.h>
+#include <utils/String16.h>
+#include <utils/threads.h>
+#include "AudioPolicyService.h"
+#include "ServiceUtilities.h"
+#include <hardware_legacy/power.h>
+#include <media/AudioEffect.h>
+#include <media/EffectsFactoryApi.h>
+//#include <media/IAudioFlinger.h>
+
+#include <hardware/hardware.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <hardware/audio_policy.h>
+#include <audio_effects/audio_effects_conf.h>
+#include <media/AudioParameter.h>
+
+
+namespace android {
+
+/* implementation of the interface to the policy manager */
+extern "C" {
+
+audio_module_handle_t aps_load_hw_module(void *service __unused,
+                                             const char *name)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+
+    return af->loadHwModule(name);
+}
+
+// deprecated: replaced by aps_open_output_on_module()
+audio_io_handle_t aps_open_output(void *service __unused,
+                                         audio_devices_t *pDevices,
+                                         uint32_t *pSamplingRate,
+                                         audio_format_t *pFormat,
+                                         audio_channel_mask_t *pChannelMask,
+                                         uint32_t *pLatencyMs,
+                                         audio_output_flags_t flags)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+
+    return af->openOutput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask,
+                          pLatencyMs, flags);
+}
+
+audio_io_handle_t aps_open_output_on_module(void *service __unused,
+                                                   audio_module_handle_t module,
+                                                   audio_devices_t *pDevices,
+                                                   uint32_t *pSamplingRate,
+                                                   audio_format_t *pFormat,
+                                                   audio_channel_mask_t *pChannelMask,
+                                                   uint32_t *pLatencyMs,
+                                                   audio_output_flags_t flags,
+                                                   const audio_offload_info_t *offloadInfo)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+    return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask,
+                          pLatencyMs, flags, offloadInfo);
+}
+
+audio_io_handle_t aps_open_dup_output(void *service __unused,
+                                                 audio_io_handle_t output1,
+                                                 audio_io_handle_t output2)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+    return af->openDuplicateOutput(output1, output2);
+}
+
+int aps_close_output(void *service __unused, audio_io_handle_t output)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    return af->closeOutput(output);
+}
+
+int aps_suspend_output(void *service __unused, audio_io_handle_t output)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return PERMISSION_DENIED;
+    }
+
+    return af->suspendOutput(output);
+}
+
+int aps_restore_output(void *service __unused, audio_io_handle_t output)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return PERMISSION_DENIED;
+    }
+
+    return af->restoreOutput(output);
+}
+
+// deprecated: replaced by aps_open_input_on_module(), and acoustics parameter is ignored
+audio_io_handle_t aps_open_input(void *service __unused,
+                                        audio_devices_t *pDevices,
+                                        uint32_t *pSamplingRate,
+                                        audio_format_t *pFormat,
+                                        audio_channel_mask_t *pChannelMask,
+                                        audio_in_acoustics_t acoustics __unused)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+
+    return af->openInput((audio_module_handle_t)0, pDevices, pSamplingRate, pFormat, pChannelMask);
+}
+
+audio_io_handle_t aps_open_input_on_module(void *service __unused,
+                                                  audio_module_handle_t module,
+                                                  audio_devices_t *pDevices,
+                                                  uint32_t *pSamplingRate,
+                                                  audio_format_t *pFormat,
+                                                  audio_channel_mask_t *pChannelMask)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        ALOGW("%s: could not get AudioFlinger", __func__);
+        return 0;
+    }
+
+    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
+}
+
+int aps_close_input(void *service __unused, audio_io_handle_t input)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    return af->closeInput(input);
+}
+
+int aps_invalidate_stream(void *service __unused, audio_stream_type_t stream)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    return af->invalidateStream(stream);
+}
+
+int aps_move_effects(void *service __unused, int session,
+                                audio_io_handle_t src_output,
+                                audio_io_handle_t dst_output)
+{
+    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+
+    return af->moveEffects(session, src_output, dst_output);
+}
+
+char * aps_get_parameters(void *service __unused, audio_io_handle_t io_handle,
+                                     const char *keys)
+{
+    String8 result = AudioSystem::getParameters(io_handle, String8(keys));
+    return strdup(result.string());
+}
+
+void aps_set_parameters(void *service, audio_io_handle_t io_handle,
+                                   const char *kv_pairs, int delay_ms)
+{
+    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+    audioPolicyService->setParameters(io_handle, kv_pairs, delay_ms);
+}
+
+int aps_set_stream_volume(void *service, audio_stream_type_t stream,
+                                     float volume, audio_io_handle_t output,
+                                     int delay_ms)
+{
+    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+    return audioPolicyService->setStreamVolume(stream, volume, output,
+                                               delay_ms);
+}
+
+int aps_start_tone(void *service, audio_policy_tone_t tone,
+                              audio_stream_type_t stream)
+{
+    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+    return audioPolicyService->startTone(tone, stream);
+}
+
+int aps_stop_tone(void *service)
+{
+    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+    return audioPolicyService->stopTone();
+}
+
+int aps_set_voice_volume(void *service, float volume, int delay_ms)
+{
+    AudioPolicyService *audioPolicyService = (AudioPolicyService *)service;
+
+    return audioPolicyService->setVoiceVolume(volume, delay_ms);
+}
+
+}; // extern "C"
+
+}; // namespace android
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 768b13e..66260e3 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_AUDIOPOLICYINTERFACE_H
-#define ANDROID_AUDIOPOLICYINTERFACE_H
+#ifndef ANDROID_AUDIOPOLICY_INTERFACE_H
+#define ANDROID_AUDIOPOLICY_INTERFACE_H
 
 #include <media/AudioSystem.h>
 #include <utils/String8.h>
@@ -254,4 +254,4 @@
 
 }; // namespace android
 
-#endif // ANDROID_AUDIOPOLICYINTERFACE_H
+#endif // ANDROID_AUDIOPOLICY_INTERFACE_H
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
index bb62ab3..c57c4fa 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
@@ -14,17 +14,13 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "AudioPolicyService"
+#define LOG_TAG "AudioPolicyIntefaceImpl"
 //#define LOG_NDEBUG 0
 
 #include <utils/Log.h>
 #include "AudioPolicyService.h"
 #include "ServiceUtilities.h"
 
-#include <system/audio.h>
-#include <system/audio_policy.h>
-#include <hardware/audio_policy.h>
-
 namespace android {
 
 
@@ -34,7 +30,7 @@
                                                   audio_policy_dev_state_t state,
                                                   const char *device_address)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     if (!settingsAllowed()) {
@@ -50,7 +46,7 @@
 
     ALOGV("setDeviceConnectionState()");
     Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->set_device_connection_state(mpAudioPolicy, device,
+    return mAudioPolicyManager->setDeviceConnectionState(device,
                                                       state, device_address);
 }
 
@@ -58,16 +54,16 @@
                                                               audio_devices_t device,
                                                               const char *device_address)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
     }
-    return mpAudioPolicy->get_device_connection_state(mpAudioPolicy, device,
+    return mAudioPolicyManager->getDeviceConnectionState(device,
                                                       device_address);
 }
 
 status_t AudioPolicyService::setPhoneState(audio_mode_t state)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     if (!settingsAllowed()) {
@@ -83,14 +79,14 @@
     AudioSystem::setMode(state);
 
     Mutex::Autolock _l(mLock);
-    mpAudioPolicy->set_phone_state(mpAudioPolicy, state);
+    mAudioPolicyManager->setPhoneState(state);
     return NO_ERROR;
 }
 
 status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage,
                                          audio_policy_forced_cfg_t config)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     if (!settingsAllowed()) {
@@ -104,19 +100,19 @@
     }
     ALOGV("setForceUse()");
     Mutex::Autolock _l(mLock);
-    mpAudioPolicy->set_force_use(mpAudioPolicy, usage, config);
+    mAudioPolicyManager->setForceUse(usage, config);
     return NO_ERROR;
 }
 
 audio_policy_forced_cfg_t AudioPolicyService::getForceUse(audio_policy_force_use_t usage)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return AUDIO_POLICY_FORCE_NONE;
     }
     if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
         return AUDIO_POLICY_FORCE_NONE;
     }
-    return mpAudioPolicy->get_force_use(mpAudioPolicy, usage);
+    return mAudioPolicyManager->getForceUse(usage);
 }
 
 audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream,
@@ -126,12 +122,12 @@
                                     audio_output_flags_t flags,
                                     const audio_offload_info_t *offloadInfo)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return 0;
     }
     ALOGV("getOutput()");
     Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate,
+    return mAudioPolicyManager->getOutput(stream, samplingRate,
                                     format, channelMask, flags, offloadInfo);
 }
 
@@ -139,19 +135,19 @@
                                          audio_stream_type_t stream,
                                          int session)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     ALOGV("startOutput()");
     Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session);
+    return mAudioPolicyManager->startOutput(output, stream, session);
 }
 
 status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
                                         audio_stream_type_t stream,
                                         int session)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     ALOGV("stopOutput()");
@@ -165,12 +161,12 @@
 {
     ALOGV("doStopOutput from tid %d", gettid());
     Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session);
+    return mAudioPolicyManager->stopOutput(output, stream, session);
 }
 
 void AudioPolicyService::releaseOutput(audio_io_handle_t output)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return;
     }
     ALOGV("releaseOutput()");
@@ -181,7 +177,7 @@
 {
     ALOGV("doReleaseOutput from tid %d", gettid());
     Mutex::Autolock _l(mLock);
-    mpAudioPolicy->release_output(mpAudioPolicy, output);
+    mAudioPolicyManager->releaseOutput(output);
 }
 
 audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource,
@@ -190,7 +186,7 @@
                                     audio_channel_mask_t channelMask,
                                     int audioSession)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return 0;
     }
     // already checked by client, but double-check in case the client wrapper is bypassed
@@ -204,7 +200,7 @@
 
     Mutex::Autolock _l(mLock);
     // the audio_in_acoustics_t parameter is ignored by get_input()
-    audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
+    audio_io_handle_t input = mAudioPolicyManager->getInput(inputSource, samplingRate,
                                                    format, channelMask, (audio_in_acoustics_t) 0);
 
     if (input == 0) {
@@ -248,31 +244,31 @@
 
 status_t AudioPolicyService::startInput(audio_io_handle_t input)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     Mutex::Autolock _l(mLock);
 
-    return mpAudioPolicy->start_input(mpAudioPolicy, input);
+    return mAudioPolicyManager->startInput(input);
 }
 
 status_t AudioPolicyService::stopInput(audio_io_handle_t input)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     Mutex::Autolock _l(mLock);
 
-    return mpAudioPolicy->stop_input(mpAudioPolicy, input);
+    return mAudioPolicyManager->stopInput(input);
 }
 
 void AudioPolicyService::releaseInput(audio_io_handle_t input)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return;
     }
     Mutex::Autolock _l(mLock);
-    mpAudioPolicy->release_input(mpAudioPolicy, input);
+    mAudioPolicyManager->releaseInput(input);
 
     ssize_t index = mInputs.indexOfKey(input);
     if (index < 0) {
@@ -288,7 +284,7 @@
                                             int indexMin,
                                             int indexMax)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     if (!settingsAllowed()) {
@@ -298,7 +294,7 @@
         return BAD_VALUE;
     }
     Mutex::Autolock _l(mLock);
-    mpAudioPolicy->init_stream_volume(mpAudioPolicy, stream, indexMin, indexMax);
+    mAudioPolicyManager->initStreamVolume(stream, indexMin, indexMax);
     return NO_ERROR;
 }
 
@@ -306,7 +302,7 @@
                                                   int index,
                                                   audio_devices_t device)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     if (!settingsAllowed()) {
@@ -316,63 +312,53 @@
         return BAD_VALUE;
     }
     Mutex::Autolock _l(mLock);
-    if (mpAudioPolicy->set_stream_volume_index_for_device) {
-        return mpAudioPolicy->set_stream_volume_index_for_device(mpAudioPolicy,
-                                                                stream,
-                                                                index,
-                                                                device);
-    } else {
-        return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index);
-    }
+    return mAudioPolicyManager->setStreamVolumeIndex(stream,
+                                                    index,
+                                                    device);
 }
 
 status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream,
                                                   int *index,
                                                   audio_devices_t device)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
     if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
         return BAD_VALUE;
     }
     Mutex::Autolock _l(mLock);
-    if (mpAudioPolicy->get_stream_volume_index_for_device) {
-        return mpAudioPolicy->get_stream_volume_index_for_device(mpAudioPolicy,
-                                                                stream,
-                                                                index,
-                                                                device);
-    } else {
-        return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index);
-    }
+    return mAudioPolicyManager->getStreamVolumeIndex(stream,
+                                                    index,
+                                                    device);
 }
 
 uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return 0;
     }
-    return mpAudioPolicy->get_strategy_for_stream(mpAudioPolicy, stream);
+    return mAudioPolicyManager->getStrategyForStream(stream);
 }
 
 //audio policy: use audio_device_t appropriately
 
 audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return (audio_devices_t)0;
     }
-    return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream);
+    return mAudioPolicyManager->getDevicesForStream(stream);
 }
 
 audio_io_handle_t AudioPolicyService::getOutputForEffect(const effect_descriptor_t *desc)
 {
     // FIXME change return type to status_t, and return NO_INIT here
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return 0;
     }
     Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->get_output_for_effect(mpAudioPolicy, desc);
+    return mAudioPolicyManager->getOutputForEffect(desc);
 }
 
 status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc,
@@ -381,56 +367,53 @@
                                 int session,
                                 int id)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
-    return mpAudioPolicy->register_effect(mpAudioPolicy, desc, io, strategy, session, id);
+    return mAudioPolicyManager->registerEffect(desc, io, strategy, session, id);
 }
 
 status_t AudioPolicyService::unregisterEffect(int id)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
-    return mpAudioPolicy->unregister_effect(mpAudioPolicy, id);
+    return mAudioPolicyManager->unregisterEffect(id);
 }
 
 status_t AudioPolicyService::setEffectEnabled(int id, bool enabled)
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
-    return mpAudioPolicy->set_effect_enabled(mpAudioPolicy, id, enabled);
+    return mAudioPolicyManager->setEffectEnabled(id, enabled);
 }
 
 bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return 0;
     }
     Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs);
+    return mAudioPolicyManager->isStreamActive(stream, inPastMs);
 }
 
 bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
 {
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         return 0;
     }
     Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs);
+    return mAudioPolicyManager->isStreamActiveRemotely(stream, inPastMs);
 }
 
 bool AudioPolicyService::isSourceActive(audio_source_t source) const
 {
-    if (mpAudioPolicy == NULL) {
-        return false;
-    }
-    if (mpAudioPolicy->is_source_active == 0) {
+    if (mAudioPolicyManager == NULL) {
         return false;
     }
     Mutex::Autolock _l(mLock);
-    return mpAudioPolicy->is_source_active(mpAudioPolicy, source);
+    return mAudioPolicyManager->isSourceActive(source);
 }
 
 status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession,
@@ -438,7 +421,7 @@
                                                        uint32_t *count)
 {
 
-    if (mpAudioPolicy == NULL) {
+    if (mAudioPolicyManager == NULL) {
         *count = 0;
         return NO_INIT;
     }
@@ -472,17 +455,12 @@
 
 bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
 {
-    if (mpAudioPolicy == NULL) {
-        ALOGV("mpAudioPolicy == NULL");
+    if (mAudioPolicyManager == NULL) {
+        ALOGV("mAudioPolicyManager == NULL");
         return false;
     }
 
-    if (mpAudioPolicy->is_offload_supported == NULL) {
-        ALOGV("HAL does not implement is_offload_supported");
-        return false;
-    }
-
-    return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info);
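+    // AudioPolicyManager always implements isOffloadSupported(), so the capability
+    // check done for the legacy HAL is no longer needed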
+    return mAudioPolicyManager->isOffloadSupported(info);
 }
 
 
diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
new file mode 100644
index 0000000..bb62ab3
--- /dev/null
+++ b/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
@@ -0,0 +1,489 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
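+// Legacy variant of the AudioPolicyService interface implementation: every call is
+// forwarded to the audio_policy HAL module (mpAudioPolicy) instead of the new C++
+// AudioPolicyManager class.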
+#define LOG_TAG "AudioPolicyService"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include "AudioPolicyService.h"
+#include "ServiceUtilities.h"
+
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <hardware/audio_policy.h>
+
+namespace android {
+
+
+// ----------------------------------------------------------------------------
+
+status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
+                                                  audio_policy_dev_state_t state,
+                                                  const char *device_address)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (!audio_is_output_device(device) && !audio_is_input_device(device)) {
+        return BAD_VALUE;
+    }
+    if (state != AUDIO_POLICY_DEVICE_STATE_AVAILABLE &&
+            state != AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
+        return BAD_VALUE;
+    }
+
+    ALOGV("setDeviceConnectionState()");
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->set_device_connection_state(mpAudioPolicy, device,
+                                                      state, device_address);
+}
+
+audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState(
+                                                              audio_devices_t device,
+                                                              const char *device_address)
+{
+    if (mpAudioPolicy == NULL) {
+        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
+    }
+    return mpAudioPolicy->get_device_connection_state(mpAudioPolicy, device,
+                                                      device_address);
+}
+
+status_t AudioPolicyService::setPhoneState(audio_mode_t state)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (uint32_t(state) >= AUDIO_MODE_CNT) {
+        return BAD_VALUE;
+    }
+
+    ALOGV("setPhoneState()");
+
+    // TODO: check if it is more appropriate to do it in platform specific policy manager
+    AudioSystem::setMode(state);
+
+    Mutex::Autolock _l(mLock);
+    mpAudioPolicy->set_phone_state(mpAudioPolicy, state);
+    return NO_ERROR;
+}
+
+status_t AudioPolicyService::setForceUse(audio_policy_force_use_t usage,
+                                         audio_policy_forced_cfg_t config)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
+        return BAD_VALUE;
+    }
+    if (config < 0 || config >= AUDIO_POLICY_FORCE_CFG_CNT) {
+        return BAD_VALUE;
+    }
+    ALOGV("setForceUse()");
+    Mutex::Autolock _l(mLock);
+    mpAudioPolicy->set_force_use(mpAudioPolicy, usage, config);
+    return NO_ERROR;
+}
+
+audio_policy_forced_cfg_t AudioPolicyService::getForceUse(audio_policy_force_use_t usage)
+{
+    if (mpAudioPolicy == NULL) {
+        return AUDIO_POLICY_FORCE_NONE;
+    }
+    if (usage < 0 || usage >= AUDIO_POLICY_FORCE_USE_CNT) {
+        return AUDIO_POLICY_FORCE_NONE;
+    }
+    return mpAudioPolicy->get_force_use(mpAudioPolicy, usage);
+}
+
+audio_io_handle_t AudioPolicyService::getOutput(audio_stream_type_t stream,
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    audio_output_flags_t flags,
+                                    const audio_offload_info_t *offloadInfo)
+{
+    if (mpAudioPolicy == NULL) {
+        return 0;
+    }
+    ALOGV("getOutput()");
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->get_output(mpAudioPolicy, stream, samplingRate,
+                                    format, channelMask, flags, offloadInfo);
+}
+
+status_t AudioPolicyService::startOutput(audio_io_handle_t output,
+                                         audio_stream_type_t stream,
+                                         int session)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    ALOGV("startOutput()");
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->start_output(mpAudioPolicy, output, stream, session);
+}
+
+status_t AudioPolicyService::stopOutput(audio_io_handle_t output,
+                                        audio_stream_type_t stream,
+                                        int session)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    ALOGV("stopOutput()");
+    mOutputCommandThread->stopOutputCommand(output, stream, session);
+    return NO_ERROR;
+}
+
+status_t  AudioPolicyService::doStopOutput(audio_io_handle_t output,
+                                      audio_stream_type_t stream,
+                                      int session)
+{
+    ALOGV("doStopOutput from tid %d", gettid());
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session);
+}
+
+void AudioPolicyService::releaseOutput(audio_io_handle_t output)
+{
+    if (mpAudioPolicy == NULL) {
+        return;
+    }
+    ALOGV("releaseOutput()");
+    mOutputCommandThread->releaseOutputCommand(output);
+}
+
+void AudioPolicyService::doReleaseOutput(audio_io_handle_t output)
+{
+    ALOGV("doReleaseOutput from tid %d", gettid());
+    Mutex::Autolock _l(mLock);
+    mpAudioPolicy->release_output(mpAudioPolicy, output);
+}
+
+audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource,
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    int audioSession)
+{
+    if (mpAudioPolicy == NULL) {
+        return 0;
+    }
+    // already checked by client, but double-check in case the client wrapper is bypassed
+    if (inputSource >= AUDIO_SOURCE_CNT && inputSource != AUDIO_SOURCE_HOTWORD) {
+        return 0;
+    }
+
+    if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
+        return 0;
+    }
+
+    Mutex::Autolock _l(mLock);
+    // the audio_in_acoustics_t parameter is ignored by get_input()
+    audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
+                                                   format, channelMask, (audio_in_acoustics_t) 0);
+
+    if (input == 0) {
+        return input;
+    }
+    // create audio pre processors according to input source
+    audio_source_t aliasSource = (inputSource == AUDIO_SOURCE_HOTWORD) ?
+                                    AUDIO_SOURCE_VOICE_RECOGNITION : inputSource;
+
+    ssize_t index = mInputSources.indexOfKey(aliasSource);
+    if (index < 0) {
+        return input;
+    }
+    ssize_t idx = mInputs.indexOfKey(input);
+    InputDesc *inputDesc;
+    if (idx < 0) {
+        inputDesc = new InputDesc(audioSession);
+        mInputs.add(input, inputDesc);
+    } else {
+        inputDesc = mInputs.valueAt(idx);
+    }
+
+    Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
+    for (size_t i = 0; i < effects.size(); i++) {
+        EffectDesc *effect = effects[i];
+        sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0, audioSession, input);
+        status_t status = fx->initCheck();
+        if (status != NO_ERROR && status != ALREADY_EXISTS) {
+            ALOGW("Failed to create Fx %s on input %d", effect->mName, input);
+            // fx goes out of scope and strong ref on AudioEffect is released
+            continue;
+        }
+        for (size_t j = 0; j < effect->mParams.size(); j++) {
+            fx->setParameter(effect->mParams[j]);
+        }
+        inputDesc->mEffects.add(fx);
+    }
+    setPreProcessorEnabled(inputDesc, true);
+    return input;
+}
+
+status_t AudioPolicyService::startInput(audio_io_handle_t input)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+
+    return mpAudioPolicy->start_input(mpAudioPolicy, input);
+}
+
+status_t AudioPolicyService::stopInput(audio_io_handle_t input)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+
+    return mpAudioPolicy->stop_input(mpAudioPolicy, input);
+}
+
+void AudioPolicyService::releaseInput(audio_io_handle_t input)
+{
+    if (mpAudioPolicy == NULL) {
+        return;
+    }
+    Mutex::Autolock _l(mLock);
+    mpAudioPolicy->release_input(mpAudioPolicy, input);
+
+    ssize_t index = mInputs.indexOfKey(input);
+    if (index < 0) {
+        return;
+    }
+    InputDesc *inputDesc = mInputs.valueAt(index);
+    setPreProcessorEnabled(inputDesc, false);
+    delete inputDesc;
+    mInputs.removeItemsAt(index);
+}
+
+status_t AudioPolicyService::initStreamVolume(audio_stream_type_t stream,
+                                            int indexMin,
+                                            int indexMax)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
+    Mutex::Autolock _l(mLock);
+    mpAudioPolicy->init_stream_volume(mpAudioPolicy, stream, indexMin, indexMax);
+    return NO_ERROR;
+}
+
+status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream,
+                                                  int index,
+                                                  audio_devices_t device)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    if (!settingsAllowed()) {
+        return PERMISSION_DENIED;
+    }
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
+    Mutex::Autolock _l(mLock);
+    if (mpAudioPolicy->set_stream_volume_index_for_device) {
+        return mpAudioPolicy->set_stream_volume_index_for_device(mpAudioPolicy,
+                                                                stream,
+                                                                index,
+                                                                device);
+    } else {
+        return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index);
+    }
+}
+
+status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream,
+                                                  int *index,
+                                                  audio_devices_t device)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    if (uint32_t(stream) >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
+    Mutex::Autolock _l(mLock);
+    if (mpAudioPolicy->get_stream_volume_index_for_device) {
+        return mpAudioPolicy->get_stream_volume_index_for_device(mpAudioPolicy,
+                                                                stream,
+                                                                index,
+                                                                device);
+    } else {
+        return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index);
+    }
+}
+
+uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
+{
+    if (mpAudioPolicy == NULL) {
+        return 0;
+    }
+    return mpAudioPolicy->get_strategy_for_stream(mpAudioPolicy, stream);
+}
+
+//audio policy: use audio_device_t appropriately
+
+audio_devices_t AudioPolicyService::getDevicesForStream(audio_stream_type_t stream)
+{
+    if (mpAudioPolicy == NULL) {
+        return (audio_devices_t)0;
+    }
+    return mpAudioPolicy->get_devices_for_stream(mpAudioPolicy, stream);
+}
+
+audio_io_handle_t AudioPolicyService::getOutputForEffect(const effect_descriptor_t *desc)
+{
+    // FIXME change return type to status_t, and return NO_INIT here
+    if (mpAudioPolicy == NULL) {
+        return 0;
+    }
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->get_output_for_effect(mpAudioPolicy, desc);
+}
+
+status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc,
+                                audio_io_handle_t io,
+                                uint32_t strategy,
+                                int session,
+                                int id)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    return mpAudioPolicy->register_effect(mpAudioPolicy, desc, io, strategy, session, id);
+}
+
+status_t AudioPolicyService::unregisterEffect(int id)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    return mpAudioPolicy->unregister_effect(mpAudioPolicy, id);
+}
+
+status_t AudioPolicyService::setEffectEnabled(int id, bool enabled)
+{
+    if (mpAudioPolicy == NULL) {
+        return NO_INIT;
+    }
+    return mpAudioPolicy->set_effect_enabled(mpAudioPolicy, id, enabled);
+}
+
+bool AudioPolicyService::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+{
+    if (mpAudioPolicy == NULL) {
+        return 0;
+    }
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->is_stream_active(mpAudioPolicy, stream, inPastMs);
+}
+
+bool AudioPolicyService::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
+{
+    if (mpAudioPolicy == NULL) {
+        return 0;
+    }
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->is_stream_active_remotely(mpAudioPolicy, stream, inPastMs);
+}
+
+bool AudioPolicyService::isSourceActive(audio_source_t source) const
+{
+    if (mpAudioPolicy == NULL) {
+        return false;
+    }
+    if (mpAudioPolicy->is_source_active == 0) {
+        return false;
+    }
+    Mutex::Autolock _l(mLock);
+    return mpAudioPolicy->is_source_active(mpAudioPolicy, source);
+}
+
+status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession,
+                                                       effect_descriptor_t *descriptors,
+                                                       uint32_t *count)
+{
+
+    if (mpAudioPolicy == NULL) {
+        *count = 0;
+        return NO_INIT;
+    }
+    Mutex::Autolock _l(mLock);
+    status_t status = NO_ERROR;
+
+    size_t index;
+    for (index = 0; index < mInputs.size(); index++) {
+        if (mInputs.valueAt(index)->mSessionId == audioSession) {
+            break;
+        }
+    }
+    if (index == mInputs.size()) {
+        *count = 0;
+        return BAD_VALUE;
+    }
+    Vector< sp<AudioEffect> > effects = mInputs.valueAt(index)->mEffects;
+
+    for (size_t i = 0; i < effects.size(); i++) {
+        effect_descriptor_t desc = effects[i]->descriptor();
+        if (i < *count) {
+            descriptors[i] = desc;
+        }
+    }
+    if (effects.size() > *count) {
+        status = NO_MEMORY;
+    }
+    *count = effects.size();
+    return status;
+}
+
+bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
+{
+    if (mpAudioPolicy == NULL) {
+        ALOGV("mpAudioPolicy == NULL");
+        return false;
+    }
+
+    if (mpAudioPolicy->is_offload_supported == NULL) {
+        ALOGV("HAL does not implement is_offload_supported");
+        return false;
+    }
+
+    return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info);
+}
+
+
+}; // namespace android
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 5ac9d9e..45f98d2 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -43,6 +43,117 @@
 namespace android {
 
 // ----------------------------------------------------------------------------
+// Definitions for audio_policy.conf file parsing
+// ----------------------------------------------------------------------------
+
+struct StringToEnum {
+    const char *name;
+    uint32_t value;
+};
+
+#define STRING_TO_ENUM(string) { #string, string }
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+const StringToEnum sDeviceNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
+};
+
+const StringToEnum sFlagNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_FAST),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
+};
+
+const StringToEnum sFormatNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_16_BIT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_BIT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_32_BIT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_24_BIT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_FLOAT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED),
+    STRING_TO_ENUM(AUDIO_FORMAT_MP3),
+    STRING_TO_ENUM(AUDIO_FORMAT_AAC),
+    STRING_TO_ENUM(AUDIO_FORMAT_VORBIS),
+};
+
+const StringToEnum sOutChannelsNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_MONO),
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_STEREO),
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
+};
+
+const StringToEnum sInChannelsNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_CHANNEL_IN_MONO),
+    STRING_TO_ENUM(AUDIO_CHANNEL_IN_STEREO),
+    STRING_TO_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
+};
+
+
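+// Return the enum value matching a symbol name read from audio_policy.conf,
+// or 0 if the name is not in the table.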
+uint32_t AudioPolicyManager::stringToEnum(const struct StringToEnum *table,
+                                              size_t size,
+                                              const char *name)
+{
+    for (size_t i = 0; i < size; i++) {
+        if (strcmp(table[i].name, name) == 0) {
+            ALOGV("stringToEnum() found %s", table[i].name);
+            return table[i].value;
+        }
+    }
+    return 0;
+}
+
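+// Reverse lookup: return the symbol name for an enum value, or "" if not found.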
+const char *AudioPolicyManager::enumToString(const struct StringToEnum *table,
+                                              size_t size,
+                                              uint32_t value)
+{
+    for (size_t i = 0; i < size; i++) {
+        if (table[i].value == value) {
+            return table[i].name;
+        }
+    }
+    return "";
+}
+
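+// Parse a boolean field from audio_policy.conf: "true" (case-insensitive) or "1".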
+bool AudioPolicyManager::stringToBool(const char *value)
+{
+    return ((strcasecmp("true", value) == 0) || (strcmp("1", value) == 0));
+}
+
+
+// ----------------------------------------------------------------------------
 // AudioPolicyInterface implementation
 // ----------------------------------------------------------------------------
 
@@ -52,101 +163,59 @@
                                                   const char *device_address)
 {
     SortedVector <audio_io_handle_t> outputs;
+    String8 address = String8(device_address);
 
     ALOGV("setDeviceConnectionState() device: %x, state %d, address %s", device, state, device_address);
 
     // connect/disconnect only 1 device at a time
     if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
 
-    if (strlen(device_address) >= MAX_DEVICE_ADDRESS_LEN) {
-        ALOGE("setDeviceConnectionState() invalid address: %s", device_address);
-        return BAD_VALUE;
-    }
-
     // handle output devices
     if (audio_is_output_device(device)) {
-
-        if (!mHasA2dp && audio_is_a2dp_device(device)) {
-            ALOGE("setDeviceConnectionState() invalid A2DP device: %x", device);
-            return BAD_VALUE;
-        }
-        if (!mHasUsb && audio_is_usb_device(device)) {
-            ALOGE("setDeviceConnectionState() invalid USB audio device: %x", device);
-            return BAD_VALUE;
-        }
-        if (!mHasRemoteSubmix && audio_is_remote_submix_device((audio_devices_t)device)) {
-            ALOGE("setDeviceConnectionState() invalid remote submix audio device: %x", device);
-            return BAD_VALUE;
-        }
+        sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device,
+                                                            address,
+                                                            0);
+        ssize_t index = mAvailableOutputDevices.indexOf(devDesc);
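+        // index >= 0 means this device is already in the list of available output devices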
 
         // save a copy of the opened output descriptors before any output is opened or closed
         // by checkOutputsForDevice(). This will be needed by checkOutputForAllStrategies()
         mPreviousOutputs = mOutputs;
-        String8 paramStr;
         switch (state)
         {
         // handle output device connection
         case AUDIO_POLICY_DEVICE_STATE_AVAILABLE:
-            if (mAvailableOutputDevices & device) {
+            if (index >= 0) {
                 ALOGW("setDeviceConnectionState() device already connected: %x", device);
                 return INVALID_OPERATION;
             }
             ALOGV("setDeviceConnectionState() connecting device %x", device);
 
-            if (mHasA2dp && audio_is_a2dp_device(device)) {
-                // handle A2DP device connection
-                AudioParameter param;
-                param.add(String8(AUDIO_PARAMETER_A2DP_SINK_ADDRESS), String8(device_address));
-                paramStr = param.toString();
-            } else if (mHasUsb && audio_is_usb_device(device)) {
-                // handle USB device connection
-                paramStr = String8(device_address, MAX_DEVICE_ADDRESS_LEN);
-            }
-
-            if (checkOutputsForDevice(device, state, outputs, paramStr) != NO_ERROR) {
+            if (checkOutputsForDevice(device, state, outputs, address) != NO_ERROR) {
                 return INVALID_OPERATION;
             }
             ALOGV("setDeviceConnectionState() checkOutputsForDevice() returned %d outputs",
                   outputs.size());
             // register new device as available
-            mAvailableOutputDevices = (audio_devices_t)(mAvailableOutputDevices | device);
-
-            if (mHasA2dp && audio_is_a2dp_device(device)) {
-                // handle A2DP device connection
-                mA2dpDeviceAddress = String8(device_address, MAX_DEVICE_ADDRESS_LEN);
-                mA2dpSuspended = false;
-            } else if (audio_is_bluetooth_sco_device(device)) {
-                // handle SCO device connection
-                mScoDeviceAddress = String8(device_address, MAX_DEVICE_ADDRESS_LEN);
-            } else if (mHasUsb && audio_is_usb_device(device)) {
-                // handle USB device connection
-                mUsbCardAndDevice = String8(device_address, MAX_DEVICE_ADDRESS_LEN);
+            index = mAvailableOutputDevices.add(devDesc);
+            if (index >= 0) {
+                mAvailableOutputDevices[index]->mId = nextUniqueId();
+            } else {
+                return NO_MEMORY;
             }
 
             break;
         // handle output device disconnection
         case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
-            if (!(mAvailableOutputDevices & device)) {
+            if (index < 0) {
                 ALOGW("setDeviceConnectionState() device not connected: %x", device);
                 return INVALID_OPERATION;
             }
 
             ALOGV("setDeviceConnectionState() disconnecting device %x", device);
             // remove device from available output devices
-            mAvailableOutputDevices = (audio_devices_t)(mAvailableOutputDevices & ~device);
+            mAvailableOutputDevices.remove(devDesc);
 
-            checkOutputsForDevice(device, state, outputs, paramStr);
-            if (mHasA2dp && audio_is_a2dp_device(device)) {
-                // handle A2DP device disconnection
-                mA2dpDeviceAddress = "";
-                mA2dpSuspended = false;
-            } else if (audio_is_bluetooth_sco_device(device)) {
-                // handle SCO device disconnection
-                mScoDeviceAddress = "";
-            } else if (mHasUsb && audio_is_usb_device(device)) {
-                // handle USB device disconnection
-                mUsbCardAndDevice = "";
-            }
+            checkOutputsForDevice(device, state, outputs, address);
             // not currently handling multiple simultaneous submixes: ignoring remote submix
             //   case and address
             } break;
@@ -156,6 +225,8 @@
             return BAD_VALUE;
         }
 
+        // checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
+        // output is suspended before any tracks are moved to it
         checkA2dpSuspend();
         checkOutputForAllStrategies();
         // outputs must be closed after checkOutputForAllStrategies() is executed
@@ -170,6 +241,8 @@
                     closeOutput(outputs[i]);
                 }
             }
+            // check again after closing A2DP output to reset mA2dpSuspended if needed
+            checkA2dpSuspend();
         }
 
         updateDevicesAndOutputs();
@@ -195,26 +268,35 @@
     }
     // handle input devices
     if (audio_is_input_device(device)) {
+        sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device,
+                                                            address,
+                                                            0);
 
+        ssize_t index = mAvailableInputDevices.indexOf(devDesc);
         switch (state)
         {
         // handle input device connection
         case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: {
-            if (mAvailableInputDevices & device) {
+            if (index >= 0) {
                 ALOGW("setDeviceConnectionState() device already connected: %d", device);
                 return INVALID_OPERATION;
             }
-            mAvailableInputDevices = mAvailableInputDevices | (device & ~AUDIO_DEVICE_BIT_IN);
+            index = mAvailableInputDevices.add(devDesc);
+            if (index >= 0) {
+                mAvailableInputDevices[index]->mId = nextUniqueId();
+            } else {
+                return NO_MEMORY;
+            }
             }
             break;
 
         // handle input device disconnection
         case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
-            if (!(mAvailableInputDevices & device)) {
+            if (index < 0) {
                 ALOGW("setDeviceConnectionState() device not connected: %d", device);
                 return INVALID_OPERATION;
             }
-            mAvailableInputDevices = (audio_devices_t) (mAvailableInputDevices & ~device);
+            mAvailableInputDevices.remove(devDesc);
             } break;
 
         default:
@@ -248,33 +330,27 @@
 {
     audio_policy_dev_state_t state = AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
     String8 address = String8(device_address);
+    sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device,
+                                                        String8(device_address),
+                                                        0);
+    ssize_t index;
+    DeviceVector *deviceVector;
+
     if (audio_is_output_device(device)) {
-        if (device & mAvailableOutputDevices) {
-            if (audio_is_a2dp_device(device) &&
-                (!mHasA2dp || (address != "" && mA2dpDeviceAddress != address))) {
-                return state;
-            }
-            if (audio_is_bluetooth_sco_device(device) &&
-                address != "" && mScoDeviceAddress != address) {
-                return state;
-            }
-            if (audio_is_usb_device(device) &&
-                (!mHasUsb || (address != "" && mUsbCardAndDevice != address))) {
-                ALOGE("getDeviceConnectionState() invalid device: %x", device);
-                return state;
-            }
-            if (audio_is_remote_submix_device((audio_devices_t)device) && !mHasRemoteSubmix) {
-                return state;
-            }
-            state = AUDIO_POLICY_DEVICE_STATE_AVAILABLE;
-        }
+        deviceVector = &mAvailableOutputDevices;
     } else if (audio_is_input_device(device)) {
-        if (device & mAvailableInputDevices) {
-            state = AUDIO_POLICY_DEVICE_STATE_AVAILABLE;
-        }
+        deviceVector = &mAvailableInputDevices;
+    } else {
+        ALOGW("getDeviceConnectionState() invalid device type %08x", device);
+        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
     }
 
-    return state;
+    index = deviceVector->indexOf(devDesc);
+    if (index >= 0) {
+        return AUDIO_POLICY_DEVICE_STATE_AVAILABLE;
+    } else {
+        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
+    }
 }
 
 void AudioPolicyManager::setPhoneState(audio_mode_t state)
@@ -505,23 +581,23 @@
         }
         for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) {
             IOProfile *profile = mHwModules[i]->mOutputProfiles[j];
+            bool found = false;
             if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
                 if (profile->isCompatibleProfile(device, samplingRate, format,
                                            channelMask,
                                            AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)) {
-                    if (mAvailableOutputDevices & profile->mSupportedDevices) {
-                        return mHwModules[i]->mOutputProfiles[j];
-                    }
+                    found = true;
                 }
             } else {
                 if (profile->isCompatibleProfile(device, samplingRate, format,
                                            channelMask,
                                            AUDIO_OUTPUT_FLAG_DIRECT)) {
-                    if (mAvailableOutputDevices & profile->mSupportedDevices) {
-                        return mHwModules[i]->mOutputProfiles[j];
-                    }
+                    found = true;
                 }
             }
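+            // a compatible profile is only returned if one of its supported devices
+            // is currently available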
+            if (found && (mAvailableOutputDevices.types() & profile->mSupportedDevices.types())) {
+                return profile;
+            }
         }
     }
     return 0;
@@ -1377,16 +1453,6 @@
 
     snprintf(buffer, SIZE, " Primary Output: %d\n", mPrimaryOutput);
     result.append(buffer);
-    snprintf(buffer, SIZE, " A2DP device address: %s\n", mA2dpDeviceAddress.string());
-    result.append(buffer);
-    snprintf(buffer, SIZE, " SCO device address: %s\n", mScoDeviceAddress.string());
-    result.append(buffer);
-    snprintf(buffer, SIZE, " USB audio ALSA %s\n", mUsbCardAndDevice.string());
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Output devices: %08x\n", mAvailableOutputDevices);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Input devices: %08x\n", mAvailableInputDevices);
-    result.append(buffer);
     snprintf(buffer, SIZE, " Phone state: %d\n", mPhoneState);
     result.append(buffer);
     snprintf(buffer, SIZE, " Force use for communications %d\n",
@@ -1400,8 +1466,20 @@
     result.append(buffer);
     snprintf(buffer, SIZE, " Force use for system %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM]);
     result.append(buffer);
-    write(fd, result.string(), result.size());
 
+    snprintf(buffer, SIZE, " Available output devices:\n");
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+    DeviceDescriptor::dumpHeader(fd, 2);
+    for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
+        mAvailableOutputDevices[i]->dump(fd, 2);
+    }
+    snprintf(buffer, SIZE, "\n Available input devices:\n");
+    write(fd, buffer, strlen(buffer));
+    DeviceDescriptor::dumpHeader(fd, 2);
+    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
+        mAvailableInputDevices[i]->dump(fd, 2);
+    }
 
     snprintf(buffer, SIZE, "\nHW Modules dump:\n");
     write(fd, buffer, strlen(buffer));
@@ -1525,18 +1603,22 @@
 // AudioPolicyManager
 // ----------------------------------------------------------------------------
 
+uint32_t AudioPolicyManager::nextUniqueId()
+{
+    return android_atomic_inc(&mNextUniqueId);
+}
+
 AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
     :
 #ifdef AUDIO_POLICY_TEST
     Thread(false),
 #endif //AUDIO_POLICY_TEST
     mPrimaryOutput((audio_io_handle_t)0),
-    mAvailableOutputDevices(AUDIO_DEVICE_NONE),
     mPhoneState(AUDIO_MODE_NORMAL),
     mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
     mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0),
-    mA2dpSuspended(false), mHasA2dp(false), mHasUsb(false), mHasRemoteSubmix(false),
-    mSpeakerDrcEnabled(false)
+    mA2dpSuspended(false),
+    mSpeakerDrcEnabled(false), mNextUniqueId(0)
 {
     mpClientInterface = clientInterface;
 
@@ -1544,21 +1626,21 @@
         mForceUse[i] = AUDIO_POLICY_FORCE_NONE;
     }
 
-    mA2dpDeviceAddress = String8("");
-    mScoDeviceAddress = String8("");
-    mUsbCardAndDevice = String8("");
-
+    mDefaultOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
     if (loadAudioPolicyConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE) != NO_ERROR) {
         if (loadAudioPolicyConfig(AUDIO_POLICY_CONFIG_FILE) != NO_ERROR) {
             ALOGE("could not load audio policy configuration file, setting defaults");
             defaultAudioPolicyConfig();
         }
     }
+    // mAvailableOutputDevices and mAvailableInputDevices now contain all attached devices
 
     // must be done after reading the policy
     initializeVolumeCurves();
 
     // open all output streams needed to access attached devices
+    audio_devices_t outputDeviceTypes = mAvailableOutputDevices.types();
+    audio_devices_t inputDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
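+    // the available devices are collapsed to type bit masks for the profile checks below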
     for (size_t i = 0; i < mHwModules.size(); i++) {
         mHwModules[i]->mHandle = mpClientInterface->loadHwModule(mHwModules[i]->mName);
         if (mHwModules[i]->mHandle == 0) {
@@ -1568,15 +1650,22 @@
         // open all output streams needed to access attached devices
         // except for direct output streams that are only opened when they are actually
         // required by an app.
+        // This also validates the mAvailableOutputDevices list
         for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
         {
             const IOProfile *outProfile = mHwModules[i]->mOutputProfiles[j];
 
-            if ((outProfile->mSupportedDevices & mAttachedOutputDevices) &&
+            if (outProfile->mSupportedDevices.isEmpty()) {
+                ALOGW("Output profile contains no device on module %s", mHwModules[i]->mName);
+                continue;
+            }
+
+            audio_devices_t profileTypes = outProfile->mSupportedDevices.types();
+            if ((profileTypes & outputDeviceTypes) &&
                     ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0)) {
                 AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor(outProfile);
-                outputDesc->mDevice = (audio_devices_t)(mDefaultOutputDevice &
-                                                            outProfile->mSupportedDevices);
+
+                outputDesc->mDevice = (audio_devices_t)(mDefaultOutputDevice->mType & profileTypes);
                 audio_io_handle_t output = mpClientInterface->openOutput(
                                                 outProfile->mModule->mHandle,
                                                 &outputDesc->mDevice,
@@ -1586,27 +1675,96 @@
                                                 &outputDesc->mLatency,
                                                 outputDesc->mFlags);
                 if (output == 0) {
+                    ALOGW("Cannot open output stream for device %08x on hw module %s",
+                          outputDesc->mDevice,
+                          mHwModules[i]->mName);
                     delete outputDesc;
                 } else {
-                    mAvailableOutputDevices = (audio_devices_t)(mAvailableOutputDevices |
-                                            (outProfile->mSupportedDevices & mAttachedOutputDevices));
+                    for (size_t i = 0; i < outProfile->mSupportedDevices.size(); i++) {
+                        audio_devices_t type = outProfile->mSupportedDevices[i]->mType;
+                        ssize_t index =
+                                mAvailableOutputDevices.indexOf(outProfile->mSupportedDevices[i]);
+                        // give a valid ID to an attached device once confirmed it is reachable
+                        if ((index >= 0) && (mAvailableOutputDevices[index]->mId == 0)) {
+                            mAvailableOutputDevices[index]->mId = nextUniqueId();
+                        }
+                    }
                     if (mPrimaryOutput == 0 &&
                             outProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) {
                         mPrimaryOutput = output;
                     }
                     addOutput(output, outputDesc);
                     setOutputDevice(output,
-                                    (audio_devices_t)(mDefaultOutputDevice &
-                                                        outProfile->mSupportedDevices),
+                                    outputDesc->mDevice,
                                     true);
                 }
             }
         }
-    }
+        // open the input streams needed to reach the attached devices, in order to
+        // validate the mAvailableInputDevices list
+        for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++)
+        {
+            const IOProfile *inProfile = mHwModules[i]->mInputProfiles[j];
 
-    ALOGE_IF((mAttachedOutputDevices & ~mAvailableOutputDevices),
-             "Not output found for attached devices %08x",
-             (mAttachedOutputDevices & ~mAvailableOutputDevices));
+            if (inProfile->mSupportedDevices.isEmpty()) {
+                ALOGW("Input profile contains no device on module %s", mHwModules[i]->mName);
+                continue;
+            }
+
+            audio_devices_t profileTypes = inProfile->mSupportedDevices.types();
+            if (profileTypes & inputDeviceTypes) {
+                AudioInputDescriptor *inputDesc = new AudioInputDescriptor(inProfile);
+
+                inputDesc->mInputSource = AUDIO_SOURCE_MIC;
+                inputDesc->mDevice = inProfile->mSupportedDevices[0]->mType;
+                audio_io_handle_t input = mpClientInterface->openInput(
+                                                    inProfile->mModule->mHandle,
+                                                    &inputDesc->mDevice,
+                                                    &inputDesc->mSamplingRate,
+                                                    &inputDesc->mFormat,
+                                                    &inputDesc->mChannelMask);
+
+                if (input != 0) {
+                    for (size_t i = 0; i < inProfile->mSupportedDevices.size(); i++) {
+                        audio_devices_t type = inProfile->mSupportedDevices[i]->mType;
+                        ssize_t index =
+                                mAvailableInputDevices.indexOf(inProfile->mSupportedDevices[i]);
+                        // give a valid ID to an attached device once confirmed it is reachable
+                        if ((index >= 0) && (mAvailableInputDevices[index]->mId == 0)) {
+                            mAvailableInputDevices[index]->mId = nextUniqueId();
+                        }
+                    }
+                    mpClientInterface->closeInput(input);
+                } else {
+                    ALOGW("Cannot open input stream for device %08x on hw module %s",
+                          inputDesc->mDevice,
+                          mHwModules[i]->mName);
+                }
+                delete inputDesc;
+            }
+        }
+    }
+    // make sure all attached devices have been allocated a unique ID
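+    // a device left with mId == 0 was not reached by any opened stream and is removed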
+    for (size_t i = 0; i < mAvailableOutputDevices.size();) {
+        if (mAvailableOutputDevices[i]->mId == 0) {
+            ALOGW("Output device %08x unreachable", mAvailableOutputDevices[i]->mType);
+            mAvailableOutputDevices.remove(mAvailableOutputDevices[i]);
+            continue;
+        }
+        i++;
+    }
+    for (size_t i = 0; i < mAvailableInputDevices.size();) {
+        if (mAvailableInputDevices[i]->mId == 0) {
+            ALOGW("Input device %08x unreachable", mAvailableInputDevices[i]->mType);
+            mAvailableInputDevices.remove(mAvailableInputDevices[i]);
+            continue;
+        }
+        i++;
+    }
+    // make sure default device is reachable
+    if (mAvailableOutputDevices.indexOf(mDefaultOutputDevice) < 0) {
+        ALOGE("Default device %08x is unreachable", mDefaultOutputDevice->mType);
+    }
 
     ALOGE_IF((mPrimaryOutput == 0), "Failed to open primary output");
 
@@ -1653,6 +1811,8 @@
    for (size_t i = 0; i < mHwModules.size(); i++) {
         delete mHwModules[i];
    }
+   mAvailableOutputDevices.clear();
+   mAvailableInputDevices.clear();
 }
 
 status_t AudioPolicyManager::initCheck()
@@ -1819,10 +1979,18 @@
 }
 
 
+String8 AudioPolicyManager::addressToParameter(audio_devices_t device, const String8 address)
+{
+    if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
+        return String8("a2dp_sink_address=")+address;
+    }
+    return address;
+}
+
 status_t AudioPolicyManager::checkOutputsForDevice(audio_devices_t device,
                                                        audio_policy_dev_state_t state,
                                                        SortedVector<audio_io_handle_t>& outputs,
-                                                       const String8 paramStr)
+                                                       const String8 address)
 {
     AudioOutputDescriptor *desc;
 
@@ -1830,7 +1998,7 @@
         // first list already open outputs that can be routed to this device
         for (size_t i = 0; i < mOutputs.size(); i++) {
             desc = mOutputs.valueAt(i);
-            if (!desc->isDuplicated() && (desc->mProfile->mSupportedDevices & device)) {
+            if (!desc->isDuplicated() && (desc->mProfile->mSupportedDevices.types() & device)) {
                 ALOGV("checkOutputsForDevice(): adding opened output %d", mOutputs.keyAt(i));
                 outputs.add(mOutputs.keyAt(i));
             }
@@ -1844,7 +2012,7 @@
             }
             for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
             {
-                if (mHwModules[i]->mOutputProfiles[j]->mSupportedDevices & device) {
+                if (mHwModules[i]->mOutputProfiles[j]->mSupportedDevices.types() & device) {
                     ALOGV("checkOutputsForDevice(): adding profile %d from module %d", j, i);
                     profiles.add(mHwModules[i]->mOutputProfiles[j]);
                 }
@@ -1873,7 +2041,7 @@
                 continue;
             }
 
-            ALOGV("opening output for device %08x with params %s", device, paramStr.string());
+            ALOGV("opening output for device %08x with params %s", device, address.string());
             desc = new AudioOutputDescriptor(profile);
             desc->mDevice = device;
             audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
@@ -1890,8 +2058,8 @@
                                                                        desc->mFlags,
                                                                        &offloadInfo);
             if (output != 0) {
-                if (!paramStr.isEmpty()) {
-                    mpClientInterface->setParameters(output, paramStr);
+                if (!address.isEmpty()) {
+                    mpClientInterface->setParameters(output, addressToParameter(device, address));
                 }
 
                 if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
@@ -1991,7 +2159,8 @@
         for (size_t i = 0; i < mOutputs.size(); i++) {
             desc = mOutputs.valueAt(i);
             if (!desc->isDuplicated() &&
-                    !(desc->mProfile->mSupportedDevices & mAvailableOutputDevices)) {
+                    !(desc->mProfile->mSupportedDevices.types() &
+                            mAvailableOutputDevices.types())) {
                 ALOGV("checkOutputsForDevice(): disconnecting adding output %d", mOutputs.keyAt(i));
                 outputs.add(mOutputs.keyAt(i));
             }
@@ -2004,7 +2173,7 @@
             for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
             {
                 IOProfile *profile = mHwModules[i]->mOutputProfiles[j];
-                if ((profile->mSupportedDevices & device) &&
+                if ((profile->mSupportedDevices.types() & device) &&
                         (profile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
                     ALOGV("checkOutputsForDevice(): clearing direct output profile %d on module %d",
                           j, i);
@@ -2166,10 +2335,6 @@
 
 audio_io_handle_t AudioPolicyManager::getA2dpOutput()
 {
-    if (!mHasA2dp) {
-        return 0;
-    }
-
     for (size_t i = 0; i < mOutputs.size(); i++) {
         AudioOutputDescriptor *outputDesc = mOutputs.valueAt(i);
         if (!outputDesc->isDuplicated() && outputDesc->device() & AUDIO_DEVICE_OUT_ALL_A2DP) {
@@ -2182,14 +2347,14 @@
 
 void AudioPolicyManager::checkA2dpSuspend()
 {
-    if (!mHasA2dp) {
-        return;
-    }
     audio_io_handle_t a2dpOutput = getA2dpOutput();
     if (a2dpOutput == 0) {
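+        // no A2DP output is open: make sure the suspend flag is cleared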
+        mA2dpSuspended = false;
         return;
     }
 
+    bool isScoConnected =
+            (mAvailableInputDevices.types() & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) != 0;
     // suspend A2DP output if:
     //      (NOT already suspended) &&
     //      ((SCO device is connected &&
@@ -2203,7 +2368,7 @@
     //      (phone state is NOT ringing && NOT in call)
     //
     if (mA2dpSuspended) {
-        if (((mScoDeviceAddress == "") ||
+        if ((!isScoConnected ||
              ((mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] != AUDIO_POLICY_FORCE_BT_SCO) &&
               (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] != AUDIO_POLICY_FORCE_BT_SCO))) &&
              ((mPhoneState != AUDIO_MODE_IN_CALL) &&
@@ -2213,7 +2378,7 @@
             mA2dpSuspended = false;
         }
     } else {
-        if (((mScoDeviceAddress != "") &&
+        if ((isScoConnected &&
              ((mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO) ||
               (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO))) ||
              ((mPhoneState == AUDIO_MODE_IN_CALL) ||
@@ -2328,7 +2493,7 @@
               strategy, mDeviceForStrategy[strategy]);
         return mDeviceForStrategy[strategy];
     }
-
+    audio_devices_t availableOutputDeviceTypes = mAvailableOutputDevices.types();
     switch (strategy) {
 
     case STRATEGY_SONIFICATION_RESPECTFUL:
@@ -2366,45 +2531,45 @@
         switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
         case AUDIO_POLICY_FORCE_BT_SCO:
             if (!isInCall() || strategy != STRATEGY_DTMF) {
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
                 if (device) break;
             }
-            device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
             if (device) break;
-            device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
             if (device) break;
             // if SCO device is requested but no SCO device is available, fall back to default case
             // FALL THROUGH
 
         default:    // FORCE_NONE
             // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
-            if (mHasA2dp && !isInCall() &&
+            if (!isInCall() &&
                     (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                     (getA2dpOutput() != 0) && !mA2dpSuspended) {
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
                 if (device) break;
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
                 if (device) break;
             }
-            device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
             if (device) break;
-            device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_WIRED_HEADSET;
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET;
             if (device) break;
             if (mPhoneState != AUDIO_MODE_IN_CALL) {
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_USB_ACCESSORY;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
                 if (device) break;
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_USB_DEVICE;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
                 if (device) break;
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
                 if (device) break;
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
                 if (device) break;
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
                 if (device) break;
             }
-            device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_EARPIECE;
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_EARPIECE;
             if (device) break;
-            device = mDefaultOutputDevice;
+            device = mDefaultOutputDevice->mType;
             if (device == AUDIO_DEVICE_NONE) {
                 ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE");
             }
@@ -2413,27 +2578,27 @@
         case AUDIO_POLICY_FORCE_SPEAKER:
             // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
             // A2DP speaker when forcing to speaker output
-            if (mHasA2dp && !isInCall() &&
+            if (!isInCall() &&
                     (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                     (getA2dpOutput() != 0) && !mA2dpSuspended) {
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
                 if (device) break;
             }
             if (mPhoneState != AUDIO_MODE_IN_CALL) {
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_USB_ACCESSORY;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
                 if (device) break;
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_USB_DEVICE;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
                 if (device) break;
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
                 if (device) break;
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
                 if (device) break;
-                device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
                 if (device) break;
             }
-            device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_SPEAKER;
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
             if (device) break;
-            device = mDefaultOutputDevice;
+            device = mDefaultOutputDevice->mType;
             if (device == AUDIO_DEVICE_NONE) {
                 ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE, FORCE_SPEAKER");
             }
@@ -2459,7 +2624,7 @@
 
         if ((strategy == STRATEGY_SONIFICATION) ||
                 (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
-            device = mAvailableOutputDevices & AUDIO_DEVICE_OUT_SPEAKER;
+            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
             if (device == AUDIO_DEVICE_NONE) {
                 ALOGE("getDeviceForStrategy() speaker device not found for STRATEGY_SONIFICATION");
             }
@@ -2471,52 +2636,51 @@
         uint32_t device2 = AUDIO_DEVICE_NONE;
         if (strategy != STRATEGY_SONIFICATION) {
             // no sonification on remote submix (e.g. WFD)
-            device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
         }
         if ((device2 == AUDIO_DEVICE_NONE) &&
-                mHasA2dp &&
                 (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
                 (getA2dpOutput() != 0) && !mA2dpSuspended) {
-            device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
             if (device2 == AUDIO_DEVICE_NONE) {
-                device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
+                device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
             }
             if (device2 == AUDIO_DEVICE_NONE) {
-                device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
+                device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
             }
         }
         if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
         }
         if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_WIRED_HEADSET;
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET;
         }
         if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_USB_ACCESSORY;
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
         }
         if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_USB_DEVICE;
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
         }
         if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
         }
         if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
             // no sonification on aux digital (e.g. HDMI)
-            device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
         }
         if ((device2 == AUDIO_DEVICE_NONE) &&
                 (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
-            device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
         }
         if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = mAvailableOutputDevices & AUDIO_DEVICE_OUT_SPEAKER;
+            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
         }
 
         // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
         // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
         device |= device2;
         if (device) break;
-        device = mDefaultOutputDevice;
+        device = mDefaultOutputDevice->mType;
         if (device == AUDIO_DEVICE_NONE) {
             ALOGE("getDeviceForStrategy() no device found for STRATEGY_MEDIA");
         }
@@ -2630,12 +2794,12 @@
     // no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current
     // output profile
     if ((device != AUDIO_DEVICE_NONE) &&
-            ((device & outputDesc->mProfile->mSupportedDevices) == 0)) {
+            ((device & outputDesc->mProfile->mSupportedDevices.types()) == 0)) {
         return 0;
     }
 
     // filter devices according to output selected
-    device = (audio_devices_t)(device & outputDesc->mProfile->mSupportedDevices);
+    device = (audio_devices_t)(device & outputDesc->mProfile->mSupportedDevices.types());
 
     audio_devices_t prevDevice = outputDesc->mDevice;
 
@@ -2694,10 +2858,11 @@
 audio_devices_t AudioPolicyManager::getDeviceForInputSource(audio_source_t inputSource)
 {
     uint32_t device = AUDIO_DEVICE_NONE;
-
+    audio_devices_t availableDeviceTypes = mAvailableInputDevices.types() &
+                                            ~AUDIO_DEVICE_BIT_IN;
     switch (inputSource) {
     case AUDIO_SOURCE_VOICE_UPLINK:
-      if (mAvailableInputDevices & AUDIO_DEVICE_IN_VOICE_CALL) {
+      if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
           device = AUDIO_DEVICE_IN_VOICE_CALL;
           break;
       }
@@ -2709,29 +2874,29 @@
     case AUDIO_SOURCE_HOTWORD:
     case AUDIO_SOURCE_VOICE_COMMUNICATION:
         if (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO &&
-            mAvailableInputDevices & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
+                availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
             device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
-        } else if (mAvailableInputDevices & AUDIO_DEVICE_IN_WIRED_HEADSET) {
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
             device = AUDIO_DEVICE_IN_WIRED_HEADSET;
-        } else if (mAvailableInputDevices & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
             device = AUDIO_DEVICE_IN_BUILTIN_MIC;
         }
         break;
     case AUDIO_SOURCE_CAMCORDER:
-        if (mAvailableInputDevices & AUDIO_DEVICE_IN_BACK_MIC) {
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
             device = AUDIO_DEVICE_IN_BACK_MIC;
-        } else if (mAvailableInputDevices & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
             device = AUDIO_DEVICE_IN_BUILTIN_MIC;
         }
         break;
     case AUDIO_SOURCE_VOICE_DOWNLINK:
     case AUDIO_SOURCE_VOICE_CALL:
-        if (mAvailableInputDevices & AUDIO_DEVICE_IN_VOICE_CALL) {
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
             device = AUDIO_DEVICE_IN_VOICE_CALL;
         }
         break;
     case AUDIO_SOURCE_REMOTE_SUBMIX:
-        if (mAvailableInputDevices & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
             device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
         }
         break;
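
The hunk above strips AUDIO_DEVICE_BIT_IN from the aggregate type mask before matching input sources. A minimal sketch of why that works, assuming only the standard <system/audio.h> definitions (not part of the patch):

    // Input device constants carry a role bit on top of their type bits, and
    // DeviceVector::types() ORs the full values together, so the role bit is
    // masked off before bitwise tests against AUDIO_DEVICE_IN_* masks.
    audio_devices_t available = mAvailableInputDevices.types();              // role bit included
    audio_devices_t availableDeviceTypes = available & ~AUDIO_DEVICE_BIT_IN; // pure type bits
    if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
        // the built-in microphone is attached and may be selected
    }
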
@@ -3335,7 +3500,7 @@
     if (isDuplicated()) {
         return (audio_devices_t)(mOutput1->supportedDevices() | mOutput2->supportedDevices());
     } else {
-        return mProfile->mSupportedDevices ;
+        return mProfile->mSupportedDevices.types();
     }
 }
 
@@ -3418,6 +3583,11 @@
       mDevice(AUDIO_DEVICE_NONE), mRefCount(0),
       mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile)
 {
+    if (profile != NULL) {
+        mSamplingRate = profile->mSamplingRates[0];
+        mFormat = profile->mFormats[0];
+        mChannelMask = profile->mChannelMasks[0];
+    }
 }
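
The constructor body added above seeds the input descriptor from the first entry of each capability list in the profile. A more defensive variant (a sketch only; the patch itself relies on loadInput() rejecting profiles with empty capability lists) would be:

    // Sketch, not part of the patch: guard the index-0 accesses against empty
    // capability vectors before using them as defaults.
    if (profile != NULL && !profile->mSamplingRates.isEmpty() &&
            !profile->mFormats.isEmpty() && !profile->mChannelMasks.isEmpty()) {
        mSamplingRate = profile->mSamplingRates[0];
        mFormat = profile->mFormats[0];
        mChannelMask = profile->mChannelMasks[0];
    }
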
 
 status_t AudioPolicyManager::AudioInputDescriptor::dump(int fd)
@@ -3512,10 +3682,12 @@
 AudioPolicyManager::HwModule::~HwModule()
 {
     for (size_t i = 0; i < mOutputProfiles.size(); i++) {
-         delete mOutputProfiles[i];
+        mOutputProfiles[i]->mSupportedDevices.clear();
+        delete mOutputProfiles[i];
     }
     for (size_t i = 0; i < mInputProfiles.size(); i++) {
-         delete mInputProfiles[i];
+        mInputProfiles[i]->mSupportedDevices.clear();
+        delete mInputProfiles[i];
     }
     free((void *)mName);
 }
@@ -3571,7 +3743,7 @@
          return false;
      }
 
-     if ((mSupportedDevices & device) != device) {
+     if ((mSupportedDevices.types() & device) != device) {
          return false;
      }
      if ((mFlags & flags) != flags) {
@@ -3638,103 +3810,129 @@
         result.append(i == (mFormats.size() - 1) ? "\n" : ", ");
     }
 
-    snprintf(buffer, SIZE, "    - devices: 0x%04x\n", mSupportedDevices);
+    snprintf(buffer, SIZE, "    - devices:\n");
     result.append(buffer);
+    write(fd, result.string(), result.size());
+    DeviceDescriptor::dumpHeader(fd, 6);
+    for (size_t i = 0; i < mSupportedDevices.size(); i++) {
+        mSupportedDevices[i]->dump(fd, 6);
+    }
+
     snprintf(buffer, SIZE, "    - flags: 0x%04x\n", mFlags);
     result.append(buffer);
 
     write(fd, result.string(), result.size());
 }
 
-// --- audio_policy.conf file parsing
+// --- DeviceDescriptor implementation
 
-struct StringToEnum {
-    const char *name;
-    uint32_t value;
-};
-
-#define STRING_TO_ENUM(string) { #string, string }
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
-
-const struct StringToEnum sDeviceNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
-};
-
-const struct StringToEnum sFlagNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_FAST),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
-};
-
-const struct StringToEnum sFormatNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_16_BIT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_BIT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_32_BIT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_24_BIT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_FLOAT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED),
-    STRING_TO_ENUM(AUDIO_FORMAT_MP3),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC),
-    STRING_TO_ENUM(AUDIO_FORMAT_VORBIS),
-};
-
-const struct StringToEnum sOutChannelsNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_MONO),
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_STEREO),
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
-};
-
-const struct StringToEnum sInChannelsNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_CHANNEL_IN_MONO),
-    STRING_TO_ENUM(AUDIO_CHANNEL_IN_STEREO),
-    STRING_TO_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
-};
-
-
-uint32_t AudioPolicyManager::stringToEnum(const struct StringToEnum *table,
-                                              size_t size,
-                                              const char *name)
+bool AudioPolicyManager::DeviceDescriptor::equals(const sp<DeviceDescriptor>& other) const
 {
-    for (size_t i = 0; i < size; i++) {
-        if (strcmp(table[i].name, name) == 0) {
-            ALOGV("stringToEnum() found %s", table[i].name);
-            return table[i].value;
+    // Devices are considered equal if they:
+    // - are of the same type (a device type cannot be AUDIO_DEVICE_NONE)
+    // - have the same address or one device does not specify the address
+    // - have the same channel mask or one device does not specify the channel mask
+    return (mType == other->mType) &&
+           (mAddress == "" || other->mAddress == "" || mAddress == other->mAddress) &&
+           (mChannelMask == 0 || other->mChannelMask == 0 ||
+                mChannelMask == other->mChannelMask);
+}
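
equals() treats an empty address or a zero channel mask as a wildcard, which is what lets a plain type-only descriptor match a fully specified one. An illustrative sketch (the MAC address below is hypothetical):

    // Illustration only: the second descriptor leaves address and channel mask
    // unspecified, so equals() matches it against any A2DP device of that type.
    sp<DeviceDescriptor> connected = new DeviceDescriptor(
            AUDIO_DEVICE_OUT_BLUETOOTH_A2DP, String8("00:11:22:33:44:55"), 0);
    sp<DeviceDescriptor> query = new DeviceDescriptor(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP);
    bool same = connected->equals(query);   // true: same type, wildcard address and mask
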
+
+void AudioPolicyManager::DeviceVector::refreshTypes()
+{
+    mTypes = AUDIO_DEVICE_NONE;
+    for (size_t i = 0; i < size(); i++) {
+        mTypes |= itemAt(i)->mType;
+    }
+    ALOGV("DeviceVector::refreshTypes() mTypes %08x", mTypes);
+}
+
+ssize_t AudioPolicyManager::DeviceVector::indexOf(const sp<DeviceDescriptor>& item) const
+{
+    for (size_t i = 0; i < size(); i++) {
+        if (item->equals(itemAt(i))) {
+            return i;
         }
     }
-    return 0;
+    return -1;
 }
 
-bool AudioPolicyManager::stringToBool(const char *value)
+ssize_t AudioPolicyManager::DeviceVector::add(const sp<DeviceDescriptor>& item)
 {
-    return ((strcasecmp("true", value) == 0) || (strcmp("1", value) == 0));
+    ssize_t ret = indexOf(item);
+
+    if (ret < 0) {
+        ret = SortedVector::add(item);
+        if (ret >= 0) {
+            refreshTypes();
+        }
+    } else {
+        ALOGW("DeviceVector::add device %08x already in", item->mType);
+        ret = -1;
+    }
+    return ret;
 }
 
+ssize_t AudioPolicyManager::DeviceVector::remove(const sp<DeviceDescriptor>& item)
+{
+    size_t i;
+    ssize_t ret = indexOf(item);
+
+    if (ret < 0) {
+        ALOGW("DeviceVector::remove device %08x not in", item->mType);
+    } else {
+        ret = SortedVector::removeAt(ret);
+        if (ret >= 0) {
+            refreshTypes();
+        }
+    }
+    return ret;
+}
+
+void AudioPolicyManager::DeviceVector::loadDevicesFromType(audio_devices_t types)
+{
+    DeviceVector deviceList;
+
+    uint32_t role_bit = AUDIO_DEVICE_BIT_IN & types;
+    types &= ~role_bit;
+
+    while (types) {
+        uint32_t i = 31 - __builtin_clz(types);
+        uint32_t type = 1 << i;
+        types &= ~type;
+        add(new DeviceDescriptor(type | role_bit));
+    }
+}
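
loadDevicesFromType() expands a legacy device bitmask into one descriptor per set bit, preserving the input role bit on each of them. A standalone sketch of the same bit-walk, assuming a GCC/Clang toolchain for __builtin_clz (the helper name is hypothetical):

    // Visits every set bit exactly once, highest bit first, e.g. for
    // AUDIO_DEVICE_OUT_SPEAKER | AUDIO_DEVICE_OUT_WIRED_HEADSET.
    void forEachDeviceType(audio_devices_t types) {
        uint32_t roleBit = types & AUDIO_DEVICE_BIT_IN;     // kept on every descriptor
        types &= ~roleBit;
        while (types) {
            uint32_t index = 31 - __builtin_clz(types);     // position of the highest set bit
            audio_devices_t type = 1u << index;
            types &= ~type;
            // ... add(new DeviceDescriptor(type | roleBit)) in the method above ...
        }
    }
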
+
+void AudioPolicyManager::DeviceDescriptor::dumpHeader(int fd, int spaces)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "%*s%-48s %-2s %-8s %-32s \n",
+                         spaces, "", "Type", "ID", "Cnl Mask", "Address");
+    write(fd, buffer, strlen(buffer));
+}
+
+status_t AudioPolicyManager::DeviceDescriptor::dump(int fd, int spaces) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "%*s%-48s %2d %08x %-32s \n",
+                         spaces, "",
+                         enumToString(sDeviceNameToEnumTable,
+                                      ARRAY_SIZE(sDeviceNameToEnumTable),
+                                      mType),
+                         mId, mChannelMask, mAddress.string());
+    write(fd, buffer, strlen(buffer));
+
+    return NO_ERROR;
+}
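
dump() above relies on enumToString(), declared further down in AudioPolicyManager.h as the reverse of stringToEnum(); its definition falls outside the hunks shown here. A minimal implementation consistent with that declaration would be (a sketch, not the patch's code):

    const char *AudioPolicyManager::enumToString(const struct StringToEnum *table,
                                                 size_t size,
                                                 uint32_t value)
    {
        // Linear reverse lookup over the same name/value tables used by stringToEnum().
        for (size_t i = 0; i < size; i++) {
            if (table[i].value == value) {
                return table[i].name;
            }
        }
        return "";
    }
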
+
+
+// --- audio_policy.conf file parsing
+
 audio_output_flags_t AudioPolicyManager::parseFlagNames(char *name)
 {
     uint32_t flag = 0;
@@ -3770,9 +3968,9 @@
             device |= stringToEnum(sDeviceNameToEnumTable,
                                  ARRAY_SIZE(sDeviceNameToEnumTable),
                                  devName);
-        }
+         }
         devName = strtok(NULL, "|");
-    }
+     }
     return device;
 }
 
@@ -3886,11 +4084,11 @@
         } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
             loadInChannels((char *)node->value, profile);
         } else if (strcmp(node->name, DEVICES_TAG) == 0) {
-            profile->mSupportedDevices = parseDeviceNames((char *)node->value);
+            profile->mSupportedDevices.loadDevicesFromType(parseDeviceNames((char *)node->value));
         }
         node = node->next;
     }
-    ALOGW_IF(profile->mSupportedDevices == AUDIO_DEVICE_NONE,
+    ALOGW_IF(profile->mSupportedDevices.isEmpty(),
             "loadInput() invalid supported devices");
     ALOGW_IF(profile->mChannelMasks.size() == 0,
             "loadInput() invalid supported channel masks");
@@ -3898,12 +4096,13 @@
             "loadInput() invalid supported sampling rates");
     ALOGW_IF(profile->mFormats.size() == 0,
             "loadInput() invalid supported formats");
-    if ((profile->mSupportedDevices != AUDIO_DEVICE_NONE) &&
+    if (!profile->mSupportedDevices.isEmpty() &&
             (profile->mChannelMasks.size() != 0) &&
             (profile->mSamplingRates.size() != 0) &&
             (profile->mFormats.size() != 0)) {
 
-        ALOGV("loadInput() adding input mSupportedDevices %04x", profile->mSupportedDevices);
+        ALOGV("loadInput() adding input Supported Devices %04x",
+              profile->mSupportedDevices.types());
 
         module->mInputProfiles.add(profile);
         return NO_ERROR;
@@ -3927,13 +4126,13 @@
         } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
             loadOutChannels((char *)node->value, profile);
         } else if (strcmp(node->name, DEVICES_TAG) == 0) {
-            profile->mSupportedDevices = parseDeviceNames((char *)node->value);
+            profile->mSupportedDevices.loadDevicesFromType(parseDeviceNames((char *)node->value));
         } else if (strcmp(node->name, FLAGS_TAG) == 0) {
             profile->mFlags = parseFlagNames((char *)node->value);
         }
         node = node->next;
     }
-    ALOGW_IF(profile->mSupportedDevices == AUDIO_DEVICE_NONE,
+    ALOGW_IF(profile->mSupportedDevices.isEmpty(),
             "loadOutput() invalid supported devices");
     ALOGW_IF(profile->mChannelMasks.size() == 0,
             "loadOutput() invalid supported channel masks");
@@ -3941,13 +4140,13 @@
             "loadOutput() invalid supported sampling rates");
     ALOGW_IF(profile->mFormats.size() == 0,
             "loadOutput() invalid supported formats");
-    if ((profile->mSupportedDevices != AUDIO_DEVICE_NONE) &&
+    if (!profile->mSupportedDevices.isEmpty() &&
             (profile->mChannelMasks.size() != 0) &&
             (profile->mSamplingRates.size() != 0) &&
             (profile->mFormats.size() != 0)) {
 
-        ALOGV("loadOutput() adding output mSupportedDevices %04x, mFlags %04x",
-              profile->mSupportedDevices, profile->mFlags);
+        ALOGV("loadOutput() adding output Supported Devices %04x, mFlags %04x",
+              profile->mSupportedDevices.types(), profile->mFlags);
 
         module->mOutputProfiles.add(profile);
         return NO_ERROR;
@@ -3965,14 +4164,6 @@
     HwModule *module = new HwModule(root->name);
 
     if (node != NULL) {
-        if (strcmp(root->name, AUDIO_HARDWARE_MODULE_ID_A2DP) == 0) {
-            mHasA2dp = true;
-        } else if (strcmp(root->name, AUDIO_HARDWARE_MODULE_ID_USB) == 0) {
-            mHasUsb = true;
-        } else if (strcmp(root->name, AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX) == 0) {
-            mHasRemoteSubmix = true;
-        }
-
         node = node->first_child;
         while (node) {
             ALOGV("loadHwModule() loading output %s", node->name);
@@ -4026,20 +4217,22 @@
     node = node->first_child;
     while (node) {
         if (strcmp(ATTACHED_OUTPUT_DEVICES_TAG, node->name) == 0) {
-            mAttachedOutputDevices = parseDeviceNames((char *)node->value);
-            ALOGW_IF(mAttachedOutputDevices == AUDIO_DEVICE_NONE,
-                    "loadGlobalConfig() no attached output devices");
-            ALOGV("loadGlobalConfig() mAttachedOutputDevices %04x", mAttachedOutputDevices);
+            mAvailableOutputDevices.loadDevicesFromType(parseDeviceNames((char *)node->value));
+            ALOGV("loadGlobalConfig() Attached Output Devices %08x",
+                  mAvailableOutputDevices.types());
         } else if (strcmp(DEFAULT_OUTPUT_DEVICE_TAG, node->name) == 0) {
-            mDefaultOutputDevice = (audio_devices_t)stringToEnum(sDeviceNameToEnumTable,
+            audio_devices_t device = (audio_devices_t)stringToEnum(sDeviceNameToEnumTable,
                                               ARRAY_SIZE(sDeviceNameToEnumTable),
                                               (char *)node->value);
-            ALOGW_IF(mDefaultOutputDevice == AUDIO_DEVICE_NONE,
-                    "loadGlobalConfig() default device not specified");
-            ALOGV("loadGlobalConfig() mDefaultOutputDevice %04x", mDefaultOutputDevice);
+            if (device != AUDIO_DEVICE_NONE) {
+                mDefaultOutputDevice = new DeviceDescriptor(device);
+            } else {
+                ALOGW("loadGlobalConfig() default device not specified");
+            }
+            ALOGV("loadGlobalConfig() mDefaultOutputDevice %08x", mDefaultOutputDevice->mType);
         } else if (strcmp(ATTACHED_INPUT_DEVICES_TAG, node->name) == 0) {
-            mAvailableInputDevices = parseDeviceNames((char *)node->value) & ~AUDIO_DEVICE_BIT_IN;
-            ALOGV("loadGlobalConfig() mAvailableInputDevices %04x", mAvailableInputDevices);
+            mAvailableInputDevices.loadDevicesFromType(parseDeviceNames((char *)node->value));
+            ALOGV("loadGlobalConfig() Available InputDevices %08x", mAvailableInputDevices.types());
         } else if (strcmp(SPEAKER_DRC_ENABLED_TAG, node->name) == 0) {
             mSpeakerDrcEnabled = stringToBool((char *)node->value);
             ALOGV("loadGlobalConfig() mSpeakerDrcEnabled = %d", mSpeakerDrcEnabled);
@@ -4076,10 +4269,9 @@
 {
     HwModule *module;
     IOProfile *profile;
-
-    mDefaultOutputDevice = AUDIO_DEVICE_OUT_SPEAKER;
-    mAttachedOutputDevices = AUDIO_DEVICE_OUT_SPEAKER;
-    mAvailableInputDevices = AUDIO_DEVICE_IN_BUILTIN_MIC & ~AUDIO_DEVICE_BIT_IN;
+    sp<DeviceDescriptor> defaultInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_BUILTIN_MIC);
+    mAvailableOutputDevices.add(mDefaultOutputDevice);
+    mAvailableInputDevices.add(defaultInputDevice);
 
     module = new HwModule("primary");
 
@@ -4087,7 +4279,7 @@
     profile->mSamplingRates.add(44100);
     profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT);
     profile->mChannelMasks.add(AUDIO_CHANNEL_OUT_STEREO);
-    profile->mSupportedDevices = AUDIO_DEVICE_OUT_SPEAKER;
+    profile->mSupportedDevices.add(mDefaultOutputDevice);
     profile->mFlags = AUDIO_OUTPUT_FLAG_PRIMARY;
     module->mOutputProfiles.add(profile);
 
@@ -4095,7 +4287,7 @@
     profile->mSamplingRates.add(8000);
     profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT);
     profile->mChannelMasks.add(AUDIO_CHANNEL_IN_MONO);
-    profile->mSupportedDevices = AUDIO_DEVICE_IN_BUILTIN_MIC;
+    profile->mSupportedDevices.add(defaultInputDevice);
     module->mInputProfiles.add(profile);
 
     mHwModules.add(module);
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index e00d8ab..8a631ba 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -30,7 +30,6 @@
 
 // ----------------------------------------------------------------------------
 
-#define MAX_DEVICE_ADDRESS_LEN 20
 // Attenuation applied to STRATEGY_SONIFICATION streams when a headset is connected: 6dB
 #define SONIFICATION_HEADSET_VOLUME_FACTOR 0.5
 // Min volume for STRATEGY_SONIFICATION streams when limited by music volume: -36dB
@@ -175,6 +174,47 @@
 
         class IOProfile;
 
+        class DeviceDescriptor: public RefBase
+        {
+        public:
+            DeviceDescriptor(audio_devices_t type, String8 address,
+                             audio_channel_mask_t channelMask) :
+                                 mType(type), mAddress(address),
+                                 mChannelMask(channelMask), mId(0) {}
+
+            DeviceDescriptor(audio_devices_t type) :
+                                 mType(type), mAddress(""),
+                                 mChannelMask(0), mId(0) {}
+
+            status_t dump(int fd, int spaces) const;
+            static void dumpHeader(int fd, int spaces);
+
+            bool equals(const sp<DeviceDescriptor>& other) const;
+
+            audio_devices_t mType;
+            String8 mAddress;
+            audio_channel_mask_t mChannelMask;
+            uint32_t mId;
+        };
+
+        class DeviceVector : public SortedVector< sp<DeviceDescriptor> >
+        {
+        public:
+            DeviceVector() : SortedVector(), mTypes(AUDIO_DEVICE_NONE) {}
+
+            ssize_t         add(const sp<DeviceDescriptor>& item);
+            ssize_t         remove(const sp<DeviceDescriptor>& item);
+            ssize_t         indexOf(const sp<DeviceDescriptor>& item) const;
+
+            audio_devices_t types() const { return mTypes; }
+
+            void loadDevicesFromType(audio_devices_t types);
+
+        private:
+            void refreshTypes();
+            audio_devices_t mTypes;
+        };
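
Together these two classes replace the old audio_devices_t bit fields: connection state is tracked with reference-counted descriptors, while the cached types() mask keeps the existing bitwise tests cheap. A short usage sketch (illustrative only; the device choices are arbitrary):

    DeviceVector outputs;
    outputs.add(new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER));
    outputs.add(new DeviceDescriptor(AUDIO_DEVICE_OUT_WIRED_HEADSET));
    if (outputs.types() & AUDIO_DEVICE_OUT_WIRED_HEADSET) {
        // a wired headset descriptor is present
    }
    // removal matches through DeviceDescriptor::equals(), so a type-only
    // descriptor is enough to remove the entry added above
    outputs.remove(new DeviceDescriptor(AUDIO_DEVICE_OUT_WIRED_HEADSET));
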
+
         class HwModule {
         public:
                     HwModule(const char *name);
@@ -213,8 +253,8 @@
             Vector <uint32_t> mSamplingRates; // supported sampling rates
             Vector <audio_channel_mask_t> mChannelMasks; // supported channel masks
             Vector <audio_format_t> mFormats; // supported audio formats
-            audio_devices_t mSupportedDevices; // supported devices (devices this output can be
-                                               // routed to)
+            DeviceVector  mSupportedDevices; // supported devices
+                                             // (devices this output can be routed to)
             audio_output_flags_t mFlags; // attribute flags (e.g primary output,
                                                 // direct output...). For outputs only.
             HwModule *mModule;                     // audio HW module exposing this I/O stream
@@ -411,7 +451,7 @@
         status_t checkOutputsForDevice(audio_devices_t device,
                                        audio_policy_dev_state_t state,
                                        SortedVector<audio_io_handle_t>& outputs,
-                                       const String8 paramStr);
+                                       const String8 address);
 
         // close an output and its companion duplicating output.
         void closeOutput(audio_io_handle_t output);
@@ -496,6 +536,9 @@
         static uint32_t stringToEnum(const struct StringToEnum *table,
                                      size_t size,
                                      const char *name);
+        static const char *enumToString(const struct StringToEnum *table,
+                                      size_t size,
+                                      uint32_t value);
         static bool stringToBool(const char *value);
         static audio_output_flags_t parseFlagNames(char *name);
         static audio_devices_t parseDeviceNames(char *name);
@@ -520,18 +563,14 @@
         // reset to mOutputs when updateDevicesAndOutputs() is called.
         DefaultKeyedVector<audio_io_handle_t, AudioOutputDescriptor *> mPreviousOutputs;
         DefaultKeyedVector<audio_io_handle_t, AudioInputDescriptor *> mInputs;     // list of input descriptors
-        audio_devices_t mAvailableOutputDevices; // bit field of all available output devices
-        audio_devices_t mAvailableInputDevices; // bit field of all available input devices
+        DeviceVector  mAvailableOutputDevices; // bit field of all available output devices
+        DeviceVector  mAvailableInputDevices; // bit field of all available input devices
                                                 // without AUDIO_DEVICE_BIT_IN to allow direct bit
                                                 // field comparisons
         int mPhoneState;                                                    // current phone state
         audio_policy_forced_cfg_t mForceUse[AUDIO_POLICY_FORCE_USE_CNT];   // current forced use configuration
 
         StreamDescriptor mStreams[AUDIO_STREAM_CNT];           // stream descriptors for volume control
-        String8 mA2dpDeviceAddress;                                         // A2DP device MAC address
-        String8 mScoDeviceAddress;                                          // SCO device MAC address
-        String8 mUsbCardAndDevice; // USB audio ALSA card and device numbers:
-                                   // card=<card_number>;device=<><device_number>
         bool    mLimitRingtoneVolume;                                       // limit ringtone volume to music volume if headset connected
         audio_devices_t mDeviceForStrategy[NUM_STRATEGIES];
         float   mLastVoiceVolume;                                           // last voice volume value sent to audio HAL
@@ -544,16 +583,12 @@
         uint32_t mTotalEffectsMemory;  // current memory used by effects
         KeyedVector<int, EffectDescriptor *> mEffects;  // list of registered audio effects
         bool    mA2dpSuspended;  // true if A2DP output is suspended
-        bool mHasA2dp; // true on platforms with support for bluetooth A2DP
-        bool mHasUsb; // true on platforms with support for USB audio
-        bool mHasRemoteSubmix; // true on platforms with support for remote presentation of a submix
-        audio_devices_t mAttachedOutputDevices; // output devices always available on the platform
-        audio_devices_t mDefaultOutputDevice; // output device selected by default at boot time
-                                              // (must be in mAttachedOutputDevices)
+        sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
         bool mSpeakerDrcEnabled;// true on devices that use DRC on the DEVICE_CATEGORY_SPEAKER path
                                 // to boost soft sounds, used to adjust volume curves accordingly
 
         Vector <HwModule *> mHwModules;
+        volatile int32_t mNextUniqueId;
 
 #ifdef AUDIO_POLICY_TEST
         Mutex   mLock;
@@ -577,6 +612,9 @@
         //    routing of notifications
         void handleNotificationRoutingForStream(audio_stream_type_t stream);
         static bool isVirtualInputDevice(audio_devices_t device);
+        uint32_t nextUniqueId();
+        // converts device address to string sent to audio HAL via setParameters
+        static String8 addressToParameter(audio_devices_t device, const String8 address);
 };
 
 };
diff --git a/services/audiopolicy/AudioPolicyService.cpp b/services/audiopolicy/AudioPolicyService.cpp
index 49145a5..4a708a0 100644
--- a/services/audiopolicy/AudioPolicyService.cpp
+++ b/services/audiopolicy/AudioPolicyService.cpp
@@ -60,7 +60,8 @@
 // ----------------------------------------------------------------------------
 
 AudioPolicyService::AudioPolicyService()
-    : BnAudioPolicyService(), mpAudioPolicyDev(NULL), mpAudioPolicy(NULL)
+    : BnAudioPolicyService(), mpAudioPolicyDev(NULL), mpAudioPolicy(NULL),
+      mAudioPolicyManager(NULL), mAudioPolicyClient(NULL)
 {
     char value[PROPERTY_VALUE_MAX];
     const struct hw_module_t *module;
@@ -75,12 +76,15 @@
     mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
     // start output activity command thread
     mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
+
+#ifdef USE_LEGACY_AUDIO_POLICY
+    ALOGI("AudioPolicyService CSTOR in legacy mode");
+
     /* instantiate the audio policy manager */
     rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
     if (rc) {
         return;
     }
-
     rc = audio_policy_dev_open(module, &mpAudioPolicyDev);
     ALOGE_IF(rc, "couldn't open audio policy device (%s)", strerror(-rc));
     if (rc) {
@@ -99,8 +103,13 @@
     if (rc) {
         return;
     }
-
     ALOGI("Loaded audio policy from %s (%s)", module->name, module->id);
+#else
+    ALOGI("AudioPolicyService CSTOR in new mode");
+
+    mAudioPolicyClient = new AudioPolicyClient(this);
+    mAudioPolicyManager = new AudioPolicyManager(mAudioPolicyClient);
+#endif
 
     // load audio pre processing modules
     if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) {
@@ -130,12 +139,17 @@
     }
     mInputs.clear();
 
+#ifdef USE_LEGACY_AUDIO_POLICY
     if (mpAudioPolicy != NULL && mpAudioPolicyDev != NULL) {
         mpAudioPolicyDev->destroy_audio_policy(mpAudioPolicyDev, mpAudioPolicy);
     }
     if (mpAudioPolicyDev != NULL) {
         audio_policy_dev_close(mpAudioPolicyDev);
     }
+#else
+    delete mAudioPolicyManager;
+    delete mAudioPolicyClient;
+#endif
 }
 
 
@@ -163,7 +177,11 @@
     char buffer[SIZE];
     String8 result;
 
+#ifdef USE_LEGACY_AUDIO_POLICY
     snprintf(buffer, SIZE, "PolicyManager Interface: %p\n", mpAudioPolicy);
+#else
+    snprintf(buffer, SIZE, "AudioPolicyManager: %p\n", mAudioPolicyManager);
+#endif
     result.append(buffer);
     snprintf(buffer, SIZE, "Command Thread: %p\n", mAudioCommandThread.get());
     result.append(buffer);
@@ -193,9 +211,15 @@
             mTonePlaybackThread->dump(fd);
         }
 
+#ifdef USE_LEGACY_AUDIO_POLICY
         if (mpAudioPolicy) {
             mpAudioPolicy->dump(mpAudioPolicy, fd);
         }
+#else
+        if (mAudioPolicyManager) {
+            mAudioPolicyManager->dump(fd);
+        }
+#endif
 
         if (locked) mLock.unlock();
     }
@@ -856,7 +880,7 @@
     return fx_param;
 
 error:
-    delete fx_param;
+    free(fx_param);
     return NULL;
 }
 
diff --git a/services/audiopolicy/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h
index ae053a9..cdc90d0 100644
--- a/services/audiopolicy/AudioPolicyService.h
+++ b/services/audiopolicy/AudioPolicyService.h
@@ -30,6 +30,8 @@
 #include <media/IAudioPolicyService.h>
 #include <media/ToneGenerator.h>
 #include <media/AudioEffect.h>
+#include <hardware_legacy/AudioPolicyInterface.h>
+#include "AudioPolicyManager.h"
 
 namespace android {
 
@@ -38,7 +40,6 @@
 class AudioPolicyService :
     public BinderService<AudioPolicyService>,
     public BnAudioPolicyService,
-//    public AudioPolicyClientInterface,
     public IBinder::DeathRecipient
 {
     friend class BinderService<AudioPolicyService>;
@@ -313,6 +314,91 @@
         Vector< sp<AudioEffect> >mEffects;
     };
 
+    class AudioPolicyClient : public AudioPolicyClientInterface
+    {
+     public:
+        AudioPolicyClient(AudioPolicyService *service) : mAudioPolicyService(service) {}
+        virtual ~AudioPolicyClient() {}
+
+        //
+        // Audio HW module functions
+        //
+
+        // loads a HW module.
+        virtual audio_module_handle_t loadHwModule(const char *name);
+
+        //
+        // Audio output Control functions
+        //
+
+        // opens an audio output with the requested parameters. The parameter values can be left at their
+        // defaults when the audio policy manager has no specific requirements for the output being opened.
+        // When the function returns, the parameter values reflect the actual values used by the audio hardware output stream.
+        // The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
+        virtual audio_io_handle_t openOutput(audio_module_handle_t module,
+                                             audio_devices_t *pDevices,
+                                             uint32_t *pSamplingRate,
+                                             audio_format_t *pFormat,
+                                             audio_channel_mask_t *pChannelMask,
+                                             uint32_t *pLatencyMs,
+                                             audio_output_flags_t flags,
+                                             const audio_offload_info_t *offloadInfo = NULL);
+        // creates a special output that is duplicated to the two outputs passed as arguments. The duplication is performed by
+        // a special mixer thread in the AudioFlinger.
+        virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, audio_io_handle_t output2);
+        // closes the output stream
+        virtual status_t closeOutput(audio_io_handle_t output);
+        // suspends the output. When an output is suspended, the corresponding audio hardware output stream is placed in
+        // standby and the AudioTracks attached to the mixer thread are still processed but the output mix is discarded.
+        virtual status_t suspendOutput(audio_io_handle_t output);
+        // restores a suspended output.
+        virtual status_t restoreOutput(audio_io_handle_t output);
+
+        //
+        // Audio input Control functions
+        //
+
+        // opens an audio input
+        virtual audio_io_handle_t openInput(audio_module_handle_t module,
+                                            audio_devices_t *pDevices,
+                                            uint32_t *pSamplingRate,
+                                            audio_format_t *pFormat,
+                                            audio_channel_mask_t *pChannelMask);
+        // closes an audio input
+        virtual status_t closeInput(audio_io_handle_t input);
+        //
+        // misc control functions
+        //
+
+        // set a stream volume for a particular output. For the same user setting, a given stream type can have different volumes
+        // for each output (destination device) it is attached to.
+        virtual status_t setStreamVolume(audio_stream_type_t stream, float volume, audio_io_handle_t output, int delayMs = 0);
+
+        // invalidate a stream type, causing a reroute to an unspecified new output
+        virtual status_t invalidateStream(audio_stream_type_t stream);
+
+        // sends proprietary information directly from the audio policy manager to the audio hardware interface.
+        virtual void setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs, int delayMs = 0);
+        // retrieves proprietary information directly from the audio hardware interface for the audio policy manager.
+        virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys);
+
+        // request the playback of a tone on the specified stream: used for instance to replace notification sounds when playing
+        // over a telephony device during a phone call.
+        virtual status_t startTone(audio_policy_tone_t tone, audio_stream_type_t stream);
+        virtual status_t stopTone();
+
+        // sets the downlink audio volume.
+        virtual status_t setVoiceVolume(float volume, int delayMs = 0);
+
+        // move effect to the specified output
+        virtual status_t moveEffects(int session,
+                                         audio_io_handle_t srcOutput,
+                                         audio_io_handle_t dstOutput);
+
+     private:
+        AudioPolicyService *mAudioPolicyService;
+    };
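
The bodies of these AudioPolicyClient methods are defined elsewhere and are not part of this hunk; each one is expected to forward either to AudioFlinger or back into AudioPolicyService. As a hedged sketch of the pattern, assuming the usual AudioSystem::get_audio_flinger() accessor:

    // Sketch only -- the real definitions live outside the hunks shown here.
    audio_module_handle_t AudioPolicyService::AudioPolicyClient::loadHwModule(const char *name)
    {
        sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
        if (af == 0) {
            ALOGW("%s: could not get AudioFlinger", __func__);
            return 0;
        }
        return af->loadHwModule(name);
    }
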
+
     static const char * const kInputSourceNames[AUDIO_SOURCE_CNT -1];
 
     void setPreProcessorEnabled(const InputDesc *inputDesc, bool enabled);
@@ -344,6 +430,9 @@
     sp<AudioCommandThread> mOutputCommandThread;    // process stop and release output
     struct audio_policy_device *mpAudioPolicyDev;
     struct audio_policy *mpAudioPolicy;
+    AudioPolicyManager *mAudioPolicyManager;
+    AudioPolicyClient *mAudioPolicyClient;
+
     KeyedVector< audio_source_t, InputSourceDesc* > mInputSources;
     KeyedVector< audio_io_handle_t, InputDesc* > mInputs;
 };
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index 4e2272d..2f485b9 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -74,6 +74,5 @@
 LOCAL_CFLAGS += -Wall -Wextra
 
 LOCAL_MODULE:= libcameraservice
-LOCAL_32_BIT_ONLY := true
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/services/camera/libcameraservice/CameraDeviceFactory.cpp b/services/camera/libcameraservice/CameraDeviceFactory.cpp
index 7fdf304..bfef50e 100644
--- a/services/camera/libcameraservice/CameraDeviceFactory.cpp
+++ b/services/camera/libcameraservice/CameraDeviceFactory.cpp
@@ -46,6 +46,8 @@
             device = new Camera2Device(cameraId);
             break;
         case CAMERA_DEVICE_API_VERSION_3_0:
+        case CAMERA_DEVICE_API_VERSION_3_1:
+        case CAMERA_DEVICE_API_VERSION_3_2:
             device = new Camera3Device(cameraId);
             break;
         default:
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index b83c315..5c6f653 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -206,7 +206,7 @@
              */
         }
 
-        ALOGV("%s: After unplug, disconnected %d clients",
+        ALOGV("%s: After unplug, disconnected %zu clients",
               __FUNCTION__, clientsToDisconnect.size());
     }
 
@@ -324,6 +324,8 @@
       case CAMERA_DEVICE_API_VERSION_2_0:
       case CAMERA_DEVICE_API_VERSION_2_1:
       case CAMERA_DEVICE_API_VERSION_3_0:
+      case CAMERA_DEVICE_API_VERSION_3_1:
+      case CAMERA_DEVICE_API_VERSION_3_2:
         return true;
       default:
         return false;
@@ -519,6 +521,8 @@
           case CAMERA_DEVICE_API_VERSION_2_0:
           case CAMERA_DEVICE_API_VERSION_2_1:
           case CAMERA_DEVICE_API_VERSION_3_0:
+          case CAMERA_DEVICE_API_VERSION_3_1:
+          case CAMERA_DEVICE_API_VERSION_3_2:
             client = new Camera2Client(this, cameraClient,
                     clientPackageName, cameraId,
                     facing, callingPid, clientUid, getpid(),
@@ -605,6 +609,8 @@
           case CAMERA_DEVICE_API_VERSION_2_0:
           case CAMERA_DEVICE_API_VERSION_2_1:
           case CAMERA_DEVICE_API_VERSION_3_0:
+          case CAMERA_DEVICE_API_VERSION_3_1:
+          case CAMERA_DEVICE_API_VERSION_3_2:
             client = new ProCamera2Client(this, cameraCb, String16(),
                     cameraId, facing, callingPid, USE_CALLING_UID, getpid());
             break;
@@ -683,6 +689,8 @@
           case CAMERA_DEVICE_API_VERSION_2_0:
           case CAMERA_DEVICE_API_VERSION_2_1:
           case CAMERA_DEVICE_API_VERSION_3_0:
+          case CAMERA_DEVICE_API_VERSION_3_1:
+          case CAMERA_DEVICE_API_VERSION_3_2:
             client = new CameraDeviceClient(this, cameraCb, String16(),
                     cameraId, facing, callingPid, USE_CALLING_UID, getpid());
             break;
@@ -1233,6 +1241,7 @@
         if (!mModule) {
             result = String8::format("No camera module available!\n");
             write(fd, result.string(), result.size());
+            if (locked) mServiceLock.unlock();
             return NO_ERROR;
         }
 
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 80b7cd4..0447979 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -118,7 +118,9 @@
             mZslProcessorThread = zslProc;
             break;
         }
-        case CAMERA_DEVICE_API_VERSION_3_0:{
+        case CAMERA_DEVICE_API_VERSION_3_0:
+        case CAMERA_DEVICE_API_VERSION_3_1:
+        case CAMERA_DEVICE_API_VERSION_3_2: {
             sp<ZslProcessor3> zslProc =
                     new ZslProcessor3(this, mCaptureSequencer);
             mZslProcessor = zslProc;
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 8a4ce4e..f5c28ed 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -18,6 +18,8 @@
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
 
+#include <inttypes.h>
+
 #include <utils/Log.h>
 #include <utils/Trace.h>
 #include <utils/Vector.h>
@@ -585,8 +587,8 @@
             ALOGE("No timestamp field in capture frame!");
         }
         if (entry.data.i64[0] != mCaptureTimestamp) {
-            ALOGW("Mismatched capture timestamps: Metadata frame %lld,"
-                    " captured buffer %lld",
+            ALOGW("Mismatched capture timestamps: Metadata frame %" PRId64 ","
+                    " captured buffer %" PRId64,
                     entry.data.i64[0],
                     mCaptureTimestamp);
         }
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 19acae4..dd5b27c 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -168,7 +168,7 @@
                 continue;
             }
             if (faceScores[i] > 100) {
-                ALOGW("%s: Face index %d with out of range score %d",
+                ALOGW("%s: Face index %zu with out of range score %d",
                         __FUNCTION__, i, faceScores[i]);
             }
 
diff --git a/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp b/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp
index 2f0c67d..9ecab71 100644
--- a/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegCompressor.cpp
@@ -197,7 +197,7 @@
 void JpegCompressor::jpegInitDestination(j_compress_ptr cinfo) {
     ALOGV("%s", __FUNCTION__);
     JpegDestination *dest= static_cast<JpegDestination*>(cinfo->dest);
-    ALOGV("%s: Setting destination to %p, size %d",
+    ALOGV("%s: Setting destination to %p, size %zu",
             __FUNCTION__, dest->parent->mJpegBuffer->data, kMaxJpegSize);
     dest->next_output_byte = (JOCTET*)(dest->parent->mJpegBuffer->data);
     dest->free_in_buffer = kMaxJpegSize;
@@ -213,7 +213,7 @@
 void JpegCompressor::jpegTermDestination(j_compress_ptr cinfo) {
     (void) cinfo; // TODO: clean up
     ALOGV("%s", __FUNCTION__);
-    ALOGV("%s: Done writing JPEG data. %d bytes left in buffer",
+    ALOGV("%s: Done writing JPEG data. %zu bytes left in buffer",
             __FUNCTION__, cinfo->dest->free_in_buffer);
 }
 
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index fb8bd27..964d278 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -243,7 +243,7 @@
         size_t heapSize = mCaptureHeap->getSize();
         if (jpegSize > heapSize) {
             ALOGW("%s: JPEG image is larger than expected, truncating "
-                    "(got %d, expected at most %d bytes)",
+                    "(got %zu, expected at most %zu bytes)",
                     __FUNCTION__, jpegSize, heapSize);
             jpegSize = heapSize;
         }
@@ -337,13 +337,13 @@
             size_t offset = size - MARKER_LENGTH;
             uint8_t *end = jpegBuffer + offset;
             if (checkJpegStart(jpegBuffer) && checkJpegEnd(end)) {
-                ALOGV("Found JPEG transport header, img size %d", size);
+                ALOGV("Found JPEG transport header, img size %zu", size);
                 return size;
             } else {
                 ALOGW("Found JPEG transport header with bad Image Start/End");
             }
         } else {
-            ALOGW("Found JPEG transport header with bad size %d", size);
+            ALOGW("Found JPEG transport header with bad size %zu", size);
         }
     }
 
@@ -359,15 +359,15 @@
         segment_t *segment = (segment_t*)(jpegBuffer + size);
         uint8_t type = checkJpegMarker(segment->marker);
         if (type == 0) { // invalid marker, no more segments, begin JPEG data
-            ALOGV("JPEG stream found beginning at offset %d", size);
+            ALOGV("JPEG stream found beginning at offset %zu", size);
             break;
         }
         if (type == EOI || size > maxSize - sizeof(segment_t)) {
-            ALOGE("Got premature End before JPEG data, offset %d", size);
+            ALOGE("Got premature End before JPEG data, offset %zu", size);
             return 0;
         }
         size_t length = ntohs(segment->length);
-        ALOGV("JFIF Segment, type %x length %x", type, length);
+        ALOGV("JFIF Segment, type %x length %zx", type, length);
         size += length + MARKER_LENGTH;
     }
 
@@ -387,10 +387,10 @@
     }
 
     if (size > maxSize) {
-        ALOGW("JPEG size %d too large, reducing to maxSize %d", size, maxSize);
+        ALOGW("JPEG size %zu too large, reducing to maxSize %zu", size, maxSize);
         size = maxSize;
     }
-    ALOGV("Final JPEG size %d", size);
+    ALOGV("Final JPEG size %zu", size);
     return size;
 }
 
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 2cf0d29..5bfb969 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -944,7 +944,7 @@
         if (sceneModeOverrides.count !=
                 availableSceneModes.count * kModesPerSceneMode) {
             ALOGE("%s: Camera %d: Scene mode override list is an "
-                    "unexpected size: %d (expected %d)", __FUNCTION__,
+                    "unexpected size: %zu (expected %zu)", __FUNCTION__,
                     cameraId, sceneModeOverrides.count,
                     availableSceneModes.count);
             return NO_INIT;
@@ -1074,7 +1074,7 @@
         const char* tagName = get_camera_metadata_tag_name(tag);
         if (tagName == NULL) tagName = "<unknown>";
         ALOGE("Malformed static metadata entry '%s.%s' (%x):"
-                "Expected between %d and %d values, but got %d values",
+                "Expected between %zu and %zu values, but got %zu values",
                 tagSection, tagName, tag, minCount, maxCount, entry.count);
     }
 
@@ -2351,7 +2351,7 @@
     }
 
     if (areas.size() > maxRegions) {
-        ALOGE("%s: Too many areas requested: %d",
+        ALOGE("%s: Too many areas requested: %zu",
                 __FUNCTION__, areas.size());
         return BAD_VALUE;
     }
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index 1844ea3..2064e2c 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -225,14 +225,14 @@
     ATRACE_CALL();
     // Make sure we can support this many buffer slots
     if (count > BufferQueue::NUM_BUFFER_SLOTS) {
-        ALOGE("%s: Camera %d: Too many recording buffers requested: %d, max %d",
+        ALOGE("%s: Camera %d: Too many recording buffers requested: %zu, max %d",
                 __FUNCTION__, mId, count, BufferQueue::NUM_BUFFER_SLOTS);
         return BAD_VALUE;
     }
 
     Mutex::Autolock m(mMutex);
 
-    ALOGV("%s: Camera %d: New recording buffer count from encoder: %d",
+    ALOGV("%s: Camera %d: New recording buffer count from encoder: %zu",
             __FUNCTION__, mId, count);
 
     // Need to re-size consumer and heap
@@ -314,7 +314,7 @@
 
     bool newConsumer = false;
     if (mRecordingConsumer == 0) {
-        ALOGV("%s: Camera %d: Creating recording consumer with %d + 1 "
+        ALOGV("%s: Camera %d: Creating recording consumer with %zu + 1 "
                 "consumer-side buffers", __FUNCTION__, mId, mRecordingHeapCount);
         // Create CPU buffer queue endpoint. We need one more buffer here so that we can
         // always acquire and free a buffer when the heap is full; otherwise the consumer
@@ -437,7 +437,7 @@
         releaseAllRecordingFramesLocked();
     }
 
-    ALOGV("%s: Camera %d: %s started, recording heap has %d free of %d",
+    ALOGV("%s: Camera %d: %s started, recording heap has %zu free of %zu",
             __FUNCTION__, mId, (type == PREVIEW) ? "preview" : "recording",
             mRecordingHeapFree, mRecordingHeapCount);
 
@@ -660,8 +660,8 @@
 
         if (mRecordingHeap == 0) {
             const size_t bufferSize = 4 + sizeof(buffer_handle_t);
-            ALOGV("%s: Camera %d: Creating recording heap with %d buffers of "
-                    "size %d bytes", __FUNCTION__, mId,
+            ALOGV("%s: Camera %d: Creating recording heap with %zu buffers of "
+                    "size %zu bytes", __FUNCTION__, mId,
                     mRecordingHeapCount, bufferSize);
 
             mRecordingHeap = new Camera2Heap(bufferSize, mRecordingHeapCount,
@@ -821,10 +821,10 @@
     }
 
     if (releasedCount > 0) {
-        ALOGW("%s: Camera %d: Force-freed %d outstanding buffers "
+        ALOGW("%s: Camera %d: Force-freed %zu outstanding buffers "
                 "from previous recording session", __FUNCTION__, mId, releasedCount);
         ALOGE_IF(releasedCount != mRecordingHeapCount - mRecordingHeapFree,
-            "%s: Camera %d: Force-freed %d buffers, but expected %d",
+            "%s: Camera %d: Force-freed %zu buffers, but expected %zu",
             __FUNCTION__, mId, releasedCount, mRecordingHeapCount - mRecordingHeapFree);
     }
 
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 07381ae..6ab9e1a 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -25,6 +25,8 @@
 #define ALOGVV(...) ((void)0)
 #endif
 
+#include <inttypes.h>
+
 #include <utils/Log.h>
 #include <utils/Trace.h>
 #include <gui/Surface.h>
@@ -78,7 +80,7 @@
     entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
     nsecs_t timestamp = entry.data.i64[0];
     (void)timestamp;
-    ALOGVV("Got preview frame for timestamp %lld", timestamp);
+    ALOGVV("Got preview frame for timestamp %" PRId64, timestamp);
 
     if (mState != RUNNING) return;
 
@@ -463,7 +465,7 @@
 
     mZslQueueHead = (mZslQueueHead + 1) % kZslBufferDepth;
 
-    ALOGVV("  Acquired buffer, timestamp %lld", queueHead.buffer.mTimestamp);
+    ALOGVV("  Acquired buffer, timestamp %" PRId64, queueHead.buffer.mTimestamp);
 
     findMatchesLocked();
 
@@ -482,7 +484,7 @@
                 entry = queueEntry.frame.find(ANDROID_SENSOR_TIMESTAMP);
                 frameTimestamp = entry.data.i64[0];
             }
-            ALOGVV("   %d: b: %lld\tf: %lld", i,
+            ALOGVV("   %d: b: %" PRId64 "\tf: %" PRId64, i,
                     bufferTimestamp, frameTimestamp );
         }
         if (queueEntry.frame.isEmpty() && bufferTimestamp != 0) {
@@ -500,13 +502,13 @@
                     }
                     nsecs_t frameTimestamp = entry.data.i64[0];
                     if (bufferTimestamp == frameTimestamp) {
-                        ALOGVV("%s: Found match %lld", __FUNCTION__,
+                        ALOGVV("%s: Found match %" PRId64, __FUNCTION__,
                                 frameTimestamp);
                         match = true;
                     } else {
                         int64_t delta = abs(bufferTimestamp - frameTimestamp);
                         if ( delta < 1000000) {
-                            ALOGVV("%s: Found close match %lld (delta %lld)",
+                            ALOGVV("%s: Found close match %" PRId64 " (delta %" PRId64 ")",
                                     __FUNCTION__, bufferTimestamp, delta);
                             match = true;
                         }
@@ -542,7 +544,7 @@
             if (entry.count > 0) frameAeState = entry.data.u8[0];
         }
         String8 result =
-                String8::format("   %zu: b: %lld\tf: %lld, AE state: %d", i,
+                String8::format("   %zu: b: %" PRId64 "\tf: %" PRId64 ", AE state: %d", i,
                         bufferTimestamp, frameTimestamp, frameAeState);
         ALOGV("%s", result.string());
         if (fd != -1) {
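
The format-string changes in this and the following files all rely on the same portable-width pattern: <inttypes.h> supplies PRId64/PRIu64 for 64-bit integers, while %zu/%zd cover size_t/ssize_t, so the strings stay correct on both 32-bit and 64-bit builds. A minimal standalone sketch of that pattern (illustration only, not part of this patch):

    // Illustration only: portable printf conversions for the types logged above.
    #include <cinttypes>  // PRId64, PRIu64
    #include <cstdio>

    int main() {
        int64_t  timestamp   = 1234567890123LL;  // e.g. a sensor timestamp in ns
        uint64_t frameNumber = 42;               // e.g. a BufferItem frame number
        size_t   count       = 8;                // e.g. a buffer count

        std::printf("timestamp %" PRId64 ", frame %" PRIu64 ", count %zu\n",
                    timestamp, frameNumber, count);
        return 0;
    }
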
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index c1d0496..3949b90 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -25,6 +25,8 @@
 #define ALOGVV(...) ((void)0)
 #endif
 
+#include <inttypes.h>
+
 #include <utils/Log.h>
 #include <utils/Trace.h>
 #include <gui/Surface.h>
@@ -68,7 +70,7 @@
     entry = frame.find(ANDROID_SENSOR_TIMESTAMP);
     nsecs_t timestamp = entry.data.i64[0];
     (void)timestamp;
-    ALOGVV("Got preview metadata for timestamp %lld", timestamp);
+    ALOGVV("Got preview metadata for timestamp %" PRId64, timestamp);
 
     if (mState != RUNNING) return;
 
@@ -364,7 +366,7 @@
             if (entry.count > 0) frameAeState = entry.data.u8[0];
         }
         String8 result =
-                String8::format("   %zu: b: %lld\tf: %lld, AE state: %d", i,
+                String8::format("   %zu: b: %" PRId64 "\tf: %" PRId64 ", AE state: %d", i,
                         bufferTimestamp, frameTimestamp, frameAeState);
         ALOGV("%s", result.string());
         if (fd != -1) {
@@ -424,7 +426,7 @@
                 idx = j;
             }
 
-            ALOGVV("%s: Saw timestamp %lld", __FUNCTION__, frameTimestamp);
+            ALOGVV("%s: Saw timestamp %" PRId64, __FUNCTION__, frameTimestamp);
         }
     }
 
@@ -444,7 +446,7 @@
         ALOGW("%s: ZSL queue has no metadata frames", __FUNCTION__);
     }
 
-    ALOGV("%s: Candidate timestamp %lld (idx %d), empty frames: %d",
+    ALOGV("%s: Candidate timestamp %" PRId64 " (idx %zu), empty frames: %zu",
           __FUNCTION__, minTimestamp, idx, emptyCount);
 
     if (metadataIdx) {
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 187220e..1c9a342 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -159,7 +159,7 @@
 
     int32_t requestId = mRequestIdCounter++;
     metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1);
-    ALOGV("%s: Camera %d: Submitting request with ID %d",
+    ALOGV("%s: Camera %d: Creating request with ID %d",
           __FUNCTION__, mCameraId, requestId);
 
     if (streaming) {
@@ -186,6 +186,116 @@
     return res;
 }
 
+status_t CameraDeviceClient::submitRequestList(List<sp<CaptureRequest> > requests,
+                                               bool streaming) {
+    ATRACE_CALL();
+    ALOGV("%s-start of function", __FUNCTION__);
+
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    if (requests.empty()) {
+        ALOGE("%s: Camera %d: Sent null request. Rejecting request.",
+              __FUNCTION__, mCameraId);
+        return BAD_VALUE;
+    }
+
+    List<const CameraMetadata> metadataRequestList;
+    int32_t requestId = mRequestIdCounter;
+    uint32_t loopCounter = 0;
+
+    for (List<sp<CaptureRequest> >::iterator it = requests.begin(); it != requests.end(); ++it) {
+        sp<CaptureRequest> request = *it;
+        if (request == 0) {
+            ALOGE("%s: Camera %d: Sent null request.",
+                    __FUNCTION__, mCameraId);
+            return BAD_VALUE;
+        }
+
+        CameraMetadata metadata(request->mMetadata);
+        if (metadata.isEmpty()) {
+            ALOGE("%s: Camera %d: Sent empty metadata packet. Rejecting request.",
+                   __FUNCTION__, mCameraId);
+            return BAD_VALUE;
+        } else if (request->mSurfaceList.isEmpty()) {
+            ALOGE("%s: Camera %d: Requests must have at least one surface target. "
+                  "Rejecting request.", __FUNCTION__, mCameraId);
+            return BAD_VALUE;
+        }
+
+        if (!enforceRequestPermissions(metadata)) {
+            // Callee logs
+            return PERMISSION_DENIED;
+        }
+
+        /**
+         * Write the output stream IDs, computed from the capture request's
+         * list of surface targets, into the request metadata.
+         */
+        Vector<int32_t> outputStreamIds;
+        outputStreamIds.setCapacity(request->mSurfaceList.size());
+        for (Vector<sp<Surface> >::iterator surfaceIt = 0;
+                surfaceIt != request->mSurfaceList.end(); ++surfaceIt) {
+            sp<Surface> surface = *surfaceIt;
+            if (surface == 0) continue;
+
+            sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer();
+            int idx = mStreamMap.indexOfKey(gbp->asBinder());
+
+            // Trying to submit request with surface that wasn't created
+            if (idx == NAME_NOT_FOUND) {
+                ALOGE("%s: Camera %d: Tried to submit a request with a surface that"
+                      " we have not called createStream on",
+                      __FUNCTION__, mCameraId);
+                return BAD_VALUE;
+            }
+
+            int streamId = mStreamMap.valueAt(idx);
+            outputStreamIds.push_back(streamId);
+            ALOGV("%s: Camera %d: Appending output stream %d to request",
+                  __FUNCTION__, mCameraId, streamId);
+        }
+
+        metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
+                        outputStreamIds.size());
+
+        metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1);
+        loopCounter++; // loopCounter starts at 1 so the log reads "1 of N"
+        ALOGV("%s: Camera %d: Creating request with ID %d (%u of %zu)",
+              __FUNCTION__, mCameraId, requestId, loopCounter, requests.size());
+
+        metadataRequestList.push_back(metadata);
+    }
+    mRequestIdCounter++;
+
+    if (streaming) {
+        res = mDevice->setStreamingRequestList(metadataRequestList);
+        if (res != OK) {
+            ALOGE("%s: Camera %d:  Got error %d after trying to set streaming "
+                  "request", __FUNCTION__, mCameraId, res);
+        } else {
+            mStreamingRequestList.push_back(requestId);
+        }
+    } else {
+        res = mDevice->captureList(metadataRequestList);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Got error %d after trying to set capture",
+                __FUNCTION__, mCameraId, res);
+        }
+    }
+
+    ALOGV("%s: Camera %d: End of function", __FUNCTION__, mCameraId);
+    if (res == OK) {
+        return requestId;
+    }
+
+    return res;
+}
+
 status_t CameraDeviceClient::cancelRequest(int requestId) {
     ATRACE_CALL();
     ALOGV("%s, requestId = %d", __FUNCTION__, requestId);
@@ -284,7 +394,7 @@
         ssize_t index = mStreamMap.indexOfKey(bufferProducer->asBinder());
         if (index != NAME_NOT_FOUND) {
             ALOGW("%s: Camera %d: Buffer producer already has a stream for it "
-                  "(ID %d)",
+                  "(ID %zd)",
                   __FUNCTION__, mCameraId, index);
             return ALREADY_EXISTS;
         }
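
As a rough illustration of how the new burst entry point might be exercised, here is a hedged caller-side sketch; submitBurst(), buildRequest(), and the surface parameters below are invented names and are not part of this patch:

    // Hypothetical caller sketch: submit a two-request burst through the new API.
    // CameraDeviceClient, CaptureRequest and buildRequest() are assumed to be
    // available in the calling code.
    status_t submitBurst(const sp<CameraDeviceClient>& device,
                         const sp<Surface>& previewSurface,
                         const sp<Surface>& jpegSurface) {
        List<sp<CaptureRequest> > burst;
        burst.push_back(buildRequest(previewSurface));  // buildRequest() is assumed
        burst.push_back(buildRequest(jpegSurface));     // to fill mMetadata/mSurfaceList

        // streaming = false means a one-shot burst; on success the shared request
        // ID (>= 0) is returned, otherwise a negative status_t error.
        status_t res = device->submitRequestList(burst, /*streaming*/ false);
        if (res < 0) {
            ALOGE("Burst submission failed: %s (%d)", strerror(-res), res);
        }
        return res;
    }
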
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index b9c16aa..e96e1ae 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -63,8 +63,11 @@
      */
 
     // Note that the callee gets a copy of the metadata.
-    virtual int           submitRequest(sp<CaptureRequest> request,
-                                        bool streaming = false);
+    virtual status_t           submitRequest(sp<CaptureRequest> request,
+                                             bool streaming = false);
+    // Note that the callee gets a copy of the request list.
+    virtual status_t           submitRequestList(List<sp<CaptureRequest> > requests,
+                                                 bool streaming = false);
     virtual status_t      cancelRequest(int requestId);
 
     // Returns -EBUSY if device is not idle
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 2d1253f..6a88c87 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -18,6 +18,8 @@
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
 
+#include <inttypes.h>
+
 #include <utils/Log.h>
 #include <utils/Trace.h>
 
@@ -236,7 +238,7 @@
     (void)requestId;
     (void)timestamp;
 
-    ALOGV("%s: Shutter notification for request id %d at time %lld",
+    ALOGV("%s: Shutter notification for request id %d at time %" PRId64,
             __FUNCTION__, requestId, timestamp);
 }
 
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index e80abf1..a4ae179 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -22,6 +22,7 @@
 #include <utils/String16.h>
 #include <utils/Vector.h>
 #include <utils/Timers.h>
+#include <utils/List.h>
 
 #include "hardware/camera2.h"
 #include "camera/CameraMetadata.h"
@@ -58,12 +59,22 @@
     virtual status_t capture(CameraMetadata &request) = 0;
 
     /**
+     * Submit a list of requests.
+     */
+    virtual status_t captureList(const List<const CameraMetadata> &requests) = 0;
+
+    /**
      * Submit request for streaming. The CameraDevice makes a copy of the
      * passed-in buffer and the caller retains ownership.
      */
     virtual status_t setStreamingRequest(const CameraMetadata &request) = 0;
 
     /**
+     * Submit a list of requests for streaming.
+     */
+    virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests) = 0;
+
+    /**
      * Clear the streaming request slot.
      */
     virtual status_t clearStreamingRequest() = 0;
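
A rough call-site sketch of how the new list-taking interface can be fed; the helper and parameter names below are hypothetical, and the copying behaviour mirrors how CameraDeviceClient::submitRequestList() assembles its metadataRequestList:

    // Hypothetical call-site sketch (names invented): each CameraMetadata is
    // copied into the List before it is handed to the device.
    status_t submitMetadataBurst(const sp<CameraDeviceBase>& device,
                                 const CameraMetadata& previewSettings,
                                 const CameraMetadata& stillSettings) {
        List<const CameraMetadata> burst;
        burst.push_back(previewSettings);
        burst.push_back(stillSettings);

        return device->captureList(burst);                    // one-shot burst
        // or: return device->setStreamingRequestList(burst); // repeating burst
    }
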
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
index f2064fb..4d31667 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -183,7 +183,7 @@
             item++;
         }
     }
-    ALOGV("Got %d range listeners out of %d", listeners.size(), mRangeListeners.size());
+    ALOGV("Got %zu range listeners out of %zu", listeners.size(), mRangeListeners.size());
     List<sp<FilteredListener> >::iterator item = listeners.begin();
     for (; item != listeners.end(); item++) {
         (*item)->onFrameAvailable(requestId, frame);
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index f60ca98..0cc3a04 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -207,6 +207,12 @@
     return OK;
 }
 
+status_t Camera2Device::captureList(const List<const CameraMetadata> &requests) {
+    ATRACE_CALL();
+    ALOGE("%s: Camera2Device burst capture not implemented", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+
 
 status_t Camera2Device::setStreamingRequest(const CameraMetadata &request) {
     ATRACE_CALL();
@@ -215,6 +221,12 @@
     return mRequestQueue.setStreamSlot(streamRequest.release());
 }
 
+status_t Camera2Device::setStreamingRequestList(const List<const CameraMetadata> &requests) {
+    ATRACE_CALL();
+    ALOGE("%s, Camera2Device streaming burst not implemented", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+
 status_t Camera2Device::clearStreamingRequest() {
     ATRACE_CALL();
     return mRequestQueue.setStreamSlot(NULL);
@@ -987,7 +999,7 @@
         return BAD_VALUE;
     }
 
-    ALOGV("%s: New stream parameters %d x %d, format 0x%x, size %d",
+    ALOGV("%s: New stream parameters %d x %d, format 0x%x, size %zu",
             __FUNCTION__, width, height, format, size);
 
     mConsumerInterface = consumer;
@@ -1059,7 +1071,7 @@
                 mSize, 1, mFormat);
         if (res != OK) {
             ALOGE("%s: Unable to configure compressed stream buffer geometry"
-                    " %d x %d, size %d for stream %d",
+                    " %d x %d, size %zu for stream %d",
                     __FUNCTION__, mWidth, mHeight, mSize, mId);
             return res;
         }
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index 5b91f88..61bfd1a 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -48,7 +48,9 @@
     virtual status_t dump(int fd, const Vector<String16>& args);
     virtual const CameraMetadata& info() const;
     virtual status_t capture(CameraMetadata &request);
+    virtual status_t captureList(const List<const CameraMetadata> &requests);
     virtual status_t setStreamingRequest(const CameraMetadata &request);
+    virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests);
     virtual status_t clearStreamingRequest();
     virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
     virtual status_t createStream(sp<ANativeWindow> consumer,
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 08e03ce..f586e75 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -37,6 +37,8 @@
     "%s: " fmt, __FUNCTION__,                    \
     ##__VA_ARGS__)
 
+#include <inttypes.h>
+
 #include <utils/Log.h>
 #include <utils/Trace.h>
 #include <utils/Timers.h>
@@ -112,9 +114,9 @@
 
     /** Cross-check device version */
 
-    if (device->common.version != CAMERA_DEVICE_API_VERSION_3_0) {
+    if (device->common.version < CAMERA_DEVICE_API_VERSION_3_0) {
         SET_ERR_L("Could not open camera: "
-                "Camera device is not version %x, reports %x instead",
+                "Camera device should be at least %x, reports %x instead",
                 CAMERA_DEVICE_API_VERSION_3_0,
                 device->common.version);
         device->common.close(&device->common);
@@ -128,7 +130,7 @@
     if (info.device_version != device->common.version) {
         SET_ERR_L("HAL reporting mismatched camera_info version (%x)"
                 " and device version (%x).",
-                device->common.version, info.device_version);
+                info.device_version, device->common.version);
         device->common.close(&device->common);
         return BAD_VALUE;
     }
@@ -331,7 +333,7 @@
     } else {
         for (size_t i = 0; i < mInFlightMap.size(); i++) {
             InFlightRequest r = mInFlightMap.valueAt(i);
-            lines.appendFormat("      Frame %d |  Timestamp: %lld, metadata"
+            lines.appendFormat("      Frame %d |  Timestamp: %" PRId64 ", metadata"
                     " arrived: %s, buffers left: %d\n", mInFlightMap.keyAt(i),
                     r.captureTimestamp, r.haveResultMetadata ? "true" : "false",
                     r.numBuffersLeft);
@@ -370,6 +372,45 @@
     return mDeviceInfo;
 }
 
+status_t Camera3Device::checkStatusOkToCaptureLocked() {
+    switch (mStatus) {
+        case STATUS_ERROR:
+            CLOGE("Device has encountered a serious error");
+            return INVALID_OPERATION;
+        case STATUS_UNINITIALIZED:
+            CLOGE("Device not initialized");
+            return INVALID_OPERATION;
+        case STATUS_UNCONFIGURED:
+        case STATUS_CONFIGURED:
+        case STATUS_ACTIVE:
+            // OK
+            break;
+        default:
+            SET_ERR_L("Unexpected status: %d", mStatus);
+            return INVALID_OPERATION;
+    }
+    return OK;
+}
+
+status_t Camera3Device::convertMetadataListToRequestListLocked(
+        const List<const CameraMetadata> &metadataList, RequestList *requestList) {
+    if (requestList == NULL) {
+        CLOGE("requestList cannot be NULL.");
+        return BAD_VALUE;
+    }
+
+    for (List<const CameraMetadata>::const_iterator it = metadataList.begin();
+            it != metadataList.end(); ++it) {
+        sp<CaptureRequest> newRequest = setUpRequestLocked(*it);
+        if (newRequest == 0) {
+            CLOGE("Can't create capture request");
+            return BAD_VALUE;
+        }
+        requestList->push_back(newRequest);
+    }
+    return OK;
+}
+
 status_t Camera3Device::capture(CameraMetadata &request) {
     ATRACE_CALL();
     status_t res;
@@ -410,10 +451,59 @@
                     kActiveTimeout/1e9);
         }
         ALOGV("Camera %d: Capture request enqueued", mId);
+    } else {
+        CLOGE("Cannot queue request. Impossible."); // queueRequest always returns OK.
+        return BAD_VALUE;
     }
     return res;
 }
 
+status_t Camera3Device::submitRequestsHelper(
+        const List<const CameraMetadata> &requests, bool repeating) {
+    ATRACE_CALL();
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    status_t res = checkStatusOkToCaptureLocked();
+    if (res != OK) {
+        // error logged by previous call
+        return res;
+    }
+
+    RequestList requestList;
+
+    res = convertMetadataListToRequestListLocked(requests, /*out*/&requestList);
+    if (res != OK) {
+        // error logged by previous call
+        return res;
+    }
+
+    if (repeating) {
+        res = mRequestThread->setRepeatingRequests(requestList);
+    } else {
+        res = mRequestThread->queueRequestList(requestList);
+    }
+
+    if (res == OK) {
+        res = waitUntilStateThenRelock(/*active*/true, kActiveTimeout);
+        if (res != OK) {
+            SET_ERR_L("Can't transition to active in %f seconds!",
+                    kActiveTimeout/1e9);
+        }
+        ALOGV("Camera %d: Capture request enqueued", mId);
+    } else {
+        CLOGE("Cannot queue request. Impossible.");
+        return BAD_VALUE;
+    }
+
+    return res;
+}
+
+status_t Camera3Device::captureList(const List<const CameraMetadata> &requests) {
+    ATRACE_CALL();
+
+    return submitRequestsHelper(requests, /*repeating*/false);
+}
 
 status_t Camera3Device::setStreamingRequest(const CameraMetadata &request) {
     ATRACE_CALL();
@@ -460,6 +550,11 @@
     return res;
 }
 
+status_t Camera3Device::setStreamingRequestList(const List<const CameraMetadata> &requests) {
+    ATRACE_CALL();
+
+    return submitRequestsHelper(requests, /*repeating*/true);
+}
 
 sp<Camera3Device::CaptureRequest> Camera3Device::setUpRequestLocked(
         const CameraMetadata &request) {
@@ -662,7 +757,7 @@
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
-    ALOGV("Camera %d: Creating new stream %d: %d x %d, format %d, size %d",
+    ALOGV("Camera %d: Creating new stream %d: %d x %d, format %d, size %zu",
             mId, mNextStreamId, width, height, format, size);
 
     status_t res;
@@ -904,6 +999,10 @@
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
 
+    return waitUntilDrainedLocked();
+}
+
+status_t Camera3Device::waitUntilDrainedLocked() {
     switch (mStatus) {
         case STATUS_UNINITIALIZED:
         case STATUS_UNCONFIGURED:
@@ -1008,7 +1107,7 @@
         if (res == TIMED_OUT) {
             return res;
         } else if (res != OK) {
-            ALOGW("%s: Camera %d: No frame in %lld ns: %s (%d)",
+            ALOGW("%s: Camera %d: No frame in %" PRId64 " ns: %s (%d)",
                     __FUNCTION__, mId, timeout, strerror(-res), res);
             return res;
         }
@@ -1114,7 +1213,7 @@
     if (mHal3Device->common.version >= CAMERA_DEVICE_API_VERSION_3_1) {
         res = mHal3Device->ops->flush(mHal3Device);
     } else {
-        res = waitUntilDrained();
+        res = waitUntilDrainedLocked();
     }
 
     return res;
@@ -1672,7 +1771,7 @@
         // Sanity check - if we have too many in-flight frames, something has
         // likely gone wrong
         if (mInFlightMap.size() > kInFlightWarnLimit) {
-            CLOGE("In-flight list too large: %d", mInFlightMap.size());
+            CLOGE("In-flight list too large: %zu", mInFlightMap.size());
         }
 
     }
@@ -1722,7 +1821,7 @@
             gotResult = false;
         } else if (timestamp != entry.data.i64[0]) {
             SET_ERR("Timestamp mismatch between shutter notify and result"
-                    " metadata for frame %d (%lld vs %lld respectively)",
+                    " metadata for frame %d (%" PRId64 " vs %" PRId64 " respectively)",
                     frameNumber, timestamp, entry.data.i64[0]);
             gotResult = false;
         }
@@ -1744,7 +1843,7 @@
         // Note: stream may be deallocated at this point, if this buffer was the
         // last reference to it.
         if (res != OK) {
-            ALOGE("Can't return buffer %d for frame %d to its stream: "
+            ALOGE("Can't return buffer %zu for frame %d to its stream: "
                     " %s (%d)", i, frameNumber, strerror(-res), res);
         }
     }
@@ -1834,7 +1933,7 @@
                         frameNumber);
                 break;
             }
-            ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %lld",
+            ALOGVV("Camera %d: %s: Shutter fired for frame %d (id %d) at %" PRId64,
                     mId, __FUNCTION__, frameNumber, requestId, timestamp);
             // Call listener, if any
             if (listener != NULL) {
@@ -1895,6 +1994,19 @@
     return OK;
 }
 
+status_t Camera3Device::RequestThread::queueRequestList(
+        List<sp<CaptureRequest> > &requests) {
+    Mutex::Autolock l(mRequestLock);
+    for (List<sp<CaptureRequest> >::iterator it = requests.begin(); it != requests.end();
+            ++it) {
+        mRequestQueue.push_back(*it);
+    }
+
+    unpauseForNewRequests();
+
+    return OK;
+}
+
 
 status_t Camera3Device::RequestThread::queueTrigger(
         RequestTrigger trigger[],
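
The waitUntilDrained()/waitUntilDrainedLocked() split above, which flush() now reuses, follows the usual locked-helper idiom: public entry points take the non-recursive mutex once, and private *Locked() helpers assume it is already held, so one locked path can call another without self-deadlocking. A minimal self-contained sketch of the idea; the DeviceSketch class is invented for illustration and assumes a plain android::Mutex:

    #include <utils/Errors.h>
    #include <utils/Mutex.h>

    using namespace android;

    class DeviceSketch {
      public:
        status_t waitUntilDrained() {
            Mutex::Autolock l(mLock);
            return waitUntilDrainedLocked();
        }
        status_t flush() {
            Mutex::Autolock l(mLock);
            // ... device-specific flush work would go here ...
            return waitUntilDrainedLocked();  // mLock already held, no re-lock
        }
      private:
        status_t waitUntilDrainedLocked() {   // caller must hold mLock
            // ... wait for in-flight requests to complete ...
            return OK;
        }
        mutable Mutex mLock;
    };
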
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 9007a9b..ed58246 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -54,7 +54,7 @@
 }
 
 /**
- * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_3_0
+ * CameraDevice for HAL devices with version CAMERA_DEVICE_API_VERSION_3_0 or higher.
  */
 class Camera3Device :
             public CameraDeviceBase,
@@ -79,7 +79,9 @@
     // Capture and setStreamingRequest will configure streams if currently in
     // idle state
     virtual status_t capture(CameraMetadata &request);
+    virtual status_t captureList(const List<const CameraMetadata> &requests);
     virtual status_t setStreamingRequest(const CameraMetadata &request);
+    virtual status_t setStreamingRequestList(const List<const CameraMetadata> &requests);
     virtual status_t clearStreamingRequest();
 
     virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
@@ -201,6 +203,14 @@
     };
     typedef List<sp<CaptureRequest> > RequestList;
 
+    status_t checkStatusOkToCaptureLocked();
+
+    status_t convertMetadataListToRequestListLocked(
+            const List<const CameraMetadata> &metadataList,
+            /*out*/RequestList *requestList);
+
+    status_t submitRequestsHelper(const List<const CameraMetadata> &requests, bool repeating);
+
     /**
      * Get the last request submitted to the hal by the request thread.
      *
@@ -236,6 +246,13 @@
     status_t waitUntilStateThenRelock(bool active, nsecs_t timeout);
 
     /**
+     * Implementation of waitUntilDrained. On success, will transition to IDLE state.
+     *
+     * Must be called with mLock and mInterfaceLock held.
+     */
+    status_t waitUntilDrainedLocked();
+
+    /**
      * Do common work for setting up a streaming or single capture request.
      * On success, will transition to ACTIVE if in IDLE.
      */
@@ -312,6 +329,8 @@
 
         status_t queueRequest(sp<CaptureRequest> request);
 
+        status_t queueRequestList(List<sp<CaptureRequest> > &requests);
+
         /**
          * Remove all queued and repeating requests, and pending triggers
          */
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index 42e02d8..d662cc2 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -18,8 +18,7 @@
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
 
-// This is needed for stdint.h to define INT64_MAX in C++
-#define __STDC_LIMIT_MACROS
+#include <inttypes.h>
 
 #include <utils/Log.h>
 #include <utils/Trace.h>
@@ -54,8 +53,8 @@
 
 bool Camera3IOStreamBase::hasOutstandingBuffersLocked() const {
     nsecs_t signalTime = mCombinedFence->getSignalTime();
-    ALOGV("%s: Stream %d: Has %d outstanding buffers,"
-            " buffer signal time is %lld",
+    ALOGV("%s: Stream %d: Has %zu outstanding buffers,"
+            " buffer signal time is %" PRId64,
             __FUNCTION__, mId, mDequeuedBufferCount, signalTime);
     if (mDequeuedBufferCount > 0 || signalTime == INT64_MAX) {
         return true;
@@ -73,7 +72,7 @@
     lines.appendFormat("      Max size: %zu\n", mMaxSize);
     lines.appendFormat("      Usage: %d, max HAL buffers: %d\n",
             camera3_stream::usage, camera3_stream::max_buffers);
-    lines.appendFormat("      Frames produced: %d, last timestamp: %lld ns\n",
+    lines.appendFormat("      Frames produced: %d, last timestamp: %" PRId64 " ns\n",
             mFrameCount, mLastTimestamp);
     lines.appendFormat("      Total buffers: %zu, currently dequeued: %zu\n",
             mTotalBufferCount, mDequeuedBufferCount);
@@ -119,7 +118,7 @@
     }
 
     if (mDequeuedBufferCount > 0) {
-        ALOGE("%s: Can't disconnect with %d buffers still dequeued!",
+        ALOGE("%s: Can't disconnect with %zu buffers still dequeued!",
                 __FUNCTION__, mDequeuedBufferCount);
         return INVALID_OPERATION;
     }
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 6d2cf94..70406f1 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -353,7 +353,7 @@
     }
     if (bufferIdx == bufferCount) {
         // Got all buffers, register with HAL
-        ALOGV("%s: Registering %d buffers with camera HAL",
+        ALOGV("%s: Registering %zu buffers with camera HAL",
                 __FUNCTION__, bufferCount);
         ATRACE_BEGIN("camera3->register_stream_buffers");
         res = hal3Device->ops->register_stream_buffers(hal3Device,
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
index 04deac5..09e14c5 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
@@ -18,6 +18,8 @@
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
 
+#include <inttypes.h>
+
 #include <utils/Log.h>
 #include <utils/Trace.h>
 #include "Camera3ZslStream.h"
@@ -299,7 +301,7 @@
 
     if (actual != timestamp) {
         ALOGW("%s: ZSL buffer candidate search didn't find an exact match --"
-              " requested timestamp = %lld, actual timestamp = %lld",
+              " requested timestamp = %" PRId64 ", actual timestamp = %" PRId64,
               __FUNCTION__, timestamp, actual);
     }
 
diff --git a/services/camera/libcameraservice/device3/StatusTracker.cpp b/services/camera/libcameraservice/device3/StatusTracker.cpp
index ab5419f..723b5c2 100644
--- a/services/camera/libcameraservice/device3/StatusTracker.cpp
+++ b/services/camera/libcameraservice/device3/StatusTracker.cpp
@@ -18,9 +18,6 @@
 #define ATRACE_TAG ATRACE_TAG_CAMERA
 //#define LOG_NDEBUG 0
 
-// This is needed for stdint.h to define INT64_MAX in C++
-#define __STDC_LIMIT_MACROS
-
 #include <utils/Log.h>
 #include <utils/Trace.h>
 #include <ui/Fence.h>
@@ -52,7 +49,7 @@
         ALOGV("%s: Adding new component %d", __FUNCTION__, id);
 
         err = mStates.add(id, IDLE);
-        ALOGE_IF(err < 0, "%s: Can't add new component %d: %s (%d)",
+        ALOGE_IF(err < 0, "%s: Can't add new component %d: %s (%zd)",
                 __FUNCTION__, id, strerror(-err), err);
     }
 
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
index 9a6dc28..e4ec5fd 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
@@ -17,6 +17,9 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "RingBufferConsumer"
 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
+#include <inttypes.h>
+
 #include <utils/Log.h>
 
 #include <gui/RingBufferConsumer.h>
@@ -164,10 +167,10 @@
     }
 
     if (it == end) {
-        BI_LOGE("Failed to pin buffer (timestamp %lld, framenumber %lld)",
+        BI_LOGE("Failed to pin buffer (timestamp %" PRId64 ", framenumber %" PRIu64 ")",
                  item.mTimestamp, item.mFrameNumber);
     } else {
-        BI_LOGV("Pinned buffer (frame %lld, timestamp %lld)",
+        BI_LOGV("Pinned buffer (frame %" PRIu64 ", timestamp %" PRId64 ")",
                 item.mFrameNumber, item.mTimestamp);
     }
 }
@@ -222,12 +225,12 @@
 
         if (err != OK) {
             BI_LOGE("Failed to add release fence to buffer "
-                    "(timestamp %lld, framenumber %lld",
+                    "(timestamp %" PRId64 ", framenumber %" PRIu64,
                     item.mTimestamp, item.mFrameNumber);
             return err;
         }
 
-        BI_LOGV("Attempting to release buffer timestamp %lld, frame %lld",
+        BI_LOGV("Attempting to release buffer timestamp %" PRId64 ", frame %" PRIu64,
                 item.mTimestamp, item.mFrameNumber);
 
         // item.mGraphicBuffer was populated with the proper graphic-buffer
@@ -241,7 +244,7 @@
             return err;
         }
 
-        BI_LOGV("Buffer timestamp %lld, frame %lld evicted",
+        BI_LOGV("Buffer timestamp %" PRId64 ", frame %" PRIu64 " evicted",
                 item.mTimestamp, item.mFrameNumber);
 
         size_t currentSize = mBufferItemList.size();
@@ -294,8 +297,8 @@
             return;
         }
 
-        BI_LOGV("New buffer acquired (timestamp %lld), "
-                "buffer items %u out of %d",
+        BI_LOGV("New buffer acquired (timestamp %" PRId64 "), "
+                "buffer items %zu out of %d",
                 item.mTimestamp,
                 mBufferItemList.size(), mBufferCount);
 
@@ -321,7 +324,7 @@
 
             if (res != OK) {
                 BI_LOGE("Failed to add release fence to buffer "
-                        "(timestamp %lld, framenumber %lld",
+                        "(timestamp %" PRId64 ", framenumber %" PRIu64,
                         item.mTimestamp, item.mFrameNumber);
                 return;
             }
@@ -333,10 +336,10 @@
 
     if (it == end) {
         // This should never happen. If it happens, we have a bug.
-        BI_LOGE("Failed to unpin buffer (timestamp %lld, framenumber %lld)",
+        BI_LOGE("Failed to unpin buffer (timestamp %" PRId64 ", framenumber %" PRIu64 ")",
                  item.mTimestamp, item.mFrameNumber);
     } else {
-        BI_LOGV("Unpinned buffer (timestamp %lld, framenumber %lld)",
+        BI_LOGV("Unpinned buffer (timestamp %" PRId64 ", framenumber %" PRIu64 ")",
                  item.mTimestamp, item.mFrameNumber);
     }
 }