Merge "Add enabling variable for extended precision audio"
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index 22199fa..85f44f0 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -77,6 +77,32 @@
     return CameraBaseT::connect(cameraId, clientPackageName, clientUid);
 }
 
+status_t Camera::connectLegacy(int cameraId, int halVersion,
+        const String16& clientPackageName,
+        int clientUid,
+        sp<Camera>& camera)
+{
+    ALOGV("%s: connect legacy camera device", __FUNCTION__);
+    sp<Camera> c = new Camera(cameraId);
+    sp<ICameraClient> cl = c;
+    status_t status = NO_ERROR;
+    const sp<ICameraService>& cs = CameraBaseT::getCameraService();
+
+    if (cs != 0) {
+        status = cs.get()->connectLegacy(cl, cameraId, halVersion, clientPackageName,
+                                        clientUid, /*out*/c->mCamera);
+    }
+    if (status == OK && c->mCamera != 0) {
+        c->mCamera->asBinder()->linkToDeath(c);
+        c->mStatus = NO_ERROR;
+        camera = c;
+    } else {
+        ALOGW("An error occurred while connecting to camera: %d", cameraId);
+        c.clear();
+    }
+    return status;
+}
+
 status_t Camera::reconnect()
 {
     ALOGV("reconnect");
diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp
index b86651f..5485205 100644
--- a/camera/ICameraService.cpp
+++ b/camera/ICameraService.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "BpCameraService"
 #include <utils/Log.h>
 #include <utils/Errors.h>
+#include <utils/String16.h>
 
 #include <stdint.h>
 #include <sys/types.h>
@@ -185,6 +186,29 @@
         return status;
     }
 
+    // connect to camera service (android.hardware.Camera)
+    virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient, int cameraId,
+                             int halVersion,
+                             const String16 &clientPackageName, int clientUid,
+                             /*out*/sp<ICamera>& device)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
+        data.writeStrongBinder(cameraClient->asBinder());
+        data.writeInt32(cameraId);
+        data.writeInt32(halVersion);
+        data.writeString16(clientPackageName);
+        data.writeInt32(clientUid);
+        remote()->transact(BnCameraService::CONNECT_LEGACY, data, &reply);
+
+        if (readExceptionCode(reply)) return -EPROTO;
+        status_t status = reply.readInt32();
+        if (reply.readInt32() != 0) {
+            device = interface_cast<ICamera>(reply.readStrongBinder());
+        }
+        return status;
+    }
+
     // connect to camera service (pro client)
     virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb, int cameraId,
                                 const String16 &clientPackageName, int clientUid,
@@ -253,6 +277,43 @@
         if (readExceptionCode(reply)) return -EPROTO;
         return reply.readInt32();
     }
+
+    virtual status_t getLegacyParameters(int cameraId, String16* parameters) {
+        if (parameters == NULL) {
+            ALOGE("%s: parameters must not be null", __FUNCTION__);
+            return BAD_VALUE;
+        }
+
+        Parcel data, reply;
+
+        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
+        data.writeInt32(cameraId);
+        remote()->transact(BnCameraService::GET_LEGACY_PARAMETERS, data, &reply);
+        if (readExceptionCode(reply)) return -EPROTO;
+
+        status_t res = reply.readInt32();
+        int32_t length = reply.readInt32(); // -1 means null
+        if (length > 0) {
+            *parameters = reply.readString16();
+        } else {
+            *parameters = String16();
+        }
+
+        return res;
+    }
+
+    virtual status_t supportsCameraApi(int cameraId, int apiVersion) {
+        Parcel data, reply;
+
+        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
+        data.writeInt32(cameraId);
+        data.writeInt32(apiVersion);
+        remote()->transact(BnCameraService::SUPPORTS_CAMERA_API, data, &reply);
+        if (readExceptionCode(reply)) return -EPROTO;
+
+        status_t res = reply.readInt32();
+        return res;
+    }
 };
 
 IMPLEMENT_META_INTERFACE(CameraService, "android.hardware.ICameraService");
@@ -387,6 +448,50 @@
             reply->writeInt32(removeListener(listener));
             return NO_ERROR;
         } break;
+        case GET_LEGACY_PARAMETERS: {
+            CHECK_INTERFACE(ICameraService, data, reply);
+            int cameraId = data.readInt32();
+            String16 parameters;
+
+            reply->writeNoException();
+            // return value
+            reply->writeInt32(getLegacyParameters(cameraId, &parameters));
+            // out parameters
+            reply->writeInt32(1); // parameters is always available
+            reply->writeString16(parameters);
+            return NO_ERROR;
+        } break;
+        case SUPPORTS_CAMERA_API: {
+            CHECK_INTERFACE(ICameraService, data, reply);
+            int cameraId = data.readInt32();
+            int apiVersion = data.readInt32();
+
+            reply->writeNoException();
+            // return value
+            reply->writeInt32(supportsCameraApi(cameraId, apiVersion));
+            return NO_ERROR;
+        } break;
+        case CONNECT_LEGACY: {
+            CHECK_INTERFACE(ICameraService, data, reply);
+            sp<ICameraClient> cameraClient =
+                    interface_cast<ICameraClient>(data.readStrongBinder());
+            int32_t cameraId = data.readInt32();
+            int32_t halVersion = data.readInt32();
+            const String16 clientName = data.readString16();
+            int32_t clientUid = data.readInt32();
+            sp<ICamera> camera;
+            status_t status = connectLegacy(cameraClient, cameraId, halVersion,
+                    clientName, clientUid, /*out*/camera);
+            reply->writeNoException();
+            reply->writeInt32(status);
+            if (camera != NULL) {
+                reply->writeInt32(1);
+                reply->writeStrongBinder(camera->asBinder());
+            } else {
+                reply->writeInt32(0);
+            }
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
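
For reference, the proxy and stub above have to agree on the reply layout; the CONNECT_LEGACY reply assembled in onTransact() is consumed by BpCameraService::connectLegacy() in this order (comment sketch only, no new behavior):

    // CONNECT_LEGACY reply parcel, front to back:
    //   exception code      written by writeNoException(), checked by readExceptionCode()
    //   int32 status        return value of connectLegacy()
    //   int32 hasDevice     1 if a camera binder follows, 0 otherwise
    //   strong binder       the ICamera, present only when hasDevice == 1
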
diff --git a/cmds/screenrecord/FrameOutput.cpp b/cmds/screenrecord/FrameOutput.cpp
index 4da16bc..03e0062 100644
--- a/cmds/screenrecord/FrameOutput.cpp
+++ b/cmds/screenrecord/FrameOutput.cpp
@@ -71,7 +71,7 @@
     sp<IGraphicBufferConsumer> consumer;
     BufferQueue::createBufferQueue(&producer, &consumer);
     mGlConsumer = new GLConsumer(consumer, mExtTextureName,
-                GL_TEXTURE_EXTERNAL_OES);
+                GL_TEXTURE_EXTERNAL_OES, true, false);
     mGlConsumer->setName(String8("virtual display"));
     mGlConsumer->setDefaultBufferSize(width, height);
     mGlConsumer->setDefaultMaxBufferCount(5);
diff --git a/cmds/screenrecord/Overlay.cpp b/cmds/screenrecord/Overlay.cpp
index c2a8f1b..7fef53d 100644
--- a/cmds/screenrecord/Overlay.cpp
+++ b/cmds/screenrecord/Overlay.cpp
@@ -14,6 +14,10 @@
  * limitations under the License.
  */
 
+#include <assert.h>
+#include <inttypes.h>
+#include <stdlib.h>
+
 #define LOG_TAG "ScreenRecord"
 //#define LOG_NDEBUG 0
 #include <utils/Log.h>
@@ -27,9 +31,6 @@
 #include <GLES2/gl2.h>
 #include <GLES2/gl2ext.h>
 
-#include <stdlib.h>
-#include <assert.h>
-
 #include "screenrecord.h"
 #include "Overlay.h"
 #include "TextRenderer.h"
@@ -172,7 +173,7 @@
     sp<IGraphicBufferConsumer> consumer;
     BufferQueue::createBufferQueue(&mProducer, &consumer);
     mGlConsumer = new GLConsumer(consumer, mExtTextureName,
-                GL_TEXTURE_EXTERNAL_OES);
+                GL_TEXTURE_EXTERNAL_OES, true, false);
     mGlConsumer->setName(String8("virtual display"));
     mGlConsumer->setDefaultBufferSize(width, height);
     mGlConsumer->setDefaultMaxBufferCount(5);
@@ -235,7 +236,7 @@
 
     char textBuf[64];
     getTimeString_l(monotonicNsec, textBuf, sizeof(textBuf));
-    String8 timeStr(String8::format("%s f=%lld (%zd)",
+    String8 timeStr(String8::format("%s f=%" PRId64 " (%zd)",
             textBuf, frameNumber, mTotalDroppedFrames));
     mTextRenderer.drawString(mTexProgram, Program::kIdentity, 0, 0, timeStr);
 
diff --git a/cmds/screenrecord/TextRenderer.cpp b/cmds/screenrecord/TextRenderer.cpp
index 784055c..6a9176b 100644
--- a/cmds/screenrecord/TextRenderer.cpp
+++ b/cmds/screenrecord/TextRenderer.cpp
@@ -353,6 +353,6 @@
         }
     }
 
-    ALOGV("goodPos=%d for str='%s'", goodPos, str);
+    ALOGV("goodPos=%zu for str='%s'", goodPos, str);
     return const_cast<char*>(str + goodPos);
 }
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 02ed53a..02df1d2 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -14,6 +14,19 @@
  * limitations under the License.
  */
 
+#include <assert.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/wait.h>
+#include <termios.h>
+#include <unistd.h>
+
 #define LOG_TAG "ScreenRecord"
 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
 //#define LOG_NDEBUG 0
@@ -36,18 +49,6 @@
 #include <media/stagefright/MediaMuxer.h>
 #include <media/ICrypto.h>
 
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#include <stdio.h>
-#include <ctype.h>
-#include <fcntl.h>
-#include <signal.h>
-#include <getopt.h>
-#include <sys/wait.h>
-#include <termios.h>
-#include <assert.h>
-
 #include "screenrecord.h"
 #include "Overlay.h"
 #include "FrameOutput.h"
@@ -354,7 +355,7 @@
         case NO_ERROR:
             // got a buffer
             if ((flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) != 0) {
-                ALOGV("Got codec config buffer (%u bytes)", size);
+                ALOGV("Got codec config buffer (%zu bytes)", size);
                 if (muxer != NULL) {
                     // ignore this -- we passed the CSD into MediaMuxer when
                     // we got the format change notification
@@ -362,7 +363,7 @@
                 }
             }
             if (size != 0) {
-                ALOGV("Got data in buffer %d, size=%d, pts=%lld",
+                ALOGV("Got data in buffer %zu, size=%zu, pts=%" PRId64,
                         bufIndex, size, ptsUsec);
 
                 { // scope
@@ -473,7 +474,7 @@
 
     ALOGV("Encoder stopping (req=%d)", gStopRequested);
     if (gVerbose) {
-        printf("Encoder stopping; recorded %u frames in %lld seconds\n",
+        printf("Encoder stopping; recorded %u frames in %" PRId64 " seconds\n",
                 debugNumFrames, nanoseconds_to_seconds(
                         systemTime(CLOCK_MONOTONIC) - startWhenNsec));
     }
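
The screenrecord changes above are format-string fixes for 64-bit builds: size_t/ssize_t values take %zu/%zd, and fixed-width 64-bit values take the <inttypes.h> macros. A standalone sketch of the pattern (not from this change):

    #include <inttypes.h>
    #include <stdio.h>

    int main() {
        size_t  count = 42;                 // %zu works on both 32- and 64-bit
        ssize_t delta = -7;                 // %zd for the signed variant
        int64_t ptsUsec = 1234567890123LL;  // PRId64 expands to the right conversion
        printf("count=%zu delta=%zd pts=%" PRId64 "\n", count, delta, ptsUsec);
        return 0;
    }
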
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index b70afe6..81edcb4 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -942,7 +942,9 @@
             sp<IGraphicBufferProducer> producer;
             sp<IGraphicBufferConsumer> consumer;
             BufferQueue::createBufferQueue(&producer, &consumer);
-            sp<GLConsumer> texture = new GLConsumer(consumer, 0 /* tex */);
+            sp<GLConsumer> texture = new GLConsumer(consumer, 0 /* tex */,
+                    GLConsumer::TEXTURE_EXTERNAL, true /* useFenceSync */,
+                    false /* isControlledByApp */);
             gSurface = new Surface(producer);
         }
 
diff --git a/drm/drmserver/DrmManagerService.cpp b/drm/drmserver/DrmManagerService.cpp
index 2b71904..63341e0 100644
--- a/drm/drmserver/DrmManagerService.cpp
+++ b/drm/drmserver/DrmManagerService.cpp
@@ -34,7 +34,18 @@
 static Vector<uid_t> trustedUids;
 
 static bool isProtectedCallAllowed() {
-    return true;
+    // TODO
+    // Following implementation is just for reference.
+    // Each OEM manufacturer should implement/replace with their own solutions.
+    IPCThreadState* ipcState = IPCThreadState::self();
+    uid_t uid = ipcState->getCallingUid();
+
+    for (unsigned int i = 0; i < trustedUids.size(); ++i) {
+        if (trustedUids[i] == uid) {
+            return true;
+        }
+    }
+    return false;
 }
 
 void DrmManagerService::instantiate() {
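
The check above only consults trustedUids, so the allow-list still has to be seeded somewhere. A hedged sketch of one way an OEM could do that at service start; AID_MEDIA is an arbitrary example UID and the body below is not part of this diff:

    // Sketch only: seed the allow-list before registering the service.
    #include <private/android_filesystem_config.h>  // AID_MEDIA (example)

    void DrmManagerService::instantiate() {
        if (trustedUids.isEmpty()) {
            trustedUids.push(AID_MEDIA);  // e.g. trust mediaserver
        }
        defaultServiceManager()->addService(
                String16("drm.drmManager"), new DrmManagerService());
    }
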
diff --git a/include/camera/Camera.h b/include/camera/Camera.h
index 79682b8..2b60842 100644
--- a/include/camera/Camera.h
+++ b/include/camera/Camera.h
@@ -74,6 +74,10 @@
                                 const String16& clientPackageName,
                                 int clientUid);
 
+    static  status_t  connectLegacy(int cameraId, int halVersion,
+                                     const String16& clientPackageName,
+                                     int clientUid, sp<Camera>& camera);
+
             virtual     ~Camera();
 
             status_t    reconnect();
diff --git a/include/camera/ICameraService.h b/include/camera/ICameraService.h
index 6e48f22..f7f06bb 100644
--- a/include/camera/ICameraService.h
+++ b/include/camera/ICameraService.h
@@ -32,6 +32,7 @@
 class ICameraDeviceCallbacks;
 class CameraMetadata;
 class VendorTagDescriptor;
+class String16;
 
 class ICameraService : public IInterface
 {
@@ -49,12 +50,24 @@
         REMOVE_LISTENER,
         GET_CAMERA_CHARACTERISTICS,
         GET_CAMERA_VENDOR_TAG_DESCRIPTOR,
+        GET_LEGACY_PARAMETERS,
+        SUPPORTS_CAMERA_API,
+        CONNECT_LEGACY,
     };
 
     enum {
         USE_CALLING_UID = -1
     };
 
+    enum {
+        API_VERSION_1 = 1,
+        API_VERSION_2 = 2,
+    };
+
+    enum {
+        CAMERA_HAL_API_VERSION_UNSPECIFIED = -1
+    };
+
 public:
     DECLARE_META_INTERFACE(CameraService);
 
@@ -105,6 +118,30 @@
             int clientUid,
             /*out*/
             sp<ICameraDeviceUser>& device) = 0;
+
+    virtual status_t getLegacyParameters(
+            int cameraId,
+            /*out*/
+            String16* parameters) = 0;
+
+    /**
+     * Returns OK if device supports camera2 api,
+     * returns -EOPNOTSUPP if it doesn't.
+     */
+    virtual status_t supportsCameraApi(
+            int cameraId, int apiVersion) = 0;
+
+    /**
+     * Connect the device as a legacy device for a given HAL version.
+     * For halVersion, use CAMERA_API_DEVICE_VERSION_* for a particular
+     * version, or CAMERA_HAL_API_VERSION_UNSPECIFIED for a service-selected version.
+     */
+    virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient,
+            int cameraId, int halVersion,
+            const String16& clientPackageName,
+            int clientUid,
+            /*out*/
+            sp<ICamera>& device) = 0;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index a4722cb..c89ceaa 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -99,6 +99,8 @@
     // to be non-zero if status == NO_ERROR
     static status_t getOutputSamplingRate(uint32_t* samplingRate,
             audio_stream_type_t stream);
+    static status_t getOutputSamplingRateForAttr(uint32_t* samplingRate,
+                const audio_attributes_t *attr);
     static status_t getOutputFrameCount(size_t* frameCount,
             audio_stream_type_t stream);
     static status_t getOutputLatency(uint32_t* latency,
@@ -212,7 +214,12 @@
                                         audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
                                         audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                         const audio_offload_info_t *offloadInfo = NULL);
-
+    static audio_io_handle_t getOutputForAttr(const audio_attributes_t *attr,
+                                        uint32_t samplingRate = 0,
+                                        audio_format_t format = AUDIO_FORMAT_DEFAULT,
+                                        audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
+                                        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                                        const audio_offload_info_t *offloadInfo = NULL);
     static status_t startOutput(audio_io_handle_t output,
                                 audio_stream_type_t stream,
                                 int session);
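
getOutputForAttr() parallels getOutput() but selects an output from an audio_attributes_t rather than a stream type. A hedged usage sketch (field values are illustrative):

    // Sketch: request an output suitable for music playback.
    audio_attributes_t attr;
    memset(&attr, 0, sizeof(attr));
    attr.usage = AUDIO_USAGE_MEDIA;
    attr.content_type = AUDIO_CONTENT_TYPE_MUSIC;

    audio_io_handle_t output = AudioSystem::getOutputForAttr(&attr,
            48000 /* samplingRate */, AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_STEREO, AUDIO_OUTPUT_FLAG_NONE,
            NULL /* offloadInfo */);
    // A return of 0 means no suitable output (or attr was NULL).
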
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 79db323..3492520 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -253,7 +253,8 @@
                             transfer_type transferType = TRANSFER_DEFAULT,
                             const audio_offload_info_t *offloadInfo = NULL,
                             int uid = -1,
-                            pid_t pid = -1);
+                            pid_t pid = -1,
+                            audio_attributes_t* pAttributes = NULL);
 
     /* Result of constructing the AudioTrack. This must be checked for successful initialization
      * before using any AudioTrack API (except for set()), because using
@@ -586,6 +587,11 @@
                         AudioTrack(const AudioTrack& other);
             AudioTrack& operator = (const AudioTrack& other);
 
+            void        setAttributesFromStreamType(audio_stream_type_t streamType);
+            void        setStreamTypeFromAttributes(audio_attributes_t& aa);
+    /* paa is guaranteed non-NULL */
+            bool        isValidAttributes(const audio_attributes_t *paa);
+
     /* a small internal class to handle the callback */
     class AudioTrackThread : public Thread
     {
@@ -626,6 +632,8 @@
             nsecs_t processAudioBuffer();
 
             bool     isOffloaded() const;
+            bool     isDirect() const;
+            bool     isOffloadedOrDirect() const;
 
             // caller must hold lock on mLock for all _l methods
 
@@ -642,6 +650,13 @@
             bool     isOffloaded_l() const
                 { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
 
+            bool     isOffloadedOrDirect_l() const
+                { return (mFlags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|
+                                                AUDIO_OUTPUT_FLAG_DIRECT)) != 0; }
+
+            bool     isDirect_l() const
+                { return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
+
     // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
@@ -667,6 +682,7 @@
     transfer_type           mTransfer;
     audio_offload_info_t    mOffloadInfoCopy;
     const audio_offload_info_t* mOffloadInfo;
+    audio_attributes_t      mAttributes;
 
     // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data.  For 8-bit PCM data, it's
     // twice as large as mFrameSize because data is expanded to 16-bit before it's stored in buffer.
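
With the new trailing pAttributes argument, callers can hand attributes directly to set(); when it is non-NULL the stream type argument is effectively ignored (see AudioTrack.cpp below). A minimal sketch with illustrative values:

    // Sketch: attribute-driven AudioTrack instead of a bare stream type.
    audio_attributes_t attr;
    memset(&attr, 0, sizeof(attr));
    attr.usage = AUDIO_USAGE_ALARM;
    attr.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;

    sp<AudioTrack> track = new AudioTrack();
    status_t err = track->set(AUDIO_STREAM_DEFAULT, 48000 /* sampleRate */,
            AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
            0 /* frameCount: let the library pick */, AUDIO_OUTPUT_FLAG_NONE,
            NULL /* cbf */, NULL /* user */, 0 /* notificationFrames */,
            0 /* sharedBuffer */, false /* threadCanCallJava */,
            0 /* sessionId */, AudioTrack::TRANSFER_DEFAULT,
            NULL /* offloadInfo */, -1 /* uid */, -1 /* pid */, &attr);
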
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index c742810..fc8be20 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -50,6 +50,7 @@
         TRACK_TIMED   = 1,  // client requests a TimedAudioTrack
         TRACK_FAST    = 2,  // client requests a fast AudioTrack or AudioRecord
         TRACK_OFFLOAD = 4,  // client requests offload to hw codec
+        TRACK_DIRECT = 8,   // client requests a direct output
     };
     typedef uint32_t track_flags_t;
 
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index d422aa3..959e4c3 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -56,6 +56,12 @@
                                         audio_channel_mask_t channelMask = 0,
                                         audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                         const audio_offload_info_t *offloadInfo = NULL) = 0;
+    virtual audio_io_handle_t getOutputForAttr(const audio_attributes_t *attr,
+                                            uint32_t samplingRate = 0,
+                                            audio_format_t format = AUDIO_FORMAT_DEFAULT,
+                                            audio_channel_mask_t channelMask = 0,
+                                            audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                                            const audio_offload_info_t *offloadInfo = NULL) = 0;
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
                                  int session = 0) = 0;
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 8ec7f1c..84b6bab 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -278,6 +278,7 @@
     status_t setupMPEG4EncoderParameters(const sp<AMessage> &msg);
     status_t setupH263EncoderParameters(const sp<AMessage> &msg);
     status_t setupAVCEncoderParameters(const sp<AMessage> &msg);
+    status_t setupHEVCEncoderParameters(const sp<AMessage> &msg);
     status_t setupVPXEncoderParameters(const sp<AMessage> &msg);
 
     status_t verifySupportForProfileAndLevel(int32_t profile, int32_t level);
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index db5c78f..695767d 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -19,11 +19,13 @@
 #define ARRAY_SIZE(array) (sizeof array / sizeof array[0])
 //#define LOG_NDEBUG 0
 
-#include <cutils/log.h>
 #include <assert.h>
+#include <inttypes.h>
+#include <new>
 #include <stdlib.h>
 #include <string.h>
-#include <new>
+
+#include <cutils/log.h>
 #include "EffectBundle.h"
 
 
@@ -560,11 +562,12 @@
             MemTab.Region[i].pBaseAddress = malloc(MemTab.Region[i].Size);
 
             if (MemTab.Region[i].pBaseAddress == LVM_NULL){
-                ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate %ld bytes "
-                        "for region %u\n", MemTab.Region[i].Size, i );
+                ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate %" PRIu32
+                        " bytes for region %u\n", MemTab.Region[i].Size, i );
                 bMallocFailure = LVM_TRUE;
             }else{
-                ALOGV("\tLvmBundle_init CreateInstance allocated %ld bytes for region %u at %p\n",
+                ALOGV("\tLvmBundle_init CreateInstance allocated %" PRIu32
+                        " bytes for region %u at %p\n",
                         MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
             }
         }
@@ -576,11 +579,11 @@
     if(bMallocFailure == LVM_TRUE){
         for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
             if (MemTab.Region[i].pBaseAddress == LVM_NULL){
-                ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate %ld bytes "
-                        "for region %u Not freeing\n", MemTab.Region[i].Size, i );
+                ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed to allocate %" PRIu32
+                        " bytes for region %u Not freeing\n", MemTab.Region[i].Size, i );
             }else{
-                ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed: but allocated %ld bytes "
-                     "for region %u at %p- free\n",
+                ALOGV("\tLVM_ERROR :LvmBundle_init CreateInstance Failed: but allocated %" PRIu32
+                     " bytes for region %u at %p- free\n",
                      MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
                 free(MemTab.Region[i].pBaseAddress);
             }
@@ -889,16 +892,16 @@
     for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
         if (MemTab.Region[i].Size != 0){
             if (MemTab.Region[i].pBaseAddress != NULL){
-                ALOGV("\tLvmEffect_free - START freeing %ld bytes for region %u at %p\n",
+                ALOGV("\tLvmEffect_free - START freeing %" PRIu32 " bytes for region %u at %p\n",
                         MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
 
                 free(MemTab.Region[i].pBaseAddress);
 
-                ALOGV("\tLvmEffect_free - END   freeing %ld bytes for region %u at %p\n",
+                ALOGV("\tLvmEffect_free - END   freeing %" PRIu32 " bytes for region %u at %p\n",
                         MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
             }else{
-                ALOGV("\tLVM_ERROR : LvmEffect_free - trying to free with NULL pointer %ld bytes "
-                        "for region %u at %p ERROR\n",
+                ALOGV("\tLVM_ERROR : LvmEffect_free - trying to free with NULL pointer %" PRIu32
+                        " bytes for region %u at %p ERROR\n",
                         MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
             }
         }
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index c6d3759..13f1a0d 100644
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -19,11 +19,13 @@
 #define ARRAY_SIZE(array) (sizeof array / sizeof array[0])
 //#define LOG_NDEBUG 0
 
-#include <cutils/log.h>
 #include <assert.h>
+#include <inttypes.h>
+#include <new>
 #include <stdlib.h>
 #include <string.h>
-#include <new>
+
+#include <cutils/log.h>
 #include "EffectReverb.h"
 // from Reverb/lib
 #include "LVREV.h"
@@ -269,7 +271,7 @@
     pContext->InFrames32  = (LVM_INT32 *)malloc(LVREV_MAX_FRAME_SIZE * sizeof(LVM_INT32) * 2);
     pContext->OutFrames32 = (LVM_INT32 *)malloc(LVREV_MAX_FRAME_SIZE * sizeof(LVM_INT32) * 2);
 
-    ALOGV("\tEffectCreate %p, size %d", pContext, sizeof(ReverbContext));
+    ALOGV("\tEffectCreate %p, size %zu", pContext, sizeof(ReverbContext));
     ALOGV("\tEffectCreate end\n");
     return 0;
 } /* end EffectCreate */
@@ -570,15 +572,15 @@
     for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
         if (MemTab.Region[i].Size != 0){
             if (MemTab.Region[i].pBaseAddress != NULL){
-                ALOGV("\tfree() - START freeing %ld bytes for region %u at %p\n",
+                ALOGV("\tfree() - START freeing %" PRIu32 " bytes for region %u at %p\n",
                         MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
 
                 free(MemTab.Region[i].pBaseAddress);
 
-                ALOGV("\tfree() - END   freeing %ld bytes for region %u at %p\n",
+                ALOGV("\tfree() - END   freeing %" PRIu32 " bytes for region %u at %p\n",
                         MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
             }else{
-                ALOGV("\tLVM_ERROR : free() - trying to free with NULL pointer %ld bytes "
+                ALOGV("\tLVM_ERROR : free() - trying to free with NULL pointer %" PRIu32 " bytes "
                         "for region %u at %p ERROR\n",
                         MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
             }
@@ -771,11 +773,12 @@
             MemTab.Region[i].pBaseAddress = malloc(MemTab.Region[i].Size);
 
             if (MemTab.Region[i].pBaseAddress == LVM_NULL){
-                ALOGV("\tLVREV_ERROR :Reverb_init CreateInstance Failed to allocate %ld "
-                        "bytes for region %u\n", MemTab.Region[i].Size, i );
+                ALOGV("\tLVREV_ERROR :Reverb_init CreateInstance Failed to allocate %" PRIu32
+                        " bytes for region %u\n", MemTab.Region[i].Size, i );
                 bMallocFailure = LVM_TRUE;
             }else{
-                ALOGV("\tReverb_init CreateInstance allocate %ld bytes for region %u at %p\n",
+                ALOGV("\tReverb_init CreateInstance allocate %" PRIu32
+                        " bytes for region %u at %p\n",
                         MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
             }
         }
@@ -787,11 +790,11 @@
     if(bMallocFailure == LVM_TRUE){
         for (int i=0; i<LVM_NR_MEMORY_REGIONS; i++){
             if (MemTab.Region[i].pBaseAddress == LVM_NULL){
-                ALOGV("\tLVM_ERROR :Reverb_init CreateInstance Failed to allocate %ld bytes "
-                        "for region %u - Not freeing\n", MemTab.Region[i].Size, i );
+                ALOGV("\tLVM_ERROR :Reverb_init CreateInstance Failed to allocate %" PRIu32
+                        " bytes for region %u - Not freeing\n", MemTab.Region[i].Size, i );
             }else{
-                ALOGV("\tLVM_ERROR :Reverb_init CreateInstance Failed: but allocated %ld bytes "
-                        "for region %u at %p- free\n",
+                ALOGV("\tLVM_ERROR :Reverb_init CreateInstance Failed: but allocated %" PRIu32
+                        " bytes for region %u at %p- free\n",
                         MemTab.Region[i].Size, i, MemTab.Region[i].pBaseAddress);
                 free(MemTab.Region[i].pBaseAddress);
             }
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 50b444a..f865d38 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -18,7 +18,9 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AudioRecord"
 
+#include <inttypes.h>
 #include <sys/resource.h>
+
 #include <binder/IPCThreadState.h>
 #include <media/AudioRecord.h>
 #include <utils/Log.h>
@@ -468,7 +470,7 @@
         if (frameCount == 0) {
             frameCount = minFrameCount;
         } else if (frameCount < minFrameCount) {
-            ALOGE("frameCount %u < minFrameCount %u", frameCount, minFrameCount);
+            ALOGE("frameCount %zu < minFrameCount %zu", frameCount, minFrameCount);
             return BAD_VALUE;
         }
 
@@ -555,17 +557,17 @@
     mCblk = cblk;
     // note that temp is the (possibly revised) value of frameCount
     if (temp < frameCount || (frameCount == 0 && temp == 0)) {
-        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
+        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
     }
     frameCount = temp;
 
     mAwaitBoost = false;
     if (mFlags & AUDIO_INPUT_FLAG_FAST) {
         if (trackFlags & IAudioFlinger::TRACK_FAST) {
-            ALOGV("AUDIO_INPUT_FLAG_FAST successful; frameCount %u", frameCount);
+            ALOGV("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu", frameCount);
             mAwaitBoost = true;
         } else {
-            ALOGV("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
+            ALOGV("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
             // once denied, do not request again if IAudioRecord is re-created
             mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
         }
@@ -740,7 +742,7 @@
     if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
         // sanity-check. user is most-likely passing an error code, and it would
         // make the return value ambiguous (actualSize vs error).
-        ALOGE("AudioRecord::read(buffer=%p, size=%u (%d)", buffer, userSize, userSize);
+        ALOGE("AudioRecord::read(buffer=%p, size=%zu (%zu)", buffer, userSize, userSize);
         return BAD_VALUE;
     }
 
@@ -921,10 +923,10 @@
         size_t nonContig;
         status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
         LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
-                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
+                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
         requested = &ClientProxy::kNonBlocking;
         size_t avail = audioBuffer.frameCount + nonContig;
-        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
+        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
                 mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
         if (err != NO_ERROR) {
             if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR) {
@@ -952,8 +954,8 @@
 
         // Sanity check on returned size
         if (ssize_t(readSize) < 0 || readSize > reqSize) {
-            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
-                    reqSize, (int) readSize);
+            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
+                    reqSize, ssize_t(readSize));
             return NS_NEVER;
         }
 
@@ -1092,7 +1094,7 @@
         ns = 1000000000LL;
         // fall through
     default:
-        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
+        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
         pauseInternal(ns);
         return true;
     }
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 15b32ff..a47d45c 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -245,6 +245,19 @@
     return getSamplingRate(output, samplingRate);
 }
 
+status_t AudioSystem::getOutputSamplingRateForAttr(uint32_t* samplingRate,
+        const audio_attributes_t *attr)
+{
+    if (attr == NULL) {
+        return BAD_VALUE;
+    }
+    audio_io_handle_t output = getOutputForAttr(attr);
+    if (output == 0) {
+        return PERMISSION_DENIED;
+    }
+    return getSamplingRate(output, samplingRate);
+}
+
 status_t AudioSystem::getSamplingRate(audio_io_handle_t output,
                                       uint32_t* samplingRate)
 {
@@ -310,7 +323,7 @@
         return BAD_VALUE;
     }
 
-    ALOGV("getFrameCount() output %d, frameCount %d", output, *frameCount);
+    ALOGV("getFrameCount() output %d, frameCount %zu", output, *frameCount);
 
     return NO_ERROR;
 }
@@ -476,7 +489,7 @@
 
         OutputDescriptor *outputDesc =  new OutputDescriptor(*desc);
         gOutputs.add(ioHandle, outputDesc);
-        ALOGV("ioConfigChanged() new output samplingRate %u, format %#x channel mask %#x frameCount %u "
+        ALOGV("ioConfigChanged() new output samplingRate %u, format %#x channel mask %#x frameCount %zu "
                 "latency %d",
                 outputDesc->samplingRate, outputDesc->format, outputDesc->channelMask,
                 outputDesc->frameCount, outputDesc->latency);
@@ -501,7 +514,7 @@
         desc = (const OutputDescriptor *)param2;
 
         ALOGV("ioConfigChanged() new config for output %d samplingRate %u, format %#x channel mask %#x "
-                "frameCount %d latency %d",
+                "frameCount %zu latency %d",
                 ioHandle, desc->samplingRate, desc->format,
                 desc->channelMask, desc->frameCount, desc->latency);
         OutputDescriptor *outputDesc = gOutputs.valueAt(index);
@@ -633,6 +646,19 @@
     return aps->getOutput(stream, samplingRate, format, channelMask, flags, offloadInfo);
 }
 
+audio_io_handle_t AudioSystem::getOutputForAttr(const audio_attributes_t *attr,
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    audio_output_flags_t flags,
+                                    const audio_offload_info_t *offloadInfo)
+{
+    if (attr == NULL) return 0;
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return 0;
+    return aps->getOutputForAttr(attr, samplingRate, format, channelMask, flags, offloadInfo);
+}
+
 status_t AudioSystem::startOutput(audio_io_handle_t output,
                                   audio_stream_type_t stream,
                                   int session)
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index e6827ee..898d58d 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -15,12 +15,13 @@
 ** limitations under the License.
 */
 
-
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AudioTrack"
 
+#include <inttypes.h>
 #include <math.h>
 #include <sys/resource.h>
+
 #include <audio_utils/primitives.h>
 #include <binder/IPCThreadState.h>
 #include <media/AudioTrack.h>
@@ -89,7 +90,7 @@
                 streamType, sampleRate);
         return BAD_VALUE;
     }
-    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
+    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%d, afSampleRate=%d, afLatency=%d",
             *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
     return NO_ERROR;
 }
@@ -103,6 +104,10 @@
       mPreviousSchedulingGroup(SP_DEFAULT),
       mPausedPosition(0)
 {
+    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
+    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
+    mAttributes.flags = 0x0;
+    strcpy(mAttributes.tags, "");
 }
 
 AudioTrack::AudioTrack(
@@ -129,7 +134,7 @@
     mStatus = set(streamType, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames,
             0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
-            offloadInfo, uid, pid);
+            offloadInfo, uid, pid, NULL /*no audio attributes*/);
 }
 
 AudioTrack::AudioTrack(
@@ -156,7 +161,7 @@
     mStatus = set(streamType, sampleRate, format, channelMask,
             0 /*frameCount*/, flags, cbf, user, notificationFrames,
             sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
-            uid, pid);
+            uid, pid, NULL /*no audio attributes*/);
 }
 
 AudioTrack::~AudioTrack()
@@ -199,7 +204,8 @@
         transfer_type transferType,
         const audio_offload_info_t *offloadInfo,
         int uid,
-        pid_t pid)
+        pid_t pid,
+        audio_attributes_t* pAttributes)
 {
     ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
           "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
@@ -245,7 +251,7 @@
     ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
             sharedBuffer->size());
 
-    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
+    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
 
     AutoMutex lock(mLock);
 
@@ -259,18 +265,33 @@
     if (streamType == AUDIO_STREAM_DEFAULT) {
         streamType = AUDIO_STREAM_MUSIC;
     }
-    if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
-        ALOGE("Invalid stream type %d", streamType);
-        return BAD_VALUE;
+
+    if (pAttributes == NULL) {
+        if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
+            ALOGE("Invalid stream type %d", streamType);
+            return BAD_VALUE;
+        }
+        setAttributesFromStreamType(streamType);
+        mStreamType = streamType;
+    } else {
+        if (!isValidAttributes(pAttributes)) {
+            ALOGE("Invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
+                pAttributes->usage, pAttributes->content_type, pAttributes->flags,
+                pAttributes->tags);
+        }
+        // stream type shouldn't be looked at, this track has audio attributes
+        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
+        setStreamTypeFromAttributes(mAttributes);
+        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
+                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
     }
-    mStreamType = streamType;
 
     status_t status;
     if (sampleRate == 0) {
-        status = AudioSystem::getOutputSamplingRate(&sampleRate, streamType);
+        status = AudioSystem::getOutputSamplingRateForAttr(&sampleRate, &mAttributes);
         if (status != NO_ERROR) {
             ALOGE("Could not get output sample rate for stream type %d; status %d",
-                    streamType, status);
+                    mStreamType, status);
             return status;
         }
     }
@@ -314,7 +335,7 @@
                 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
     }
     // only allow deep buffering for music stream type
-    if (streamType != AUDIO_STREAM_MUSIC) {
+    if (mStreamType != AUDIO_STREAM_MUSIC) {
         flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
     }
 
@@ -615,12 +636,12 @@
 
 status_t AudioTrack::setSampleRate(uint32_t rate)
 {
-    if (mIsTimed || isOffloaded()) {
+    if (mIsTimed || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
 
     uint32_t afSamplingRate;
-    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
+    if (AudioSystem::getOutputSamplingRateForAttr(&afSamplingRate, &mAttributes) != NO_ERROR) {
         return NO_INIT;
     }
     // Resampler implementation limits input sampling rate to 2 x output sampling rate.
@@ -646,7 +667,7 @@
     // sample rate can be updated during playback by the offloaded decoder so we need to
     // query the HAL and update if needed.
 // FIXME use Proxy return channel to update the rate from server and avoid polling here
-    if (isOffloaded_l()) {
+    if (isOffloadedOrDirect_l()) {
         if (mOutput != AUDIO_IO_HANDLE_NONE) {
             uint32_t sampleRate = 0;
             status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
@@ -660,7 +681,7 @@
 
 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
 {
-    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
+    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
 
@@ -694,7 +715,7 @@
 status_t AudioTrack::setMarkerPosition(uint32_t marker)
 {
     // The only purpose of setting marker position is to get a callback
-    if (mCbf == NULL || isOffloaded()) {
+    if (mCbf == NULL || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
 
@@ -707,7 +728,7 @@
 
 status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
 {
-    if (isOffloaded()) {
+    if (isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
     if (marker == NULL) {
@@ -723,7 +744,7 @@
 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
     // The only purpose of setting position update period is to get a callback
-    if (mCbf == NULL || isOffloaded()) {
+    if (mCbf == NULL || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
 
@@ -736,7 +757,7 @@
 
 status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
 {
-    if (isOffloaded()) {
+    if (isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
     if (updatePeriod == NULL) {
@@ -751,7 +772,7 @@
 
 status_t AudioTrack::setPosition(uint32_t position)
 {
-    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
+    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
     if (position > mFrameCount) {
@@ -784,10 +805,10 @@
     }
 
     AutoMutex lock(mLock);
-    if (isOffloaded_l()) {
+    if (isOffloadedOrDirect_l()) {
         uint32_t dspFrames = 0;
 
-        if ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING)) {
+        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
             ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
             *position = mPausedPosition;
             return NO_ERROR;
@@ -822,7 +843,7 @@
 
 status_t AudioTrack::reload()
 {
-    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
+    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
         return INVALID_OPERATION;
     }
 
@@ -867,12 +888,12 @@
         return NO_INIT;
     }
 
-    audio_io_handle_t output = AudioSystem::getOutput(mStreamType, mSampleRate, mFormat,
+    audio_io_handle_t output = AudioSystem::getOutputForAttr(&mAttributes, mSampleRate, mFormat,
             mChannelMask, mFlags, mOffloadInfo);
     if (output == AUDIO_IO_HANDLE_NONE) {
-        ALOGE("Could not get audio output for stream type %d, sample rate %u, format %#x, "
-              "channel mask %#x, flags %#x",
-              mStreamType, mSampleRate, mFormat, mChannelMask, mFlags);
+        ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
+              " channel mask %#x, flags %#x",
+              mStreamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
         return BAD_VALUE;
     }
     {
@@ -973,14 +994,14 @@
 
         // Ensure that buffer depth covers at least audio hardware latency
         uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
-        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
+        ALOGV("afFrameCount=%zu, minBufCount=%d, afSampleRate=%u, afLatency=%d",
                 afFrameCount, minBufCount, afSampleRate, afLatency);
         if (minBufCount <= nBuffering) {
             minBufCount = nBuffering;
         }
 
         size_t minFrameCount = (afFrameCount*mSampleRate*minBufCount)/afSampleRate;
-        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
+        ALOGV("minFrameCount: %zu, afFrameCount=%zu, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
                 ", afLatency=%d",
                 minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
 
@@ -988,7 +1009,7 @@
             frameCount = minFrameCount;
         } else if (frameCount < minFrameCount) {
             // not ALOGW because it happens all the time when playing key clicks over A2DP
-            ALOGV("Minimum buffer size corrected from %d to %d",
+            ALOGV("Minimum buffer size corrected from %zu to %zu",
                      frameCount, minFrameCount);
             frameCount = minFrameCount;
         }
@@ -1018,6 +1039,10 @@
         trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
     }
 
+    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
+        trackFlags |= IAudioFlinger::TRACK_DIRECT;
+    }
+
     size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                 // but we will still need the original value also
     sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType,
@@ -1071,14 +1096,14 @@
         // In current design, AudioTrack client checks and ensures frame count validity before
         // passing it to AudioFlinger so AudioFlinger should not return a different value except
         // for fast track as it uses a special method of assigning frame count.
-        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
+        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
     }
     frameCount = temp;
 
     mAwaitBoost = false;
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         if (trackFlags & IAudioFlinger::TRACK_FAST) {
-            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
+            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
             mAwaitBoost = true;
             if (mSharedBuffer == 0) {
                 // Theoretically double-buffering is not required for fast tracks,
@@ -1089,7 +1114,7 @@
                 }
             }
         } else {
-            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
+            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
             // once denied, do not request again if IAudioTrack is re-created
             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
             if (mSharedBuffer == 0) {
@@ -1109,6 +1134,16 @@
             //return NO_INIT;
         }
     }
+    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
+        if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
+            ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
+        } else {
+            ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
+            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
+            // FIXME This is a warning, not an error, so don't return error status
+            //return NO_INIT;
+        }
+    }
 
     // We retain a copy of the I/O handle, but don't own the reference
     mOutput = output;
@@ -1304,6 +1339,16 @@
         return INVALID_OPERATION;
     }
 
+    if (isDirect()) {
+        AutoMutex lock(mLock);
+        int32_t flags = android_atomic_and(
+                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
+                            &mCblk->mFlags);
+        if (flags & CBLK_INVALID) {
+            return DEAD_OBJECT;
+        }
+    }
+
     if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
         // Sanity-check: user is most-likely passing an error code, and it would
         // make the return value ambiguous (actualSize vs error).
@@ -1452,7 +1497,7 @@
         // for offloaded tracks restoreTrack_l() will just update the sequence and clear
         // AudioSystem cache. We should not exit here but after calling the callback so
         // that the upper layers can recreate the track
-        if (!isOffloaded_l() || (mSequence == mObservedSequence)) {
+        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
             status_t status = restoreTrack_l("processAudioBuffer");
             mLock.unlock();
             // Run again immediately, but with a new IAudioTrack
@@ -1578,7 +1623,7 @@
         mObservedSequence = sequence;
         mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
         // for offloaded tracks, just wait for the upper layers to recreate the track
-        if (isOffloaded()) {
+        if (isOffloadedOrDirect()) {
             return NS_INACTIVE;
         }
     }
@@ -1636,10 +1681,10 @@
         size_t nonContig;
         status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
         LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
-                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
+                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
         requested = &ClientProxy::kNonBlocking;
         size_t avail = audioBuffer.frameCount + nonContig;
-        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
+        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
                 mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
         if (err != NO_ERROR) {
             if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
@@ -1674,8 +1719,8 @@
 
         // Sanity check on returned size
         if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
-            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
-                    reqSize, (int) writtenSize);
+            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
+                    reqSize, ssize_t(writtenSize));
             return NS_NEVER;
         }
 
@@ -1736,7 +1781,7 @@
 status_t AudioTrack::restoreTrack_l(const char *from)
 {
     ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
-          isOffloaded_l() ? "Offloaded" : "PCM", from);
+          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
     ++mSequence;
     status_t result;
 
@@ -1744,7 +1789,7 @@
     // output parameters in createTrack_l()
     AudioSystem::clearAudioConfigCache();
 
-    if (isOffloaded_l()) {
+    if (isOffloadedOrDirect_l()) {
         // FIXME re-creation of offloaded tracks is not yet implemented
         return DEAD_OBJECT;
     }
@@ -1830,6 +1875,19 @@
     return isOffloaded_l();
 }
 
+bool AudioTrack::isDirect() const
+{
+    AutoMutex lock(mLock);
+    return isDirect_l();
+}
+
+bool AudioTrack::isOffloadedOrDirect() const
+{
+    AutoMutex lock(mLock);
+    return isOffloadedOrDirect_l();
+}
+
+
 status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
 {
 
@@ -1858,6 +1916,136 @@
     return mProxy->getUnderrunFrames();
 }
 
+void AudioTrack::setAttributesFromStreamType(audio_stream_type_t streamType) {
+    mAttributes.flags = 0x0;
+
+    switch(streamType) {
+    case AUDIO_STREAM_DEFAULT:
+    case AUDIO_STREAM_MUSIC:
+        mAttributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
+        mAttributes.usage = AUDIO_USAGE_MEDIA;
+        break;
+    case AUDIO_STREAM_VOICE_CALL:
+        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
+        break;
+    case AUDIO_STREAM_ENFORCED_AUDIBLE:
+        mAttributes.flags  |= AUDIO_FLAG_AUDIBILITY_ENFORCED;
+        // intended fall through, attributes in common with STREAM_SYSTEM
+    case AUDIO_STREAM_SYSTEM:
+        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+        mAttributes.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
+        break;
+    case AUDIO_STREAM_RING:
+        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+        mAttributes.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
+        break;
+    case AUDIO_STREAM_ALARM:
+        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+        mAttributes.usage = AUDIO_USAGE_ALARM;
+        break;
+    case AUDIO_STREAM_NOTIFICATION:
+        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+        mAttributes.usage = AUDIO_USAGE_NOTIFICATION;
+        break;
+    case AUDIO_STREAM_BLUETOOTH_SCO:
+        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
+        mAttributes.flags |= AUDIO_FLAG_SCO;
+        break;
+    case AUDIO_STREAM_DTMF:
+        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
+        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
+        break;
+    case AUDIO_STREAM_TTS:
+        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
+        mAttributes.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
+        break;
+    default:
+        ALOGE("invalid stream type %d when converting to attributes", streamType);
+    }
+}
+
+void AudioTrack::setStreamTypeFromAttributes(audio_attributes_t& aa) {
+    // flags to stream type mapping
+    if ((aa.flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
+        mStreamType = AUDIO_STREAM_ENFORCED_AUDIBLE;
+        return;
+    }
+    if ((aa.flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
+        mStreamType = AUDIO_STREAM_BLUETOOTH_SCO;
+        return;
+    }
+
+    // usage to stream type mapping
+    switch (aa.usage) {
+    case AUDIO_USAGE_MEDIA:
+    case AUDIO_USAGE_GAME:
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+        mStreamType = AUDIO_STREAM_MUSIC;
+        return;
+    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+        mStreamType = AUDIO_STREAM_SYSTEM;
+        return;
+    case AUDIO_USAGE_VOICE_COMMUNICATION:
+        mStreamType = AUDIO_STREAM_VOICE_CALL;
+        return;
+
+    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+        mStreamType = AUDIO_STREAM_DTMF;
+        return;
+
+    case AUDIO_USAGE_ALARM:
+        mStreamType = AUDIO_STREAM_ALARM;
+        return;
+    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+        mStreamType = AUDIO_STREAM_RING;
+        return;
+
+    case AUDIO_USAGE_NOTIFICATION:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+    case AUDIO_USAGE_NOTIFICATION_EVENT:
+        mStreamType = AUDIO_STREAM_NOTIFICATION;
+        return;
+
+    case AUDIO_USAGE_UNKNOWN:
+    default:
+        mStreamType = AUDIO_STREAM_MUSIC;
+    }
+}
+
+bool AudioTrack::isValidAttributes(const audio_attributes_t *paa) {
+    // has flags that map to a strategy?
+    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO)) != 0) {
+        return true;
+    }
+
+    // has known usage?
+    switch (paa->usage) {
+    case AUDIO_USAGE_UNKNOWN:
+    case AUDIO_USAGE_MEDIA:
+    case AUDIO_USAGE_VOICE_COMMUNICATION:
+    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+    case AUDIO_USAGE_ALARM:
+    case AUDIO_USAGE_NOTIFICATION:
+    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+    case AUDIO_USAGE_NOTIFICATION_EVENT:
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+    case AUDIO_USAGE_GAME:
+        break;
+    default:
+        return false;
+    }
+    return true;
+}
 // =========================================================================
 
 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
@@ -1918,7 +2106,7 @@
         ns = 1000000000LL;
         // fall through
     default:
-        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
+        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
         pauseInternal(ns);
         return true;
     }
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 0dbfa62..eec025e 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -135,7 +135,7 @@
         // pipe should not be overfull
         if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
             if (mIsOut) {
-                ALOGE("Shared memory control block is corrupt (filled=%d, mFrameCount=%u); "
+                ALOGE("Shared memory control block is corrupt (filled=%zd, mFrameCount=%zu); "
                         "shutting down", filled, mFrameCount);
                 mIsShutdown = true;
                 status = NO_INIT;
@@ -338,7 +338,7 @@
     ssize_t filled = rear - front;
     // pipe should not be overfull
     if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
-        ALOGE("Shared memory control block is corrupt (filled=%d); shutting down", filled);
+        ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled);
         return 0;
     }
     return (size_t)filled;
@@ -555,7 +555,7 @@
     ssize_t filled = rear - front;
     // pipe should not already be overfull
     if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
-        ALOGE("Shared memory control block is corrupt (filled=%d); shutting down", filled);
+        ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled);
         mIsShutdown = true;
     }
     if (mIsShutdown) {
@@ -642,7 +642,7 @@
     }
     // FIXME AudioRecord wakeup needs to be optimized; it currently wakes up client every time
     if (!mIsOut || (mAvailToClient + stepCount >= minimum)) {
-        ALOGV("mAvailToClient=%u stepCount=%u minimum=%u", mAvailToClient, stepCount, minimum);
+        ALOGV("mAvailToClient=%zu stepCount=%zu minimum=%zu", mAvailToClient, stepCount, minimum);
         int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
         if (!(old & CBLK_FUTEX_WAKE)) {
             (void) syscall(__NR_futex, &cblk->mFutex,
@@ -675,7 +675,7 @@
     ssize_t filled = rear - cblk->u.mStreaming.mFront;
     // pipe should not already be overfull
     if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
-        ALOGE("Shared memory control block is corrupt (filled=%d); shutting down", filled);
+        ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled);
         mIsShutdown = true;
         return 0;
     }
@@ -834,7 +834,7 @@
     size_t newPosition = position + stepCount;
     int32_t setFlags = 0;
     if (!(position <= newPosition && newPosition <= mFrameCount)) {
-        ALOGW("%s newPosition %u outside [%u, %u]", __func__, newPosition, position, mFrameCount);
+        ALOGW("%s newPosition %zu outside [%zu, %zu]", __func__, newPosition, position, mFrameCount);
         newPosition = mFrameCount;
     } else if (mState.mLoopCount != 0 && newPosition == mState.mLoopEnd) {
         if (mState.mLoopCount == -1 || --mState.mLoopCount != 0) {
diff --git a/media/libmedia/CharacterEncodingDetector.cpp b/media/libmedia/CharacterEncodingDetector.cpp
index 4992798..7d1ddfd 100644
--- a/media/libmedia/CharacterEncodingDetector.cpp
+++ b/media/libmedia/CharacterEncodingDetector.cpp
@@ -112,7 +112,7 @@
         if (allprintable) {
             // since 'buf' is empty, ICU would return a UTF-8 matcher with low confidence, so
             // no need to even call it
-            ALOGV("all tags are printable, assuming ascii (%d)", strlen(buf));
+            ALOGV("all tags are printable, assuming ascii (%zu)", strlen(buf));
         } else {
             ucsdet_setText(csd, buf, strlen(buf), &status);
             int32_t matches;
@@ -267,11 +267,11 @@
     Vector<const UCharsetMatch*> matches;
     UErrorCode status = U_ZERO_ERROR;
 
-    ALOGV("%d matches", nummatches);
+    ALOGV("%zu matches", nummatches);
     for (size_t i = 0; i < nummatches; i++) {
         const char *encname = ucsdet_getName(ucma[i], &status);
         int confidence = ucsdet_getConfidence(ucma[i], &status);
-        ALOGV("%d: %s %d", i, encname, confidence);
+        ALOGV("%zu: %s %d", i, encname, confidence);
         matches.push_back(ucma[i]);
     }
 
@@ -287,7 +287,7 @@
         return matches[0];
     }
 
-    ALOGV("considering %d matches", num);
+    ALOGV("considering %zu matches", num);
 
     // keep track of how many "special" characters result when converting the input using each
     // encoding
@@ -315,7 +315,7 @@
             freqcoverage = frequent_ja_coverage;
         }
 
-        ALOGV("%d: %s %d", i, encname, confidence);
+        ALOGV("%zu: %s %d", i, encname, confidence);
         UConverter *conv = ucnv_open(encname, &status);
         const char *source = input;
         const char *sourceLimit = input + len;
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 77d131b..41a9065 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -64,7 +64,8 @@
     RELEASE_AUDIO_PATCH,
     LIST_AUDIO_PATCHES,
     SET_AUDIO_PORT_CONFIG,
-    REGISTER_CLIENT
+    REGISTER_CLIENT,
+    GET_OUTPUT_FOR_ATTR
 };
 
 class BpAudioPolicyService : public BpInterface<IAudioPolicyService>
@@ -155,6 +156,36 @@
         return static_cast <audio_io_handle_t> (reply.readInt32());
     }
 
+    virtual audio_io_handle_t getOutputForAttr(
+                                            const audio_attributes_t *attr,
+                                            uint32_t samplingRate,
+                                            audio_format_t format,
+                                            audio_channel_mask_t channelMask,
+                                            audio_output_flags_t flags,
+                                            const audio_offload_info_t *offloadInfo)
+        {
+            Parcel data, reply;
+            data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+            if (attr == NULL) {
+                ALOGE("Writing NULL audio attributes - shouldn't happen");
+                return (audio_io_handle_t) 0;
+            }
+            data.write(attr, sizeof(audio_attributes_t));
+            data.writeInt32(samplingRate);
+            data.writeInt32(static_cast <uint32_t>(format));
+            data.writeInt32(channelMask);
+            data.writeInt32(static_cast <uint32_t>(flags));
+            // hasOffloadInfo
+            if (offloadInfo == NULL) {
+                data.writeInt32(0);
+            } else {
+                data.writeInt32(1);
+                data.write(offloadInfo, sizeof(audio_offload_info_t));
+            }
+            remote()->transact(GET_OUTPUT_FOR_ATTR, data, &reply);
+            return static_cast <audio_io_handle_t> (reply.readInt32());
+        }
+
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
                                  int session)
@@ -614,6 +645,30 @@
             return NO_ERROR;
         } break;
 
+        case GET_OUTPUT_FOR_ATTR: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_attributes_t attr;
+            data.read(&attr, sizeof(audio_attributes_t));
+            uint32_t samplingRate = data.readInt32();
+            audio_format_t format = (audio_format_t) data.readInt32();
+            audio_channel_mask_t channelMask = data.readInt32();
+            audio_output_flags_t flags =
+                    static_cast <audio_output_flags_t>(data.readInt32());
+            bool hasOffloadInfo = data.readInt32() != 0;
+            audio_offload_info_t offloadInfo;
+            if (hasOffloadInfo) {
+                data.read(&offloadInfo, sizeof(audio_offload_info_t));
+            }
+            audio_io_handle_t output = getOutputForAttr(&attr,
+                    samplingRate,
+                    format,
+                    channelMask,
+                    flags,
+                    hasOffloadInfo ? &offloadInfo : NULL);
+            reply->writeInt32(static_cast <int>(output));
+            return NO_ERROR;
+        } break;
+
         case START_OUTPUT: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index 432d890..38f717c 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -15,8 +15,10 @@
 ** limitations under the License.
 */
 
+#include <inttypes.h>
 #include <stdint.h>
 #include <sys/types.h>
+
 #include <binder/Parcel.h>
 #include <media/IMediaHTTPService.h>
 #include <media/IMediaMetadataRetriever.h>
@@ -125,7 +127,7 @@
 
     sp<IMemory> getFrameAtTime(int64_t timeUs, int option)
     {
-        ALOGV("getTimeAtTime: time(%lld us) and option(%d)", timeUs, option);
+        ALOGV("getTimeAtTime: time(%" PRId64 " us) and option(%d)", timeUs, option);
         Parcel data, reply;
         data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
         data.writeInt64(timeUs);
@@ -237,7 +239,7 @@
             CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
             int64_t timeUs = data.readInt64();
             int option = data.readInt32();
-            ALOGV("getTimeAtTime: time(%lld us) and option(%d)", timeUs, option);
+            ALOGV("getTimeAtTime: time(%" PRId64 " us) and option(%d)", timeUs, option);
 #ifndef DISABLE_GROUP_SCHEDULE_HACK
             setSchedPolicy(data);
 #endif
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index 8e58162..95af006 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -17,6 +17,10 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "IMediaRecorder"
+
+#include <inttypes.h>
+#include <unistd.h>
+
 #include <utils/Log.h>
 #include <binder/Parcel.h>
 #include <camera/ICamera.h>
@@ -24,8 +28,6 @@
 #include <media/IMediaRecorder.h>
 #include <gui/Surface.h>
 #include <gui/IGraphicBufferProducer.h>
-#include <unistd.h>
-
 
 namespace android {
 
@@ -167,7 +169,7 @@
     }
 
     status_t setOutputFile(int fd, int64_t offset, int64_t length) {
-        ALOGV("setOutputFile(%d, %lld, %lld)", fd, offset, length);
+        ALOGV("setOutputFile(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
         Parcel data, reply;
         data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
         data.writeFileDescriptor(fd);
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index 28238c4..e9e453b 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -475,7 +475,7 @@
 }
 
 void MediaProfiles::initRequiredProfileRefs(const Vector<int>& cameraIds) {
-    ALOGV("Number of camera ids: %d", cameraIds.size());
+    ALOGV("Number of camera ids: %zu", cameraIds.size());
     CHECK(cameraIds.size() > 0);
     mRequiredProfileRefs = new RequiredProfiles[cameraIds.size()];
     for (size_t i = 0, n = cameraIds.size(); i < n; ++i) {
@@ -602,14 +602,14 @@
 
                 int index = getCamcorderProfileIndex(cameraId, profile->mQuality);
                 if (index != -1) {
-                    ALOGV("Profile quality %d for camera %d already exists",
+                    ALOGV("Profile quality %d for camera %zu already exists",
                         profile->mQuality, cameraId);
                     CHECK(index == refIndex);
                     continue;
                 }
 
                 // Insert the new profile
-                ALOGV("Add a profile: quality %d=>%d for camera %d",
+                ALOGV("Add a profile: quality %d=>%d for camera %zu",
                         mCamcorderProfiles[info->mRefProfileIndex]->mQuality,
                         profile->mQuality, cameraId);
 
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index a55e09c..2aa0592 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -16,6 +16,9 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "SoundPool"
+
+#include <inttypes.h>
+
 #include <utils/Log.h>
 
 #define USE_SHARED_MEM_BUFFER
@@ -212,7 +215,7 @@
 
 int SoundPool::load(int fd, int64_t offset, int64_t length, int priority __unused)
 {
-    ALOGV("load: fd=%d, offset=%lld, length=%lld, priority=%d",
+    ALOGV("load: fd=%d, offset=%" PRId64 ", length=%" PRId64 ", priority=%d",
             fd, offset, length, priority);
     Mutex::Autolock lock(&mLock);
     sp<Sample> sample = new Sample(++mNextSampleID, fd, offset, length);
@@ -462,7 +465,8 @@
     mFd = dup(fd);
     mOffset = offset;
     mLength = length;
-    ALOGV("create sampleID=%d, fd=%d, offset=%lld, length=%lld", mSampleID, mFd, mLength, mOffset);
+    ALOGV("create sampleID=%d, fd=%d, offset=%" PRId64 " length=%" PRId64,
+        mSampleID, mFd, mLength, mOffset);
 }
 
 void Sample::init()
@@ -516,7 +520,7 @@
         ALOGE("Unable to load sample: %s", mUrl);
         goto error;
     }
-    ALOGV("pointer = %p, size = %u, sampleRate = %u, numChannels = %d",
+    ALOGV("pointer = %p, size = %zu, sampleRate = %u, numChannels = %d",
           mHeap->getBase(), mSize, sampleRate, numChannels);
 
     if (sampleRate > kMaxSampleRate) {
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index 1d6bb6f..39a239d 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -18,6 +18,8 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MediaMetadataRetriever"
 
+#include <inttypes.h>
+
 #include <binder/IServiceManager.h>
 #include <binder/IPCThreadState.h>
 #include <media/mediametadataretriever.h>
@@ -114,7 +116,7 @@
 
 status_t MediaMetadataRetriever::setDataSource(int fd, int64_t offset, int64_t length)
 {
-    ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length);
+    ALOGV("setDataSource(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
     Mutex::Autolock _l(mLock);
     if (mRetriever == 0) {
         ALOGE("retriever is not initialized");
@@ -129,7 +131,7 @@
 
 sp<IMemory> MediaMetadataRetriever::getFrameAtTime(int64_t timeUs, int option)
 {
-    ALOGV("getFrameAtTime: time(%lld us) option(%d)", timeUs, option);
+    ALOGV("getFrameAtTime: time(%" PRId64 " us) option(%d)", timeUs, option);
     Mutex::Autolock _l(mLock);
     if (mRetriever == 0) {
         ALOGE("retriever is not initialized");
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 0be01a9..406f9f2 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -17,12 +17,14 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MediaPlayer"
-#include <utils/Log.h>
 
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
 #include <fcntl.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <utils/Log.h>
 
 #include <binder/IServiceManager.h>
 #include <binder/IPCThreadState.h>
@@ -157,7 +159,7 @@
 
 status_t MediaPlayer::setDataSource(int fd, int64_t offset, int64_t length)
 {
-    ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length);
+    ALOGV("setDataSource(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
     status_t err = UNKNOWN_ERROR;
     const sp<IMediaPlayerService>& service(getMediaPlayerService());
     if (service != 0) {
@@ -194,7 +196,7 @@
             (mCurrentState != MEDIA_PLAYER_STATE_ERROR) &&
             ((mCurrentState & MEDIA_PLAYER_IDLE) != MEDIA_PLAYER_IDLE);
     if ((mPlayer != NULL) && hasBeenInitialized) {
-        ALOGV("invoke %d", request.dataSize());
+        ALOGV("invoke %zu", request.dataSize());
         return  mPlayer->invoke(request, reply);
     }
     ALOGE("invoke failed: wrong state %X", mCurrentState);
@@ -818,7 +820,7 @@
                                         audio_format_t* pFormat,
                                         const sp<IMemoryHeap>& heap, size_t *pSize)
 {
-    ALOGV("decode(%d, %lld, %lld)", fd, offset, length);
+    ALOGV("decode(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
     status_t status;
     const sp<IMediaPlayerService>& service = getMediaPlayerService();
     if (service != 0) {
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 3710e46..c8192e9 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -17,6 +17,9 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MediaRecorder"
+
+#include <inttypes.h>
+
 #include <utils/Log.h>
 #include <media/mediarecorder.h>
 #include <binder/IServiceManager.h>
@@ -286,7 +289,7 @@
 
 status_t MediaRecorder::setOutputFile(int fd, int64_t offset, int64_t length)
 {
-    ALOGV("setOutputFile(%d, %lld, %lld)", fd, offset, length);
+    ALOGV("setOutputFile(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
     if (mMediaRecorder == NULL) {
         ALOGE("media recorder is not initialized yet");
         return INVALID_OPERATION;
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index 9b239b1..e9c5e8e 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -176,11 +176,11 @@
 class StagefrightPlayerFactory :
     public MediaPlayerFactory::IFactory {
   public:
-    virtual float scoreFactory(const sp<IMediaPlayer>& client,
+    virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
                                int fd,
                                int64_t offset,
-                               int64_t length,
-                               float curScore) {
+                               int64_t /*length*/,
+                               float /*curScore*/) {
         char buf[20];
         lseek(fd, offset, SEEK_SET);
         read(fd, buf, sizeof(buf));
@@ -203,7 +203,7 @@
 
 class NuPlayerFactory : public MediaPlayerFactory::IFactory {
   public:
-    virtual float scoreFactory(const sp<IMediaPlayer>& client,
+    virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
                                const char* url,
                                float curScore) {
         static const float kOurScore = 0.8;
@@ -235,9 +235,9 @@
         return 0.0;
     }
 
-    virtual float scoreFactory(const sp<IMediaPlayer>& client,
-                               const sp<IStreamSource> &source,
-                               float curScore) {
+    virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
+                               const sp<IStreamSource>& /*source*/,
+                               float /*curScore*/) {
         return 1.0;
     }
 
@@ -249,7 +249,7 @@
 
 class SonivoxPlayerFactory : public MediaPlayerFactory::IFactory {
   public:
-    virtual float scoreFactory(const sp<IMediaPlayer>& client,
+    virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
                                const char* url,
                                float curScore) {
         static const float kOurScore = 0.4;
@@ -280,7 +280,7 @@
         return 0.0;
     }
 
-    virtual float scoreFactory(const sp<IMediaPlayer>& client,
+    virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
                                int fd,
                                int64_t offset,
                                int64_t length,
@@ -318,9 +318,9 @@
 
 class TestPlayerFactory : public MediaPlayerFactory::IFactory {
   public:
-    virtual float scoreFactory(const sp<IMediaPlayer>& client,
+    virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
                                const char* url,
-                               float curScore) {
+                               float /*curScore*/) {
         if (TestPlayerStub::canBeUsed(url)) {
             return 1.0;
         }
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.h b/media/libmediaplayerservice/MediaPlayerFactory.h
index fe8972b..5ddde19 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.h
+++ b/media/libmediaplayerservice/MediaPlayerFactory.h
@@ -29,19 +29,19 @@
       public:
         virtual ~IFactory() { }
 
-        virtual float scoreFactory(const sp<IMediaPlayer>& client,
-                                   const char* url,
-                                   float curScore) { return 0.0; }
+        virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
+                                   const char* /*url*/,
+                                   float /*curScore*/) { return 0.0; }
 
-        virtual float scoreFactory(const sp<IMediaPlayer>& client,
-                                   int fd,
-                                   int64_t offset,
-                                   int64_t length,
-                                   float curScore) { return 0.0; }
+        virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
+                                   int /*fd*/,
+                                   int64_t /*offset*/,
+                                   int64_t /*length*/,
+                                   float /*curScore*/) { return 0.0; }
 
-        virtual float scoreFactory(const sp<IMediaPlayer>& client,
-                                   const sp<IStreamSource> &source,
-                                   float curScore) { return 0.0; }
+        virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
+                                   const sp<IStreamSource> &/*source*/,
+                                   float /*curScore*/) { return 0.0; }
 
         virtual sp<MediaPlayerBase> createPlayer() = 0;
     };
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 778eb9a..76632a7 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -307,7 +307,7 @@
     return new RemoteDisplay(client, iface.string());
 }
 
-status_t MediaPlayerService::AudioCache::dump(int fd, const Vector<String16>& args) const
+status_t MediaPlayerService::AudioCache::dump(int fd, const Vector<String16>& /*args*/) const
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
@@ -673,8 +673,8 @@
 
     ALOGV("st_dev  = %llu", sb.st_dev);
     ALOGV("st_mode = %u", sb.st_mode);
-    ALOGV("st_uid  = %lu", sb.st_uid);
-    ALOGV("st_gid  = %lu", sb.st_gid);
+    ALOGV("st_uid  = %lu", static_cast<unsigned long>(sb.st_uid));
+    ALOGV("st_gid  = %lu", static_cast<unsigned long>(sb.st_gid));
     ALOGV("st_size = %llu", sb.st_size);
 
     if (offset >= sb.st_size) {
@@ -803,7 +803,7 @@
 }
 
 status_t MediaPlayerService::Client::getMetadata(
-        bool update_only, bool apply_filter, Parcel *reply)
+        bool update_only, bool /*apply_filter*/, Parcel *reply)
 {
     sp<MediaPlayerBase> player = getPlayer();
     if (player == 0) return UNKNOWN_ERROR;
@@ -1926,8 +1926,8 @@
 status_t MediaPlayerService::AudioCache::open(
         uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
         audio_format_t format, int bufferCount,
-        AudioCallback cb, void *cookie, audio_output_flags_t flags,
-        const audio_offload_info_t *offloadInfo)
+        AudioCallback cb, void *cookie, audio_output_flags_t /*flags*/,
+        const audio_offload_info_t* /*offloadInfo*/)
 {
     ALOGV("open(%u, %d, 0x%x, %d, %d)", sampleRate, channelCount, channelMask, format, bufferCount);
     if (mHeap->getHeapID() < 0) {
@@ -1994,7 +1994,7 @@
 }
 
 void MediaPlayerService::AudioCache::notify(
-        void* cookie, int msg, int ext1, int ext2, const Parcel *obj)
+        void* cookie, int msg, int ext1, int ext2, const Parcel* /*obj*/)
 {
     ALOGV("notify(%p, %d, %d, %d)", cookie, msg, ext1, ext2);
     AudioCache* p = static_cast<AudioCache*>(cookie);
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index 80c7e0a..fa28451 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -57,7 +57,7 @@
     disconnect();
 }
 
-status_t MetadataRetrieverClient::dump(int fd, const Vector<String16>& args) const
+status_t MetadataRetrieverClient::dump(int fd, const Vector<String16>& /*args*/) const
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
@@ -147,8 +147,8 @@
     }
     ALOGV("st_dev  = %llu", sb.st_dev);
     ALOGV("st_mode = %u", sb.st_mode);
-    ALOGV("st_uid  = %lu", sb.st_uid);
-    ALOGV("st_gid  = %lu", sb.st_gid);
+    ALOGV("st_uid  = %lu", static_cast<unsigned long>(sb.st_uid));
+    ALOGV("st_gid  = %lu", static_cast<unsigned long>(sb.st_gid));
     ALOGV("st_size = %llu", sb.st_size);
 
     if (offset >= sb.st_size) {
diff --git a/media/libmediaplayerservice/MidiFile.cpp b/media/libmediaplayerservice/MidiFile.cpp
index deeddd1..749ef96 100644
--- a/media/libmediaplayerservice/MidiFile.cpp
+++ b/media/libmediaplayerservice/MidiFile.cpp
@@ -114,7 +114,7 @@
 }
 
 status_t MidiFile::setDataSource(
-        const sp<IMediaHTTPService> &httpService,
+        const sp<IMediaHTTPService> & /*httpService*/,
         const char* path,
         const KeyedVector<String8, String8> *) {
     ALOGV("MidiFile::setDataSource url=%s", path);
diff --git a/media/libmediaplayerservice/MidiFile.h b/media/libmediaplayerservice/MidiFile.h
index 12802ba..82e4e88 100644
--- a/media/libmediaplayerservice/MidiFile.h
+++ b/media/libmediaplayerservice/MidiFile.h
@@ -38,7 +38,7 @@
 
     virtual status_t    setDataSource(int fd, int64_t offset, int64_t length);
     virtual status_t    setVideoSurfaceTexture(
-                                const sp<IGraphicBufferProducer>& bufferProducer)
+                                const sp<IGraphicBufferProducer>& /*bufferProducer*/)
                             { return UNKNOWN_ERROR; }
     virtual status_t    prepare();
     virtual status_t    prepareAsync();
@@ -53,13 +53,13 @@
     virtual status_t    reset();
     virtual status_t    setLooping(int loop);
     virtual player_type playerType() { return SONIVOX_PLAYER; }
-    virtual status_t    invoke(const Parcel& request, Parcel *reply) {
+    virtual status_t    invoke(const Parcel& /*request*/, Parcel* /*reply*/) {
         return INVALID_OPERATION;
     }
-    virtual status_t    setParameter(int key, const Parcel &request) {
+    virtual status_t    setParameter(int /*key*/, const Parcel &/*request*/) {
         return INVALID_OPERATION;
     }
-    virtual status_t    getParameter(int key, Parcel *reply) {
+    virtual status_t    getParameter(int /*key*/, Parcel* /*reply*/) {
         return INVALID_OPERATION;
     }
 
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 06aac33..5cf9238 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -67,6 +67,14 @@
 
     CHECK(extractor != NULL);
 
+    sp<MetaData> fileMeta = extractor->getMetaData();
+    if (fileMeta != NULL) {
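+        // Pick up the container-level duration from the extractor's file metadata, if present.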
+        int64_t duration;
+        if (fileMeta->findInt64(kKeyDuration, &duration)) {
+            mDurationUs = duration;
+        }
+    }
+
     for (size_t i = 0; i < extractor->countTracks(); ++i) {
         sp<MetaData> meta = extractor->getTrackMetaData(i);
 
diff --git a/media/libnbaio/MonoPipe.cpp b/media/libnbaio/MonoPipe.cpp
index 4adf018..0b65861 100644
--- a/media/libnbaio/MonoPipe.cpp
+++ b/media/libnbaio/MonoPipe.cpp
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <inttypes.h>
+
 #define LOG_TAG "MonoPipe"
 //#define LOG_NDEBUG 0
 
@@ -87,7 +89,7 @@
     static const uint64_t kUnsignedHiBitsMask = ~(0xFFFFFFFFull);
     if ((N & kSignedHiBitsMask) || (D & kUnsignedHiBitsMask)) {
         ALOGE("Cannot reduce sample rate to local clock frequency ratio to fit"
-              " in a 32/32 bit rational.  (max reduction is 0x%016llx/0x%016llx"
+              " in a 32/32 bit rational.  (max reduction is 0x%016" PRIx64 "/0x%016" PRIx64
               ").  getNextWriteTimestamp calls will be non-functional", N, D);
         return;
     }
@@ -308,7 +310,7 @@
         // error, but then zero out the ratio in the linear transform so
         // that we don't try to do any conversions from now on.  This
         // MonoPipe's getNextWriteTimestamp is now broken for good.
-        ALOGE("Overflow when attempting to convert %d audio frames to"
+        ALOGE("Overflow when attempting to convert %zu audio frames to"
               " duration in local time.  getNextWriteTimestamp will fail from"
               " now on.", audFrames);
         mSamplesToLocalTime.a_to_b_numer = 0;
diff --git a/media/libnbaio/NBAIO.cpp b/media/libnbaio/NBAIO.cpp
index ff3284c..d641e74 100644
--- a/media/libnbaio/NBAIO.cpp
+++ b/media/libnbaio/NBAIO.cpp
@@ -137,7 +137,7 @@
 ssize_t NBAIO_Port::negotiate(const NBAIO_Format offers[], size_t numOffers,
                                   NBAIO_Format counterOffers[], size_t& numCounterOffers)
 {
-    ALOGV("negotiate offers=%p numOffers=%u countersOffers=%p numCounterOffers=%u",
+    ALOGV("negotiate offers=%p numOffers=%zu countersOffers=%p numCounterOffers=%zu",
             offers, numOffers, counterOffers, numCounterOffers);
     if (Format_isValid(mFormat)) {
         for (size_t i = 0; i < numOffers; ++i) {
diff --git a/media/libstagefright/AACWriter.cpp b/media/libstagefright/AACWriter.cpp
index deee8e7..353920e 100644
--- a/media/libstagefright/AACWriter.cpp
+++ b/media/libstagefright/AACWriter.cpp
@@ -14,6 +14,12 @@
  * limitations under the License.
  */
 
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AACWriter"
 #include <utils/Log.h>
@@ -27,10 +33,6 @@
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
 #include <media/mediarecorder.h>
-#include <sys/prctl.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
 
 namespace android {
 
@@ -348,7 +350,7 @@
             mResumed = false;
         }
         timestampUs -= previousPausedDurationUs;
-        ALOGV("time stamp: %lld, previous paused duration: %lld",
+        ALOGV("time stamp: %" PRId64 ", previous paused duration: %" PRId64,
             timestampUs, previousPausedDurationUs);
         if (timestampUs > maxTimestampUs) {
             maxTimestampUs = timestampUs;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 9ab1417..2a583d0 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -2035,6 +2035,10 @@
             err = setupAVCEncoderParameters(msg);
             break;
 
+        case OMX_VIDEO_CodingHEVC:
+            err = setupHEVCEncoderParameters(msg);
+            break;
+
         case OMX_VIDEO_CodingVP8:
         case OMX_VIDEO_CodingVP9:
             err = setupVPXEncoderParameters(msg);
@@ -2371,6 +2375,62 @@
     return configureBitrate(bitrate, bitrateMode);
 }
 
+status_t ACodec::setupHEVCEncoderParameters(const sp<AMessage> &msg) {
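+    // Set up OMX HEVC encoder parameters: optional profile/level, then bitrate and bitrate mode.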
+    int32_t bitrate, iFrameInterval;
+    if (!msg->findInt32("bitrate", &bitrate)
+            || !msg->findInt32("i-frame-interval", &iFrameInterval)) {
+        return INVALID_OPERATION;
+    }
+
+    OMX_VIDEO_CONTROLRATETYPE bitrateMode = getBitrateMode(msg);
+
+    float frameRate;
+    if (!msg->findFloat("frame-rate", &frameRate)) {
+        int32_t tmp;
+        if (!msg->findInt32("frame-rate", &tmp)) {
+            return INVALID_OPERATION;
+        }
+        frameRate = (float)tmp;
+    }
+
+    OMX_VIDEO_PARAM_HEVCTYPE hevcType;
+    InitOMXParams(&hevcType);
+    hevcType.nPortIndex = kPortIndexOutput;
+
+    status_t err = OK;
+    err = mOMX->getParameter(
+            mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
+    if (err != OK) {
+        return err;
+    }
+
+    int32_t profile;
+    if (msg->findInt32("profile", &profile)) {
+        int32_t level;
+        if (!msg->findInt32("level", &level)) {
+            return INVALID_OPERATION;
+        }
+
+        err = verifySupportForProfileAndLevel(profile, level);
+        if (err != OK) {
+            return err;
+        }
+
+        hevcType.eProfile = static_cast<OMX_VIDEO_HEVCPROFILETYPE>(profile);
+        hevcType.eLevel = static_cast<OMX_VIDEO_HEVCLEVELTYPE>(level);
+    }
+
+    // TODO: Need OMX structure definition for setting iFrameInterval
+
+    err = mOMX->setParameter(
+            mNode, (OMX_INDEXTYPE)OMX_IndexParamVideoHevc, &hevcType, sizeof(hevcType));
+    if (err != OK) {
+        return err;
+    }
+
+    return configureBitrate(bitrate, bitrateMode);
+}
+
 status_t ACodec::setupVPXEncoderParameters(const sp<AMessage> &msg) {
     int32_t bitrate;
     int32_t iFrameInterval = 0;
diff --git a/media/libstagefright/AMRWriter.cpp b/media/libstagefright/AMRWriter.cpp
index 653ca36..9aa7d95 100644
--- a/media/libstagefright/AMRWriter.cpp
+++ b/media/libstagefright/AMRWriter.cpp
@@ -14,6 +14,12 @@
  * limitations under the License.
  */
 
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/AMRWriter.h>
 #include <media/stagefright/MediaBuffer.h>
@@ -22,10 +28,6 @@
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
 #include <media/mediarecorder.h>
-#include <sys/prctl.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
 
 namespace android {
 
@@ -235,7 +237,7 @@
             mResumed = false;
         }
         timestampUs -= previousPausedDurationUs;
-        ALOGV("time stamp: %lld, previous paused duration: %lld",
+        ALOGV("time stamp: %" PRId64 ", previous paused duration: %" PRId64,
                 timestampUs, previousPausedDurationUs);
         if (timestampUs > maxTimestampUs) {
             maxTimestampUs = timestampUs;
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index 2669849..fdac8fc 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <inttypes.h>
+
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AudioPlayer"
 #include <utils/Log.h>
@@ -566,12 +568,12 @@
                             int64_t timeToCompletionUs =
                                 (1000000ll * numFramesPendingPlayout) / mSampleRate;
 
-                            ALOGV("total number of frames played: %lld (%lld us)",
+                            ALOGV("total number of frames played: %" PRId64 " (%lld us)",
                                     (mNumFramesPlayed + numAdditionalFrames),
                                     1000000ll * (mNumFramesPlayed + numAdditionalFrames)
                                         / mSampleRate);
 
-                            ALOGV("%d frames left to play, %lld us (%.2f secs)",
+                            ALOGV("%d frames left to play, %" PRId64 " us (%.2f secs)",
                                  numFramesPendingPlayout,
                                  timeToCompletionUs, timeToCompletionUs / 1E6);
 
@@ -628,7 +630,7 @@
                 mPositionTimeRealUs =
                     ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
                         / mSampleRate;
-                ALOGV("buffer->size() = %d, "
+                ALOGV("buffer->size() = %zu, "
                      "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
                      mInputBuffer->range_length(),
                      mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
@@ -746,7 +748,7 @@
 
     // HAL position is relative to the first buffer we sent at mStartPosUs
     const int64_t renderedDuration = mStartPosUs + playedUs;
-    ALOGV("getOutputPlayPositionUs_l %lld", renderedDuration);
+    ALOGV("getOutputPlayPositionUs_l %" PRId64, renderedDuration);
     return renderedDuration;
 }
 
@@ -758,7 +760,7 @@
             return mSeekTimeUs;
         }
         mPositionTimeRealUs = getOutputPlayPositionUs_l();
-        ALOGV("getMediaTimeUs getOutputPlayPositionUs_l() mPositionTimeRealUs %lld",
+        ALOGV("getMediaTimeUs getOutputPlayPositionUs_l() mPositionTimeRealUs %" PRId64,
               mPositionTimeRealUs);
         return mPositionTimeRealUs;
     }
@@ -796,7 +798,7 @@
 status_t AudioPlayer::seekTo(int64_t time_us) {
     Mutex::Autolock autoLock(mLock);
 
-    ALOGV("seekTo( %lld )", time_us);
+    ALOGV("seekTo( %" PRId64 " )", time_us);
 
     mSeeking = true;
     mPositionTimeRealUs = mPositionTimeMediaUs = -1;
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index d0e0e8e..d9aed01 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -14,6 +14,9 @@
  * limitations under the License.
  */
 
+#include <inttypes.h>
+#include <stdlib.h>
+
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AudioSource"
 #include <utils/Log.h>
@@ -26,7 +29,6 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <cutils/properties.h>
-#include <stdlib.h>
 
 namespace android {
 
@@ -136,7 +138,7 @@
 }
 
 void AudioSource::waitOutstandingEncodingFrames_l() {
-    ALOGV("waitOutstandingEncodingFrames_l: %lld", mNumClientOwnedBuffers);
+    ALOGV("waitOutstandingEncodingFrames_l: %" PRId64, mNumClientOwnedBuffers);
     while (mNumClientOwnedBuffers > 0) {
         mFrameEncodingCompletionCondition.wait(mLock);
     }
@@ -269,7 +271,7 @@
 status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
     int64_t timeUs = systemTime() / 1000ll;
 
-    ALOGV("dataCallbackTimestamp: %lld us", timeUs);
+    ALOGV("dataCallbackTimestamp: %" PRId64 " us", timeUs);
     Mutex::Autolock autoLock(mLock);
     if (!mStarted) {
         ALOGW("Spurious callback from AudioRecord. Drop the audio data.");
@@ -279,7 +281,7 @@
     // Drop retrieved and previously lost audio data.
     if (mNumFramesReceived == 0 && timeUs < mStartTimeUs) {
         (void) mRecord->getInputFramesLost();
-        ALOGV("Drop audio data at %lld/%lld us", timeUs, mStartTimeUs);
+        ALOGV("Drop audio data at %" PRId64 "/%" PRId64 " us", timeUs, mStartTimeUs);
         return OK;
     }
 
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index d679be1..63799e1 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -19,7 +19,9 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AwesomePlayer"
 #define ATRACE_TAG ATRACE_TAG_VIDEO
+
 #include <inttypes.h>
+
 #include <utils/Log.h>
 #include <utils/Trace.h>
 
@@ -408,6 +410,13 @@
 
         totalBitRate += bitrate;
     }
+    sp<MetaData> fileMeta = mExtractor->getMetaData();
+    if (fileMeta != NULL) {
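+        // Use the container-level duration from the file metadata when the extractor provides one.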
+        int64_t duration;
+        if (fileMeta->findInt64(kKeyDuration, &duration)) {
+            mDurationUs = duration;
+        }
+    }
 
     mBitrate = totalBitRate;
 
@@ -1708,7 +1717,7 @@
     }
 
     if (mAudioPlayer != NULL) {
-        ALOGV("seeking audio to %lld us (%.2f secs).", videoTimeUs, videoTimeUs / 1E6);
+        ALOGV("seeking audio to %" PRId64 " us (%.2f secs).", videoTimeUs, videoTimeUs / 1E6);
 
         // If we don't have a video time, seek audio to the originally
         // requested seek time instead.
@@ -1772,7 +1781,7 @@
     if (!mVideoBuffer) {
         MediaSource::ReadOptions options;
         if (mSeeking != NO_SEEK) {
-            ALOGV("seeking to %lld us (%.2f secs)", mSeekTimeUs, mSeekTimeUs / 1E6);
+            ALOGV("seeking to %" PRId64 " us (%.2f secs)", mSeekTimeUs, mSeekTimeUs / 1E6);
 
             options.setSeekTo(
                     mSeekTimeUs,
@@ -1842,7 +1851,7 @@
 
     if (mSeeking == SEEK_VIDEO_ONLY) {
         if (mSeekTimeUs > timeUs) {
-            ALOGI("XXX mSeekTimeUs = %lld us, timeUs = %lld us",
+            ALOGI("XXX mSeekTimeUs = %" PRId64 " us, timeUs = %" PRId64 " us",
                  mSeekTimeUs, timeUs);
         }
     }
@@ -1940,13 +1949,13 @@
 
         if (latenessUs > 40000) {
             // We're more than 40ms late.
-            ALOGV("we're late by %lld us (%.2f secs)",
+            ALOGV("we're late by %" PRId64 " us (%.2f secs)",
                  latenessUs, latenessUs / 1E6);
 
             if (!(mFlags & SLOW_DECODER_HACK)
                     || mSinceLastDropped > FRAME_DROP_FREQ)
             {
-                ALOGV("we're late by %lld us (%.2f secs) dropping "
+                ALOGV("we're late by %" PRId64 " us (%.2f secs) dropping "
                      "one after %d frames",
                      latenessUs, latenessUs / 1E6, mSinceLastDropped);
 
@@ -2308,12 +2317,12 @@
 
                     if (finalStatus != OK
                             || (metaDataSize >= 0
-                                && cachedDataRemaining >= metaDataSize)
+                                && (off64_t)cachedDataRemaining >= metaDataSize)
                             || (mFlags & PREPARE_CANCELLED)) {
                         break;
                     }
 
-                    ALOGV("now cached %d bytes of data", cachedDataRemaining);
+                    ALOGV("now cached %zu bytes of data", cachedDataRemaining);
 
                     if (metaDataSize < 0
                             && cachedDataRemaining >= kMinBytesForSniffing) {
@@ -2692,7 +2701,7 @@
 
 status_t AwesomePlayer::selectTrack(size_t trackIndex, bool select) {
     ATRACE_CALL();
-    ALOGV("selectTrack: trackIndex = %d and select=%d", trackIndex, select);
+    ALOGV("selectTrack: trackIndex = %zu and select=%d", trackIndex, select);
     Mutex::Autolock autoLock(mLock);
     size_t trackCount = mExtractor->countTracks();
     if (mTextDriver != NULL) {
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index b31e9e8..2b50763 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <inttypes.h>
+
 //#define LOG_NDEBUG 0
 #define LOG_TAG "CameraSource"
 #include <utils/Log.h>
@@ -77,7 +79,7 @@
 
 void CameraSourceListener::postData(int32_t msgType, const sp<IMemory> &dataPtr,
                                     camera_frame_metadata_t * /* metadata */) {
-    ALOGV("postData(%d, ptr:%p, size:%d)",
+    ALOGV("postData(%d, ptr:%p, size:%zu)",
          msgType, dataPtr->pointer(), dataPtr->size());
 
     sp<CameraSource> source = mSource.promote();
@@ -711,7 +713,7 @@
         if (NO_ERROR !=
             mFrameCompleteCondition.waitRelative(mLock,
                     mTimeBetweenFrameCaptureUs * 1000LL + CAMERA_SOURCE_TIMEOUT_NS)) {
-            ALOGW("Timed out waiting for outstanding frames being encoded: %d",
+            ALOGW("Timed out waiting for outstanding frames being encoded: %zu",
                 mFramesBeingEncoded.size());
         }
     }
@@ -722,7 +724,7 @@
     }
 
     if (mCollectStats) {
-        ALOGI("Frames received/encoded/dropped: %d/%d/%d in %lld us",
+        ALOGI("Frames received/encoded/dropped: %d/%d/%d in %" PRId64 " us",
                 mNumFramesReceived, mNumFramesEncoded, mNumFramesDropped,
                 mLastFrameTimestampUs - mFirstFrameTimeUs);
     }
@@ -809,7 +811,7 @@
                     ALOGW("camera recording proxy is gone");
                     return ERROR_END_OF_STREAM;
                 }
-                ALOGW("Timed out waiting for incoming camera video frames: %lld us",
+                ALOGW("Timed out waiting for incoming camera video frames: %" PRId64 " us",
                     mLastFrameTimestampUs);
             }
         }
@@ -832,10 +834,10 @@
 
 void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
         int32_t msgType, const sp<IMemory> &data) {
-    ALOGV("dataCallbackTimestamp: timestamp %lld us", timestampUs);
+    ALOGV("dataCallbackTimestamp: timestamp %" PRId64 " us", timestampUs);
     Mutex::Autolock autoLock(mLock);
     if (!mStarted || (mNumFramesReceived == 0 && timestampUs < mStartTimeUs)) {
-        ALOGV("Drop frame at %lld/%lld us", timestampUs, mStartTimeUs);
+        ALOGV("Drop frame at %" PRId64 "/%" PRId64 " us", timestampUs, mStartTimeUs);
         releaseOneRecordingFrame(data);
         return;
     }
@@ -874,7 +876,7 @@
     mFramesReceived.push_back(data);
     int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
     mFrameTimes.push_back(timeUs);
-    ALOGV("initial delay: %lld, current time stamp: %lld",
+    ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64,
         mStartTimeUs, timeUs);
     mFrameAvailableCondition.signal();
 }
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 15ba967..0acd9d0 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <inttypes.h>
+
 //#define LOG_NDEBUG 0
 #define LOG_TAG "CameraSourceTimeLapse"
 
@@ -79,7 +81,7 @@
       mSkipCurrentFrame(false) {
 
     mTimeBetweenFrameCaptureUs = timeBetweenFrameCaptureUs;
-    ALOGD("starting time lapse mode: %lld us",
+    ALOGD("starting time lapse mode: %" PRId64 " us",
         mTimeBetweenFrameCaptureUs);
 
     mVideoWidth = videoSize.width;
@@ -266,7 +268,7 @@
 
             // Really make sure that this video recording frame will not be dropped.
             if (*timestampUs < mStartTimeUs) {
-                ALOGI("set timestampUs to start time stamp %lld us", mStartTimeUs);
+                ALOGI("set timestampUs to start time stamp %" PRId64 " us", mStartTimeUs);
                 *timestampUs = mStartTimeUs;
             }
             return false;
diff --git a/media/libstagefright/DataURISource.cpp b/media/libstagefright/DataURISource.cpp
index 377bc85..2c39314 100644
--- a/media/libstagefright/DataURISource.cpp
+++ b/media/libstagefright/DataURISource.cpp
@@ -85,7 +85,7 @@
 }
 
 ssize_t DataURISource::readAt(off64_t offset, void *data, size_t size) {
-    if (offset >= mBuffer->size()) {
+    if ((offset < 0) || (offset >= (off64_t)mBuffer->size())) {
         return 0;
     }
 
diff --git a/media/libstagefright/ESDS.cpp b/media/libstagefright/ESDS.cpp
index 4a0c35c..427bf7b 100644
--- a/media/libstagefright/ESDS.cpp
+++ b/media/libstagefright/ESDS.cpp
@@ -91,7 +91,7 @@
     }
     while (more);
 
-    ALOGV("tag=0x%02x data_size=%d", *tag, *data_size);
+    ALOGV("tag=0x%02x data_size=%zu", *tag, *data_size);
 
     if (*data_size > size) {
         return ERROR_MALFORMED;
diff --git a/media/libstagefright/MPEG2TSWriter.cpp b/media/libstagefright/MPEG2TSWriter.cpp
index 78c12e1..9856f92 100644
--- a/media/libstagefright/MPEG2TSWriter.cpp
+++ b/media/libstagefright/MPEG2TSWriter.cpp
@@ -682,7 +682,7 @@
                     break;
                 }
 
-                ALOGV("writing access unit at time %.2f secs (index %d)",
+                ALOGV("writing access unit at time %.2f secs (index %zu)",
                      minTimeUs / 1E6, minIndex);
 
                 source = mSources.editItemAt(minIndex);
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 297f4fc..207acc8 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -16,17 +16,19 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MPEG4Extractor"
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
 #include <utils/Log.h>
 
 #include "include/MPEG4Extractor.h"
 #include "include/SampleTable.h"
 #include "include/ESDS.h"
 
-#include <ctype.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <string.h>
-
 #include <media/stagefright/foundation/ABitReader.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -51,6 +53,7 @@
                 int32_t timeScale,
                 const sp<SampleTable> &sampleTable,
                 Vector<SidxEntry> &sidx,
+                const Trex *trex,
                 off64_t firstMoofOffset);
 
     virtual status_t start(MetaData *params = NULL);
@@ -74,6 +77,7 @@
     uint32_t mCurrentSampleIndex;
     uint32_t mCurrentFragmentIndex;
     Vector<SidxEntry> &mSegments;
+    const Trex *mTrex;
     off64_t mFirstMoofOffset;
     off64_t mCurrentMoofOffset;
     off64_t mNextMoofOffset;
@@ -141,6 +145,7 @@
         off64_t offset;
         size_t size;
         uint32_t duration;
+        int32_t compositionOffset;
         uint8_t iv[16];
         Vector<size_t> clearsizes;
         Vector<size_t> encryptedsizes;
@@ -343,8 +348,7 @@
 }
 
 MPEG4Extractor::MPEG4Extractor(const sp<DataSource> &source)
-    : mSidxDuration(0),
-      mMoofOffset(0),
+    : mMoofOffset(0),
       mDataSource(source),
       mInitCheck(NO_INIT),
       mHasVideo(false),
@@ -369,7 +373,7 @@
     SINF *sinf = mFirstSINF;
     while (sinf) {
         SINF *next = sinf->next;
-        delete sinf->IPMPData;
+        delete[] sinf->IPMPData;
         delete sinf;
         sinf = next;
     }
@@ -409,7 +413,7 @@
         track = track->next;
     }
 
-    ALOGV("MPEG4Extractor::countTracks: %d tracks", n);
+    ALOGV("MPEG4Extractor::countTracks: %zu tracks", n);
     return n;
 }
 
@@ -694,7 +698,10 @@
                 return ERROR_MALFORMED;
             }
             sinf->len = dataLen - 3;
-            sinf->IPMPData = new char[sinf->len];
+            sinf->IPMPData = new (std::nothrow) char[sinf->len];
+            if (sinf->IPMPData == NULL) {
+                return ERROR_MALFORMED;
+            }
             data_offset += 2;
 
             if (mDataSource->readAt(data_offset, sinf->IPMPData, sinf->len) < sinf->len) {
@@ -787,7 +794,7 @@
         }
     } else if (chunk_size < 8) {
         // The smallest valid chunk is 8 bytes long.
-        ALOGE("invalid chunk size: %d", int(chunk_size));
+        ALOGE("invalid chunk size: %" PRIu64, chunk_size);
         return ERROR_MALFORMED;
     }
 
@@ -798,7 +805,7 @@
 #if 0
     static const char kWhitespace[] = "                                        ";
     const char *indent = &kWhitespace[sizeof(kWhitespace) - 1 - 2 * depth];
-    printf("%sfound chunk '%s' of size %lld\n", indent, chunk, chunk_size);
+    printf("%sfound chunk '%s' of size %" PRIu64 "\n", indent, chunk, chunk_size);
 
     char buffer[256];
     size_t n = chunk_size;
@@ -854,7 +861,7 @@
         case FOURCC('e', 'd', 't', 's'):
         {
             if (chunk_type == FOURCC('s', 't', 'b', 'l')) {
-                ALOGV("sampleTable chunk is %d bytes long.", (size_t)chunk_size);
+                ALOGV("sampleTable chunk is %" PRIu64 " bytes long.", chunk_size);
 
                 if (mDataSource->flags()
                         & (DataSource::kWantsPrefetching
@@ -1112,7 +1119,10 @@
                 return ERROR_MALFORMED;
             }
 
-            pssh.data = new uint8_t[pssh.datalen];
+            pssh.data = new (std::nothrow) uint8_t[pssh.datalen];
+            if (pssh.data == NULL) {
+                return ERROR_MALFORMED;
+            }
             ALOGV("allocated pssh @ %p", pssh.data);
             ssize_t requested = (ssize_t) pssh.datalen;
             if (mDataSource->readAt(data_offset + 24, pssh.data, requested) < requested) {
@@ -1157,6 +1167,8 @@
 
             mLastTrack->timescale = ntohl(timescale);
 
+            // 14496-12 says all ones means indeterminate, but some files seem to use
+            // 0 instead. We treat both the same.
             int64_t duration = 0;
             if (version == 1) {
                 if (mDataSource->readAt(
@@ -1164,7 +1176,9 @@
                         < (ssize_t)sizeof(duration)) {
                     return ERROR_IO;
                 }
-                duration = ntoh64(duration);
+                if (duration != -1) {
+                    duration = ntoh64(duration);
+                }
             } else {
                 uint32_t duration32;
                 if (mDataSource->readAt(
@@ -1172,13 +1186,14 @@
                         < (ssize_t)sizeof(duration32)) {
                     return ERROR_IO;
                 }
-                // ffmpeg sets duration to -1, which is incorrect.
                 if (duration32 != 0xffffffff) {
                     duration = ntohl(duration32);
                 }
             }
-            mLastTrack->meta->setInt64(
-                    kKeyDuration, (duration * 1000000) / mLastTrack->timescale);
+            if (duration != 0) {
+                mLastTrack->meta->setInt64(
+                        kKeyDuration, (duration * 1000000) / mLastTrack->timescale);
+            }
 
             uint8_t lang[2];
             off64_t lang_offset;
@@ -1718,11 +1733,11 @@
         {
             *offset += chunk_size;
 
-            if (chunk_data_size < 24) {
+            if (chunk_data_size < 32) {
                 return ERROR_MALFORMED;
             }
 
-            uint8_t header[24];
+            uint8_t header[32];
             if (mDataSource->readAt(
                         data_offset, header, sizeof(header))
                     < (ssize_t)sizeof(header)) {
@@ -1730,14 +1745,27 @@
             }
 
             uint64_t creationTime;
+            uint64_t duration = 0;
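+            // A duration of all ones in 'mvhd' means "indeterminate"; treat it as unknown (0).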
             if (header[0] == 1) {
                 creationTime = U64_AT(&header[4]);
                 mHeaderTimescale = U32_AT(&header[20]);
+                duration = U64_AT(&header[24]);
+                if (duration == 0xffffffffffffffff) {
+                    duration = 0;
+                }
             } else if (header[0] != 0) {
                 return ERROR_MALFORMED;
             } else {
                 creationTime = U32_AT(&header[4]);
                 mHeaderTimescale = U32_AT(&header[12]);
+                uint32_t d32 = U32_AT(&header[16]);
+                if (d32 == 0xffffffff) {
+                    d32 = 0;
+                }
+                duration = d32;
+            }
+            if (duration != 0) {
+                mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
             }
 
             String8 s;
@@ -1748,6 +1776,50 @@
             break;
         }
 
+        case FOURCC('m', 'e', 'h', 'd'):
+        {
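+            // Movie extends header ('mehd'): overall fragment duration for fragmented MP4 files.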
+            *offset += chunk_size;
+
+            if (chunk_data_size < 8) {
+                return ERROR_MALFORMED;
+            }
+
+            uint8_t flags[4];
+            if (mDataSource->readAt(
+                        data_offset, flags, sizeof(flags))
+                    < (ssize_t)sizeof(flags)) {
+                return ERROR_IO;
+            }
+
+            uint64_t duration = 0;
+            if (flags[0] == 1) {
+                // 64 bit
+                if (chunk_data_size < 12) {
+                    return ERROR_MALFORMED;
+                }
+                mDataSource->getUInt64(data_offset + 4, &duration);
+                if (duration == 0xffffffffffffffff) {
+                    duration = 0;
+                }
+            } else if (flags[0] == 0) {
+                // 32 bit
+                uint32_t d32;
+                mDataSource->getUInt32(data_offset + 4, &d32);
+                if (d32 == 0xffffffff) {
+                    d32 = 0;
+                }
+                duration = d32;
+            } else {
+                return ERROR_MALFORMED;
+            }
+
+            if (duration != 0) {
+                mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
+            }
+
+            break;
+        }
+
         case FOURCC('m', 'd', 'a', 't'):
         {
             ALOGV("mdat chunk, drm: %d", mIsDrm);
@@ -1784,6 +1856,26 @@
             break;
         }
 
+        case FOURCC('t', 'r', 'e', 'x'):
+        {
+            *offset += chunk_size;
+
+            if (chunk_data_size < 24) {
+                return ERROR_IO;
+            }
+            Trex trex;
+            if (!mDataSource->getUInt32(data_offset + 4, &trex.track_ID) ||
+                !mDataSource->getUInt32(data_offset + 8, &trex.default_sample_description_index) ||
+                !mDataSource->getUInt32(data_offset + 12, &trex.default_sample_duration) ||
+                !mDataSource->getUInt32(data_offset + 16, &trex.default_sample_size) ||
+                !mDataSource->getUInt32(data_offset + 20, &trex.default_sample_flags)) {
+                return ERROR_IO;
+            }
+            mTrex.add(trex);
+            break;
+        }
+
         case FOURCC('t', 'x', '3', 'g'):
         {
             uint32_t type;
@@ -1794,7 +1886,10 @@
                 size = 0;
             }
 
-            uint8_t *buffer = new uint8_t[size + chunk_size];
+            uint8_t *buffer = new (std::nothrow) uint8_t[size + chunk_size];
+            if (buffer == NULL) {
+                return ERROR_MALFORMED;
+            }
 
             if (size > 0) {
                 memcpy(buffer, data, size);
@@ -1959,7 +2054,7 @@
         offset += 16;
         size -= 16;
     }
-    ALOGV("sidx pres/off: %Ld/%Ld", earliestPresentationTime, firstOffset);
+    ALOGV("sidx pres/off: %" PRIu64 "/%" PRIu64, earliestPresentationTime, firstOffset);
 
     if (size < 4) {
         return -EINVAL;
@@ -2005,12 +2100,11 @@
         mSidxEntries.add(se);
     }
 
-    mSidxDuration = total_duration * 1000000 / timeScale;
-    ALOGV("duration: %lld", mSidxDuration);
+    uint64_t sidxDuration = total_duration * 1000000 / timeScale;
 
     int64_t metaDuration;
     if (!mLastTrack->meta->findInt64(kKeyDuration, &metaDuration) || metaDuration == 0) {
-        mLastTrack->meta->setInt64(kKeyDuration, mSidxDuration);
+        mLastTrack->meta->setInt64(kKeyDuration, sidxDuration);
     }
     return OK;
 }
@@ -2111,7 +2205,10 @@
         return ERROR_MALFORMED;
     }
 
-    uint8_t *buffer = new uint8_t[size + 1];
+    uint8_t *buffer = new (std::nothrow) uint8_t[size + 1];
+    if (buffer == NULL) {
+        return ERROR_MALFORMED;
+    }
     if (mDataSource->readAt(
                 offset, buffer, size) != (ssize_t)size) {
         delete[] buffer;
@@ -2298,7 +2395,10 @@
         return ERROR_MALFORMED;
     }
 
-    uint8_t *buffer = new uint8_t[size];
+    uint8_t *buffer = new (std::nothrow) uint8_t[size];
+    if (buffer == NULL) {
+        return ERROR_MALFORMED;
+    }
     if (mDataSource->readAt(
                 offset, buffer, size) != (ssize_t)size) {
         delete[] buffer;
@@ -2477,11 +2577,24 @@
         return NULL;
     }
 
-    ALOGV("getTrack called, pssh: %d", mPssh.size());
+
+    Trex *trex = NULL;
+    int32_t trackId;
+    if (track->meta->findInt32(kKeyTrackID, &trackId)) {
+        for (size_t i = 0; i < mTrex.size(); i++) {
+            Trex *t = &mTrex.editItemAt(i);
+            if (t->track_ID == (uint32_t) trackId) {
+                trex = t;
+                break;
+            }
+        }
+    }
+
+    ALOGV("getTrack called, pssh: %zu", mPssh.size());
 
     return new MPEG4Source(
             track->meta, mDataSource, track->timescale, track->sampleTable,
-            mSidxEntries, mMoofOffset);
+            mSidxEntries, trex, mMoofOffset);
 }
 
 // static
@@ -2811,6 +2924,7 @@
         int32_t timeScale,
         const sp<SampleTable> &sampleTable,
         Vector<SidxEntry> &sidx,
+        const Trex *trex,
         off64_t firstMoofOffset)
     : mFormat(format),
       mDataSource(dataSource),
@@ -2819,6 +2933,7 @@
       mCurrentSampleIndex(0),
       mCurrentFragmentIndex(0),
       mSegments(sidx),
+      mTrex(trex),
       mFirstMoofOffset(firstMoofOffset),
       mCurrentMoofOffset(firstMoofOffset),
       mCurrentTime(0),
@@ -2835,6 +2950,8 @@
       mWantsNALFragments(false),
       mSrcBuffer(NULL) {
 
+    memset(&mTrackFragmentHeaderInfo, 0, sizeof(mTrackFragmentHeaderInfo));
+
     mFormat->findInt32(kKeyCryptoMode, &mCryptoMode);
     mDefaultIVSize = 0;
     mFormat->findInt32(kKeyCryptoDefaultIVSize, &mDefaultIVSize);
@@ -2917,7 +3034,11 @@
 
     mGroup->add_buffer(new MediaBuffer(max_size));
 
-    mSrcBuffer = new uint8_t[max_size];
+    mSrcBuffer = new (std::nothrow) uint8_t[max_size];
+    if (mSrcBuffer == NULL) {
+        // file probably specified a bad max size
+        return ERROR_MALFORMED;
+    }
 
     mStarted = true;
 
@@ -3402,8 +3523,8 @@
     } else if (mTrackFragmentHeaderInfo.mFlags
             & TrackFragmentHeaderInfo::kDefaultSampleDurationPresent) {
         sampleDuration = mTrackFragmentHeaderInfo.mDefaultSampleDuration;
-    } else {
-        sampleDuration = mTrackFragmentHeaderInfo.mDefaultSampleDuration;
+    } else if (mTrex) {
+        sampleDuration = mTrex->default_sample_duration;
     }
 
     if (flags & kSampleSizePresent) {
@@ -3430,7 +3551,7 @@
         sampleCtsOffset = 0;
     }
 
-    if (size < sampleCount * bytesPerSample) {
+    if (size < (off64_t)sampleCount * bytesPerSample) {
         return -EINVAL;
     }
 
@@ -3464,7 +3585,7 @@
             offset += 4;
         }
 
-        ALOGV("adding sample %d at offset 0x%08llx, size %u, duration %u, "
+        ALOGV("adding sample %d at offset 0x%08" PRIx64 ", size %u, duration %u, "
               " flags 0x%08x", i + 1,
                 dataOffset, sampleSize, sampleDuration,
                 (flags & kFirstSampleFlagsPresent) && i == 0
@@ -3472,6 +3593,7 @@
         tmp.offset = dataOffset;
         tmp.size = sampleSize;
         tmp.duration = sampleDuration;
+        tmp.compositionOffset = sampleCtsOffset;
         mCurrentSamples.add(tmp);
 
         dataOffset += sampleSize;
@@ -3874,7 +3996,7 @@
         const Sample *smpl = &mCurrentSamples[mCurrentSampleIndex];
         offset = smpl->offset;
         size = smpl->size;
-        cts = mCurrentTime;
+        cts = mCurrentTime + smpl->compositionOffset;
         mCurrentTime += smpl->duration;
         isSyncSample = (mCurrentSampleIndex == 0); // XXX
 
@@ -4181,7 +4303,7 @@
 
         char chunkstring[5];
         MakeFourCCString(chunkType, chunkstring);
-        ALOGV("saw chunk type %s, size %lld @ %lld", chunkstring, chunkSize, offset);
+        ALOGV("saw chunk type %s, size %" PRIu64 " @ %lld", chunkstring, chunkSize, offset);
         switch (chunkType) {
             case FOURCC('f', 't', 'y', 'p'):
             {
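
Note on the 'trex' handling above: the trex (track extends) box carries movie-level defaults for fragmented MP4 tracks. The extractor records one Trex entry per track ID and hands the matching entry to MPEG4Source, which falls back to trex->default_sample_duration when a trun entry carries no duration and the tfhd supplies no default. A minimal sketch of that fallback order, with a hypothetical helper name and plain booleans standing in for the real flag tests:

    static uint32_t resolveSampleDuration(
            bool trunHasDuration, uint32_t trunDuration,
            bool tfhdHasDefault, uint32_t tfhdDefaultDuration,
            const Trex *trex) {
        if (trunHasDuration) {
            return trunDuration;                      // explicit value in the trun entry
        }
        if (tfhdHasDefault) {
            return tfhdDefaultDuration;               // default from the fragment header
        }
        if (trex != NULL) {
            return trex->default_sample_duration;     // movie-level default from trex
        }
        return 0;                                     // nothing specified anywhere
    }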
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 24e53b3..4b8440b 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -16,13 +16,17 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MPEG4Writer"
-#include <inttypes.h>
-#include <utils/Log.h>
 
 #include <arpa/inet.h>
-
+#include <fcntl.h>
+#include <inttypes.h>
 #include <pthread.h>
 #include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <utils/Log.h>
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/MPEG4Writer.h>
@@ -34,10 +38,6 @@
 #include <media/stagefright/Utils.h>
 #include <media/mediarecorder.h>
 #include <cutils/properties.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <unistd.h>
 
 #include "include/ESDS.h"
 
@@ -441,7 +441,7 @@
 
     // At most 2 tracks can be supported.
     if (mTracks.size() >= 2) {
-        ALOGE("Too many tracks (%d) to add", mTracks.size());
+        ALOGE("Too many tracks (%zu) to add", mTracks.size());
         return ERROR_UNSUPPORTED;
     }
 
@@ -555,8 +555,8 @@
         size = MAX_MOOV_BOX_SIZE;
     }
 
-    ALOGI("limits: %lld/%lld bytes/us, bit rate: %d bps and the estimated"
-         " moov size %lld bytes",
+    ALOGI("limits: %" PRId64 "/%" PRId64 " bytes/us, bit rate: %d bps and the"
+         " estimated moov size %" PRId64 " bytes",
          mMaxFileSizeLimitBytes, mMaxFileDurationLimitUs, bitRate, size);
     return factor * size;
 }
@@ -592,8 +592,8 @@
         // If file size is set to be larger than the 32 bit file
         // size limit, treat it as an error.
         if (mMaxFileSizeLimitBytes > kMax32BitFileSize) {
-            ALOGW("32-bit file size limit (%lld bytes) too big. "
-                 "It is changed to %lld bytes",
+            ALOGW("32-bit file size limit (%" PRId64 " bytes) too big. "
+                 "It is changed to %" PRId64 " bytes",
                 mMaxFileSizeLimitBytes, kMax32BitFileSize);
             mMaxFileSizeLimitBytes = kMax32BitFileSize;
         }
@@ -854,7 +854,7 @@
     }
 
     if (mTracks.size() > 1) {
-        ALOGD("Duration from tracks range is [%lld, %lld] us",
+        ALOGD("Duration from tracks range is [%" PRId64 ", %" PRId64 "] us",
             minDurationUs, maxDurationUs);
     }
 
@@ -1321,12 +1321,12 @@
 }
 
 void MPEG4Writer::setStartTimestampUs(int64_t timeUs) {
-    ALOGI("setStartTimestampUs: %lld", timeUs);
+    ALOGI("setStartTimestampUs: %" PRId64, timeUs);
     CHECK_GE(timeUs, 0ll);
     Mutex::Autolock autoLock(mLock);
     if (mStartTimestampUs < 0 || mStartTimestampUs > timeUs) {
         mStartTimestampUs = timeUs;
-        ALOGI("Earliest track starting time: %lld", mStartTimestampUs);
+        ALOGI("Earliest track starting time: %" PRId64, mStartTimestampUs);
     }
 }
 
@@ -1527,7 +1527,7 @@
     {
         int64_t timeUs;
         if (params && params->findInt64(kKeyTrackTimeStatus, &timeUs)) {
-            ALOGV("Receive request to track progress status for every %lld us", timeUs);
+            ALOGV("Receive request to track progress status for every %" PRId64 " us", timeUs);
             mTrackEveryTimeDurationUs = timeUs;
             mTrackingProgressStatus = true;
         }
@@ -1561,7 +1561,7 @@
 }
 
 void MPEG4Writer::writeChunkToFile(Chunk* chunk) {
-    ALOGV("writeChunkToFile: %lld from %s track",
+    ALOGV("writeChunkToFile: %" PRId64 " from %s track",
         chunk->mTimeStampUs, chunk->mTrack->isAudio()? "audio": "video");
 
     int32_t isFirstSample = true;
@@ -1737,7 +1737,7 @@
             startTimeOffsetUs = kInitialDelayTimeUs;
         }
         startTimeUs += startTimeOffsetUs;
-        ALOGI("Start time offset: %lld us", startTimeOffsetUs);
+        ALOGI("Start time offset: %" PRId64 " us", startTimeOffsetUs);
     }
 
     meta->setInt64(kKeyTime, startTimeUs);
@@ -1817,7 +1817,7 @@
 static const uint8_t *findNextStartCode(
         const uint8_t *data, size_t length) {
 
-    ALOGV("findNextStartCode: %p %d", data, length);
+    ALOGV("findNextStartCode: %p %zu", data, length);
 
     size_t bytesLeft = length;
     while (bytesLeft > 4  &&
@@ -2238,7 +2238,7 @@
             }
 
             timestampUs = decodingTimeUs;
-            ALOGV("decoding time: %lld and ctts offset time: %lld",
+            ALOGV("decoding time: %" PRId64 " and ctts offset time: %" PRId64,
                 timestampUs, cttsOffsetTimeUs);
 
             // Update ctts box table if necessary
@@ -2291,7 +2291,7 @@
             return ERROR_MALFORMED;
         }
 
-        ALOGV("%s media time stamp: %lld and previous paused duration %lld",
+        ALOGV("%s media time stamp: %" PRId64 " and previous paused duration %" PRId64,
                 trackName, timestampUs, previousPausedDurationUs);
         if (timestampUs > mTrackDurationUs) {
             mTrackDurationUs = timestampUs;
@@ -2306,7 +2306,7 @@
             ((timestampUs * mTimeScale + 500000LL) / 1000000LL -
                 (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL);
         if (currDurationTicks < 0ll) {
-            ALOGE("timestampUs %lld < lastTimestampUs %lld for %s track",
+            ALOGE("timestampUs %" PRId64 " < lastTimestampUs %" PRId64 " for %s track",
                 timestampUs, lastTimestampUs, trackName);
             copy->release();
             return UNKNOWN_ERROR;
@@ -2347,7 +2347,7 @@
             }
             previousSampleSize = sampleSize;
         }
-        ALOGV("%s timestampUs/lastTimestampUs: %lld/%lld",
+        ALOGV("%s timestampUs/lastTimestampUs: %" PRId64 "/%" PRId64,
                 trackName, timestampUs, lastTimestampUs);
         lastDurationUs = timestampUs - lastTimestampUs;
         lastDurationTicks = currDurationTicks;
@@ -2455,7 +2455,7 @@
     ALOGI("Received total/0-length (%d/%d) buffers and encoded %d frames. - %s",
             count, nZeroLengthFrames, mStszTableEntries->count(), trackName);
     if (mIsAudio) {
-        ALOGI("Audio track drift time: %lld us", mOwner->getDriftTimeUs());
+        ALOGI("Audio track drift time: %" PRId64 " us", mOwner->getDriftTimeUs());
     }
 
     if (err == ERROR_END_OF_STREAM) {
@@ -2538,11 +2538,11 @@
 }
 
 void MPEG4Writer::Track::trackProgressStatus(int64_t timeUs, status_t err) {
-    ALOGV("trackProgressStatus: %lld us", timeUs);
+    ALOGV("trackProgressStatus: %" PRId64 " us", timeUs);
 
     if (mTrackEveryTimeDurationUs > 0 &&
         timeUs - mPreviousTrackTimeUs >= mTrackEveryTimeDurationUs) {
-        ALOGV("Fire time tracking progress status at %lld us", timeUs);
+        ALOGV("Fire time tracking progress status at %" PRId64 " us", timeUs);
         mOwner->trackProgressStatus(mTrackId, timeUs - mPreviousTrackTimeUs, err);
         mPreviousTrackTimeUs = timeUs;
     }
@@ -2576,13 +2576,13 @@
 }
 
 void MPEG4Writer::setDriftTimeUs(int64_t driftTimeUs) {
-    ALOGV("setDriftTimeUs: %lld us", driftTimeUs);
+    ALOGV("setDriftTimeUs: %" PRId64 " us", driftTimeUs);
     Mutex::Autolock autolock(mLock);
     mDriftTimeUs = driftTimeUs;
 }
 
 int64_t MPEG4Writer::getDriftTimeUs() {
-    ALOGV("getDriftTimeUs: %lld us", mDriftTimeUs);
+    ALOGV("getDriftTimeUs: %" PRId64 " us", mDriftTimeUs);
     Mutex::Autolock autolock(mLock);
     return mDriftTimeUs;
 }
@@ -3038,7 +3038,7 @@
         return;
     }
 
-    ALOGV("ctts box has %d entries with range [%lld, %lld]",
+    ALOGV("ctts box has %d entries with range [%" PRId64 ", %" PRId64 "]",
             mCttsTableEntries->count(), mMinCttsOffsetTimeUs, mMaxCttsOffsetTimeUs);
 
     mOwner->beginBox("ctts");
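
Most of the MPEG4Writer changes above are format-string fixes: int64_t values are printed with the <inttypes.h> macros instead of %lld, and size_t values with %zu, so the strings stay correct on both 32- and 64-bit builds (GraphicBufferSource below takes the alternative route of casting to long long and keeping %lld). A standalone illustration with made-up values:

    #include <inttypes.h>
    #include <stdio.h>

    int main() {
        int64_t durationUs = 1500000;   // may be 'long' or 'long long' depending on the ABI
        size_t trackCount = 2;          // width also varies by ABI
        // PRId64 expands to the right conversion for int64_t; %zu matches size_t.
        printf("duration %" PRId64 " us, %zu tracks\n", durationUs, trackCount);
        return 0;
    }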
diff --git a/media/libstagefright/MediaBuffer.cpp b/media/libstagefright/MediaBuffer.cpp
index 8af0880..1f80a47 100644
--- a/media/libstagefright/MediaBuffer.cpp
+++ b/media/libstagefright/MediaBuffer.cpp
@@ -134,7 +134,7 @@
 
 void MediaBuffer::set_range(size_t offset, size_t length) {
     if ((mGraphicBuffer == NULL) && (offset + length > mSize)) {
-        ALOGE("offset = %d, length = %d, mSize = %d", offset, length, mSize);
+        ALOGE("offset = %zu, length = %zu, mSize = %zu", offset, length, mSize);
     }
     CHECK((mGraphicBuffer != NULL) || (offset + length <= mSize));
 
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index b9c5904..14c8028 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1456,7 +1456,7 @@
         ++i;
     }
 
-    ALOGV("Found %u pieces of codec specific data.", mCSD.size());
+    ALOGV("Found %zu pieces of codec specific data.", mCSD.size());
 }
 
 status_t MediaCodec::queueCSDInputBuffer(size_t bufferIndex) {
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 924173c..9868ecf 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -17,6 +17,9 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MediaCodecSource"
 #define DEBUG_DRIFT_TIME 0
+
+#include <inttypes.h>
+
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
 #include <media/ICrypto.h>
@@ -677,7 +680,7 @@
                     }
                     mbuf->meta_data()->setInt64(kKeyDecodingTime, decodingTimeUs);
 
-                    ALOGV("[video] time %lld us (%.2f secs), dts/pts diff %lld",
+                    ALOGV("[video] time %" PRId64 " us (%.2f secs), dts/pts diff %" PRId64,
                             timeUs, timeUs / 1E6, decodingTimeUs - timeUs);
                 } else {
                     int64_t driftTimeUs = 0;
@@ -687,7 +690,7 @@
                     mDriftTimeQueue.erase(mDriftTimeQueue.begin());
                     mbuf->meta_data()->setInt64(kKeyDriftTime, driftTimeUs);
 #endif // DEBUG_DRIFT_TIME
-                    ALOGV("[audio] time %lld us (%.2f secs), drift %lld",
+                    ALOGV("[audio] time %" PRId64 " us (%.2f secs), drift %" PRId64,
                             timeUs, timeUs / 1E6, driftTimeUs);
                 }
                 mbuf->meta_data()->setInt64(kKeyTime, timeUs);
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index 90335ee..c7c6f34 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -176,7 +176,7 @@
     }
 
     if (trackIndex >= mTrackList.size()) {
-        ALOGE("WriteSampleData() get an invalid index %d", trackIndex);
+        ALOGE("WriteSampleData() get an invalid index %zu", trackIndex);
         return -EINVAL;
     }
 
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index 61cf0ad..c1feff8 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <inttypes.h>
+
 //#define LOG_NDEBUG 0
 #define LOG_TAG "NuCachedSource2"
 #include <utils/Log.h>
@@ -135,7 +137,7 @@
 }
 
 void PageCache::copy(size_t from, void *data, size_t size) {
-    ALOGV("copy from %d size %d", from, size);
+    ALOGV("copy from %zu size %zu", from, size);
 
     if (size == 0) {
         return;
@@ -333,7 +335,7 @@
             mNumRetriesLeft = 0;
         }
 
-        ALOGE("source returned error %d, %d retries left", n, mNumRetriesLeft);
+        ALOGE("source returned error %zd, %d retries left", n, mNumRetriesLeft);
         mCache->releasePage(page);
     } else if (n == 0) {
         ALOGI("ERROR_END_OF_STREAM");
@@ -464,14 +466,14 @@
     size_t actualBytes = mCache->releaseFromStart(maxBytes);
     mCacheOffset += actualBytes;
 
-    ALOGI("restarting prefetcher, totalSize = %d", mCache->totalSize());
+    ALOGI("restarting prefetcher, totalSize = %zu", mCache->totalSize());
     mFetching = true;
 }
 
 ssize_t NuCachedSource2::readAt(off64_t offset, void *data, size_t size) {
     Mutex::Autolock autoSerializer(mSerializer);
 
-    ALOGV("readAt offset %lld, size %d", offset, size);
+    ALOGV("readAt offset %lld, size %zu", offset, size);
 
     Mutex::Autolock autoLock(mLock);
 
@@ -539,7 +541,7 @@
 ssize_t NuCachedSource2::readInternal(off64_t offset, void *data, size_t size) {
     CHECK_LE(size, (size_t)mHighwaterThresholdBytes);
 
-    ALOGV("readInternal offset %lld size %d", offset, size);
+    ALOGV("readInternal offset %lld size %zu", offset, size);
 
     Mutex::Autolock autoLock(mLock);
 
@@ -679,7 +681,7 @@
         mKeepAliveIntervalUs = kDefaultKeepAliveIntervalUs;
     }
 
-    ALOGV("lowwater = %d bytes, highwater = %d bytes, keepalive = %lld us",
+    ALOGV("lowwater = %zu bytes, highwater = %zu bytes, keepalive = %" PRId64 " us",
          mLowwaterThresholdBytes,
          mHighwaterThresholdBytes,
          mKeepAliveIntervalUs);
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 64f56e9..f24cf3a 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -389,7 +389,7 @@
                 info->mFinalResult = err;
 
                 if (info->mFinalResult != ERROR_END_OF_STREAM) {
-                    ALOGW("read on track %d failed with error %d",
+                    ALOGW("read on track %zu failed with error %d",
                           info->mTrackIndex, err);
                 }
 
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index f3eeb03..8c15929 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -338,7 +338,7 @@
 
     const TOCEntry &entry = mTableOfContents.itemAt(left);
 
-    ALOGV("seeking to entry %d / %d at offset %lld",
+    ALOGV("seeking to entry %zu / %zu at offset %lld",
          left, mTableOfContents.size(), entry.mPageOffset);
 
     return seekToOffset(entry.mPageOffset);
@@ -381,7 +381,7 @@
     ssize_t n;
     if ((n = mSource->readAt(offset, header, sizeof(header)))
             < (ssize_t)sizeof(header)) {
-        ALOGV("failed to read %zu bytes at offset 0x%016llx, got %d bytes",
+        ALOGV("failed to read %zu bytes at offset 0x%016llx, got %zd bytes",
              sizeof(header), offset, n);
 
         if (n < 0) {
@@ -505,7 +505,7 @@
                     packetSize);
 
             if (n < (ssize_t)packetSize) {
-                ALOGV("failed to read %zu bytes at 0x%016llx, got %d bytes",
+                ALOGV("failed to read %zu bytes at 0x%016llx, got %zd bytes",
                      packetSize, dataOffset, n);
                 return ERROR_IO;
             }
@@ -546,7 +546,7 @@
                 buffer = NULL;
             }
 
-            ALOGV("readPage returned %d", n);
+            ALOGV("readPage returned %zd", n);
 
             return n < 0 ? n : (status_t)ERROR_END_OF_STREAM;
         }
@@ -590,7 +590,7 @@
     if ((err = readNextPacket(&packet)) != OK) {
         return err;
     }
-    ALOGV("read packet of size %d\n", packet->range_length());
+    ALOGV("read packet of size %zu\n", packet->range_length());
     err = verifyHeader(packet, 1);
     packet->release();
     packet = NULL;
@@ -601,7 +601,7 @@
     if ((err = readNextPacket(&packet)) != OK) {
         return err;
     }
-    ALOGV("read packet of size %d\n", packet->range_length());
+    ALOGV("read packet of size %zu\n", packet->range_length());
     err = verifyHeader(packet, 3);
     packet->release();
     packet = NULL;
@@ -612,7 +612,7 @@
     if ((err = readNextPacket(&packet)) != OK) {
         return err;
     }
-    ALOGV("read packet of size %d\n", packet->range_length());
+    ALOGV("read packet of size %zu\n", packet->range_length());
     err = verifyHeader(packet, 5);
     packet->release();
     packet = NULL;
@@ -903,7 +903,7 @@
         return;
     }
 
-    ALOGV("got flac of size %d", flacSize);
+    ALOGV("got flac of size %zu", flacSize);
 
     uint32_t picType;
     uint32_t typeLen;
@@ -953,7 +953,7 @@
         goto exit;
     }
 
-    ALOGV("got image data, %d trailing bytes",
+    ALOGV("got image data, %zu trailing bytes",
          flacSize - 32 - typeLen - descLen - dataLen);
 
     fileMeta->setData(
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index af96225..8cc41e7 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -16,7 +16,9 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "StagefrightMetadataRetriever"
+
 #include <inttypes.h>
+
 #include <utils/Log.h>
 
 #include "include/StagefrightMetadataRetriever.h"
@@ -87,7 +89,7 @@
         int fd, int64_t offset, int64_t length) {
     fd = dup(fd);
 
-    ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length);
+    ALOGV("setDataSource(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
 
     mParsedMetaData = false;
     mMetaData.clear();
@@ -242,7 +244,7 @@
             const char *mime;
             CHECK(trackMeta->findCString(kKeyMIMEType, &mime));
 
-            ALOGV("thumbNailTime = %lld us, timeUs = %lld us, mime = %s",
+            ALOGV("thumbNailTime = %" PRId64 " us, timeUs = %" PRId64 " us, mime = %s",
                  thumbNailTime, timeUs, mime);
         }
     }
@@ -325,7 +327,7 @@
 VideoFrame *StagefrightMetadataRetriever::getFrameAtTime(
         int64_t timeUs, int option) {
 
-    ALOGV("getFrameAtTime: %lld us option: %d", timeUs, option);
+    ALOGV("getFrameAtTime: %" PRId64 " us option: %d", timeUs, option);
 
     if (mExtractor.get() == NULL) {
         ALOGV("no extractor.");
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 62aea36..4e1c65c 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -16,6 +16,8 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "SurfaceMediaSource"
 
+#include <inttypes.h>
+
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/SurfaceMediaSource.h>
 #include <media/stagefright/MediaDefs.h>
@@ -179,7 +181,7 @@
 }
 
 status_t SurfaceMediaSource::setMaxAcquiredBufferCount(size_t count) {
-    ALOGV("setMaxAcquiredBufferCount(%d)", count);
+    ALOGV("setMaxAcquiredBufferCount(%zu)", count);
     Mutex::Autolock lock(mMutex);
 
     CHECK_GT(count, 1);
@@ -209,7 +211,7 @@
     mFrameAvailableCondition.signal();
 
     while (mNumPendingBuffers > 0) {
-        ALOGI("Still waiting for %d buffers to be returned.",
+        ALOGI("Still waiting for %zu buffers to be returned.",
                 mNumPendingBuffers);
 
 #if DEBUG_PENDING_BUFFERS
@@ -269,7 +271,7 @@
     memcpy(data, &type, 4);
     memcpy(data + 4, &bufferHandle, sizeof(buffer_handle_t));
 
-    ALOGV("handle = %p, , offset = %d, length = %d",
+    ALOGV("handle = %p, , offset = %zu, length = %zu",
             bufferHandle, (*buffer)->range_length(), (*buffer)->range_offset());
 }
 
@@ -363,7 +365,7 @@
     (*buffer)->setObserver(this);
     (*buffer)->add_ref();
     (*buffer)->meta_data()->setInt64(kKeyTime, mCurrentTimestamp / 1000);
-    ALOGV("Frames encoded = %d, timestamp = %lld, time diff = %lld",
+    ALOGV("Frames encoded = %d, timestamp = %" PRId64 ", time diff = %" PRId64,
             mNumFramesEncoded, mCurrentTimestamp / 1000,
             mCurrentTimestamp / 1000 - prevTimeStamp / 1000);
 
diff --git a/media/libstagefright/TimedEventQueue.cpp b/media/libstagefright/TimedEventQueue.cpp
index 3d2eb1f..da50c56 100644
--- a/media/libstagefright/TimedEventQueue.cpp
+++ b/media/libstagefright/TimedEventQueue.cpp
@@ -17,7 +17,11 @@
 #undef __STRICT_ANSI__
 #define __STDINT_LIMITS
 #define __STDC_LIMIT_MACROS
+
+#include <inttypes.h>
 #include <stdint.h>
+#include <sys/prctl.h>
+#include <sys/time.h>
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "TimedEventQueue"
@@ -26,9 +30,6 @@
 
 #include "include/TimedEventQueue.h"
 
-#include <sys/prctl.h>
-#include <sys/time.h>
-
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooper.h>
 #include <binder/IServiceManager.h>
@@ -258,7 +259,7 @@
                 static int64_t kMaxTimeoutUs = 10000000ll;  // 10 secs
                 bool timeoutCapped = false;
                 if (delay_us > kMaxTimeoutUs) {
-                    ALOGW("delay_us exceeds max timeout: %lld us", delay_us);
+                    ALOGW("delay_us exceeds max timeout: %" PRId64 " us", delay_us);
 
                     // We'll never block for more than 10 secs, instead
                     // we will split up the full timeout into chunks of
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 7ff31a1..d53051e 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -335,7 +335,7 @@
                 // there can't be another param here, so use all the rest
                 i = csd0->size();
             }
-            ALOGV("block at %d, last was %d", i, lastparamoffset);
+            ALOGV("block at %zu, last was %d", i, lastparamoffset);
             if (lastparamoffset > 0) {
                 int size = i - lastparamoffset;
                 avcc[avccidx++] = size >> 8;
@@ -366,7 +366,7 @@
                 // there can't be another param here, so use all the rest
                 i = csd1->size();
             }
-            ALOGV("block at %d, last was %d", i, lastparamoffset);
+            ALOGV("block at %zu, last was %d", i, lastparamoffset);
             if (lastparamoffset > 0) {
                 int size = i - lastparamoffset;
                 avcc[avccidx++] = size >> 8;
diff --git a/media/libstagefright/VBRISeeker.cpp b/media/libstagefright/VBRISeeker.cpp
index af858b9..e988f6d 100644
--- a/media/libstagefright/VBRISeeker.cpp
+++ b/media/libstagefright/VBRISeeker.cpp
@@ -16,6 +16,9 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "VBRISeeker"
+
+#include <inttypes.h>
+
 #include <utils/Log.h>
 
 #include "include/VBRISeeker.h"
@@ -75,7 +78,7 @@
     size_t entrySize = U16_AT(&vbriHeader[22]);
     size_t scale = U16_AT(&vbriHeader[20]);
 
-    ALOGV("%d entries, scale=%d, size_per_entry=%d",
+    ALOGV("%zu entries, scale=%zu, size_per_entry=%zu",
          numEntries,
          scale,
          entrySize);
@@ -119,7 +122,7 @@
 
         seeker->mSegments.push(numBytes);
 
-        ALOGV("entry #%d: %u offset 0x%016llx", i, numBytes, offset);
+        ALOGV("entry #%zu: %u offset 0x%016llx", i, numBytes, offset);
         offset += numBytes;
     }
 
@@ -160,7 +163,7 @@
         *pos += mSegments.itemAt(segmentIndex++);
     }
 
-    ALOGV("getOffsetForTime %lld us => 0x%016llx", *timeUs, *pos);
+    ALOGV("getOffsetForTime %" PRId64 " us => 0x%016llx", *timeUs, *pos);
 
     *timeUs = nowUs;
 
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index fe9058b..7124fd3 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -414,7 +414,7 @@
         } else {
             pos = (seekTimeUs * mSampleRate) / 1000000 * mNumChannels * (mBitsPerSample >> 3);
         }
-        if (pos > mSize) {
+        if (pos > (off64_t)mSize) {
             pos = mSize;
         }
         mCurrentPos = pos + mOffset;
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 64bf2b6..ab30865 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -65,10 +65,9 @@
       mInputBufferCount(0),
       mOutputBufferCount(0),
       mSignalledError(false),
+      mLastInHeader(NULL),
+      mCurrentInputTime(0),
       mOutputPortSettingsChange(NONE) {
-    for (unsigned int i = 0; i < kNumDelayBlocksMax; i++) {
-        mAnchorTimeUs[i] = 0;
-    }
     initPorts();
     CHECK_EQ(initDecoder(), (status_t)OK);
 }
@@ -496,14 +495,11 @@
             } else {
                 mEndOfInput = false;
             }
-            if (inHeader->nOffset == 0) { // TODO: does nOffset != 0 happen?
-                mAnchorTimeUs[mInputBufferCount % kNumDelayBlocksMax] =
-                        inHeader->nTimeStamp;
-            }
 
             if (inHeader->nFilledLen == 0) {
                 inInfo->mOwnedByUs = false;
                 inQueue.erase(inQueue.begin());
+                mLastInHeader = NULL;
                 inInfo = NULL;
                 notifyEmptyBufferDone(inHeader);
                 inHeader = NULL;
@@ -568,6 +564,18 @@
                 INT prevSampleRate = mStreamInfo->sampleRate;
                 INT prevNumChannels = mStreamInfo->numChannels;
 
+                if (inHeader != mLastInHeader) {
+                    mLastInHeader = inHeader;
+                    mCurrentInputTime = inHeader->nTimeStamp;
+                } else {
+                    if (mStreamInfo->sampleRate) {
+                        mCurrentInputTime += mStreamInfo->aacSamplesPerFrame *
+                                1000000ll / mStreamInfo->sampleRate;
+                    } else {
+                        ALOGW("no sample rate yet");
+                    }
+                }
+                mAnchorTimes.add(mCurrentInputTime);
                 aacDecoder_Fill(mAACDecoder,
                                 inBuffer,
                                 inBufferLength,
@@ -671,6 +679,7 @@
                             inInfo->mOwnedByUs = false;
                             mInputBufferCount++;
                             inQueue.erase(inQueue.begin());
+                            mLastInHeader = NULL;
                             inInfo = NULL;
                             notifyEmptyBufferDone(inHeader);
                             inHeader = NULL;
@@ -687,11 +696,12 @@
                     inInfo->mOwnedByUs = false;
                     mInputBufferCount++;
                     inQueue.erase(inQueue.begin());
+                    mLastInHeader = NULL;
                     inInfo = NULL;
                     notifyEmptyBufferDone(inHeader);
                     inHeader = NULL;
                 } else {
-                    ALOGW("inHeader->nFilledLen = %d", inHeader->nFilledLen);
+                    ALOGV("inHeader->nFilledLen = %d", inHeader->nFilledLen);
                 }
             }
         }
@@ -779,8 +789,8 @@
                 outHeader->nFlags = 0;
             }
 
-            outHeader->nTimeStamp = mAnchorTimeUs[mOutputBufferCount
-                    % kNumDelayBlocksMax];
+            outHeader->nTimeStamp = mAnchorTimes.isEmpty() ? 0 : mAnchorTimes.itemAt(0);
+            if (!mAnchorTimes.isEmpty()) {
+                mAnchorTimes.removeAt(0);
+            }
 
             mOutputBufferCount++;
             outInfo->mOwnedByUs = false;
@@ -820,8 +830,8 @@
                     outHeader->nFilledLen = 0;
                     outHeader->nFlags = OMX_BUFFERFLAG_EOS;
 
-                    outHeader->nTimeStamp = mAnchorTimeUs[mOutputBufferCount
-                            % kNumDelayBlocksMax];
+                    outHeader->nTimeStamp = mAnchorTimes.isEmpty() ? 0 : mAnchorTimes.itemAt(0);
+                    if (!mAnchorTimes.isEmpty()) {
+                        mAnchorTimes.removeAt(0);
+                    }
 
                     mOutputBufferCount++;
                     outInfo->mOwnedByUs = false;
@@ -842,6 +852,8 @@
         // depend on fragments from the last one decoded.
         // drain all existing data
         drainDecoder();
+        mAnchorTimes.clear();
+        mLastInHeader = NULL;
     } else {
         while (outputDelayRingBufferSamplesAvailable() > 0) {
             int32_t ns = outputDelayRingBufferGetSamples(0,
@@ -896,6 +908,8 @@
     mOutputDelayRingBufferReadPos = 0;
     mEndOfInput = false;
     mEndOfOutput = false;
+    mAnchorTimes.clear();
+    mLastInHeader = NULL;
 
     // To make the codec behave the same before and after a reset, we need to invalidate the
     // streaminfo struct. This does that:
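
The SoftAAC2 changes above replace the fixed-size anchor array with a FIFO (mAnchorTimes): each frame fed to aacDecoder_Fill pushes one anchor, where the first frame of a new input buffer takes the buffer's nTimeStamp and later frames from the same buffer advance it by one frame duration; every produced output frame then pops the front entry. A hedged sketch of that bookkeeping, with illustrative names and no OMX plumbing:

    // Compute the anchor timestamp for the next frame queued from the input.
    int64_t nextAnchorUs(bool newInputBuffer, int64_t bufferTimeUs,
                         int64_t &currentUs, int samplesPerFrame, int sampleRate) {
        if (newInputBuffer) {
            currentUs = bufferTimeUs;            // first frame of this input buffer
        } else if (sampleRate > 0) {
            currentUs += (int64_t)samplesPerFrame * 1000000LL / sampleRate;
        }
        return currentUs;                        // pushed onto the anchor FIFO
    }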
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.h b/media/libstagefright/codecs/aacdec/SoftAAC2.h
index 5cde03a..865bd15 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.h
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.h
@@ -58,7 +58,9 @@
     size_t mInputBufferCount;
     size_t mOutputBufferCount;
     bool mSignalledError;
-    int64_t mAnchorTimeUs[kNumDelayBlocksMax];
+    OMX_BUFFERHEADERTYPE *mLastInHeader;
+    int64_t mCurrentInputTime;
+    Vector<int64_t> mAnchorTimes;
 
     CDrcPresModeWrapper mDrcWrap;
 
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index dc38ea8..cabd6bd 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -379,7 +379,7 @@
         }
         default:
         {
-            ALOGE("Wrong number of temporal layers %u", mTemporalLayers);
+            ALOGE("Wrong number of temporal layers %zu", mTemporalLayers);
             return UNKNOWN_ERROR;
         }
     }
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 326d85b..2af0998 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -896,6 +896,9 @@
                             ? ATSParser::DISCONTINUITY_FORMATCHANGE
                             : ATSParser::DISCONTINUITY_SEEK,
                         NULL /* extra */);
+
+                seekDiscontinuity = false;
+                explicitDiscontinuity = false;
             }
         }
 
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index 7b4bc6d..1fe6fcf 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -39,6 +39,14 @@
     uint32_t mDurationUs;
 };
 
+struct Trex {
+    uint32_t track_ID;
+    uint32_t default_sample_description_index;
+    uint32_t default_sample_duration;
+    uint32_t default_sample_size;
+    uint32_t default_sample_flags;
+};
+
 class MPEG4Extractor : public MediaExtractor {
 public:
     // Extractor assumes ownership of "source".
@@ -74,11 +82,12 @@
     };
 
     Vector<SidxEntry> mSidxEntries;
-    uint64_t mSidxDuration;
     off64_t mMoofOffset;
 
     Vector<PsshInfo> mPssh;
 
+    Vector<Trex> mTrex;
+
     sp<DataSource> mDataSource;
     status_t mInitCheck;
     bool mHasVideo;
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 16f6c58..67e6d7b 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -114,7 +114,7 @@
 
 void GraphicBufferSource::omxExecuting() {
     Mutex::Autolock autoLock(mMutex);
-    ALOGV("--> executing; avail=%d, codec vec size=%zd",
+    ALOGV("--> executing; avail=%zu, codec vec size=%zd",
             mNumFramesAvailable, mCodecBuffers.size());
     CHECK(!mExecuting);
     mExecuting = true;
@@ -136,7 +136,7 @@
         }
     }
 
-    ALOGV("done loading initial frames, avail=%d", mNumFramesAvailable);
+    ALOGV("done loading initial frames, avail=%zu", mNumFramesAvailable);
 
     // If EOS has already been signaled, and there are no more frames to
     // submit, try to send EOS now as well.
@@ -188,7 +188,7 @@
         mLooper.clear();
     }
 
-    ALOGV("--> loaded; avail=%d eos=%d eosSent=%d",
+    ALOGV("--> loaded; avail=%zu eos=%d eosSent=%d",
             mNumFramesAvailable, mEndOfStream, mEndOfStreamSent);
 
     // Codec is no longer executing.  Discard all codec-related state.
@@ -291,7 +291,7 @@
     if (mNumFramesAvailable) {
         // Fill this codec buffer.
         CHECK(!mEndOfStreamSent);
-        ALOGV("buffer freed, %d frames avail (eos=%d)",
+        ALOGV("buffer freed, %zu frames avail (eos=%d)",
                 mNumFramesAvailable, mEndOfStream);
         fillCodecBuffer_l();
     } else if (mEndOfStream) {
@@ -320,7 +320,8 @@
         ssize_t index = mOriginalTimeUs.indexOfKey(header->nTimeStamp);
         if (index >= 0) {
             ALOGV("OUT timestamp: %lld -> %lld",
-                    header->nTimeStamp, mOriginalTimeUs[index]);
+                    static_cast<long long>(header->nTimeStamp),
+                    static_cast<long long>(mOriginalTimeUs[index]));
             header->nTimeStamp = mOriginalTimeUs[index];
             mOriginalTimeUs.removeItemsAt(index);
         } else {
@@ -331,7 +332,7 @@
         }
         if (mOriginalTimeUs.size() > BufferQueue::NUM_BUFFER_SLOTS) {
             // something terribly wrong must have happened, giving up...
-            ALOGE("mOriginalTimeUs has too many entries (%d)",
+            ALOGE("mOriginalTimeUs has too many entries (%zu)",
                     mOriginalTimeUs.size());
             mMaxTimestampGapUs = -1ll;
         }
@@ -388,12 +389,12 @@
     int cbi = findAvailableCodecBuffer_l();
     if (cbi < 0) {
         // No buffers available, bail.
-        ALOGV("fillCodecBuffer_l: no codec buffers, avail now %d",
+        ALOGV("fillCodecBuffer_l: no codec buffers, avail now %zu",
                 mNumFramesAvailable);
         return false;
     }
 
-    ALOGV("fillCodecBuffer_l: acquiring buffer, avail=%d",
+    ALOGV("fillCodecBuffer_l: acquiring buffer, avail=%zu",
             mNumFramesAvailable);
     BufferQueue::BufferItem item;
     status_t err = mConsumer->acquireBuffer(&item, 0);
@@ -540,7 +541,7 @@
 
 status_t GraphicBufferSource::signalEndOfInputStream() {
     Mutex::Autolock autoLock(mMutex);
-    ALOGV("signalEndOfInputStream: exec=%d avail=%d eos=%d",
+    ALOGV("signalEndOfInputStream: exec=%d avail=%zu eos=%d",
             mExecuting, mNumFramesAvailable, mEndOfStream);
 
     if (mEndOfStream) {
@@ -580,7 +581,7 @@
                     / mTimePerCaptureUs;
             if (nFrames <= 0) {
                 // skip this frame as it's too close to previous capture
-                ALOGV("skipping frame, timeUs %lld", timeUs);
+                ALOGV("skipping frame, timeUs %lld", static_cast<long long>(timeUs));
                 return -1;
             }
             mPrevCaptureUs = mPrevCaptureUs + nFrames * mTimePerCaptureUs;
@@ -588,7 +589,9 @@
         }
 
         ALOGV("timeUs %lld, captureUs %lld, frameUs %lld",
-                timeUs, mPrevCaptureUs, mPrevFrameUs);
+                static_cast<long long>(timeUs),
+                static_cast<long long>(mPrevCaptureUs),
+                static_cast<long long>(mPrevFrameUs));
 
         return mPrevFrameUs;
     } else if (mMaxTimestampGapUs > 0ll) {
@@ -615,7 +618,9 @@
         mPrevOriginalTimeUs = originalTimeUs;
         mPrevModifiedTimeUs = timeUs;
         mOriginalTimeUs.add(timeUs, originalTimeUs);
-        ALOGV("IN  timestamp: %lld -> %lld", originalTimeUs, timeUs);
+        ALOGV("IN  timestamp: %lld -> %lld",
+            static_cast<long long>(originalTimeUs),
+            static_cast<long long>(timeUs));
     }
 
     return timeUs;
@@ -723,7 +728,7 @@
 void GraphicBufferSource::onFrameAvailable() {
     Mutex::Autolock autoLock(mMutex);
 
-    ALOGV("onFrameAvailable exec=%d avail=%d",
+    ALOGV("onFrameAvailable exec=%d avail=%zu",
             mExecuting, mNumFramesAvailable);
 
     if (mEndOfStream || mSuspended) {
diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp
index 6b6d0ab..ae3cb33 100644
--- a/media/libstagefright/omx/OMXMaster.cpp
+++ b/media/libstagefright/omx/OMXMaster.cpp
@@ -91,7 +91,7 @@
     }
 
     if (err != OMX_ErrorNoMore) {
-        ALOGE("OMX plugin failed w/ error 0x%08x after registering %d "
+        ALOGE("OMX plugin failed w/ error 0x%08x after registering %zu "
              "components", err, mPluginByComponentName.size());
     }
 }
diff --git a/media/mtp/MtpDataPacket.cpp b/media/mtp/MtpDataPacket.cpp
index c4f87a0..e6e19e3 100644
--- a/media/mtp/MtpDataPacket.cpp
+++ b/media/mtp/MtpDataPacket.cpp
@@ -363,7 +363,7 @@
 }
 
 int MtpDataPacket::writeData(int fd, void* data, uint32_t length) {
-    allocate(length);
+    allocate(length + MTP_CONTAINER_HEADER_SIZE);
     memcpy(mBuffer + MTP_CONTAINER_HEADER_SIZE, data, length);
     length += MTP_CONTAINER_HEADER_SIZE;
     MtpPacket::putUInt32(MTP_CONTAINER_LENGTH_OFFSET, length);
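
The MtpDataPacket fix above accounts for the container header: writeData() copies the payload to mBuffer + MTP_CONTAINER_HEADER_SIZE, so the allocation has to cover header plus payload or the memcpy runs past the end of the buffer. The intended layout, as a comment sketch:

    // Buffer written by writeData():
    //   [ MTP_CONTAINER_HEADER_SIZE bytes of container header ][ length bytes of payload ]
    // so the allocation must be:
    //   allocate(length + MTP_CONTAINER_HEADER_SIZE);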
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 157f2ce..aa43967 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -325,6 +325,14 @@
         mSendObjectHandle = kInvalidObjectHandle;
     }
 
+    int containertype = mRequest.getContainerType();
+    if (containertype != MTP_CONTAINER_TYPE_COMMAND) {
+        ALOGE("wrong container type %d", containertype);
+        return false;
+    }
+
+    ALOGV("got command %s (%x)", MtpDebug::getOperationCodeName(operation), operation);
+
     switch (operation) {
         case MTP_OPERATION_GET_DEVICE_INFO:
             response = doGetDeviceInfo();
@@ -415,7 +423,8 @@
             response = doEndEditObject();
             break;
         default:
-            ALOGE("got unsupported command %s", MtpDebug::getOperationCodeName(operation));
+            ALOGE("got unsupported command %s (%x)",
+                    MtpDebug::getOperationCodeName(operation), operation);
             response = MTP_RESPONSE_OPERATION_NOT_SUPPORTED;
             break;
     }
@@ -793,7 +802,7 @@
     int result = mDatabase->getObjectFilePath(handle, pathBuf, fileLength, format);
     if (result != MTP_RESPONSE_OK)
         return result;
-    if (offset + length > fileLength)
+    if (offset + length > (uint64_t)fileLength)
         length = fileLength - offset;
 
     const char* filePath = (const char *)pathBuf;
@@ -950,22 +959,28 @@
     fchmod(mfr.fd, mFilePermission);
     umask(mask);
 
-    if (initialData > 0)
+    if (initialData > 0) {
         ret = write(mfr.fd, mData.getData(), initialData);
+    }
 
-    if (mSendObjectFileSize - initialData > 0) {
-        mfr.offset = initialData;
-        if (mSendObjectFileSize == 0xFFFFFFFF) {
-            // tell driver to read until it receives a short packet
-            mfr.length = 0xFFFFFFFF;
-        } else {
-            mfr.length = mSendObjectFileSize - initialData;
+    if (ret < 0) {
+        ALOGE("failed to write initial data");
+        result = MTP_RESPONSE_GENERAL_ERROR;
+    } else {
+        if (mSendObjectFileSize - initialData > 0) {
+            mfr.offset = initialData;
+            if (mSendObjectFileSize == 0xFFFFFFFF) {
+                // tell driver to read until it receives a short packet
+                mfr.length = 0xFFFFFFFF;
+            } else {
+                mfr.length = mSendObjectFileSize - initialData;
+            }
+
+            ALOGV("receiving %s\n", (const char *)mSendObjectFilePath);
+            // transfer the file
+            ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
+            ALOGV("MTP_RECEIVE_FILE returned %d\n", ret);
         }
-
-        ALOGV("receiving %s\n", (const char *)mSendObjectFilePath);
-        // transfer the file
-        ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
-        ALOGV("MTP_RECEIVE_FILE returned %d\n", ret);
     }
     close(mfr.fd);
 
@@ -990,7 +1005,7 @@
 
 static void deleteRecursive(const char* path) {
     char pathbuf[PATH_MAX];
-    int pathLength = strlen(path);
+    size_t pathLength = strlen(path);
     if (pathLength >= sizeof(pathbuf) - 1) {
         ALOGE("path too long: %s\n", path);
     }
@@ -1112,12 +1127,13 @@
 
     // can't start writing past the end of the file
     if (offset > edit->mSize) {
-        ALOGD("writing past end of object, offset: %lld, edit->mSize: %lld", offset, edit->mSize);
+        ALOGD("writing past end of object, offset: %" PRIu64 ", edit->mSize: %" PRIu64,
+            offset, edit->mSize);
         return MTP_RESPONSE_GENERAL_ERROR;
     }
 
     const char* filePath = (const char *)edit->mPath;
-    ALOGV("receiving partial %s %lld %" PRIu32 "\n", filePath, offset, length);
+    ALOGV("receiving partial %s %" PRIu64 " %" PRIu32, filePath, offset, length);
 
     // read the header, and possibly some data
     int ret = mData.read(mFD);
@@ -1131,15 +1147,19 @@
         length -= initialData;
     }
 
-    if (length > 0) {
-        mtp_file_range  mfr;
-        mfr.fd = edit->mFD;
-        mfr.offset = offset;
-        mfr.length = length;
+    if (ret < 0) {
+        ALOGE("failed to write initial data");
+    } else {
+        if (length > 0) {
+            mtp_file_range  mfr;
+            mfr.fd = edit->mFD;
+            mfr.offset = offset;
+            mfr.length = length;
 
-        // transfer the file
-        ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
-        ALOGV("MTP_RECEIVE_FILE returned %d", ret);
+            // transfer the file
+            ret = ioctl(mFD, MTP_RECEIVE_FILE, (unsigned long)&mfr);
+            ALOGV("MTP_RECEIVE_FILE returned %d", ret);
+        }
     }
     if (ret < 0) {
         mResponse.setParameter(1, 0);
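
Both MtpServer receive paths now check the write() of the data that arrived with the header before asking the kernel driver for the rest of the file, instead of continuing after a failed write. The pattern, reduced to a sketch with placeholder variables:

    ssize_t ret = 0;
    if (initialData > 0) {
        ret = write(fd, data, initialData);   // data that came in with the MTP header
    }
    if (ret < 0) {
        // report MTP_RESPONSE_GENERAL_ERROR; skip the driver transfer entirely
    } else if (remaining > 0) {
        // hand the remainder to the driver via the MTP_RECEIVE_FILE ioctl
    }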
diff --git a/media/mtp/MtpStorageInfo.cpp b/media/mtp/MtpStorageInfo.cpp
index dcd37cd..2b1a9ae 100644
--- a/media/mtp/MtpStorageInfo.cpp
+++ b/media/mtp/MtpStorageInfo.cpp
@@ -16,6 +16,8 @@
 
 #define LOG_TAG "MtpStorageInfo"
 
+#include <inttypes.h>
+
 #include "MtpDebug.h"
 #include "MtpDataPacket.h"
 #include "MtpStorageInfo.h"
@@ -63,7 +65,7 @@
 void MtpStorageInfo::print() {
     ALOGD("Storage Info %08X:\n\tmStorageType: %d\n\tmFileSystemType: %d\n\tmAccessCapability: %d\n",
             mStorageID, mStorageType, mFileSystemType, mAccessCapability);
-    ALOGD("\tmMaxCapacity: %lld\n\tmFreeSpaceBytes: %lld\n\tmFreeSpaceObjects: %d\n",
+    ALOGD("\tmMaxCapacity: %" PRIu64 "\n\tmFreeSpaceBytes: %" PRIu64 "\n\tmFreeSpaceObjects: %d\n",
             mMaxCapacity, mFreeSpaceBytes, mFreeSpaceObjects);
     ALOGD("\tmStorageDescription: %s\n\tmVolumeIdentifier: %s\n",
             mStorageDescription, mVolumeIdentifier);
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index c5d8858..ed00b72 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include <inttypes.h>
+
 //#define LOG_NDEBUG 0
 #define LOG_TAG "NdkMediaCodec"
 
@@ -257,7 +259,7 @@
     if (mData->mCodec->getInputBuffers(&abufs) == 0) {
         size_t n = abufs.size();
         if (idx >= n) {
-            ALOGE("buffer index %d out of range", idx);
+            ALOGE("buffer index %zu out of range", idx);
             return NULL;
         }
         if (out_size != NULL) {
@@ -275,7 +277,7 @@
     if (mData->mCodec->getOutputBuffers(&abufs) == 0) {
         size_t n = abufs.size();
         if (idx >= n) {
-            ALOGE("buffer index %d out of range", idx);
+            ALOGE("buffer index %zu out of range", idx);
             return NULL;
         }
         if (out_size != NULL) {
@@ -345,7 +347,7 @@
 EXPORT
 media_status_t AMediaCodec_releaseOutputBufferAtTime(
         AMediaCodec *mData, size_t idx, int64_t timestampNs) {
-    ALOGV("render @ %lld", timestampNs);
+    ALOGV("render @ %" PRId64, timestampNs);
     return translate_error(mData->mCodec->renderOutputBufferAndRelease(idx, timestampNs));
 }
 
@@ -413,7 +415,7 @@
     size_t cryptosize = sizeof(AMediaCodecCryptoInfo) + sizeof(size_t) * numsubsamples * 2;
     AMediaCodecCryptoInfo *ret = (AMediaCodecCryptoInfo*) malloc(cryptosize);
     if (!ret) {
-        ALOGE("couldn't allocate %d bytes", cryptosize);
+        ALOGE("couldn't allocate %zu bytes", cryptosize);
         return NULL;
     }
     ret->numsubsamples = numsubsamples;
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 492e002..970a43c 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -133,13 +133,13 @@
 
 EXPORT
 media_status_t AMediaExtractor_selectTrack(AMediaExtractor *mData, size_t idx) {
-    ALOGV("selectTrack(%z)", idx);
+    ALOGV("selectTrack(%zu)", idx);
     return translate_error(mData->mImpl->selectTrack(idx));
 }
 
 EXPORT
 media_status_t AMediaExtractor_unselectTrack(AMediaExtractor *mData, size_t idx) {
-    ALOGV("unselectTrack(%z)", idx);
+    ALOGV("unselectTrack(%zu)", idx);
     return translate_error(mData->mImpl->unselectTrack(idx));
 }
 
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index 67dc2c2..a354d58 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -17,6 +17,7 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "NdkMediaFormat"
 
+#include <inttypes.h>
 
 #include "NdkMediaFormat.h"
 
@@ -89,21 +90,21 @@
             {
                 int32_t val;
                 f->findInt32(name, &val);
-                ret.appendFormat("int32(%d)", val);
+                ret.appendFormat("int32(%" PRId32 ")", val);
                 break;
             }
             case AMessage::kTypeInt64:
             {
                 int64_t val;
                 f->findInt64(name, &val);
-                ret.appendFormat("int64(%lld)", val);
+                ret.appendFormat("int64(%" PRId64 ")", val);
                 break;
             }
             case AMessage::kTypeSize:
             {
                 size_t val;
                 f->findSize(name, &val);
-                ret.appendFormat("size_t(%d)", val);
+                ret.appendFormat("size_t(%zu)", val);
                 break;
             }
             case AMessage::kTypeFloat:
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 6130084..79bdfe8 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -54,6 +54,7 @@
                 return mStreamType;
             }
             bool        isOffloaded() const { return (mFlags & IAudioFlinger::TRACK_OFFLOAD) != 0; }
+            bool        isDirect() const { return (mFlags & IAudioFlinger::TRACK_DIRECT) != 0; }
             status_t    setParameters(const String8& keyValuePairs);
             status_t    attachAuxEffect(int EffectId);
             void        setAuxBuffer(int EffectId, int32_t *buffer);
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 792d722..67a0119 100755
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1661,7 +1661,7 @@
     track->mState = TrackBase::STOPPED;
     if (!trackActive) {
         removeTrack_l(track);
-    } else if (track->isFastTrack() || track->isOffloaded()) {
+    } else if (track->isFastTrack() || track->isOffloaded() || track->isDirect()) {
         track->mState = TrackBase::STOPPING_1;
     }
 
@@ -1870,7 +1870,9 @@
     }
     mNormalFrameCount = multiplier * mFrameCount;
     // round up to nearest 16 frames to satisfy AudioMixer
-    mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
+    if (mType == MIXER || mType == DUPLICATING) {
+        mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
+    }
     ALOGI("HAL output buffer size %u frames, normal sink buffer size %u frames", mFrameCount,
             mNormalFrameCount);
 
@@ -2658,7 +2660,7 @@
     if (mNormalSink != 0) {
         return mNormalSink->getTimestamp(timestamp);
     }
-    if (mType == OFFLOAD && mOutput->stream->get_presentation_position) {
+    if ((mType == OFFLOAD || mType == DIRECT) && mOutput->stream->get_presentation_position) {
         uint64_t position64;
         int ret = mOutput->stream->get_presentation_position(
                                                 mOutput->stream, &position64, &timestamp.mTime);
@@ -2862,8 +2864,6 @@
         }
 #endif
 
-    } else {
-        mFastMixer = NULL;
     }
 
     switch (kUseFastMixer) {
@@ -2882,7 +2882,7 @@
 
 AudioFlinger::MixerThread::~MixerThread()
 {
-    if (mFastMixer != NULL) {
+    if (mFastMixer != 0) {
         FastMixerStateQueue *sq = mFastMixer->sq();
         FastMixerState *state = sq->begin();
         if (state->mCommand == FastMixerState::COLD_IDLE) {
@@ -2904,7 +2904,7 @@
         ALOG_ASSERT(fastTrack->mBufferProvider != NULL);
         delete fastTrack->mBufferProvider;
         sq->end(false /*didModify*/);
-        delete mFastMixer;
+        mFastMixer.clear();
 #ifdef AUDIO_WATCHDOG
         if (mAudioWatchdog != 0) {
             mAudioWatchdog->requestExit();
@@ -2920,7 +2920,7 @@
 
 uint32_t AudioFlinger::MixerThread::correctLatency_l(uint32_t latency) const
 {
-    if (mFastMixer != NULL) {
+    if (mFastMixer != 0) {
         MonoPipe *pipe = (MonoPipe *)mPipeSink.get();
         latency += (pipe->getAvgFrames() * 1000) / mSampleRate;
     }
@@ -2937,7 +2937,7 @@
 {
     // FIXME we should only do one push per cycle; confirm this is true
     // Start the fast mixer if it's not already running
-    if (mFastMixer != NULL) {
+    if (mFastMixer != 0) {
         FastMixerStateQueue *sq = mFastMixer->sq();
         FastMixerState *state = sq->begin();
         if (state->mCommand != FastMixerState::MIX_WRITE &&
@@ -2971,7 +2971,7 @@
 void AudioFlinger::MixerThread::threadLoop_standby()
 {
     // Idle the fast mixer if it's currently running
-    if (mFastMixer != NULL) {
+    if (mFastMixer != 0) {
         FastMixerStateQueue *sq = mFastMixer->sq();
         FastMixerState *state = sq->begin();
         if (!(state->mCommand & FastMixerState::IDLE)) {
@@ -3134,7 +3134,7 @@
     FastMixerState *state = NULL;
     bool didModify = false;
     FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED;
-    if (mFastMixer != NULL) {
+    if (mFastMixer != 0) {
         sq = mFastMixer->sq();
         state = sq->begin();
     }
@@ -3677,7 +3677,7 @@
 
     // if !&IDLE, holds the FastMixer state to restore after new parameters processed
     FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
-    if (mFastMixer != NULL) {
+    if (mFastMixer != 0) {
         FastMixerStateQueue *sq = mFastMixer->sq();
         FastMixerState *state = sq->begin();
         if (!(state->mCommand & FastMixerState::IDLE)) {
@@ -3782,7 +3782,7 @@
     }
 
     if (!(previousCommand & FastMixerState::IDLE)) {
-        ALOG_ASSERT(mFastMixer != NULL);
+        ALOG_ASSERT(mFastMixer != 0);
         FastMixerStateQueue *sq = mFastMixer->sq();
         FastMixerState *state = sq->begin();
         ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
@@ -3949,14 +3949,16 @@
         // The first time a track is added we wait
         // for all its buffers to be filled before processing it
         uint32_t minFrames;
-        if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing()) {
+        if ((track->sharedBuffer() == 0) && !track->isStopping_1() && !track->isPausing()) {
             minFrames = mNormalFrameCount;
         } else {
             minFrames = 1;
         }
 
-        if ((track->framesReady() >= minFrames) && track->isReady() &&
-                !track->isPaused() && !track->isTerminated())
+        ALOGI("prepareTracks_l minFrames %d state %d frames ready %d, ",
+              minFrames, track->mState, track->framesReady());
+        if ((track->framesReady() >= minFrames) && track->isReady() && !track->isPaused() &&
+                !track->isStopping_2() && !track->isStopped())
         {
             ALOGVV("track %d s=%08x [OK]", track->name(), cblk->mServer);
 
@@ -3983,17 +3985,26 @@
             if (!mEffectChains.isEmpty() && last) {
                 mEffectChains[0]->clearInputBuffer();
             }
-
-            ALOGVV("track %d s=%08x [NOT READY]", track->name(), cblk->mServer);
-            if ((track->sharedBuffer() != 0) || track->isTerminated() ||
-                    track->isStopped() || track->isPaused()) {
+            if (track->isStopping_1()) {
+                track->mState = TrackBase::STOPPING_2;
+            }
+            if ((track->sharedBuffer() != 0) || track->isStopped() ||
+                    track->isStopping_2() || track->isPaused()) {
                 // We have consumed all the buffers of this track.
                 // Remove it from the list of active tracks.
-                // TODO: implement behavior for compressed audio
-                size_t audioHALFrames = (latency_l() * mSampleRate) / 1000;
+                size_t audioHALFrames;
+                if (audio_is_linear_pcm(mFormat)) {
+                    audioHALFrames = (latency_l() * mSampleRate) / 1000;
+                } else {
+                    audioHALFrames = 0;
+                }
+
                 size_t framesWritten = mBytesWritten / mFrameSize;
                 if (mStandby || !last ||
                         track->presentationComplete(framesWritten, audioHALFrames)) {
+                    if (track->isStopping_2()) {
+                        track->mState = TrackBase::STOPPED;
+                    }
                     if (track->isStopped()) {
                         track->reset();
                     }
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 07887fb..93d2635 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -852,7 +852,7 @@
                 AudioMixer* mAudioMixer;    // normal mixer
 private:
                 // one-time initialization, no locks required
-                FastMixer*  mFastMixer;         // non-NULL if there is also a fast mixer
+                sp<FastMixer>     mFastMixer;     // non-0 if there is also a fast mixer
                 sp<AudioWatchdog> mAudioWatchdog; // non-0 if there is an audio watchdog thread
 
                 // contents are not guaranteed to be consistent, no locks required
@@ -868,7 +868,7 @@
                 int32_t     mFastMixerFutex;    // for cold idle
 
 public:
-    virtual     bool        hasFastMixer() const { return mFastMixer != NULL; }
+    virtual     bool        hasFastMixer() const { return mFastMixer != 0; }
     virtual     FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex) const {
                               ALOG_ASSERT(fastIndex < FastMixerState::kMaxFastTracks);
                               return mFastMixerDumpState.mTracks[fastIndex].mUnderruns;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 1e906ad..4fbb973 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -706,7 +706,7 @@
             if (playbackThread->mActiveTracks.indexOf(this) < 0) {
                 reset();
                 mState = STOPPED;
-            } else if (!isFastTrack() && !isOffloaded()) {
+            } else if (!isFastTrack() && !isOffloaded() && !isDirect()) {
                 mState = STOPPED;
             } else {
                 // For fast tracks prepareTracks_l() will set state to STOPPING_2
@@ -860,7 +860,7 @@
     }
     Mutex::Autolock _l(thread->mLock);
     PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-    if (!isOffloaded()) {
+    if (!isOffloaded() && !isDirect()) {
         if (!playbackThread->mLatchQValid) {
             mPreviousValid = false;
             return INVALID_OPERATION;
@@ -980,8 +980,6 @@
     }
 
     if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
-        ALOGV("presentationComplete() session %d complete: framesWritten %d",
-                  mSessionId, framesWritten);
         triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
         mAudioTrackServerProxy->setStreamEndDone();
         return true;
diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk
index 874f18f..7bba05b 100644
--- a/services/audioflinger/tests/Android.mk
+++ b/services/audioflinger/tests/Android.mk
@@ -1,5 +1,8 @@
 # Build the unit tests for audioflinger
 
+#
+# resampler unit test
+#
 LOCAL_PATH:= $(call my-dir)
 include $(CLEAR_VARS)
 
@@ -20,6 +23,7 @@
 	bionic/libstdc++/include \
 	external/gtest/include \
 	external/stlport/stlport \
+	$(call include-path-for, audio-utils) \
 	frameworks/av/services/audioflinger
 
 LOCAL_SRC_FILES := \
@@ -29,3 +33,41 @@
 LOCAL_MODULE_TAGS := tests
 
 include $(BUILD_EXECUTABLE)
+
+#
+# audio mixer test tool
+#
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+	test-mixer.cpp \
+	../AudioMixer.cpp.arm \
+
+LOCAL_C_INCLUDES := \
+	bionic \
+	bionic/libstdc++/include \
+	external/stlport/stlport \
+	$(call include-path-for, audio-effects) \
+	$(call include-path-for, audio-utils) \
+	frameworks/av/services/audioflinger
+
+LOCAL_STATIC_LIBRARIES := \
+	libsndfile
+
+LOCAL_SHARED_LIBRARIES := \
+	libstlport \
+	libeffects \
+	libnbaio \
+	libcommon_time_client \
+	libaudioresampler \
+	libaudioutils \
+	libdl \
+	libcutils \
+	libutils \
+	liblog
+
+LOCAL_MODULE:= test-mixer
+
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_EXECUTABLE)
diff --git a/services/audioflinger/tests/mixer_to_wav_tests.sh b/services/audioflinger/tests/mixer_to_wav_tests.sh
new file mode 100755
index 0000000..93bff47
--- /dev/null
+++ b/services/audioflinger/tests/mixer_to_wav_tests.sh
@@ -0,0 +1,134 @@
+#!/bin/bash
+#
+# This script uses test-mixer to generate WAV files
+# for evaluation of the AudioMixer component.
+#
+# Sine and chirp signals are used for input because they
+# show up as clear lines, either horizontal or diagonal,
+# on a spectrogram, which makes it easy to verify
+# multi-track mixing.
+#
+# After execution, look for created subdirectories like
+# mixer_i_i
+# mixer_i_f
+# mixer_f_f
+#
+# We recommend using a program such as Audacity to evaluate
+# the output WAV files, e.g.
+#
+# cd testdir
+# audacity *.wav
+#
+# Using Audacity:
+#
+# Under "Waveform" view mode you can zoom into the
+# start of the WAV file to verify proper ramping.
+#
+# Select "Spectrogram" to see verify the lines
+# (sine = horizontal, chirp = diagonal) which should
+# be clear (except for around the start as the volume
+# ramping causes spectral distortion).
+
+if [ -z "$ANDROID_BUILD_TOP" ]; then
+    echo "Android build environment not set"
+    exit -1
+fi
+
+# ensure we have mm
+. $ANDROID_BUILD_TOP/build/envsetup.sh
+
+pushd $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/
+
+# build
+pwd
+mm
+
+# send to device
+echo "waiting for device"
+adb root && adb wait-for-device remount
+adb push $OUT/system/lib/libaudioresampler.so /system/lib
+adb push $OUT/system/bin/test-mixer /system/bin
+
+# createwav creates a series of WAV files testing various
+# mixer settings
+# $1 = flags
+# $2 = directory
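+# e.g. createwav "-f -m" "tests/mixer_f_f"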
+function createwav() {
+# create directory if it doesn't exist
+    if [ ! -d $2 ]; then
+        mkdir $2
+    fi
+
+# Test:
+# process__genericResampling
+# track__Resample / track__genericResample
+    adb shell test-mixer $1 -s 48000 \
+        -o /sdcard/tm48000gr.wav \
+        sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000
+    adb pull /sdcard/tm48000gr.wav $2
+
+# Test:
+# process__genericResample
+# track__Resample / track__genericResample
+# track__NoResample / track__16BitsStereo / track__16BitsMono
+# Aux buffer
+    adb shell test-mixer $1 -s 9307 \
+        -a /sdcard/aux9307gra.wav -o /sdcard/tm9307gra.wav \
+        sine:2,1000,3000 sine:1,2000,9307 chirp:2,9307
+    adb pull /sdcard/tm9307gra.wav $2
+    adb pull /sdcard/aux9307gra.wav $2
+
+# Test:
+# process__genericNoResampling
+# track__NoResample / track__16BitsStereo / track__16BitsMono
+    adb shell test-mixer $1 -s 32000 \
+        -o /sdcard/tm32000gnr.wav \
+        sine:2,1000,32000 chirp:2,32000  sine:1,3000,32000
+    adb pull /sdcard/tm32000gnr.wav $2
+
+# Test:
+# process__genericNoResampling
+# track__NoResample / track__16BitsStereo / track__16BitsMono
+# Aux buffer
+    adb shell test-mixer $1 -s 32000 \
+        -a /sdcard/aux32000gnra.wav -o /sdcard/tm32000gnra.wav \
+        sine:2,1000,32000 chirp:2,32000  sine:1,3000,32000
+    adb pull /sdcard/tm32000gnra.wav $2
+    adb pull /sdcard/aux32000gnra.wav $2
+
+# Test:
+# process__NoResampleOneTrack / process__OneTrack16BitsStereoNoResampling
+# Downmixer
+    adb shell test-mixer $1 -s 32000 \
+        -o /sdcard/tm32000nrot.wav \
+        sine:6,1000,32000
+    adb pull /sdcard/tm32000nrot.wav $2
+
+# Test:
+# process__NoResampleOneTrack / OneTrack16BitsStereoNoResampling
+# Aux buffer
+    adb shell test-mixer $1 -s 44100 \
+        -a /sdcard/aux44100nrota.wav -o /sdcard/tm44100nrota.wav \
+        sine:2,2000,44100
+    adb pull /sdcard/tm44100nrota.wav $2
+    adb pull /sdcard/aux44100nrota.wav $2
+}
+
+#
+# Call createwav to generate WAV files in various combinations
+#
+# i_i = integer input track, integer mixer output
+# f_f = float input track,   float mixer output
+# i_f = integer input track, float mixer output
+#
+# If the mixer output is float, then the output WAV file is pcm float.
+#
+# TODO: create a "snr" like "diff" to automatically
+# compare files in these directories together.
+#
+
+createwav "" "tests/mixer_i_i"
+createwav "-f -m" "tests/mixer_f_f"
+createwav "-m" "tests/mixer_i_f"
+
+popd
diff --git a/services/audioflinger/tests/resampler_tests.cpp b/services/audioflinger/tests/resampler_tests.cpp
index 8f9c270..4a67d0b 100644
--- a/services/audioflinger/tests/resampler_tests.cpp
+++ b/services/audioflinger/tests/resampler_tests.cpp
@@ -33,200 +33,7 @@
 #include <gtest/gtest.h>
 #include <media/AudioBufferProvider.h>
 #include "AudioResampler.h"
-
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
-
-template<typename T, typename U>
-struct is_same
-{
-    static const bool value = false;
-};
-
-template<typename T>
-struct is_same<T, T>  // partial specialization
-{
-    static const bool value = true;
-};
-
-template<typename T>
-static inline T convertValue(double val)
-{
-    if (is_same<T, int16_t>::value) {
-        return floor(val * 32767.0 + 0.5);
-    } else if (is_same<T, int32_t>::value) {
-        return floor(val * (1UL<<31) + 0.5);
-    }
-    return val; // assume float or double
-}
-
-/* Creates a type-independent audio buffer provider from
- * a buffer base address, size, framesize, and input increment array.
- *
- * No allocation or deallocation of the provided buffer is done.
- */
-class TestProvider : public android::AudioBufferProvider {
-public:
-    TestProvider(const void* addr, size_t frames, size_t frameSize,
-            const std::vector<size_t>& inputIncr)
-    : mAddr(addr),
-      mNumFrames(frames),
-      mFrameSize(frameSize),
-      mNextFrame(0), mUnrel(0), mInputIncr(inputIncr), mNextIdx(0)
-    {
-    }
-
-    virtual android::status_t getNextBuffer(Buffer* buffer, int64_t pts __unused = kInvalidPTS )
-    {
-        size_t requestedFrames = buffer->frameCount;
-        if (requestedFrames > mNumFrames - mNextFrame) {
-            buffer->frameCount = mNumFrames - mNextFrame;
-        }
-        if (!mInputIncr.empty()) {
-            size_t provided = mInputIncr[mNextIdx++];
-            ALOGV("getNextBuffer() mValue[%d]=%u not %u",
-                    mNextIdx-1, provided, buffer->frameCount);
-            if (provided < buffer->frameCount) {
-                buffer->frameCount = provided;
-            }
-            if (mNextIdx >= mInputIncr.size()) {
-                mNextIdx = 0;
-            }
-        }
-        ALOGV("getNextBuffer() requested %u frames out of %u frames available"
-                " and returned %u frames\n",
-                requestedFrames, mNumFrames - mNextFrame, buffer->frameCount);
-        mUnrel = buffer->frameCount;
-        if (buffer->frameCount > 0) {
-            buffer->raw = (char *)mAddr + mFrameSize * mNextFrame;
-            return android::NO_ERROR;
-        } else {
-            buffer->raw = NULL;
-            return android::NOT_ENOUGH_DATA;
-        }
-    }
-
-    virtual void releaseBuffer(Buffer* buffer)
-    {
-        if (buffer->frameCount > mUnrel) {
-            ALOGE("releaseBuffer() released %u frames but only %u available "
-                    "to release\n", buffer->frameCount, mUnrel);
-            mNextFrame += mUnrel;
-            mUnrel = 0;
-        } else {
-
-            ALOGV("releaseBuffer() released %u frames out of %u frames available "
-                    "to release\n", buffer->frameCount, mUnrel);
-            mNextFrame += buffer->frameCount;
-            mUnrel -= buffer->frameCount;
-        }
-        buffer->frameCount = 0;
-        buffer->raw = NULL;
-    }
-
-    void reset()
-    {
-        mNextFrame = 0;
-    }
-
-    size_t getNumFrames()
-    {
-        return mNumFrames;
-    }
-
-    void setIncr(const std::vector<size_t> inputIncr)
-    {
-        mNextIdx = 0;
-        mInputIncr = inputIncr;
-    }
-
-protected:
-    const void* mAddr;   // base address
-    size_t mNumFrames;   // total frames
-    int mFrameSize;      // frame size (# channels * bytes per sample)
-    size_t mNextFrame;   // index of next frame to provide
-    size_t mUnrel;       // number of frames not yet released
-    std::vector<size_t> mInputIncr; // number of frames provided per call
-    size_t mNextIdx;     // index of next entry in mInputIncr to use
-};
-
-/* Creates a buffer filled with a sine wave.
- *
- * Returns a pair consisting of the sine signal buffer and the number of frames.
- * The caller must delete[] the buffer when no longer needed (no shared_ptr<>).
- */
-template<typename T>
-static std::pair<T*, size_t> createSine(size_t channels,
-        double freq, double samplingRate, double time)
-{
-    double tscale = 1. / samplingRate;
-    size_t frames = static_cast<size_t>(samplingRate * time);
-    T* buffer = new T[frames * channels];
-    for (size_t i = 0; i < frames; ++i) {
-        double t = i * tscale;
-        double y = sin(2. * M_PI * freq * t);
-        T yt = convertValue<T>(y);
-
-        for (size_t j = 0; j < channels; ++j) {
-            buffer[i*channels + j] = yt / (j + 1);
-        }
-    }
-    return std::make_pair(buffer, frames);
-}
-
-/* Creates a buffer filled with a chirp signal (a sine wave sweep).
- *
- * Returns a pair consisting of the chirp signal buffer and the number of frames.
- * The caller must delete[] the buffer when no longer needed (no shared_ptr<>).
- *
- * When creating the Chirp, note that the frequency is the true sinusoidal
- * frequency not the sampling rate.
- *
- * http://en.wikipedia.org/wiki/Chirp
- */
-template<typename T>
-static std::pair<T*, size_t> createChirp(size_t channels,
-        double minfreq, double maxfreq, double samplingRate, double time)
-{
-    double tscale = 1. / samplingRate;
-    size_t frames = static_cast<size_t>(samplingRate * time);
-    T *buffer = new T[frames * channels];
-    // note the chirp constant k has a divide-by-two.
-    double k = (maxfreq - minfreq) / (2. * time);
-    for (size_t i = 0; i < frames; ++i) {
-        double t = i * tscale;
-        double y = sin(2. * M_PI * (k * t + minfreq) * t);
-        T yt = convertValue<T>(y);
-
-        for (size_t j = 0; j < channels; ++j) {
-            buffer[i*channels + j] = yt / (j + 1);
-        }
-    }
-    return std::make_pair(buffer, frames);
-}
-
-/* This derived class creates a buffer provider of datatype T,
- * consisting of an input signal, e.g. from createChirp().
- * The number of frames can be obtained from the base class
- * TestProvider::getNumFrames().
- */
-template <typename T>
-class SignalProvider : public TestProvider {
-public:
-    SignalProvider(const std::pair<T*, size_t>& bufferInfo, size_t channels,
-            const std::vector<size_t>& values)
-    : TestProvider(bufferInfo.first, bufferInfo.second, channels * sizeof(T), values),
-      mManagedPtr(bufferInfo.first)
-    {
-    }
-
-    virtual ~SignalProvider()
-    {
-        delete[] mManagedPtr;
-    }
-
-protected:
-    T* mManagedPtr;
-};
+#include "test_utils.h"
 
 void resample(void *output, size_t outputFrames, const std::vector<size_t> &outputIncr,
         android::AudioBufferProvider *provider, android::AudioResampler *resampler)
@@ -261,10 +68,11 @@
         enum android::AudioResampler::src_quality quality)
 {
     // create the provider
-    std::vector<size_t> inputIncr;
-    SignalProvider<int16_t> provider(createChirp<int16_t>(channels,
-            0., outputFreq/2., outputFreq, outputFreq/2000.),
-            channels, inputIncr);
+    std::vector<int> inputIncr;
+    SignalProvider provider;
+    provider.setChirp<int16_t>(channels,
+            0., outputFreq/2., outputFreq, outputFreq/2000.);
+    provider.setIncr(inputIncr);
 
     // calculate the output size
     size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq;
@@ -339,10 +147,11 @@
         enum android::AudioResampler::src_quality quality)
 {
     // create the provider
-    std::vector<size_t> inputIncr;
-    SignalProvider<int16_t> provider(createChirp<int16_t>(channels,
-            0., inputFreq/2., inputFreq, inputFreq/2000.),
-            channels, inputIncr);
+    std::vector<int> inputIncr;
+    SignalProvider provider;
+    provider.setChirp<int16_t>(channels,
+            0., inputFreq/2., inputFreq, inputFreq/2000.);
+    provider.setIncr(inputIncr);
 
     // calculate the output size
     size_t outputFrames = ((int64_t) provider.getNumFrames() * outputFreq) / inputFreq;
diff --git a/services/audioflinger/tests/test-mixer.cpp b/services/audioflinger/tests/test-mixer.cpp
new file mode 100644
index 0000000..3940702
--- /dev/null
+++ b/services/audioflinger/tests/test-mixer.cpp
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <math.h>
+#include <vector>
+#include <audio_utils/primitives.h>
+#include <audio_utils/sndfile.h>
+#include <media/AudioBufferProvider.h>
+#include "AudioMixer.h"
+#include "test_utils.h"
+
+/* Testing is typically through creation of an output WAV file from several
+ * source inputs, to be later analyzed by an audio program such as Audacity.
+ *
+ * Sine or chirp functions are typically more useful as input to the mixer
+ * as they show up as straight lines on a spectrogram if successfully mixed.
+ *
+ * A sample shell script is provided: mixer_to_wav_tests.sh
+ */
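+
+// Example invocation (taken from mixer_to_wav_tests.sh):
+//   test-mixer -s 48000 -o /sdcard/tm48000gr.wav \
+//       sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000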
+
+using namespace android;
+
+static void usage(const char* name) {
+    fprintf(stderr, "Usage: %s [-f] [-m]"
+                    " [-s sample-rate] [-o <output-file>] [-a <aux-buffer-file>] [-P csv]"
+                    " (<input-file> | <command>)+\n", name);
+    fprintf(stderr, "    -f    enable floating point input track\n");
+    fprintf(stderr, "    -m    enable floating point mixer output\n");
+    fprintf(stderr, "    -s    mixer sample-rate\n");
+    fprintf(stderr, "    -o    <output-file> WAV file, pcm16 (or float if -m specified)\n");
+    fprintf(stderr, "    -a    <aux-buffer-file>\n");
+    fprintf(stderr, "    -P    # frames provided per call to resample() in CSV format\n");
+    fprintf(stderr, "    <input-file> is a WAV file\n");
+    fprintf(stderr, "    <command> can be 'sine:<channels>,<frequency>,<samplerate>'\n");
+    fprintf(stderr, "                     'chirp:<channels>,<samplerate>'\n");
+}
+
+static int writeFile(const char *filename, const void *buffer,
+        uint32_t sampleRate, uint32_t channels, size_t frames, bool isBufferFloat) {
+    if (filename == NULL) {
+        return 0; // ok to pass in NULL filename
+    }
+    // write output to file.
+    SF_INFO info;
+    info.frames = 0;
+    info.samplerate = sampleRate;
+    info.channels = channels;
+    info.format = SF_FORMAT_WAV | (isBufferFloat ? SF_FORMAT_FLOAT : SF_FORMAT_PCM_16);
+    printf("saving file:%s  channels:%d  samplerate:%d  frames:%d\n",
+            filename, info.channels, info.samplerate, frames);
+    SNDFILE *sf = sf_open(filename, SFM_WRITE, &info);
+    if (sf == NULL) {
+        perror(filename);
+        return EXIT_FAILURE;
+    }
+    if (isBufferFloat) {
+        (void) sf_writef_float(sf, (float*)buffer, frames);
+    } else {
+        (void) sf_writef_short(sf, (short*)buffer, frames);
+    }
+    sf_close(sf);
+    return EXIT_SUCCESS;
+}
+
+int main(int argc, char* argv[]) {
+    const char* const progname = argv[0];
+    bool useInputFloat = false;
+    bool useMixerFloat = false;
+    bool useRamp = true;
+    uint32_t outputSampleRate = 48000;
+    uint32_t outputChannels = 2; // stereo for now
+    std::vector<int> Pvalues;
+    const char* outputFilename = NULL;
+    const char* auxFilename = NULL;
+    std::vector<int32_t> Names;
+    std::vector<SignalProvider> Providers;
+
+    for (int ch; (ch = getopt(argc, argv, "fms:o:a:P:")) != -1;) {
+        switch (ch) {
+        case 'f':
+            useInputFloat = true;
+            break;
+        case 'm':
+            useMixerFloat = true;
+            break;
+        case 's':
+            outputSampleRate = atoi(optarg);
+            break;
+        case 'o':
+            outputFilename = optarg;
+            break;
+        case 'a':
+            auxFilename = optarg;
+            break;
+        case 'P':
+            if (parseCSV(optarg, Pvalues) < 0) {
+                fprintf(stderr, "incorrect syntax for -P option\n");
+                return EXIT_FAILURE;
+            }
+            break;
+        case '?':
+        default:
+            usage(progname);
+            return EXIT_FAILURE;
+        }
+    }
+    argc -= optind;
+    argv += optind;
+
+    if (argc == 0) {
+        usage(progname);
+        return EXIT_FAILURE;
+    }
+    if ((unsigned)argc > AudioMixer::MAX_NUM_TRACKS) {
+        fprintf(stderr, "too many tracks: %d > %u", argc, AudioMixer::MAX_NUM_TRACKS);
+        return EXIT_FAILURE;
+    }
+
+    size_t outputFrames = 0;
+
+    // create providers for each track
+    Providers.resize(argc);
+    for (int i = 0; i < argc; ++i) {
+        static const char chirp[] = "chirp:";
+        static const char sine[] = "sine:";
+        static const double kSeconds = 1;
+
+        if (!strncmp(argv[i], chirp, strlen(chirp))) {
+            std::vector<int> v;
+
+            parseCSV(argv[i] + strlen(chirp), v);
+            if (v.size() == 2) {
+                printf("creating chirp(%d %d)\n", v[0], v[1]);
+                if (useInputFloat) {
+                    Providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds);
+                } else {
+                    Providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds);
+                }
+                Providers[i].setIncr(Pvalues);
+            } else {
+                fprintf(stderr, "malformed input '%s'\n", argv[i]);
+            }
+        } else if (!strncmp(argv[i], sine, strlen(sine))) {
+            std::vector<int> v;
+
+            parseCSV(argv[i] + strlen(sine), v);
+            if (v.size() == 3) {
+                printf("creating sine(%d %d)\n", v[0], v[1]);
+                if (useInputFloat) {
+                    Providers[i].setSine<float>(v[0], v[1], v[2], kSeconds);
+                } else {
+                    Providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds);
+                }
+                Providers[i].setIncr(Pvalues);
+            } else {
+                fprintf(stderr, "malformed input '%s'\n", argv[i]);
+            }
+        } else {
+            printf("creating filename(%s)\n", argv[i]);
+            if (useInputFloat) {
+                Providers[i].setFile<float>(argv[i]);
+            } else {
+                Providers[i].setFile<short>(argv[i]);
+            }
+            Providers[i].setIncr(Pvalues);
+        }
+        // calculate the number of output frames
+        size_t nframes = (int64_t) Providers[i].getNumFrames() * outputSampleRate
+                / Providers[i].getSampleRate();
+        if (i == 0 || outputFrames > nframes) { // choose minimum for outputFrames
+            outputFrames = nframes;
+        }
+    }
+
+    // create the output buffer.
+    const size_t outputFrameSize = outputChannels
+            * (useMixerFloat ? sizeof(float) : sizeof(int16_t));
+    const size_t outputSize = outputFrames * outputFrameSize;
+    void *outputAddr = NULL;
+    (void) posix_memalign(&outputAddr, 32, outputSize);
+    memset(outputAddr, 0, outputSize);
+
+    // create the aux buffer, if needed.
+    const size_t auxFrameSize = sizeof(int32_t); // Q4.27 always
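+    // (Q4.27: signed 32-bit fixed point with 4 integer and 27 fractional bits.)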
+    const size_t auxSize = outputFrames * auxFrameSize;
+    void *auxAddr = NULL;
+    if (auxFilename) {
+        (void) posix_memalign(&auxAddr, 32, auxSize);
+        memset(auxAddr, 0, auxSize);
+    }
+
+    // create the mixer.
+    const size_t mixerFrameCount = 320; // typical values range from 240 to 960
+    AudioMixer *mixer = new AudioMixer(mixerFrameCount, outputSampleRate);
+    audio_format_t inputFormat = useInputFloat
+            ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+    audio_format_t mixerFormat = useMixerFloat
+            ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+    float f = AudioMixer::UNITY_GAIN_FLOAT / Providers.size(); // normalize volume by # tracks
+    static float f0; // zero
+
+    // set up the tracks.
+    for (size_t i = 0; i < Providers.size(); ++i) {
+        //printf("track %d out of %d\n", i, Providers.size());
+        uint32_t channelMask = audio_channel_out_mask_from_count(Providers[i].getNumChannels());
+        int32_t name = mixer->getTrackName(channelMask,
+                inputFormat, AUDIO_SESSION_OUTPUT_MIX);
+        ALOG_ASSERT(name >= 0);
+        Names.push_back(name);
+        mixer->setBufferProvider(name, &Providers[i]);
+        mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
+                (void *) outputAddr);
+        mixer->setParameter(
+                name,
+                AudioMixer::TRACK,
+                AudioMixer::MIXER_FORMAT, (void *)mixerFormat);
+        mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
+                (void *)(uintptr_t)inputFormat);
+        mixer->setParameter(
+                name,
+                AudioMixer::RESAMPLE,
+                AudioMixer::SAMPLE_RATE,
+                (void *)(uintptr_t)Providers[i].getSampleRate());
+        if (useRamp) {
+            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f0);
+            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f0);
+            mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME0, &f);
+            mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::VOLUME1, &f);
+        } else {
+            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f);
+            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f);
+        }
+        if (auxFilename) {
+            mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
+                    (void *) auxAddr);
+            mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::AUXLEVEL, &f0);
+            mixer->setParameter(name, AudioMixer::RAMP_VOLUME, AudioMixer::AUXLEVEL, &f);
+        }
+        mixer->enable(name);
+    }
+
+    // pump the mixer to process data.
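+    // Each pass mixes mixerFrameCount frames; the MAIN_BUFFER (and AUX_BUFFER)
+    // pointers are advanced by hand so each chunk lands at the next offset in the
+    // preallocated output buffer. Any tail shorter than mixerFrameCount frames is
+    // dropped (this assumes outputFrames >= mixerFrameCount).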
+    size_t i;
+    for (i = 0; i < outputFrames - mixerFrameCount; i += mixerFrameCount) {
+        for (size_t j = 0; j < Names.size(); ++j) {
+            mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
+                    (char *) outputAddr + i * outputFrameSize);
+            if (auxFilename) {
+                mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
+                        (char *) auxAddr + i * auxFrameSize);
+            }
+        }
+        mixer->process(AudioBufferProvider::kInvalidPTS);
+    }
+    outputFrames = i; // reset output frames to the data actually produced.
+
+    // write to files
+    writeFile(outputFilename, outputAddr,
+            outputSampleRate, outputChannels, outputFrames, useMixerFloat);
+    if (auxFilename) {
+        // Aux buffer is always in Q4.27 format for now.
+        // This is effectively memcpy_to_i16_from_q4_27(), except that ditherAndClamp()
+        // takes a stereo frame count rather than a sample count, hence outputFrames >> 1.
+        ditherAndClamp((int32_t*)auxAddr, (int32_t*)auxAddr, outputFrames >> 1);
+        writeFile(auxFilename, auxAddr, outputSampleRate, 1, outputFrames, false);
+    }
+
+    delete mixer;
+    free(outputAddr);
+    free(auxAddr);
+    return EXIT_SUCCESS;
+}
diff --git a/services/audioflinger/tests/test_utils.h b/services/audioflinger/tests/test_utils.h
new file mode 100644
index 0000000..f954292
--- /dev/null
+++ b/services/audioflinger/tests/test_utils.h
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_TEST_UTILS_H
+#define ANDROID_AUDIO_TEST_UTILS_H
+
+#include <audio_utils/sndfile.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+#endif
+
+template<typename T, typename U>
+struct is_same
+{
+    static const bool value = false;
+};
+
+template<typename T>
+struct is_same<T, T>  // partial specialization
+{
+    static const bool value = true;
+};
+
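+// convertValue() maps a double in [-1.0, 1.0] to the full-scale range of the
+// destination sample type: +/-32767 for int16_t, 2^31 scaling for int32_t, and
+// the value unchanged for floating point.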
+template<typename T>
+static inline T convertValue(double val)
+{
+    if (is_same<T, int16_t>::value) {
+        return floor(val * 32767.0 + 0.5);
+    } else if (is_same<T, int32_t>::value) {
+        return floor(val * (1UL<<31) + 0.5);
+    }
+    return val; // assume float or double
+}
+
+// Convert a list of integers in CSV format to a Vector of those values.
+// Returns the number of elements in the list, or -1 on error.
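+// For example, parseCSV("240,480,960", values) sets values to {240, 480, 960}
+// and returns 3.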
+static inline int parseCSV(const char *string, std::vector<int>& values)
+{
+    // pass 1: count the number of values and do syntax check
+    size_t numValues = 0;
+    bool hadDigit = false;
+    for (const char *p = string; ; ) {
+        switch (*p++) {
+        case '0': case '1': case '2': case '3': case '4':
+        case '5': case '6': case '7': case '8': case '9':
+            hadDigit = true;
+            break;
+        case '\0':
+            if (hadDigit) {
+                // pass 2: allocate and initialize vector of values
+                values.resize(++numValues);
+                values[0] = atoi(p = string);
+                for (size_t i = 1; i < numValues; ) {
+                    if (*p++ == ',') {
+                        values[i++] = atoi(p);
+                    }
+                }
+                return numValues;
+            }
+            // fall through
+        case ',':
+            if (hadDigit) {
+                hadDigit = false;
+                numValues++;
+                break;
+            }
+            // fall through
+        default:
+            return -1;
+        }
+    }
+}
+
+/* Creates a type-independent audio buffer provider from
+ * a buffer base address, size, framesize, and input increment array.
+ *
+ * No allocation or deallocation of the provided buffer is done.
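+ *
+ * The increment vector (set at construction or via setIncr()) caps the number
+ * of frames returned by each getNextBuffer() call, cycling through its entries,
+ * to simulate a provider that delivers data in uneven chunks.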
+ */
+class TestProvider : public android::AudioBufferProvider {
+public:
+    TestProvider(void* addr, size_t frames, size_t frameSize,
+            const std::vector<int>& inputIncr)
+    : mAddr(addr),
+      mNumFrames(frames),
+      mFrameSize(frameSize),
+      mNextFrame(0), mUnrel(0), mInputIncr(inputIncr), mNextIdx(0)
+    {
+    }
+
+    TestProvider()
+    : mAddr(NULL), mNumFrames(0), mFrameSize(0),
+      mNextFrame(0), mUnrel(0), mNextIdx(0)
+    {
+    }
+
+    void setIncr(const std::vector<int>& inputIncr) {
+        mInputIncr = inputIncr;
+        mNextIdx = 0;
+    }
+
+    virtual android::status_t getNextBuffer(Buffer* buffer, int64_t pts __unused = kInvalidPTS)
+    {
+        size_t requestedFrames = buffer->frameCount;
+        if (requestedFrames > mNumFrames - mNextFrame) {
+            buffer->frameCount = mNumFrames - mNextFrame;
+        }
+        if (!mInputIncr.empty()) {
+            size_t provided = mInputIncr[mNextIdx++];
+            ALOGV("getNextBuffer() mValue[%d]=%u not %u",
+                    mNextIdx-1, provided, buffer->frameCount);
+            if (provided < buffer->frameCount) {
+                buffer->frameCount = provided;
+            }
+            if (mNextIdx >= mInputIncr.size()) {
+                mNextIdx = 0;
+            }
+        }
+        ALOGV("getNextBuffer() requested %u frames out of %u frames available"
+                " and returned %u frames\n",
+                requestedFrames, mNumFrames - mNextFrame, buffer->frameCount);
+        mUnrel = buffer->frameCount;
+        if (buffer->frameCount > 0) {
+            buffer->raw = (char *)mAddr + mFrameSize * mNextFrame;
+            return android::NO_ERROR;
+        } else {
+            buffer->raw = NULL;
+            return android::NOT_ENOUGH_DATA;
+        }
+    }
+
+    virtual void releaseBuffer(Buffer* buffer)
+    {
+        if (buffer->frameCount > mUnrel) {
+            ALOGE("releaseBuffer() released %u frames but only %u available "
+                    "to release\n", buffer->frameCount, mUnrel);
+            mNextFrame += mUnrel;
+            mUnrel = 0;
+        } else {
+
+            ALOGV("releaseBuffer() released %u frames out of %u frames available "
+                    "to release\n", buffer->frameCount, mUnrel);
+            mNextFrame += buffer->frameCount;
+            mUnrel -= buffer->frameCount;
+        }
+        buffer->frameCount = 0;
+        buffer->raw = NULL;
+    }
+
+    void reset()
+    {
+        mNextFrame = 0;
+    }
+
+    size_t getNumFrames()
+    {
+        return mNumFrames;
+    }
+
+protected:
+    void* mAddr;   // base address
+    size_t mNumFrames;   // total frames
+    int mFrameSize;      // frame size (# channels * bytes per sample)
+    size_t mNextFrame;   // index of next frame to provide
+    size_t mUnrel;       // number of frames not yet released
+    std::vector<int> mInputIncr; // number of frames provided per call
+    size_t mNextIdx;     // index of next entry in mInputIncr to use
+};
+
+/* Creates a buffer filled with a sine wave.
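+ *
+ * Each channel j is attenuated by 1/(j+1) so the per-channel signals remain
+ * distinguishable when inspecting the mixed output.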
+ */
+template<typename T>
+static void createSine(void *vbuffer, size_t frames,
+        size_t channels, double sampleRate, double freq)
+{
+    double tscale = 1. / sampleRate;
+    T* buffer = reinterpret_cast<T*>(vbuffer);
+    for (size_t i = 0; i < frames; ++i) {
+        double t = i * tscale;
+        double y = sin(2. * M_PI * freq * t);
+        T yt = convertValue<T>(y);
+
+        for (size_t j = 0; j < channels; ++j) {
+            buffer[i*channels + j] = yt / (j + 1);
+        }
+    }
+}
+
+/* Creates a buffer filled with a chirp signal (a sine wave sweep).
+ *
+ * When creating the Chirp, note that the frequency is the true sinusoidal
+ * frequency not the sampling rate.
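+ *
+ * The phase is 2*pi*(k*t + minfreq)*t, so the instantaneous frequency is
+ * 2*k*t + minfreq; choosing k = (maxfreq - minfreq) / (2*T), where T is the
+ * buffer duration, sweeps from minfreq at t=0 to maxfreq at t=T (hence the
+ * divide-by-two in the constant below).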
+ *
+ * http://en.wikipedia.org/wiki/Chirp
+ */
+template<typename T>
+static void createChirp(void *vbuffer, size_t frames,
+        size_t channels, double sampleRate,  double minfreq, double maxfreq)
+{
+    double tscale = 1. / sampleRate;
+    T *buffer = reinterpret_cast<T*>(vbuffer);
+    // note the chirp constant k has a divide-by-two.
+    double k = (maxfreq - minfreq) / (2. * tscale * frames);
+    for (size_t i = 0; i < frames; ++i) {
+        double t = i * tscale;
+        double y = sin(2. * M_PI * (k * t + minfreq) * t);
+        T yt = convertValue<T>(y);
+
+        for (size_t j = 0; j < channels; ++j) {
+            buffer[i*channels + j] = yt / (j + 1);
+        }
+    }
+}
+
+/* This derived class creates a buffer provider of datatype T,
+ * consisting of an input signal, e.g. from createChirp().
+ * The number of frames can be obtained from the base class
+ * TestProvider::getNumFrames().
+ */
+
+class SignalProvider : public TestProvider {
+public:
+    SignalProvider()
+    : mSampleRate(0),
+      mChannels(0)
+    {
+    }
+
+    virtual ~SignalProvider()
+    {
+        free(mAddr);
+        mAddr = NULL;
+    }
+
+    template <typename T>
+    void setChirp(size_t channels, double minfreq, double maxfreq, double sampleRate, double time)
+    {
+        createBufferByFrames<T>(channels, sampleRate, sampleRate*time);
+        createChirp<T>(mAddr, mNumFrames, mChannels, mSampleRate, minfreq, maxfreq);
+    }
+
+    template <typename T>
+    void setSine(size_t channels,
+            double freq, double sampleRate, double time)
+    {
+        createBufferByFrames<T>(channels, sampleRate, sampleRate*time);
+        createSine<T>(mAddr, mNumFrames,  mChannels, mSampleRate, freq);
+    }
+
+    template <typename T>
+    void setFile(const char *file_in)
+    {
+        SF_INFO info;
+        info.format = 0;
+        SNDFILE *sf = sf_open(file_in, SFM_READ, &info);
+        if (sf == NULL) {
+            perror(file_in);
+            return;
+        }
+        createBufferByFrames<T>(info.channels, info.samplerate, info.frames);
+        if (is_same<T, float>::value) {
+            (void) sf_readf_float(sf, (float *) mAddr, mNumFrames);
+        } else if (is_same<T, short>::value) {
+            (void) sf_readf_short(sf, (short *) mAddr, mNumFrames);
+        }
+        sf_close(sf);
+    }
+
+    template <typename T>
+    void createBufferByFrames(size_t channels, uint32_t sampleRate, size_t frames)
+    {
+        mNumFrames = frames;
+        mChannels = channels;
+        mFrameSize = mChannels * sizeof(T);
+        free(mAddr);
+        mAddr = malloc(mFrameSize * mNumFrames);
+        mSampleRate = sampleRate;
+    }
+
+    uint32_t getSampleRate() const {
+        return mSampleRate;
+    }
+
+    uint32_t getNumChannels() const {
+        return mChannels;
+    }
+
+protected:
+    uint32_t mSampleRate;
+    uint32_t mChannels;
+};
+
+#endif // ANDROID_AUDIO_TEST_UTILS_H
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index c025a45..33e4397 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -90,6 +90,12 @@
                                         audio_channel_mask_t channelMask,
                                         audio_output_flags_t flags,
                                         const audio_offload_info_t *offloadInfo) = 0;
+    virtual audio_io_handle_t getOutputForAttr(const audio_attributes_t *attr,
+                                                uint32_t samplingRate,
+                                                audio_format_t format,
+                                                audio_channel_mask_t channelMask,
+                                                audio_output_flags_t flags,
+                                                const audio_offload_info_t *offloadInfo) = 0;
     // indicates to the audio policy manager that the output starts being used by corresponding stream.
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
index 8cc386a..6342d8f 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
@@ -131,6 +131,22 @@
                                     format, channelMask, flags, offloadInfo);
 }
 
+audio_io_handle_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr,
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    audio_output_flags_t flags,
+                                    const audio_offload_info_t *offloadInfo)
+{
+    if (mAudioPolicyManager == NULL) {
+        return 0;
+    }
+    ALOGV("getOutput()");
+    Mutex::Autolock _l(mLock);
+    return mAudioPolicyManager->getOutputForAttr(attr, samplingRate,
+                                    format, channelMask, flags, offloadInfo);
+}
+
 status_t AudioPolicyService::startOutput(audio_io_handle_t output,
                                          audio_stream_type_t stream,
                                          int session)
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index d4c9374..4fcf43b 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -122,6 +122,11 @@
     STRING_TO_ENUM(AUDIO_FORMAT_MP3),
     STRING_TO_ENUM(AUDIO_FORMAT_AAC),
     STRING_TO_ENUM(AUDIO_FORMAT_VORBIS),
+    STRING_TO_ENUM(AUDIO_FORMAT_HE_AAC_V1),
+    STRING_TO_ENUM(AUDIO_FORMAT_HE_AAC_V2),
+    STRING_TO_ENUM(AUDIO_FORMAT_OPUS),
+    STRING_TO_ENUM(AUDIO_FORMAT_AC3),
+    STRING_TO_ENUM(AUDIO_FORMAT_E_AC3),
 };
 
 const StringToEnum sOutChannelsNameToEnumTable[] = {
@@ -623,13 +628,53 @@
                                     audio_output_flags_t flags,
                                     const audio_offload_info_t *offloadInfo)
 {
-    audio_io_handle_t output = 0;
-    uint32_t latency = 0;
+
     routing_strategy strategy = getStrategy(stream);
     audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
     ALOGV("getOutput() device %d, stream %d, samplingRate %d, format %x, channelMask %x, flags %x",
           device, stream, samplingRate, format, channelMask, flags);
 
+    return getOutputForDevice(device, stream, samplingRate, format, channelMask, flags,
+            offloadInfo);
+}
+
+audio_io_handle_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
+                                    uint32_t samplingRate,
+                                    audio_format_t format,
+                                    audio_channel_mask_t channelMask,
+                                    audio_output_flags_t flags,
+                                    const audio_offload_info_t *offloadInfo)
+{
+    if (attr == NULL) {
+        ALOGE("getOutputForAttr() called with NULL audio attributes");
+        return 0;
+    }
+    ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s",
+            attr->usage, attr->content_type, attr->tags);
+
+    // TODO this is where filtering for custom policies (rerouting, dynamic sources) will go
+    routing_strategy strategy = (routing_strategy) getStrategyForAttr(attr);
+    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+    ALOGV("getOutputForAttr() device %d, samplingRate %d, format %x, channelMask %x, flags %x",
+          device, samplingRate, format, channelMask, flags);
+
+    audio_stream_type_t stream = streamTypefromAttributesInt(attr);
+    return getOutputForDevice(device, stream, samplingRate, format, channelMask, flags,
+                offloadInfo);
+}
+
+audio_io_handle_t AudioPolicyManager::getOutputForDevice(
+        audio_devices_t device,
+        audio_stream_type_t stream,
+        uint32_t samplingRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        audio_output_flags_t flags,
+        const audio_offload_info_t *offloadInfo)
+{
+    audio_io_handle_t output = 0;
+    uint32_t latency = 0;
+
 #ifdef AUDIO_POLICY_TEST
     if (mCurOutput != 0) {
         ALOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channelMask %x, mDirectOutput %d",
@@ -1515,7 +1560,7 @@
     snprintf(buffer, SIZE,
              " Stream  Can be muted  Index Min  Index Max  Index Cur [device : index]...\n");
     write(fd, buffer, strlen(buffer));
-    for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
+    for (size_t i = 0; i < AUDIO_STREAM_CNT; i++) {
         snprintf(buffer, SIZE, " %02zu      ", i);
         write(fd, buffer, strlen(buffer));
         mStreams[i].dump(fd);
@@ -1646,14 +1691,20 @@
             *num_ports += mInputs.size();
         }
         if (role == AUDIO_PORT_ROLE_SOURCE || role == AUDIO_PORT_ROLE_NONE) {
-            for (size_t i = 0; i < mOutputs.size() && portsWritten < portsMax; i++) {
-                mOutputs[i]->toAudioPort(&ports[portsWritten++]);
+            size_t numOutputs = 0;
+            for (size_t i = 0; i < mOutputs.size(); i++) {
+                if (!mOutputs[i]->isDuplicated()) {
+                    numOutputs++;
+                    if (portsWritten < portsMax) {
+                        mOutputs[i]->toAudioPort(&ports[portsWritten++]);
+                    }
+                }
             }
-            *num_ports += mOutputs.size();
+            *num_ports += numOutputs;
         }
     }
     *generation = curAudioPortGeneration();
-    ALOGV("listAudioPorts() got %d ports needed %d", portsWritten, *num_ports);
+    ALOGV("listAudioPorts() got %zu ports needed %d", portsWritten, *num_ports);
     return NO_ERROR;
 }
 
@@ -1781,6 +1832,8 @@
             ALOGV("createAudioPatch() output not found for id %d", patch->sources[0].id);
             return BAD_VALUE;
         }
+        ALOG_ASSERT(!outputDesc->isDuplicated(),"duplicated output %d in source in ports",
+                                                outputDesc->mIoHandle);
         if (patchDesc != 0) {
             if (patchDesc->mPatch.sources[0].id != patch->sources[0].id) {
                 ALOGV("createAudioPatch() source id differs for patch current id %d new id %d",
@@ -1795,7 +1848,7 @@
             return BAD_VALUE;
         }
 
-        if (!outputDesc->mProfile->isCompatibleProfile(devDesc->mType,
+        if (!outputDesc->mProfile->isCompatibleProfile(devDesc->mDeviceType,
                                                        patch->sources[0].sample_rate,
                                                      patch->sources[0].format,
                                                      patch->sources[0].channel_mask,
@@ -1804,9 +1857,9 @@
         }
         // TODO: reconfigure output format and channels here
         ALOGV("createAudioPatch() setting device %08x on output %d",
-                                              devDesc->mType, outputDesc->mIoHandle);
+                                              devDesc->mDeviceType, outputDesc->mIoHandle);
         setOutputDevice(outputDesc->mIoHandle,
-                        devDesc->mType,
+                        devDesc->mDeviceType,
                        true,
                        0,
                        handle);
@@ -1840,7 +1893,7 @@
                 return BAD_VALUE;
             }
 
-            if (!inputDesc->mProfile->isCompatibleProfile(devDesc->mType,
+            if (!inputDesc->mProfile->isCompatibleProfile(devDesc->mDeviceType,
                                                            patch->sinks[0].sample_rate,
                                                          patch->sinks[0].format,
                                                          patch->sinks[0].channel_mask,
@@ -1849,9 +1902,9 @@
             }
             // TODO: reconfigure output format and channels here
             ALOGV("createAudioPatch() setting device %08x on output %d",
-                                                  devDesc->mType, inputDesc->mIoHandle);
+                                                  devDesc->mDeviceType, inputDesc->mIoHandle);
             setInputDevice(inputDesc->mIoHandle,
-                           devDesc->mType,
+                           devDesc->mDeviceType,
                            true,
                            handle);
             index = mAudioPatches.indexOfKey(*handle);
@@ -1997,7 +2050,7 @@
             generation == NULL) {
         return BAD_VALUE;
     }
-    ALOGV("listAudioPatches() num_patches %d patches %p available patches %d",
+    ALOGV("listAudioPatches() num_patches %d patches %p available patches %zu",
           *num_patches, patches, mAudioPatches.size());
     if (patches == NULL) {
         *num_patches = 0;
@@ -2009,13 +2062,13 @@
             i  < mAudioPatches.size() && patchesWritten < patchesMax; i++) {
         patches[patchesWritten] = mAudioPatches[i]->mPatch;
         patches[patchesWritten++].id = mAudioPatches[i]->mHandle;
-        ALOGV("listAudioPatches() patch %d num_sources %d num_sinks %d",
+        ALOGV("listAudioPatches() patch %zu num_sources %d num_sinks %d",
               i, mAudioPatches[i]->mPatch.num_sources, mAudioPatches[i]->mPatch.num_sinks);
     }
     *num_patches = mAudioPatches.size();
 
     *generation = curAudioPortGeneration();
-    ALOGV("listAudioPatches() got %d patches needed %d", patchesWritten, *num_patches);
+    ALOGV("listAudioPatches() got %zu patches needed %d", patchesWritten, *num_patches);
     return NO_ERROR;
 }
 
@@ -2039,6 +2092,9 @@
             if (outputDesc == NULL) {
                 return BAD_VALUE;
             }
+            ALOG_ASSERT(!outputDesc->isDuplicated(),
+                        "setAudioPortConfig() called on duplicated output %d",
+                        outputDesc->mIoHandle);
             audioPortConfig = outputDesc;
         } else if (config->role == AUDIO_PORT_ROLE_SINK) {
             sp<AudioInputDescriptor> inputDesc = getInputFromId(config->id);
@@ -2779,7 +2835,7 @@
             {
                 if (mHwModules[module_idx]->mInputProfiles[profile_index]->mSupportedDevices.types()
                         & (device & ~AUDIO_DEVICE_BIT_IN)) {
-                    ALOGV("checkInputsForDevice(): adding profile %d from module %d",
+                    ALOGV("checkInputsForDevice(): adding profile %zu from module %zu",
                           profile_index, module_idx);
                     profiles.add(mHwModules[module_idx]->mInputProfiles[profile_index]);
                 }
@@ -2903,7 +2959,7 @@
                  profile_index++) {
                 sp<IOProfile> profile = mHwModules[module_index]->mInputProfiles[profile_index];
                 if (profile->mSupportedDevices.types() & device) {
-                    ALOGV("checkInputsForDevice(): clearing direct input profile %d on module %d",
+                    ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %zu",
                           profile_index, module_index);
                     if (profile->mSamplingRates[0] == 0) {
                         profile->mSamplingRates.clear();
@@ -3239,6 +3295,44 @@
     }
 }
 
+uint32_t AudioPolicyManager::getStrategyForAttr(const audio_attributes_t *attr) {
+    // flags to strategy mapping
+    if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
+        return (uint32_t) STRATEGY_ENFORCED_AUDIBLE;
+    }
+
+    // usage to strategy mapping
+    switch (attr->usage) {
+    case AUDIO_USAGE_MEDIA:
+    case AUDIO_USAGE_GAME:
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+        return (uint32_t) STRATEGY_MEDIA;
+
+    case AUDIO_USAGE_VOICE_COMMUNICATION:
+        return (uint32_t) STRATEGY_PHONE;
+
+    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+        return (uint32_t) STRATEGY_DTMF;
+
+    case AUDIO_USAGE_ALARM:
+    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+        return (uint32_t) STRATEGY_SONIFICATION;
+
+    case AUDIO_USAGE_NOTIFICATION:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+    case AUDIO_USAGE_NOTIFICATION_EVENT:
+        return (uint32_t) STRATEGY_SONIFICATION_RESPECTFUL;
+
+    case AUDIO_USAGE_UNKNOWN:
+    default:
+        return (uint32_t) STRATEGY_MEDIA;
+    }
+}
+
 void AudioPolicyManager::handleNotificationRoutingForStream(audio_stream_type_t stream) {
     switch(stream) {
     case AUDIO_STREAM_MUSIC:
@@ -4124,9 +4218,7 @@
     if (stream == AUDIO_STREAM_MUSIC &&
         index != mStreams[stream].mIndexMin &&
         (device == AUDIO_DEVICE_OUT_AUX_DIGITAL ||
-         device == AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET ||
-         device == AUDIO_DEVICE_OUT_USB_ACCESSORY ||
-         device == AUDIO_DEVICE_OUT_USB_DEVICE)) {
+         device == AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET)) {
         return 1.0;
     }
 
@@ -4500,10 +4592,12 @@
                                                  struct audio_port_config *dstConfig,
                                                  const struct audio_port_config *srcConfig) const
 {
+    ALOG_ASSERT(!isDuplicated(), "toAudioPortConfig() called on duplicated output %d", mIoHandle);
+
     dstConfig->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
                             AUDIO_PORT_CONFIG_FORMAT|AUDIO_PORT_CONFIG_GAIN;
     if (srcConfig != NULL) {
-        dstConfig->config_mask &= srcConfig->config_mask;
+        dstConfig->config_mask |= srcConfig->config_mask;
     }
     AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
 
@@ -4518,6 +4612,7 @@
 void AudioPolicyManager::AudioOutputDescriptor::toAudioPort(
                                                     struct audio_port *port) const
 {
+    ALOG_ASSERT(!isDuplicated(), "toAudioPort() called on duplicated output %d", mIoHandle);
     mProfile->toAudioPort(port);
     port->id = mId;
     toAudioPortConfig(&port->active_config);
@@ -4583,10 +4678,12 @@
                                                    struct audio_port_config *dstConfig,
                                                    const struct audio_port_config *srcConfig) const
 {
+    ALOG_ASSERT(mProfile != 0,
+                "toAudioPortConfig() called on input with null profile %d", mIoHandle);
     dstConfig->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
                             AUDIO_PORT_CONFIG_FORMAT|AUDIO_PORT_CONFIG_GAIN;
     if (srcConfig != NULL) {
-        dstConfig->config_mask &= srcConfig->config_mask;
+        dstConfig->config_mask |= srcConfig->config_mask;
     }
 
     AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
@@ -4602,6 +4699,8 @@
 void AudioPolicyManager::AudioInputDescriptor::toAudioPort(
                                                     struct audio_port *port) const
 {
+    ALOG_ASSERT(mProfile != 0, "toAudioPort() called on input with null profile %d", mIoHandle);
+
     mProfile->toAudioPort(port);
     port->id = mId;
     toAudioPortConfig(&port->active_config);
@@ -4911,7 +5010,7 @@
     }
     port->num_formats = i;
 
-    ALOGV("AudioPort::toAudioPort() num gains %d", mGains.size());
+    ALOGV("AudioPort::toAudioPort() num gains %zu", mGains.size());
 
     for (i = 0; i < mGains.size() && i < AUDIO_PORT_MAX_GAINS; i++) {
         port->gains[i] = mGains[i]->mGain;
@@ -5609,7 +5708,7 @@
 {
     sp<DeviceDescriptor> device;
     for (size_t i = 0; i < size(); i++) {
-        ALOGV("DeviceVector::getDeviceFromId(%d) itemAt(%d)->mId %d", id, i, itemAt(i)->mId);
+        ALOGV("DeviceVector::getDeviceFromId(%d) itemAt(%zu)->mId %d", id, i, itemAt(i)->mId);
         if (itemAt(i)->mId == id) {
             device = itemAt(i);
             break;
@@ -5652,7 +5751,7 @@
 {
     dstConfig->config_mask = AUDIO_PORT_CONFIG_CHANNEL_MASK|AUDIO_PORT_CONFIG_GAIN;
     if (srcConfig != NULL) {
-        dstConfig->config_mask &= srcConfig->config_mask;
+        dstConfig->config_mask |= srcConfig->config_mask;
     }
 
     AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
@@ -5916,4 +6015,46 @@
     mHwModules.add(module);
 }
 
+audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr)
+{
+    // flags to stream type mapping
+    if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
+        return AUDIO_STREAM_ENFORCED_AUDIBLE;
+    }
+    if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
+        return AUDIO_STREAM_BLUETOOTH_SCO;
+    }
+
+    // usage to stream type mapping
+    switch (attr->usage) {
+    case AUDIO_USAGE_MEDIA:
+    case AUDIO_USAGE_GAME:
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+        return AUDIO_STREAM_MUSIC;
+    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+        return AUDIO_STREAM_SYSTEM;
+    case AUDIO_USAGE_VOICE_COMMUNICATION:
+        return AUDIO_STREAM_VOICE_CALL;
+
+    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+        return AUDIO_STREAM_DTMF;
+
+    case AUDIO_USAGE_ALARM:
+        return AUDIO_STREAM_ALARM;
+    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+        return AUDIO_STREAM_RING;
+
+    case AUDIO_USAGE_NOTIFICATION:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+    case AUDIO_USAGE_NOTIFICATION_EVENT:
+        return AUDIO_STREAM_NOTIFICATION;
+
+    case AUDIO_USAGE_UNKNOWN:
+    default:
+        return AUDIO_STREAM_MUSIC;
+    }
+}
 }; // namespace android
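The two attribute-mapping helpers added in this file (getStrategyForAttr and streamTypefromAttributesInt) follow the same precedence: attr->flags is checked first, then attr->usage, with unknown usages falling back to the media/music case. A minimal standalone sketch of that precedence, using simplified stand-in enums rather than the real system/audio.h definitions (names here are illustrative only), could look like this:

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-ins for the real audio usage/stream enums (illustrative only).
    enum Usage { USAGE_UNKNOWN, USAGE_MEDIA, USAGE_VOICE_COMMUNICATION, USAGE_ALARM, USAGE_NOTIFICATION };
    enum Stream { STREAM_MUSIC, STREAM_VOICE_CALL, STREAM_ALARM, STREAM_NOTIFICATION, STREAM_ENFORCED_AUDIBLE };
    constexpr uint32_t FLAG_AUDIBILITY_ENFORCED = 1u << 0;

    struct Attributes { Usage usage; uint32_t flags; };

    // Flags win over usage, and unknown usages fall back to music, mirroring the
    // order of checks in streamTypefromAttributesInt() above.
    Stream streamFromAttributes(const Attributes &attr) {
        if (attr.flags & FLAG_AUDIBILITY_ENFORCED) return STREAM_ENFORCED_AUDIBLE;
        switch (attr.usage) {
        case USAGE_VOICE_COMMUNICATION: return STREAM_VOICE_CALL;
        case USAGE_ALARM:               return STREAM_ALARM;
        case USAGE_NOTIFICATION:        return STREAM_NOTIFICATION;
        case USAGE_MEDIA:
        case USAGE_UNKNOWN:
        default:                        return STREAM_MUSIC;
        }
    }

    int main() {
        Attributes alarm{USAGE_ALARM, 0};
        std::printf("alarm maps to stream %d\n", streamFromAttributes(alarm)); // STREAM_ALARM
        return 0;
    }
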
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index 1abeb6a..c23d994 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -84,6 +84,12 @@
                                             audio_channel_mask_t channelMask,
                                             audio_output_flags_t flags,
                                             const audio_offload_info_t *offloadInfo);
+        virtual audio_io_handle_t getOutputForAttr(const audio_attributes_t *attr,
+                                            uint32_t samplingRate,
+                                            audio_format_t format,
+                                            audio_channel_mask_t channelMask,
+                                            audio_output_flags_t flags,
+                                            const audio_offload_info_t *offloadInfo);
         virtual status_t startOutput(audio_io_handle_t output,
                                      audio_stream_type_t stream,
                                      int session = 0);
@@ -116,6 +122,8 @@
 
         // return the strategy corresponding to a given stream type
         virtual uint32_t getStrategyForStream(audio_stream_type_t stream);
+        // return the strategy corresponding to the given audio attributes
+        virtual uint32_t getStrategyForAttr(const audio_attributes_t *attr);
 
         // return the enabled output devices for the given stream type
         virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream);
@@ -755,6 +763,17 @@
         uint32_t curAudioPortGeneration() const { return mAudioPortGeneration; }
         // converts device address to string sent to audio HAL via setParameters
         static String8 addressToParameter(audio_devices_t device, const String8 address);
+        // internal method to return the output handle for the given device and format
+        audio_io_handle_t getOutputForDevice(
+                audio_devices_t device,
+                audio_stream_type_t stream,
+                uint32_t samplingRate,
+                audio_format_t format,
+                audio_channel_mask_t channelMask,
+                audio_output_flags_t flags,
+                const audio_offload_info_t *offloadInfo);
+        // internal function to derive a stream type value from audio attributes
+        audio_stream_type_t streamTypefromAttributesInt(const audio_attributes_t *attr);
 };
 
 };
diff --git a/services/audiopolicy/AudioPolicyService.h b/services/audiopolicy/AudioPolicyService.h
index 66d9cad..69673cd 100644
--- a/services/audiopolicy/AudioPolicyService.h
+++ b/services/audiopolicy/AudioPolicyService.h
@@ -70,6 +70,12 @@
                                         audio_output_flags_t flags =
                                                 AUDIO_OUTPUT_FLAG_NONE,
                                         const audio_offload_info_t *offloadInfo = NULL);
+    virtual audio_io_handle_t getOutputForAttr(const audio_attributes_t *attr,
+                                            uint32_t samplingRate = 0,
+                                            audio_format_t format = AUDIO_FORMAT_DEFAULT,
+                                            audio_channel_mask_t channelMask = 0,
+                                            audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                                            const audio_offload_info_t *offloadInfo = NULL);
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
                                  int session = 0);
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 280478c..648e82c 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -261,35 +261,20 @@
         return ret;
     }
 
-    ssize_t index = -1;
-    {   // Scope for service lock
-        Mutex::Autolock lock(mServiceLock);
-        index = mShimParams.indexOfKey(cameraId);
-        // Release service lock so initializeShimMetadata can be called correctly.
-    }
-
-    if (index < 0) {
-        int64_t token = IPCThreadState::self()->clearCallingIdentity();
-        ret = initializeShimMetadata(cameraId);
-        IPCThreadState::self()->restoreCallingIdentity(token);
-        if (ret != OK) {
-            return ret;
-        }
+    CameraParameters shimParams;
+    if ((ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)) != OK) {
+        // Error logged by callee
+        return ret;
     }
 
     Vector<Size> sizes;
     Vector<Size> jpegSizes;
     Vector<int32_t> formats;
     const char* supportedPreviewFormats;
-    {   // Scope for service lock
-        Mutex::Autolock lock(mServiceLock);
-        index = mShimParams.indexOfKey(cameraId);
-
-        mShimParams[index].getSupportedPreviewSizes(/*out*/sizes);
-
-        mShimParams[index].getSupportedPreviewFormats(/*out*/formats);
-
-        mShimParams[index].getSupportedPictureSizes(/*out*/jpegSizes);
+    {
+        shimParams.getSupportedPreviewSizes(/*out*/sizes);
+        shimParams.getSupportedPreviewFormats(/*out*/formats);
+        shimParams.getSupportedPictureSizes(/*out*/jpegSizes);
     }
 
     // Always include IMPLEMENTATION_DEFINED
@@ -481,6 +466,7 @@
     int uid = getCallingUid();
     status_t ret = validateConnect(cameraId, uid);
     if (ret != OK) {
+        // Error already logged by callee
         return ret;
     }
 
@@ -503,6 +489,7 @@
                                       client);
 
             if (ret != OK) {
+                // Error already logged by callee
                 return ret;
             }
         }
@@ -524,6 +511,52 @@
     return OK;
 }
 
+status_t CameraService::getLegacyParametersLazy(int cameraId,
+        /*out*/
+        CameraParameters* parameters) {
+
+    ALOGV("%s: for cameraId: %d", __FUNCTION__, cameraId);
+
+    status_t ret = 0;
+
+    if (parameters == NULL) {
+        ALOGE("%s: parameters must not be null", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    ssize_t index = -1;
+    {   // Scope for service lock
+        Mutex::Autolock lock(mServiceLock);
+        index = mShimParams.indexOfKey(cameraId);
+        // Release service lock so initializeShimMetadata can be called correctly.
+
+        if (index >= 0) {
+            *parameters = mShimParams[index];
+        }
+    }
+
+    if (index < 0) {
+        int64_t token = IPCThreadState::self()->clearCallingIdentity();
+        ret = initializeShimMetadata(cameraId);
+        IPCThreadState::self()->restoreCallingIdentity(token);
+        if (ret != OK) {
+            // Error already logged by callee
+            return ret;
+        }
+
+        {   // Scope for service lock
+            Mutex::Autolock lock(mServiceLock);
+            index = mShimParams.indexOfKey(cameraId);
+
+            LOG_ALWAYS_FATAL_IF(index < 0, "index should have been initialized");
+
+            *parameters = mShimParams[index];
+        }
+    }
+
+    return OK;
+}
+
 status_t CameraService::validateConnect(int cameraId,
                                     /*inout*/
                                     int& clientUid) const {
@@ -626,7 +659,8 @@
                                       int clientUid,
                                       int callingPid,
                                       /*out*/
-                                      sp<Client>& client) {
+                                      sp<Client>& client,
+                                      int halVersion) {
 
     int facing = -1;
     int deviceVersion = getDeviceVersion(cameraId, &facing);
@@ -639,28 +673,47 @@
                      cameraId);
     }
 
-    switch(deviceVersion) {
-      case CAMERA_DEVICE_API_VERSION_1_0:
-        client = new CameraClient(this, cameraClient,
-                clientPackageName, cameraId,
-                facing, callingPid, clientUid, getpid());
-        break;
-      case CAMERA_DEVICE_API_VERSION_2_0:
-      case CAMERA_DEVICE_API_VERSION_2_1:
-      case CAMERA_DEVICE_API_VERSION_3_0:
-      case CAMERA_DEVICE_API_VERSION_3_1:
-      case CAMERA_DEVICE_API_VERSION_3_2:
-        client = new Camera2Client(this, cameraClient,
-                clientPackageName, cameraId,
-                facing, callingPid, clientUid, getpid(),
-                deviceVersion);
-        break;
-      case -1:
-        ALOGE("Invalid camera id %d", cameraId);
-        return BAD_VALUE;
-      default:
-        ALOGE("Unknown camera device HAL version: %d", deviceVersion);
-        return INVALID_OPERATION;
+    if (halVersion < 0 || halVersion == deviceVersion) {
+        // Default path: HAL version is unspecified by caller, create CameraClient
+        // based on device version reported by the HAL.
+        switch(deviceVersion) {
+          case CAMERA_DEVICE_API_VERSION_1_0:
+            client = new CameraClient(this, cameraClient,
+                    clientPackageName, cameraId,
+                    facing, callingPid, clientUid, getpid());
+            break;
+          case CAMERA_DEVICE_API_VERSION_2_0:
+          case CAMERA_DEVICE_API_VERSION_2_1:
+          case CAMERA_DEVICE_API_VERSION_3_0:
+          case CAMERA_DEVICE_API_VERSION_3_1:
+          case CAMERA_DEVICE_API_VERSION_3_2:
+            client = new Camera2Client(this, cameraClient,
+                    clientPackageName, cameraId,
+                    facing, callingPid, clientUid, getpid());
+            break;
+          case -1:
+            ALOGE("Invalid camera id %d", cameraId);
+            return BAD_VALUE;
+          default:
+            ALOGE("Unknown camera device HAL version: %d", deviceVersion);
+            return INVALID_OPERATION;
+        }
+    } else {
+        // A particular HAL version is requested by caller. Create CameraClient
+        // based on the requested HAL version.
+        if (deviceVersion > CAMERA_DEVICE_API_VERSION_1_0 &&
+            halVersion == CAMERA_DEVICE_API_VERSION_1_0) {
+            // Only support opening a higher HAL version device as a HAL1.0 device.
+            client = new CameraClient(this, cameraClient,
+                    clientPackageName, cameraId,
+                    facing, callingPid, clientUid, getpid());
+        } else {
+            // Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
+            ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
+                    " opened as HAL %x device", halVersion, deviceVersion,
+                    CAMERA_DEVICE_API_VERSION_1_0);
+            return INVALID_OPERATION;
+        }
     }
 
     status_t status = connectFinishUnsafe(client, client->getRemote());
@@ -729,6 +782,70 @@
     return OK;
 }
 
+status_t CameraService::connectLegacy(
+        const sp<ICameraClient>& cameraClient,
+        int cameraId, int halVersion,
+        const String16& clientPackageName,
+        int clientUid,
+        /*out*/
+        sp<ICamera>& device) {
+
+    if (halVersion != CAMERA_HAL_API_VERSION_UNSPECIFIED &&
+            mModule->common.module_api_version < CAMERA_MODULE_API_VERSION_2_3) {
+        /*
+         * Either the HAL version is unspecified, in which case this just
+         * creates a camera client selected by the latest device version, or
+         * a particular version is requested, in which case the HAL must
+         * support the open_legacy call.
+         */
+        ALOGE("%s: camera HAL module version %x doesn't support connecting to legacy HAL devices!",
+                __FUNCTION__, mModule->common.module_api_version);
+        return INVALID_OPERATION;
+    }
+
+    String8 clientName8(clientPackageName);
+    int callingPid = getCallingPid();
+
+    LOG1("CameraService::connect legacy E (pid %d \"%s\", id %d)", callingPid,
+            clientName8.string(), cameraId);
+
+    status_t status = validateConnect(cameraId, /*inout*/clientUid);
+    if (status != OK) {
+        return status;
+    }
+
+    sp<Client> client;
+    {
+        Mutex::Autolock lock(mServiceLock);
+        sp<BasicClient> clientTmp;
+        if (!canConnectUnsafe(cameraId, clientPackageName,
+                              cameraClient->asBinder(),
+                              /*out*/clientTmp)) {
+            return -EBUSY;
+        } else if (clientTmp.get() != NULL) {
+            device = static_cast<Client*>(clientTmp.get());
+            return OK;
+        }
+
+        status = connectHelperLocked(cameraClient,
+                                     cameraId,
+                                     clientPackageName,
+                                     clientUid,
+                                     callingPid,
+                                     client,
+                                     halVersion);
+        if (status != OK) {
+            return status;
+        }
+
+    }
+    // important: release the mutex here so the client can call back
+    //    into the service from its destructor (can be at the end of the call)
+
+    device = client;
+    return OK;
+}
+
 status_t CameraService::connectFinishUnsafe(const sp<BasicClient>& client,
                                             const sp<IBinder>& remoteCallback) {
     status_t status = client->initialize(mModule);
@@ -791,8 +908,8 @@
           case CAMERA_DEVICE_API_VERSION_3_0:
           case CAMERA_DEVICE_API_VERSION_3_1:
           case CAMERA_DEVICE_API_VERSION_3_2:
-            client = new ProCamera2Client(this, cameraCb, String16(),
-                    cameraId, facing, callingPid, USE_CALLING_UID, getpid());
+            client = new ProCamera2Client(this, cameraCb, clientPackageName,
+                    cameraId, facing, callingPid, clientUid, getpid());
             break;
           case -1:
             ALOGE("Invalid camera id %d", cameraId);
@@ -871,8 +988,8 @@
           case CAMERA_DEVICE_API_VERSION_3_0:
           case CAMERA_DEVICE_API_VERSION_3_1:
           case CAMERA_DEVICE_API_VERSION_3_2:
-            client = new CameraDeviceClient(this, cameraCb, String16(),
-                    cameraId, facing, callingPid, USE_CALLING_UID, getpid());
+            client = new CameraDeviceClient(this, cameraCb, clientPackageName,
+                    cameraId, facing, callingPid, clientUid, getpid());
             break;
           case -1:
             ALOGE("Invalid camera id %d", cameraId);
@@ -961,6 +1078,78 @@
     return BAD_VALUE;
 }
 
+status_t CameraService::getLegacyParameters(
+            int cameraId,
+            /*out*/
+            String16* parameters) {
+    ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId);
+
+    if (parameters == NULL) {
+        ALOGE("%s: parameters must not be null", __FUNCTION__);
+        return BAD_VALUE;
+    }
+
+    status_t ret = 0;
+
+    CameraParameters shimParams;
+    if ((ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)) != OK) {
+        // Error logged by callee
+        return ret;
+    }
+
+    String8 shimParamsString8 = shimParams.flatten();
+    String16 shimParamsString16 = String16(shimParamsString8);
+
+    *parameters = shimParamsString16;
+
+    return OK;
+}
+
+status_t CameraService::supportsCameraApi(int cameraId, int apiVersion) {
+    ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId);
+
+    switch (apiVersion) {
+        case API_VERSION_1:
+        case API_VERSION_2:
+            break;
+        default:
+            ALOGE("%s: Bad API version %d", __FUNCTION__, apiVersion);
+            return BAD_VALUE;
+    }
+
+    int facing = -1;
+    int deviceVersion = getDeviceVersion(cameraId, &facing);
+
+    switch(deviceVersion) {
+      case CAMERA_DEVICE_API_VERSION_1_0:
+      case CAMERA_DEVICE_API_VERSION_2_0:
+      case CAMERA_DEVICE_API_VERSION_2_1:
+      case CAMERA_DEVICE_API_VERSION_3_0:
+      case CAMERA_DEVICE_API_VERSION_3_1:
+        if (apiVersion == API_VERSION_2) {
+            ALOGV("%s: Camera id %d uses HAL prior to HAL3.2, doesn't support api2 without shim",
+                    __FUNCTION__, cameraId);
+            return -EOPNOTSUPP;
+        } else { // if (apiVersion == API_VERSION_1) {
+            ALOGV("%s: Camera id %d uses older HAL before 3.2, but api1 is always supported",
+                    __FUNCTION__, cameraId);
+            return OK;
+        }
+      case CAMERA_DEVICE_API_VERSION_3_2:
+        ALOGV("%s: Camera id %d uses HAL3.2 or newer, supports api1/api2 directly",
+                __FUNCTION__, cameraId);
+        return OK;
+      case -1:
+        ALOGE("%s: Invalid camera id %d", __FUNCTION__, cameraId);
+        return BAD_VALUE;
+      default:
+        ALOGE("%s: Unknown camera device HAL version: %d", __FUNCTION__, deviceVersion);
+        return INVALID_OPERATION;
+    }
+
+    return OK;
+}
+
 void CameraService::removeClientByRemote(const wp<IBinder>& remoteBinder) {
     int callingPid = getCallingPid();
     LOG1("CameraService::removeClientByRemote E (pid %d)", callingPid);
@@ -1090,6 +1279,8 @@
     switch (code) {
         case BnCameraService::CONNECT:
         case BnCameraService::CONNECT_PRO:
+        case BnCameraService::CONNECT_DEVICE:
+        case BnCameraService::CONNECT_LEGACY:
             const int pid = getCallingPid();
             const int self_pid = getpid();
             if (pid != self_pid) {
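getLegacyParametersLazy() above follows a check / initialize / re-check pattern: the service lock is held only around the cache lookup, dropped before the potentially slow initializeShimMetadata() call, and then re-taken to read the now-populated entry. A rough standalone sketch of the same pattern, with std::mutex and std::map standing in for mServiceLock and mShimParams (all names here are illustrative, not the real service code):

    #include <map>
    #include <mutex>
    #include <string>

    static std::mutex gCacheLock;                   // stand-in for the service lock
    static std::map<int, std::string> gShimParams;  // cameraId -> flattened parameters

    // Stand-in for the expensive initialization (the real code queries the HAL).
    static std::string initializeShimEntry(int cameraId) {
        return "preview-size=640x480;camera-id=" + std::to_string(cameraId);
    }

    bool getParamsLazy(int cameraId, std::string *out) {
        if (out == nullptr) return false;
        {   // Fast path: look up the cache while holding the lock.
            std::lock_guard<std::mutex> lock(gCacheLock);
            auto it = gShimParams.find(cameraId);
            if (it != gShimParams.end()) { *out = it->second; return true; }
        }
        // Slow path: initialize without holding the cache lock.
        std::string params = initializeShimEntry(cameraId);
        {   // Publish the entry and hand it back under the lock.
            std::lock_guard<std::mutex> lock(gCacheLock);
            gShimParams.emplace(cameraId, params);
            *out = gShimParams[cameraId];
        }
        return true;
    }

    int main() {
        std::string p;
        return (getParamsLazy(0, &p) && !p.empty()) ? 0 : 1;
    }
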
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index ee39d52..28590eb 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -83,6 +83,11 @@
             /*out*/
             sp<ICamera>& device);
 
+    virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient, int cameraId,
+            int halVersion, const String16& clientPackageName, int clientUid,
+            /*out*/
+            sp<ICamera>& device);
+
     virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb,
             int cameraId, const String16& clientPackageName, int clientUid,
             /*out*/
@@ -100,6 +105,15 @@
     virtual status_t    removeListener(
                                     const sp<ICameraServiceListener>& listener);
 
+    virtual status_t    getLegacyParameters(
+            int cameraId,
+            /*out*/
+            String16* parameters);
+
+    // OK = supports api of that version, -EOPNOTSUPP = does not support
+    virtual status_t    supportsCameraApi(
+            int cameraId, int apiVersion);
+
     // Extra permissions checks
     virtual status_t    onTransact(uint32_t code, const Parcel& data,
                                    Parcel* reply, uint32_t flags);
@@ -414,6 +428,14 @@
     status_t            initializeShimMetadata(int cameraId);
 
     /**
+     * Get the cached CameraParameters for the camera. If they haven't been
+     * cached yet, then initialize them for the first time.
+     *
+     * Returns OK on success, or a negative error code.
+     */
+    status_t            getLegacyParametersLazy(int cameraId, /*out*/CameraParameters* parameters);
+
+    /**
      * Generate the CameraCharacteristics metadata required by the Camera2 API
      * from the available HAL1 CameraParameters and CameraInfo.
      *
@@ -433,7 +455,8 @@
                                       int clientUid,
                                       int callingPid,
                                       /*out*/
-                                      sp<Client>& client);
+                                      sp<Client>& client,
+                                      int halVersion = CAMERA_HAL_API_VERSION_UNSPECIFIED);
 };
 
 } // namespace android
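supportsCameraApi() in CameraService.cpp reduces to one rule: devices on a HAL older than 3.2 can only be used through api1 (api2 needs the shim), while HAL3.2+ devices support both APIs directly. A small sketch of that decision with illustrative stand-in constants (not the real camera_common.h values):

    #include <cstdio>

    enum ApiVersion { API_VERSION_1 = 1, API_VERSION_2 = 2 };

    // Illustrative device HAL version encodings, ordered like CAMERA_DEVICE_API_VERSION_*.
    constexpr int DEVICE_HAL_1_0 = 0x100;
    constexpr int DEVICE_HAL_3_2 = 0x302;

    // True if the given public API level can use the device without a shim.
    bool supportsApiDirectly(int deviceVersion, ApiVersion api) {
        if (deviceVersion >= DEVICE_HAL_3_2) return true;  // HAL3.2+: api1 and api2
        return api == API_VERSION_1;                       // older HALs: api1 only
    }

    int main() {
        std::printf("HAL1.0 + api2 -> %d\n", supportsApiDirectly(DEVICE_HAL_1_0, API_VERSION_2)); // 0
        std::printf("HAL3.2 + api2 -> %d\n", supportsApiDirectly(DEVICE_HAL_3_2, API_VERSION_2)); // 1
        return 0;
    }
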
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 0447979..1642896 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -53,12 +53,10 @@
         int cameraFacing,
         int clientPid,
         uid_t clientUid,
-        int servicePid,
-        int deviceVersion):
+        int servicePid):
         Camera2ClientBase(cameraService, cameraClient, clientPackageName,
                 cameraId, cameraFacing, clientPid, clientUid, servicePid),
-        mParameters(cameraId, cameraFacing),
-        mDeviceVersion(deviceVersion)
+        mParameters(cameraId, cameraFacing)
 {
     ATRACE_CALL();
 
@@ -80,7 +78,7 @@
     {
         SharedParameters::Lock l(mParameters);
 
-        res = l.mParameters.initialize(&(mDevice->info()));
+        res = l.mParameters.initialize(&(mDevice->info()), mDeviceVersion);
         if (res != OK) {
             ALOGE("%s: Camera %d: unable to build defaults: %s (%d)",
                     __FUNCTION__, mCameraId, strerror(-res), res);
@@ -755,6 +753,7 @@
     // ever take a picture.
     // TODO: Find a better compromise, though this likely would involve HAL
     // changes.
+    int lastJpegStreamId = mJpegProcessor->getStreamId();
     res = updateProcessorStream(mJpegProcessor, params);
     if (res != OK) {
         ALOGE("%s: Camera %d: Can't pre-configure still image "
@@ -762,6 +761,7 @@
                 __FUNCTION__, mCameraId, strerror(-res), res);
         return res;
     }
+    bool jpegStreamChanged = mJpegProcessor->getStreamId() != lastJpegStreamId;
 
     Vector<int32_t> outputStreams;
     bool callbacksEnabled = (params.previewCallbackFlags &
@@ -817,6 +817,12 @@
                     __FUNCTION__, mCameraId, strerror(-res), res);
             return res;
         }
+
+        if (jpegStreamChanged) {
+            ALOGV("%s: Camera %d: Clear ZSL buffer queue when Jpeg size is changed",
+                    __FUNCTION__, mCameraId);
+            mZslProcessor->clearZslQueue();
+        }
         outputStreams.push(getZslStreamId());
     } else {
         mZslProcessor->deleteStream();
@@ -1270,6 +1276,7 @@
 
         ALOGV("%s: Camera %d: Starting picture capture", __FUNCTION__, mCameraId);
 
+        int lastJpegStreamId = mJpegProcessor->getStreamId();
         res = updateProcessorStream(mJpegProcessor, l.mParameters);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't set up still image stream: %s (%d)",
@@ -1277,6 +1284,14 @@
             return res;
         }
         takePictureCounter = ++l.mParameters.takePictureCounter;
+
+        // Clear ZSL buffer queue when Jpeg size is changed.
+        bool jpegStreamChanged = mJpegProcessor->getStreamId() != lastJpegStreamId;
+        if (l.mParameters.zslMode && jpegStreamChanged) {
+            ALOGV("%s: Camera %d: Clear ZSL buffer queue when Jpeg size is changed",
+                    __FUNCTION__, mCameraId);
+            mZslProcessor->clearZslQueue();
+        }
     }
 
     ATRACE_ASYNC_BEGIN(kTakepictureLabel, takePictureCounter);
@@ -1659,8 +1674,8 @@
 }
 
 status_t Camera2Client::registerFrameListener(int32_t minId, int32_t maxId,
-        wp<camera2::FrameProcessor::FilteredListener> listener) {
-    return mFrameProcessor->registerListener(minId, maxId, listener);
+        wp<camera2::FrameProcessor::FilteredListener> listener, bool sendPartials) {
+    return mFrameProcessor->registerListener(minId, maxId, listener, sendPartials);
 }
 
 status_t Camera2Client::removeFrameListener(int32_t minId, int32_t maxId,
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index fe0bf74..5ce757a 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -89,8 +89,7 @@
             int cameraFacing,
             int clientPid,
             uid_t clientUid,
-            int servicePid,
-            int deviceVersion);
+            int servicePid);
 
     virtual ~Camera2Client();
 
@@ -118,7 +117,8 @@
     int getZslStreamId() const;
 
     status_t registerFrameListener(int32_t minId, int32_t maxId,
-            wp<camera2::FrameProcessor::FilteredListener> listener);
+            wp<camera2::FrameProcessor::FilteredListener> listener,
+            bool sendPartials = true);
     status_t removeFrameListener(int32_t minId, int32_t maxId,
             wp<camera2::FrameProcessor::FilteredListener> listener);
 
@@ -170,7 +170,6 @@
 
     void     setPreviewCallbackFlagL(Parameters &params, int flag);
     status_t updateRequests(Parameters &params);
-    int mDeviceVersion;
 
     // Used with stream IDs
     static const int NO_STREAM = -1;
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index 30b7bb8..517226d 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -79,7 +79,7 @@
         ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
         mHardware.clear();
-        return NO_INIT;
+        return res;
     }
 
     mHardware->setCallbacks(notifyCallback,
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 8268f65..cb9aca6 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -350,8 +350,10 @@
         return DONE;
     }
 
+    // We don't want to get partial results for ZSL capture.
     client->registerFrameListener(mCaptureId, mCaptureId + 1,
-            this);
+            this,
+            /*sendPartials*/false);
 
     // TODO: Actually select the right thing here.
     res = processor->pushToReprocess(mCaptureId);
@@ -393,8 +395,14 @@
 
     bool isAeConverged = false;
     // Get the onFrameAvailable callback when the requestID == mCaptureId
+    // We don't want to get partial results for normal capture, as we need to
+    // get ANDROID_SENSOR_TIMESTAMP from the capture result, but a partial
+    // result is not guaranteed to have this metadata available.
+    // TODO: Update to use the HALv3 shutter notification to remove the
+    // need for this listener and make it faster. See bug 12530628.
     client->registerFrameListener(mCaptureId, mCaptureId + 1,
-            this);
+            this,
+            /*sendPartials*/false);
 
     {
         Mutex::Autolock l(mInputMutex);
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 69bea24..3de5d90 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -78,7 +78,7 @@
     }
 
     if (mSynthesize3ANotify) {
-        process3aState(frame.mMetadata, client);
+        process3aState(frame, client);
     }
 
     return FrameProcessorBase::processSingleFrame(frame, device);
@@ -212,14 +212,15 @@
     return OK;
 }
 
-status_t FrameProcessor::process3aState(const CameraMetadata &frame,
+status_t FrameProcessor::process3aState(const CaptureResult &frame,
         const sp<Camera2Client> &client) {
 
     ATRACE_CALL();
+    const CameraMetadata &metadata = frame.mMetadata;
     camera_metadata_ro_entry_t entry;
     int cameraId = client->getCameraId();
 
-    entry = frame.find(ANDROID_REQUEST_FRAME_COUNT);
+    entry = metadata.find(ANDROID_REQUEST_FRAME_COUNT);
     int32_t frameNumber = entry.data.i32[0];
 
     // Don't send 3A notifications for the same frame number twice
@@ -238,26 +239,31 @@
 
     // TODO: Also use AE mode, AE trigger ID
 
-    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AF_MODE,
+    gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
             &new3aState.afMode, frameNumber, cameraId);
 
-    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AWB_MODE,
+    gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
             &new3aState.awbMode, frameNumber, cameraId);
 
-    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AE_STATE,
+    gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
             &new3aState.aeState, frameNumber, cameraId);
 
-    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AF_STATE,
+    gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
             &new3aState.afState, frameNumber, cameraId);
 
-    gotAllStates &= get3aResult<uint8_t>(frame, ANDROID_CONTROL_AWB_STATE,
+    gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
             &new3aState.awbState, frameNumber, cameraId);
 
-    gotAllStates &= get3aResult<int32_t>(frame, ANDROID_CONTROL_AF_TRIGGER_ID,
-            &new3aState.afTriggerId, frameNumber, cameraId);
+    if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
+        new3aState.afTriggerId = frame.mResultExtras.afTriggerId;
+        new3aState.aeTriggerId = frame.mResultExtras.precaptureTriggerId;
+    } else {
+        gotAllStates &= get3aResult<int32_t>(metadata, ANDROID_CONTROL_AF_TRIGGER_ID,
+                 &new3aState.afTriggerId, frameNumber, cameraId);
 
-    gotAllStates &= get3aResult<int32_t>(frame, ANDROID_CONTROL_AE_PRECAPTURE_ID,
-            &new3aState.aeTriggerId, frameNumber, cameraId);
+        gotAllStates &= get3aResult<int32_t>(metadata, ANDROID_CONTROL_AE_PRECAPTURE_ID,
+                 &new3aState.aeTriggerId, frameNumber, cameraId);
+    }
 
     if (!gotAllStates) return BAD_VALUE;
 
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 514bd1a..4afca50 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -58,7 +58,7 @@
             const sp<Camera2Client> &client);
 
     // Send 3A state change notifications to client based on frame metadata
-    status_t process3aState(const CameraMetadata &frame,
+    status_t process3aState(const CaptureResult &frame,
             const sp<Camera2Client> &client);
 
     // Helper for process3aState
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 51b1980..6459300 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -29,6 +29,7 @@
 
 #include "Parameters.h"
 #include "system/camera.h"
+#include "hardware/camera_common.h"
 #include <media/MediaProfiles.h>
 #include <media/mediarecorder.h>
 
@@ -45,7 +46,7 @@
 Parameters::~Parameters() {
 }
 
-status_t Parameters::initialize(const CameraMetadata *info) {
+status_t Parameters::initialize(const CameraMetadata *info, int deviceVersion) {
     status_t res;
 
     if (info->entryCount() == 0) {
@@ -53,6 +54,7 @@
         return BAD_VALUE;
     }
     Parameters::info = info;
+    mDeviceVersion = deviceVersion;
 
     res = buildFastInfo();
     if (res != OK) return res;
@@ -140,16 +142,14 @@
     previewTransform = degToTransform(0,
             cameraFacing == CAMERA_FACING_FRONT);
 
-    camera_metadata_ro_entry_t availableFormats =
-        staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
-
     {
         String8 supportedPreviewFormats;
+        SortedVector<int32_t> outputFormats = getAvailableOutputFormats();
         bool addComma = false;
-        for (size_t i=0; i < availableFormats.count; i++) {
+        for (size_t i=0; i < outputFormats.size(); i++) {
             if (addComma) supportedPreviewFormats += ",";
             addComma = true;
-            switch (availableFormats.data.i32[i]) {
+            switch (outputFormats[i]) {
             case HAL_PIXEL_FORMAT_YCbCr_422_SP:
                 supportedPreviewFormats +=
                     CameraParameters::PIXEL_FORMAT_YUV422SP;
@@ -191,7 +191,7 @@
 
             default:
                 ALOGW("%s: Camera %d: Unknown preview format: %x",
-                        __FUNCTION__, cameraId, availableFormats.data.i32[i]);
+                        __FUNCTION__, cameraId, outputFormats[i]);
                 addComma = false;
                 break;
             }
@@ -239,24 +239,23 @@
                 supportedPreviewFrameRates);
     }
 
-    camera_metadata_ro_entry_t availableJpegSizes =
-        staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES, 2);
-    if (!availableJpegSizes.count) return NO_INIT;
+    Vector<Size> availableJpegSizes = getAvailableJpegSizes();
+    if (!availableJpegSizes.size()) return NO_INIT;
 
     // TODO: Pick maximum
-    pictureWidth = availableJpegSizes.data.i32[0];
-    pictureHeight = availableJpegSizes.data.i32[1];
+    pictureWidth = availableJpegSizes[0].width;
+    pictureHeight = availableJpegSizes[0].height;
 
     params.setPictureSize(pictureWidth,
             pictureHeight);
 
     {
         String8 supportedPictureSizes;
-        for (size_t i=0; i < availableJpegSizes.count; i += 2) {
+        for (size_t i=0; i < availableJpegSizes.size(); i++) {
             if (i != 0) supportedPictureSizes += ",";
             supportedPictureSizes += String8::format("%dx%d",
-                    availableJpegSizes.data.i32[i],
-                    availableJpegSizes.data.i32[i+1]);
+                    availableJpegSizes[i].width,
+                    availableJpegSizes[i].height);
         }
         params.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES,
                 supportedPictureSizes);
@@ -952,9 +951,8 @@
         staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
     if (!availableFocalLengths.count) return NO_INIT;
 
-    camera_metadata_ro_entry_t availableFormats =
-        staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
-    if (!availableFormats.count) return NO_INIT;
+    SortedVector<int32_t> availableFormats = getAvailableOutputFormats();
+    if (!availableFormats.size()) return NO_INIT;
 
 
     if (sceneModeOverrides.count > 0) {
@@ -1038,8 +1036,8 @@
 
     // Check if the HAL supports HAL_PIXEL_FORMAT_YCbCr_420_888
     fastInfo.useFlexibleYuv = false;
-    for (size_t i = 0; i < availableFormats.count; i++) {
-        if (availableFormats.data.i32[i] == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+    for (size_t i = 0; i < availableFormats.size(); i++) {
+        if (availableFormats[i] == HAL_PIXEL_FORMAT_YCbCr_420_888) {
             fastInfo.useFlexibleYuv = true;
             break;
         }
@@ -1198,8 +1196,7 @@
                     "is active!", __FUNCTION__);
             return BAD_VALUE;
         }
-        camera_metadata_ro_entry_t availableFormats =
-            staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
+        SortedVector<int32_t> availableFormats = getAvailableOutputFormats();
         // If using flexible YUV, always support NV21/YV12. Otherwise, check
         // HAL's list.
         if (! (fastInfo.useFlexibleYuv &&
@@ -1208,11 +1205,10 @@
                  validatedParams.previewFormat ==
                         HAL_PIXEL_FORMAT_YV12) ) ) {
             // Not using flexible YUV format, so check explicitly
-            for (i = 0; i < availableFormats.count; i++) {
-                if (availableFormats.data.i32[i] ==
-                        validatedParams.previewFormat) break;
+            for (i = 0; i < availableFormats.size(); i++) {
+                if (availableFormats[i] == validatedParams.previewFormat) break;
             }
-            if (i == availableFormats.count) {
+            if (i == availableFormats.size()) {
                 ALOGE("%s: Requested preview format %s (0x%x) is not supported",
                         __FUNCTION__, newParams.getPreviewFormat(),
                         validatedParams.previewFormat);
@@ -1302,15 +1298,14 @@
             &validatedParams.pictureHeight);
     if (validatedParams.pictureWidth == pictureWidth ||
             validatedParams.pictureHeight == pictureHeight) {
-        camera_metadata_ro_entry_t availablePictureSizes =
-            staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
-        for (i = 0; i < availablePictureSizes.count; i+=2) {
-            if ((availablePictureSizes.data.i32[i] ==
+        Vector<Size> availablePictureSizes = getAvailableJpegSizes();
+        for (i = 0; i < availablePictureSizes.size(); i++) {
+            if ((availablePictureSizes[i].width ==
                     validatedParams.pictureWidth) &&
-                (availablePictureSizes.data.i32[i+1] ==
+                (availablePictureSizes[i].height ==
                     validatedParams.pictureHeight)) break;
         }
-        if (i == availablePictureSizes.count) {
+        if (i == availablePictureSizes.size()) {
             ALOGE("%s: Requested picture size %d x %d is not supported",
                     __FUNCTION__, validatedParams.pictureWidth,
                     validatedParams.pictureHeight);
@@ -2527,22 +2522,37 @@
         ALOGE("%s: Input size is null", __FUNCTION__);
         return BAD_VALUE;
     }
+    sizes->clear();
 
-    const size_t SIZE_COUNT = sizeof(Size) / sizeof(int);
-    camera_metadata_ro_entry_t availableProcessedSizes =
-        staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, SIZE_COUNT);
-    if (availableProcessedSizes.count < SIZE_COUNT) return BAD_VALUE;
-
-    Size filteredSize;
-    for (size_t i = 0; i < availableProcessedSizes.count; i += SIZE_COUNT) {
-        filteredSize.width = availableProcessedSizes.data.i32[i];
-        filteredSize.height = availableProcessedSizes.data.i32[i+1];
-            // Need skip the preview sizes that are too large.
-            if (filteredSize.width <= limit.width &&
-                    filteredSize.height <= limit.height) {
-                sizes->push(filteredSize);
+    if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+        Vector<StreamConfiguration> scs = getStreamConfigurations();
+        for (size_t i=0; i < scs.size(); i++) {
+            const StreamConfiguration &sc = scs[i];
+            if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+                    sc.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
+                    sc.width <= limit.width && sc.height <= limit.height) {
+                Size sz = {sc.width, sc.height};
+                sizes->push(sz);
             }
+        }
+    } else {
+        const size_t SIZE_COUNT = sizeof(Size) / sizeof(int);
+        camera_metadata_ro_entry_t availableProcessedSizes =
+            staticInfo(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, SIZE_COUNT);
+        if (availableProcessedSizes.count < SIZE_COUNT) return BAD_VALUE;
+
+        Size filteredSize;
+        for (size_t i = 0; i < availableProcessedSizes.count; i += SIZE_COUNT) {
+            filteredSize.width = availableProcessedSizes.data.i32[i];
+            filteredSize.height = availableProcessedSizes.data.i32[i+1];
+                // Need to skip the preview sizes that are too large.
+                if (filteredSize.width <= limit.width &&
+                        filteredSize.height <= limit.height) {
+                    sizes->push(filteredSize);
+                }
+        }
     }
+
     if (sizes->isEmpty()) {
         ALOGE("generated preview size list is empty!!");
         return BAD_VALUE;
@@ -2576,6 +2586,78 @@
     return maxSize;
 }
 
+Vector<Parameters::StreamConfiguration> Parameters::getStreamConfigurations() {
+    const int STREAM_CONFIGURATION_SIZE = 4;
+    const int STREAM_FORMAT_OFFSET = 0;
+    const int STREAM_WIDTH_OFFSET = 1;
+    const int STREAM_HEIGHT_OFFSET = 2;
+    const int STREAM_IS_INPUT_OFFSET = 3;
+    Vector<StreamConfiguration> scs;
+    if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
+        ALOGE("StreamConfiguration is only valid after device HAL 3.2!");
+        return scs;
+    }
+
+    camera_metadata_ro_entry_t availableStreamConfigs =
+                staticInfo(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+    for (size_t i=0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) {
+        int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET];
+        int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET];
+        int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET];
+        int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET];
+        StreamConfiguration sc = {format, width, height, isInput};
+        scs.add(sc);
+    }
+    return scs;
+}
+
+SortedVector<int32_t> Parameters::getAvailableOutputFormats() {
+    SortedVector<int32_t> outputFormats; // Non-duplicated output formats
+    if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+        Vector<StreamConfiguration> scs = getStreamConfigurations();
+        for (size_t i=0; i < scs.size(); i++) {
+            const StreamConfiguration &sc = scs[i];
+            if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT) {
+                outputFormats.add(sc.format);
+            }
+        }
+    } else {
+        camera_metadata_ro_entry_t availableFormats = staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
+        for (size_t i=0; i < availableFormats.count; i++) {
+            outputFormats.add(availableFormats.data.i32[i]);
+        }
+    }
+    return outputFormats;
+}
+
+Vector<Parameters::Size> Parameters::getAvailableJpegSizes() {
+    Vector<Parameters::Size> jpegSizes;
+    if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+        Vector<StreamConfiguration> scs = getStreamConfigurations();
+        for (size_t i=0; i < scs.size(); i++) {
+            const StreamConfiguration &sc = scs[i];
+            if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+                    sc.format == HAL_PIXEL_FORMAT_BLOB) {
+                Size sz = {sc.width, sc.height};
+                jpegSizes.add(sz);
+            }
+        }
+    } else {
+        const int JPEG_SIZE_ENTRY_COUNT = 2;
+        const int WIDTH_OFFSET = 0;
+        const int HEIGHT_OFFSET = 1;
+        camera_metadata_ro_entry_t availableJpegSizes =
+            staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
+        for (size_t i=0; i < availableJpegSizes.count; i+= JPEG_SIZE_ENTRY_COUNT) {
+            int width = availableJpegSizes.data.i32[i + WIDTH_OFFSET];
+            int height = availableJpegSizes.data.i32[i + HEIGHT_OFFSET];
+            Size sz = {width, height};
+            jpegSizes.add(sz);
+        }
+    }
+    return jpegSizes;
+}
+
 Parameters::CropRegion Parameters::calculateCropRegion(
                             Parameters::CropRegion::Outputs outputs) const {
 
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index 28dd788..f95c69a 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -226,7 +226,7 @@
     ~Parameters();
 
     // Sets up default parameters
-    status_t initialize(const CameraMetadata *info);
+    status_t initialize(const CameraMetadata *info, int deviceVersion);
 
     // Build fast-access device static info from static info
     status_t buildFastInfo();
@@ -346,6 +346,24 @@
     status_t getFilteredSizes(Size limit, Vector<Size> *sizes);
     // Get max size (from the size array) that matches the given aspect ratio.
     Size getMaxSizeForRatio(float ratio, const int32_t* sizeArray, size_t count);
+
+    struct StreamConfiguration {
+        int32_t format;
+        int32_t width;
+        int32_t height;
+        int32_t isInput;
+    };
+    // Helper function to extract the available stream configurations.
+    // Only valid since device HAL version 3.2;
+    // returns an empty Vector if the device HAL version does not support it.
+    Vector<StreamConfiguration> getStreamConfigurations();
+
+    // Helper function to get non-duplicated available output formats
+    SortedVector<int32_t> getAvailableOutputFormats();
+    // Helper function to get available output jpeg sizes
+    Vector<Size> getAvailableJpegSizes();
+
+    int mDeviceVersion;
 };
 
 // This class encapsulates the Parameters class so that it can only be accessed
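The StreamConfiguration helpers declared above walk the flat ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS array, which packs entries as [format, width, height, isInput] quadruples. A self-contained sketch of that decoding step, with simplified types standing in for the camera metadata API (names are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct StreamConfig { int32_t format, width, height, isInput; };

    // Decode a flat int32 array into StreamConfig entries; entries are packed as
    // [format, width, height, isInput], matching the layout read by
    // getStreamConfigurations() above.
    std::vector<StreamConfig> decodeConfigs(const int32_t *data, size_t count) {
        constexpr size_t kEntrySize = 4;
        std::vector<StreamConfig> out;
        if (data == nullptr || count % kEntrySize != 0) return out; // malformed metadata
        for (size_t i = 0; i < count; i += kEntrySize) {
            out.push_back({data[i], data[i + 1], data[i + 2], data[i + 3]});
        }
        return out;
    }

    int main() {
        // One output entry (isInput == 0) and one input entry (isInput == 1).
        const int32_t raw[] = { 0x21, 4000, 3000, 0,  0x23, 1920, 1080, 1 };
        std::vector<StreamConfig> configs = decodeConfigs(raw, sizeof(raw) / sizeof(raw[0]));
        return configs.size() == 2 ? 0 : 1;
    }
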
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index 2064e2c..99abced 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -430,10 +430,13 @@
 
     Mutex::Autolock m(mMutex);
 
-    // If a recording stream is being started up, free up any
-    // outstanding buffers left from the previous recording session.
-    // There should never be any, so if there are, warn about it.
-    if (isStreamActive(outputStreams, mRecordingStreamId)) {
+    // If a recording stream is being started up and no recording
+    // stream is active yet, free up any outstanding buffers left
+    // from the previous recording session. There should never be
+    // any, so if there are, warn about it.
+    bool isRecordingStreamIdle = !isStreamActive(mActiveStreamIds, mRecordingStreamId);
+    bool startRecordingStream = isStreamActive(outputStreams, mRecordingStreamId);
+    if (startRecordingStream && isRecordingStreamIdle) {
         releaseAllRecordingFramesLocked();
     }
 
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 2a2a5af..10463c1 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -202,7 +202,8 @@
     }
     client->registerFrameListener(Camera2Client::kPreviewRequestIdStart,
             Camera2Client::kPreviewRequestIdEnd,
-            this);
+            this,
+            /*sendPartials*/false);
 
     return OK;
 }
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index 1dcb718..79ea2c3 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -148,7 +148,8 @@
     }
     client->registerFrameListener(Camera2Client::kPreviewRequestIdStart,
             Camera2Client::kPreviewRequestIdEnd,
-            this);
+            this,
+            /*sendPartials*/false);
 
     return OK;
 }
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 8154e59..de42cee 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -82,7 +82,7 @@
     mFrameProcessor->registerListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
                                       FRAME_PROCESSOR_LISTENER_MAX_ID,
                                       /*listener*/this,
-                                      /*quirkSendPartials*/true);
+                                      /*sendPartials*/true);
 
     return OK;
 }
@@ -102,7 +102,7 @@
 status_t CameraDeviceClient::submitRequestList(List<sp<CaptureRequest> > requests,
                                                bool streaming, int64_t* lastFrameNumber) {
     ATRACE_CALL();
-    ALOGV("%s-start of function. Request list size %d", __FUNCTION__, requests.size());
+    ALOGV("%s-start of function. Request list size %zu", __FUNCTION__, requests.size());
 
     status_t res;
     if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
@@ -177,7 +177,7 @@
 
         metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1);
         loopCounter++; // loopCounter starts from 1
-        ALOGV("%s: Camera %d: Creating request with ID %d (%d of %d)",
+        ALOGV("%s: Camera %d: Creating request with ID %d (%d of %zu)",
               __FUNCTION__, mCameraId, requestId, loopCounter, requests.size());
 
         metadataRequestList.push_back(metadata);
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 19efd30..13c9f48 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -54,7 +54,8 @@
         int servicePid):
         TClientBase(cameraService, remoteCallback, clientPackageName,
                 cameraId, cameraFacing, clientPid, clientUid, servicePid),
-        mSharedCameraCallbacks(remoteCallback)
+        mSharedCameraCallbacks(remoteCallback),
+        mDeviceVersion(cameraService->getDeviceVersion(cameraId))
 {
     ALOGI("Camera %d: Opened", cameraId);
 
@@ -280,6 +281,11 @@
 }
 
 template <typename TClientBase>
+int Camera2ClientBase<TClientBase>::getCameraDeviceVersion() const {
+    return mDeviceVersion;
+}
+
+template <typename TClientBase>
 const sp<CameraDeviceBase>& Camera2ClientBase<TClientBase>::getCameraDevice() {
     return mDevice;
 }
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 9feca93..f57d204 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -76,6 +76,7 @@
     int                   getCameraId() const;
     const sp<CameraDeviceBase>&
                           getCameraDevice();
+    int                   getCameraDeviceVersion() const;
     const sp<CameraService>&
                           getCameraService();
 
@@ -122,6 +123,7 @@
 
     /** CameraDeviceBase instance wrapping HAL2+ entry */
 
+    const int mDeviceVersion;
     sp<CameraDeviceBase>  mDevice;
 
     /** Utility members */
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.cpp b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
index f6a971a..482f687 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.cpp
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.cpp
@@ -37,11 +37,23 @@
 }
 
 status_t FrameProcessorBase::registerListener(int32_t minId,
-        int32_t maxId, wp<FilteredListener> listener, bool quirkSendPartials) {
+        int32_t maxId, wp<FilteredListener> listener, bool sendPartials) {
     Mutex::Autolock l(mInputMutex);
+    List<RangeListener>::iterator item = mRangeListeners.begin();
+    while (item != mRangeListeners.end()) {
+        if (item->minId == minId &&
+                item->maxId == maxId &&
+                item->listener == listener) {
+            // already registered, just return
+            ALOGV("%s: Attempt to register the same client twice, ignoring",
+                    __FUNCTION__);
+            return OK;
+        }
+        item++;
+    }
     ALOGV("%s: Registering listener for frame id range %d - %d",
             __FUNCTION__, minId, maxId);
-    RangeListener rListener = { minId, maxId, listener, quirkSendPartials };
+    RangeListener rListener = { minId, maxId, listener, sendPartials };
     mRangeListeners.push_back(rListener);
     return OK;
 }
@@ -176,7 +188,7 @@
         List<RangeListener>::iterator item = mRangeListeners.begin();
         while (item != mRangeListeners.end()) {
             if (requestId >= item->minId && requestId < item->maxId &&
-                    (!quirkIsPartial || item->quirkSendPartials)) {
+                    (!quirkIsPartial || item->sendPartials)) {
                 sp<FilteredListener> listener = item->listener.promote();
                 if (listener == 0) {
                     item = mRangeListeners.erase(item);
diff --git a/services/camera/libcameraservice/common/FrameProcessorBase.h b/services/camera/libcameraservice/common/FrameProcessorBase.h
index 15a014e..3649c45 100644
--- a/services/camera/libcameraservice/common/FrameProcessorBase.h
+++ b/services/camera/libcameraservice/common/FrameProcessorBase.h
@@ -44,11 +44,12 @@
     };
 
     // Register a listener for a range of IDs [minId, maxId). Multiple listeners
-    // can be listening to the same range.
-    // QUIRK: sendPartials controls whether partial results will be sent.
+    // can be listening to the same range. Registering the same listener with
+    // the same range of IDs has no effect.
+    // sendPartials controls whether partial results will be sent.
     status_t registerListener(int32_t minId, int32_t maxId,
                               wp<FilteredListener> listener,
-                              bool quirkSendPartials = true);
+                              bool sendPartials = true);
     status_t removeListener(int32_t minId, int32_t maxId,
                             wp<FilteredListener> listener);
 
@@ -66,7 +67,7 @@
         int32_t minId;
         int32_t maxId;
         wp<FilteredListener> listener;
-        bool quirkSendPartials;
+        bool sendPartials;
     };
     List<RangeListener> mRangeListeners;
 
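The renamed sendPartials flag above is consulted in FrameProcessorBase's dispatch loop together with the half-open [minId, maxId) range check. A compact sketch of that filtering, with std::function callbacks standing in for the FilteredListener interface (simplified, illustrative types):

    #include <cstdint>
    #include <functional>
    #include <vector>

    struct RangeListener {
        int32_t minId, maxId;                 // half-open request-ID range [minId, maxId)
        bool sendPartials;                    // opt-in to partial results
        std::function<void(int32_t)> onResult;
    };

    // Deliver a result to every listener whose range covers requestId; partial
    // results are skipped for listeners that did not opt in, mirroring the
    // range/partial check shown in the FrameProcessorBase.cpp hunk above.
    void dispatch(const std::vector<RangeListener> &listeners, int32_t requestId, bool isPartial) {
        for (const RangeListener &l : listeners) {
            if (requestId >= l.minId && requestId < l.maxId &&
                    (!isPartial || l.sendPartials)) {
                l.onResult(requestId);
            }
        }
    }

    int main() {
        int delivered = 0;
        std::vector<RangeListener> listeners = {
            {100, 200, /*sendPartials*/false, [&](int32_t) { delivered++; }},
        };
        dispatch(listeners, 150, /*isPartial*/true);   // filtered out: partial, no opt-in
        dispatch(listeners, 150, /*isPartial*/false);  // delivered
        return delivered == 1 ? 0 : 1;
    }
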
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 87b2807..925b645 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -92,8 +92,22 @@
     status_t initialize(hw_module_t *module)
     {
         ALOGI("Opening camera %s", mName.string());
-        int rc = module->methods->open(module, mName.string(),
-                                       (hw_device_t **)&mDevice);
+        camera_module_t *cameraModule = reinterpret_cast<camera_module_t *>(module);
+        camera_info info;
+        status_t res = cameraModule->get_camera_info(atoi(mName.string()), &info);
+        if (res != OK) return res;
+
+        int rc = OK;
+        if (module->module_api_version >= CAMERA_MODULE_API_VERSION_2_3 &&
+            info.device_version > CAMERA_DEVICE_API_VERSION_1_0) {
+            // Open higher version camera device as HAL1.0 device.
+            rc = cameraModule->open_legacy(module, mName.string(),
+                                               CAMERA_DEVICE_API_VERSION_1_0,
+                                               (hw_device_t **)&mDevice);
+        } else {
+            rc = module->methods->open(module, mName.string(),
+                                           (hw_device_t **)&mDevice);
+        }
         if (rc != OK) {
             ALOGE("Could not open camera %s: %d", mName.string(), rc);
             return rc;
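
The dispatch above reduces to a simple version check; the sketch below shows it in isolation, with the two constants given assumed values (the usual (major << 8) | minor encoding of the HAL version macros), not values taken from this patch:

    #include <cstdint>

    // Assumed encodings of CAMERA_MODULE_API_VERSION_2_3 and
    // CAMERA_DEVICE_API_VERSION_1_0; illustrative values only.
    constexpr uint32_t kModuleApi2_3 = 0x0203;
    constexpr uint32_t kDeviceApi1_0 = 0x0100;

    // open_legacy() is used only when the module is new enough to provide it
    // and the device reports a version newer than HAL1.0; otherwise the plain
    // open() entry point is used, exactly as before this change.
    bool shouldOpenAsLegacy(uint32_t moduleApiVersion, uint32_t deviceVersion) {
        return moduleApiVersion >= kModuleApi2_3 && deviceVersion > kDeviceApi1_0;
    }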
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 16d6f42..5973625 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -113,7 +113,6 @@
     }
 
     /** Cross-check device version */
-
     if (device->common.version < CAMERA_DEVICE_API_VERSION_3_0) {
         SET_ERR_L("Could not open camera: "
                 "Camera device should be at least %x, reports %x instead",
@@ -173,6 +172,7 @@
 
     /** Everything is good to go */
 
+    mDeviceVersion = device->common.version;
     mDeviceInfo = info.static_camera_characteristics;
     mHal3Device = device;
     mStatus = STATUS_UNCONFIGURED;
@@ -284,42 +284,74 @@
     return gotLock;
 }
 
+Camera3Device::Size Camera3Device::getMaxJpegResolution() const {
+    int32_t maxJpegWidth = 0, maxJpegHeight = 0;
+    if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+        const int STREAM_CONFIGURATION_SIZE = 4;
+        const int STREAM_FORMAT_OFFSET = 0;
+        const int STREAM_WIDTH_OFFSET = 1;
+        const int STREAM_HEIGHT_OFFSET = 2;
+        const int STREAM_IS_INPUT_OFFSET = 3;
+        camera_metadata_ro_entry_t availableStreamConfigs =
+                mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+        if (availableStreamConfigs.count == 0 ||
+                availableStreamConfigs.count % STREAM_CONFIGURATION_SIZE != 0) {
+            return Size(0, 0);
+        }
+
+        // Get max jpeg size (area-wise).
+        for (size_t i = 0; i < availableStreamConfigs.count; i += STREAM_CONFIGURATION_SIZE) {
+            int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET];
+            int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET];
+            int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET];
+            int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET];
+            if (isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
+                    format == HAL_PIXEL_FORMAT_BLOB &&
+                    (width * height > maxJpegWidth * maxJpegHeight)) {
+                maxJpegWidth = width;
+                maxJpegHeight = height;
+            }
+        }
+    } else {
+        camera_metadata_ro_entry availableJpegSizes =
+                mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
+        if (availableJpegSizes.count == 0 || availableJpegSizes.count % 2 != 0) {
+            return Size(0, 0);
+        }
+
+        // Get max jpeg size (area-wise).
+        for (size_t i = 0; i < availableJpegSizes.count; i += 2) {
+            if ((availableJpegSizes.data.i32[i] * availableJpegSizes.data.i32[i + 1])
+                    > (maxJpegWidth * maxJpegHeight)) {
+                maxJpegWidth = availableJpegSizes.data.i32[i];
+                maxJpegHeight = availableJpegSizes.data.i32[i + 1];
+            }
+        }
+    }
+    return Size(maxJpegWidth, maxJpegHeight);
+}
+
 ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
-    // TODO: replace below with availableStreamConfiguration for HAL3.2+.
-    camera_metadata_ro_entry availableJpegSizes =
-            mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
-    if (availableJpegSizes.count == 0 || availableJpegSizes.count % 2 != 0) {
+    // Get max jpeg size (area-wise).
+    Size maxJpegResolution = getMaxJpegResolution();
+    if (maxJpegResolution.width == 0) {
         ALOGE("%s: Camera %d: Can't find find valid available jpeg sizes in static metadata!",
                 __FUNCTION__, mId);
         return BAD_VALUE;
     }
 
-    // Get max jpeg size (area-wise).
-    int32_t maxJpegWidth = 0, maxJpegHeight = 0;
-    bool foundMax = false;
-    for (size_t i = 0; i < availableJpegSizes.count; i += 2) {
-        if ((availableJpegSizes.data.i32[i] * availableJpegSizes.data.i32[i + 1])
-                > (maxJpegWidth * maxJpegHeight)) {
-            maxJpegWidth = availableJpegSizes.data.i32[i];
-            maxJpegHeight = availableJpegSizes.data.i32[i + 1];
-            foundMax = true;
-        }
-    }
-    if (!foundMax) {
-        return BAD_VALUE;
-    }
-
     // Get max jpeg buffer size
     ssize_t maxJpegBufferSize = 0;
-    camera_metadata_ro_entry jpegMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE);
-    if (jpegMaxSize.count == 0) {
+    camera_metadata_ro_entry jpegBufMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE);
+    if (jpegBufMaxSize.count == 0) {
         ALOGE("%s: Camera %d: Can't find maximum JPEG size in static metadata!", __FUNCTION__, mId);
         return BAD_VALUE;
     }
-    maxJpegBufferSize = jpegMaxSize.data.i32[0];
+    maxJpegBufferSize = jpegBufMaxSize.data.i32[0];
 
     // Calculate final jpeg buffer size for the given resolution.
-    float scaleFactor = ((float) (width * height)) / (maxJpegWidth * maxJpegHeight);
+    float scaleFactor = ((float) (width * height)) /
+            (maxJpegResolution.width * maxJpegResolution.height);
     ssize_t jpegBufferSize = scaleFactor * maxJpegBufferSize;
     // Bound the buffer size to [MIN_JPEG_BUFFER_SIZE, maxJpegBufferSize].
     if (jpegBufferSize > maxJpegBufferSize) {
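
Taken together, the new helper and the rewritten getJpegBufferSize() do two things: scan the HAL3.2 stream configurations (packed as format/width/height/isInput quadruples) for the largest BLOB output, then scale ANDROID_JPEG_MAX_SIZE by the area ratio and clamp the result. A standalone sketch, with plain arrays in place of CameraMetadata and kMinJpegBufferSize used as a stand-in name for the class's lower bound:

    #include <cstddef>
    #include <cstdint>
    #include <sys/types.h>

    struct Size { int32_t width, height; };

    // configs holds count int32_t values laid out as
    // { format, width, height, isInput } quadruples (HAL3.2+ layout).
    Size maxBlobOutputSize(const int32_t *configs, size_t count,
                           int32_t blobFormat, int32_t outputFlag) {
        Size max = {0, 0};
        for (size_t i = 0; i + 3 < count; i += 4) {
            if (configs[i] == blobFormat && configs[i + 3] == outputFlag &&
                    configs[i + 1] * configs[i + 2] > max.width * max.height) {
                max = {configs[i + 1], configs[i + 2]};
            }
        }
        return max;
    }

    // Scale the static maximum JPEG buffer size by the requested area relative
    // to the largest supported JPEG resolution, then bound the result.
    ssize_t scaledJpegBufferSize(uint32_t width, uint32_t height, Size maxRes,
                                 ssize_t maxJpegBufferSize,
                                 ssize_t kMinJpegBufferSize /* assumed lower bound */) {
        float scale = (float)(width * height) /
                ((float)maxRes.width * maxRes.height);
        ssize_t size = (ssize_t)(scale * maxJpegBufferSize);
        if (size > maxJpegBufferSize) size = maxJpegBufferSize;
        if (size < kMinJpegBufferSize) size = kMinJpegBufferSize;
        return size;
    }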
@@ -1156,7 +1188,7 @@
         {
             ANDROID_CONTROL_AF_TRIGGER_ID,
             static_cast<int32_t>(id)
-        },
+        }
     };
 
     return mRequestThread->queueTrigger(trigger,
@@ -1177,7 +1209,7 @@
         {
             ANDROID_CONTROL_AF_TRIGGER_ID,
             static_cast<int32_t>(id)
-        },
+        }
     };
 
     return mRequestThread->queueTrigger(trigger,
@@ -1198,7 +1230,7 @@
         {
             ANDROID_CONTROL_AE_PRECAPTURE_ID,
             static_cast<int32_t>(id)
-        },
+        }
     };
 
     return mRequestThread->queueTrigger(trigger,
@@ -1539,8 +1571,6 @@
     uint8_t aeState;
     uint8_t afState;
     uint8_t awbState;
-    int32_t afTriggerId;
-    int32_t aeTriggerId;
 
     gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_MODE,
         &afMode, frameNumber);
@@ -1557,12 +1587,6 @@
     gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_STATE,
         &awbState, frameNumber);
 
-    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_TRIGGER_ID,
-        &afTriggerId, frameNumber);
-
-    gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AE_PRECAPTURE_ID,
-        &aeTriggerId, frameNumber);
-
     if (!gotAllStates) return false;
 
     ALOGVV("%s: Camera %d: Frame %d, Request ID %d: AF mode %d, AWB mode %d, "
@@ -1571,7 +1595,7 @@
         __FUNCTION__, mId, frameNumber, resultExtras.requestId,
         afMode, awbMode,
         afState, aeState, awbState,
-        afTriggerId, aeTriggerId);
+        resultExtras.afTriggerId, resultExtras.precaptureTriggerId);
 
     // Got all states, so construct a minimal result to send
     // In addition to the above fields, this means adding in
@@ -1635,12 +1659,12 @@
     }
 
     if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_TRIGGER_ID,
-            &afTriggerId, frameNumber)) {
+            &resultExtras.afTriggerId, frameNumber)) {
         return false;
     }
 
     if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AE_PRECAPTURE_ID,
-            &aeTriggerId, frameNumber)) {
+            &resultExtras.precaptureTriggerId, frameNumber)) {
         return false;
     }
 
@@ -2126,6 +2150,17 @@
     return OK;
 }
 
+bool Camera3Device::RequestThread::isRepeatingRequestLocked(const sp<CaptureRequest> requestIn) {
+    if (mRepeatingRequests.empty()) {
+        return false;
+    }
+    int32_t requestId = requestIn->mResultExtras.requestId;
+    const RequestList &repeatRequests = mRepeatingRequests;
+    // All repeating requests are guaranteed to have the same ID, so only check the first request
+    const sp<CaptureRequest> firstRequest = *repeatRequests.begin();
+    return (firstRequest->mResultExtras.requestId == requestId);
+}
+
 status_t Camera3Device::RequestThread::clearRepeatingRequests(/*out*/int64_t *lastFrameNumber) {
     Mutex::Autolock l(mRequestLock);
     mRepeatingRequests.clear();
@@ -2140,6 +2175,18 @@
     Mutex::Autolock l(mRequestLock);
     ALOGV("RequestThread::%s:", __FUNCTION__);
+    // Decrement repeating frame count for those requests never sent to the device.
+    // This must run before mRepeatingRequests is cleared below, since
+    // isRepeatingRequestLocked() relies on it to identify the repeating request ID.
+    // TODO: Remove this after we have proper error handling so these requests
+    // will generate an error callback. This might be the only place calling
+    // isRepeatingRequestLocked. If so, isRepeatingRequestLocked should also be removed.
+    const RequestList &requests = mRequestQueue;
+    for (RequestList::const_iterator it = requests.begin();
+            it != requests.end(); ++it) {
+        if (isRepeatingRequestLocked(*it)) {
+            mRepeatingLastFrameNumber--;
+        }
+    }
     mRepeatingRequests.clear();
     mRequestQueue.clear();
     mTriggerMap.clear();
     if (lastFrameNumber != NULL) {
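
The bookkeeping added to clear() can be pictured with request IDs alone: every repeating request in a burst carries the same ID, so counting queued entries with that ID tells how far mRepeatingLastFrameNumber over-counted requests that will never reach the device. A rough sketch with std::list<int32_t> standing in for RequestList (names are illustrative):

    #include <cstdint>
    #include <list>

    // Number of still-queued requests that belong to the repeating burst,
    // i.e. how much the last repeating frame number should be rolled back.
    int64_t queuedRepeatingCount(const std::list<int32_t> &requestQueue,
                                 const std::list<int32_t> &repeatingRequests) {
        if (repeatingRequests.empty()) return 0;
        int32_t repeatingId = repeatingRequests.front();  // shared by the whole burst
        int64_t n = 0;
        for (int32_t id : requestQueue) {
            if (id == repeatingId) n++;
        }
        return n;
    }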
@@ -2554,13 +2601,29 @@
 
     Mutex::Autolock al(mTriggerMutex);
 
+    sp<Camera3Device> parent = mParent.promote();
+    if (parent == NULL) {
+        CLOGE("RequestThread: Parent is gone");
+        return DEAD_OBJECT;
+    }
+
     CameraMetadata &metadata = request->mSettings;
     size_t count = mTriggerMap.size();
 
     for (size_t i = 0; i < count; ++i) {
         RequestTrigger trigger = mTriggerMap.valueAt(i);
-
         uint32_t tag = trigger.metadataTag;
+
+        if (tag == ANDROID_CONTROL_AF_TRIGGER_ID || tag == ANDROID_CONTROL_AE_PRECAPTURE_ID) {
+            bool isAeTrigger = (trigger.metadataTag == ANDROID_CONTROL_AE_PRECAPTURE_ID);
+            uint32_t triggerId = static_cast<uint32_t>(trigger.entryValue);
+            if (isAeTrigger) request->mResultExtras.precaptureTriggerId = triggerId;
+            else request->mResultExtras.afTriggerId = triggerId;
+            if (parent->mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+                continue; // Trigger ID tag is deprecated since device HAL 3.2
+            }
+        }
+
         camera_metadata_entry entry = metadata.find(tag);
 
         if (entry.count > 0) {
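
In short, the loop above now records the AF or AE precapture trigger ID in the request's result extras in every case, and only writes the deprecated *_TRIGGER_ID tags into the request metadata on devices older than HAL3.2. A minimal sketch of that routing with plain types (not the framework structs):

    #include <cstdint>

    struct ResultExtras {
        int32_t afTriggerId = 0;
        int32_t precaptureTriggerId = 0;
    };

    // Stores the trigger ID in the result extras and reports whether the
    // corresponding metadata tag should still be written (pre-HAL3.2 only).
    bool recordTriggerId(ResultExtras &extras, bool isAeTrigger,
                         int32_t triggerId, bool deviceIsAtLeastHal3_2) {
        if (isAeTrigger) {
            extras.precaptureTriggerId = triggerId;
        } else {
            extras.afTriggerId = triggerId;
        }
        return !deviceIsAtLeastHal3_2;  // the ID tags are deprecated in HAL3.2+
    }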
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 00ae771..61e6572 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -168,6 +168,8 @@
 
     CameraMetadata             mDeviceInfo;
 
+    int                        mDeviceVersion;
+
     enum Status {
         STATUS_ERROR,
         STATUS_UNINITIALIZED,
@@ -297,6 +299,18 @@
      */
     bool               tryLockSpinRightRound(Mutex& lock);
 
+    struct Size {
+        int width;
+        int height;
+        Size(int w, int h) : width(w), height(h) {}
+    };
+
+    /**
+     * Helper function to get the largest Jpeg resolution (in area)
+     * Returns Size(0, 0) if the static metadata is invalid
+     */
+    Size getMaxJpegResolution() const;
+
     /**
      * Get Jpeg buffer size for a given jpeg resolution.
      * Negative values are error codes.
@@ -430,6 +444,9 @@
         // Relay error to parent device object setErrorState
         void               setErrorState(const char *fmt, ...);
 
+        // Returns true if the input request is in mRepeatingRequests. Must be called with mRequestLock held.
+        bool isRepeatingRequestLocked(const sp<CaptureRequest>);
+
         wp<Camera3Device>  mParent;
         wp<camera3::StatusTracker>  mStatusTracker;
         camera3_device_t  *mHal3Device;