am e6105c26: (-s ours) Reconcile with jb-mr2-zeroday-release - do not merge

* commit 'e6105c26a5b506f96e9fe1d3fec64a8b2ed1d6dd':
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index 1b136de..fd78572 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -255,6 +255,14 @@
     mCamera->setPreviewCallbackFlag(flag);
 }
 
+status_t Camera::setPreviewCallbackTarget(
+        const sp<IGraphicBufferProducer>& callbackProducer)
+{
+    sp <ICamera> c = mCamera;
+    if (c == 0) return NO_INIT;
+    return c->setPreviewCallbackTarget(callbackProducer);
+}
+
 // callback from camera service
 void Camera::notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2)
 {
diff --git a/camera/ICamera.cpp b/camera/ICamera.cpp
index 8900867..12356f0 100644
--- a/camera/ICamera.cpp
+++ b/camera/ICamera.cpp
@@ -31,6 +31,7 @@
     DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
     SET_PREVIEW_TEXTURE,
     SET_PREVIEW_CALLBACK_FLAG,
+    SET_PREVIEW_CALLBACK_TARGET,
     START_PREVIEW,
     STOP_PREVIEW,
     AUTO_FOCUS,
@@ -65,6 +66,7 @@
         Parcel data, reply;
         data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
         remote()->transact(DISCONNECT, data, &reply);
+        reply.readExceptionCode();
     }
 
     // pass the buffered IGraphicBufferProducer to the camera service
@@ -90,6 +92,18 @@
         remote()->transact(SET_PREVIEW_CALLBACK_FLAG, data, &reply);
     }
 
+    status_t setPreviewCallbackTarget(
+            const sp<IGraphicBufferProducer>& callbackProducer)
+    {
+        ALOGV("setPreviewCallbackTarget");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
+        sp<IBinder> b(callbackProducer->asBinder());
+        data.writeStrongBinder(b);
+        remote()->transact(SET_PREVIEW_CALLBACK_TARGET, data, &reply);
+        return reply.readInt32();
+    }
+
     // start preview mode, must call setPreviewDisplay first
     status_t startPreview()
     {
@@ -268,6 +282,7 @@
             ALOGV("DISCONNECT");
             CHECK_INTERFACE(ICamera, data, reply);
             disconnect();
+            reply->writeNoException();
             return NO_ERROR;
         } break;
         case SET_PREVIEW_TEXTURE: {
@@ -285,6 +300,14 @@
             setPreviewCallbackFlag(callback_flag);
             return NO_ERROR;
         } break;
+        case SET_PREVIEW_CALLBACK_TARGET: {
+            ALOGV("SET_PREVIEW_CALLBACK_TARGET");
+            CHECK_INTERFACE(ICamera, data, reply);
+            sp<IGraphicBufferProducer> cp =
+                interface_cast<IGraphicBufferProducer>(data.readStrongBinder());
+            reply->writeInt32(setPreviewCallbackTarget(cp));
+            return NO_ERROR;
+        }
         case START_PREVIEW: {
             ALOGV("START_PREVIEW");
             CHECK_INTERFACE(ICamera, data, reply);
diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp
index 134f7f0..819e410 100644
--- a/camera/ICameraService.cpp
+++ b/camera/ICameraService.cpp
@@ -15,6 +15,9 @@
 ** limitations under the License.
 */
 
+#define LOG_TAG "BpCameraService"
+#include <utils/Log.h>
+
 #include <stdint.h>
 #include <sys/types.h>
 
@@ -31,6 +34,53 @@
 
 namespace android {
 
+namespace {
+
+enum {
+    EX_SECURITY = -1,
+    EX_BAD_PARCELABLE = -2,
+    EX_ILLEGAL_ARGUMENT = -3,
+    EX_NULL_POINTER = -4,
+    EX_ILLEGAL_STATE = -5,
+    EX_HAS_REPLY_HEADER = -128,  // special; see below
+};
+
+static bool readExceptionCode(Parcel& reply) {
+    int32_t exceptionCode = reply.readExceptionCode();
+
+    if (exceptionCode != 0) {
+        const char* errorMsg;
+        switch(exceptionCode) {
+            case EX_SECURITY:
+                errorMsg = "Security";
+                break;
+            case EX_BAD_PARCELABLE:
+                errorMsg = "BadParcelable";
+                break;
+            case EX_NULL_POINTER:
+                errorMsg = "NullPointer";
+                break;
+            case EX_ILLEGAL_STATE:
+                errorMsg = "IllegalState";
+                break;
+            // Binder should be handling this code inside Parcel::readException
+            // but lets have a to-string here anyway just in case.
+            case EX_HAS_REPLY_HEADER:
+                errorMsg = "HasReplyHeader";
+                break;
+            default:
+                errorMsg = "Unknown";
+        }
+
+        ALOGE("Binder transmission error %s (%d)", errorMsg, exceptionCode);
+        return true;
+    }
+
+    return false;
+}
+
+};
+
 class BpCameraService: public BpInterface<ICameraService>
 {
 public:
@@ -45,6 +95,8 @@
         Parcel data, reply;
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
         remote()->transact(BnCameraService::GET_NUMBER_OF_CAMERAS, data, &reply);
+
+        if (readExceptionCode(reply)) return 0;
         return reply.readInt32();
     }
 
@@ -55,9 +107,14 @@
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
         data.writeInt32(cameraId);
         remote()->transact(BnCameraService::GET_CAMERA_INFO, data, &reply);
-        cameraInfo->facing = reply.readInt32();
-        cameraInfo->orientation = reply.readInt32();
-        return reply.readInt32();
+
+        if (readExceptionCode(reply)) return -EPROTO;
+        status_t result = reply.readInt32();
+        if (reply.readInt32() != 0) {
+            cameraInfo->facing = reply.readInt32();
+            cameraInfo->orientation = reply.readInt32();
+        }
+        return result;
     }
 
     // connect to camera service
@@ -71,6 +128,8 @@
         data.writeString16(clientPackageName);
         data.writeInt32(clientUid);
         remote()->transact(BnCameraService::CONNECT, data, &reply);
+
+        if (readExceptionCode(reply)) return NULL;
         return interface_cast<ICamera>(reply.readStrongBinder());
     }
 
@@ -85,6 +144,8 @@
         data.writeString16(clientPackageName);
         data.writeInt32(clientUid);
         remote()->transact(BnCameraService::CONNECT_PRO, data, &reply);
+
+        if (readExceptionCode(reply)) return NULL;
         return interface_cast<IProCameraUser>(reply.readStrongBinder());
     }
 
@@ -94,6 +155,8 @@
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
         data.writeStrongBinder(listener->asBinder());
         remote()->transact(BnCameraService::ADD_LISTENER, data, &reply);
+
+        if (readExceptionCode(reply)) return -EPROTO;
         return reply.readInt32();
     }
 
@@ -103,6 +166,8 @@
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
         data.writeStrongBinder(listener->asBinder());
         remote()->transact(BnCameraService::REMOVE_LISTENER, data, &reply);
+
+        if (readExceptionCode(reply)) return -EPROTO;
         return reply.readInt32();
     }
 };
@@ -117,17 +182,22 @@
     switch(code) {
         case GET_NUMBER_OF_CAMERAS: {
             CHECK_INTERFACE(ICameraService, data, reply);
+            reply->writeNoException();
             reply->writeInt32(getNumberOfCameras());
             return NO_ERROR;
         } break;
         case GET_CAMERA_INFO: {
             CHECK_INTERFACE(ICameraService, data, reply);
-            CameraInfo cameraInfo;
+            CameraInfo cameraInfo = CameraInfo();
             memset(&cameraInfo, 0, sizeof(cameraInfo));
             status_t result = getCameraInfo(data.readInt32(), &cameraInfo);
+            reply->writeNoException();
+            reply->writeInt32(result);
+
+            // Fake a parcelable object here
+            reply->writeInt32(1); // means the parcelable is included
             reply->writeInt32(cameraInfo.facing);
             reply->writeInt32(cameraInfo.orientation);
-            reply->writeInt32(result);
             return NO_ERROR;
         } break;
         case CONNECT: {
@@ -139,17 +209,20 @@
             int32_t clientUid = data.readInt32();
             sp<ICamera> camera = connect(cameraClient, cameraId,
                     clientName, clientUid);
+            reply->writeNoException();
             reply->writeStrongBinder(camera->asBinder());
             return NO_ERROR;
         } break;
         case CONNECT_PRO: {
             CHECK_INTERFACE(ICameraService, data, reply);
-            sp<IProCameraCallbacks> cameraClient = interface_cast<IProCameraCallbacks>(data.readStrongBinder());
+            sp<IProCameraCallbacks> cameraClient =
+                interface_cast<IProCameraCallbacks>(data.readStrongBinder());
             int32_t cameraId = data.readInt32();
             const String16 clientName = data.readString16();
             int32_t clientUid = data.readInt32();
             sp<IProCameraUser> camera = connect(cameraClient, cameraId,
                                                 clientName, clientUid);
+            reply->writeNoException();
             reply->writeStrongBinder(camera->asBinder());
             return NO_ERROR;
         } break;
@@ -157,6 +230,7 @@
             CHECK_INTERFACE(ICameraService, data, reply);
             sp<ICameraServiceListener> listener =
                 interface_cast<ICameraServiceListener>(data.readStrongBinder());
+            reply->writeNoException();
             reply->writeInt32(addListener(listener));
             return NO_ERROR;
         } break;
@@ -164,6 +238,7 @@
             CHECK_INTERFACE(ICameraService, data, reply);
             sp<ICameraServiceListener> listener =
                 interface_cast<ICameraServiceListener>(data.readStrongBinder());
+            reply->writeNoException();
             reply->writeInt32(removeListener(listener));
             return NO_ERROR;
         } break;
diff --git a/camera/ICameraServiceListener.cpp b/camera/ICameraServiceListener.cpp
index 640ee35..b2f1729 100644
--- a/camera/ICameraServiceListener.cpp
+++ b/camera/ICameraServiceListener.cpp
@@ -54,6 +54,8 @@
                            data,
                            &reply,
                            IBinder::FLAG_ONEWAY);
+
+        reply.readExceptionCode();
     }
 };
 
@@ -73,6 +75,7 @@
             int32_t cameraId = data.readInt32();
 
             onStatusChanged(status, cameraId);
+            reply->writeNoException();
 
             return NO_ERROR;
         } break;
diff --git a/camera/IProCameraUser.cpp b/camera/IProCameraUser.cpp
index 4c4dec3..015cb5c 100644
--- a/camera/IProCameraUser.cpp
+++ b/camera/IProCameraUser.cpp
@@ -162,6 +162,7 @@
         Parcel data, reply;
         data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
         remote()->transact(DISCONNECT, data, &reply);
+        reply.readExceptionCode();
     }
 
     virtual status_t connect(const sp<IProCameraCallbacks>& cameraClient)
@@ -307,6 +308,7 @@
             ALOGV("DISCONNECT");
             CHECK_INTERFACE(IProCameraUser, data, reply);
             disconnect();
+            reply->writeNoException();
             return NO_ERROR;
         } break;
         case CONNECT: {
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index 3844487..1060131 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -19,8 +19,6 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
-
 LOCAL_MODULE:= stagefright
 
 include $(BUILD_EXECUTABLE)
@@ -42,7 +40,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= record
 
@@ -65,7 +63,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= recordvideo
 
@@ -89,7 +87,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= audioloop
 
@@ -112,7 +110,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= stream
 
@@ -135,7 +133,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= sf2
 
@@ -159,7 +157,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= codec
 
@@ -182,7 +180,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= muxer
 
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 115b07c..924cf6d 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -30,8 +30,6 @@
 #include <binder/ProcessState.h>
 #include <media/IMediaPlayerService.h>
 #include <media/stagefright/foundation/ALooper.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include "include/LiveSession.h"
 #include "include/NuCachedSource2.h"
 #include <media/stagefright/AudioPlayer.h>
 #include <media/stagefright/DataSource.h>
@@ -678,7 +676,6 @@
     gDisplayHistogram = false;
 
     sp<ALooper> looper;
-    sp<LiveSession> liveSession;
 
     int res;
     while ((res = getopt(argc, argv, "han:lm:b:ptsrow:kxSTd:D:")) >= 0) {
@@ -961,9 +958,7 @@
 
         sp<DataSource> dataSource = DataSource::CreateFromURI(filename);
 
-        if (strncasecmp(filename, "sine:", 5)
-                && strncasecmp(filename, "httplive://", 11)
-                && dataSource == NULL) {
+        if (strncasecmp(filename, "sine:", 5) && dataSource == NULL) {
             fprintf(stderr, "Unable to create data source.\n");
             return 1;
         }
@@ -995,44 +990,21 @@
                 mediaSources.push(mediaSource);
             }
         } else {
-            sp<MediaExtractor> extractor;
+            sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
 
-            if (!strncasecmp("httplive://", filename, 11)) {
-                String8 uri("http://");
-                uri.append(filename + 11);
+            if (extractor == NULL) {
+                fprintf(stderr, "could not create extractor.\n");
+                return -1;
+            }
 
-                if (looper == NULL) {
-                    looper = new ALooper;
-                    looper->start();
-                }
-                liveSession = new LiveSession(NULL /* notify */);
-                looper->registerHandler(liveSession);
+            sp<MetaData> meta = extractor->getMetaData();
 
-                liveSession->connect(uri.string());
-                dataSource = liveSession->getDataSource();
+            if (meta != NULL) {
+                const char *mime;
+                CHECK(meta->findCString(kKeyMIMEType, &mime));
 
-                extractor =
-                    MediaExtractor::Create(
-                            dataSource, MEDIA_MIMETYPE_CONTAINER_MPEG2TS);
-
-                syncInfoPresent = false;
-            } else {
-                extractor = MediaExtractor::Create(dataSource);
-
-                if (extractor == NULL) {
-                    fprintf(stderr, "could not create extractor.\n");
-                    return -1;
-                }
-
-                sp<MetaData> meta = extractor->getMetaData();
-
-                if (meta != NULL) {
-                    const char *mime;
-                    CHECK(meta->findCString(kKeyMIMEType, &mime));
-
-                    if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
-                        syncInfoPresent = false;
-                    }
+                if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
+                    syncInfoPresent = false;
                 }
             }
 
diff --git a/include/camera/Camera.h b/include/camera/Camera.h
index 37626a4..c34b3ea 100644
--- a/include/camera/Camera.h
+++ b/include/camera/Camera.h
@@ -121,7 +121,15 @@
 
             void        setListener(const sp<CameraListener>& listener);
             void        setRecordingProxyListener(const sp<ICameraRecordingProxyListener>& listener);
+
+            // Configure preview callbacks to app. Only one of the older
+            // callbacks or the callback surface can be active at the same time;
+            // enabling one will disable the other if active. Flags can be
+            // disabled by calling it with CAMERA_FRAME_CALLBACK_FLAG_NOOP, and
+            // Target by calling it with a NULL interface.
             void        setPreviewCallbackFlags(int preview_callback_flag);
+            status_t    setPreviewCallbackTarget(
+                    const sp<IGraphicBufferProducer>& callbackProducer);
 
             sp<ICameraRecordingProxy> getRecordingProxy();
 
diff --git a/include/camera/ICamera.h b/include/camera/ICamera.h
index 2236c1f..f3a186e 100644
--- a/include/camera/ICamera.h
+++ b/include/camera/ICamera.h
@@ -32,6 +32,9 @@
 
 class ICamera: public IInterface
 {
+    /**
+     * Keep up-to-date with ICamera.aidl in frameworks/base
+     */
 public:
     DECLARE_META_INTERFACE(Camera);
 
@@ -51,8 +54,15 @@
             const sp<IGraphicBufferProducer>& bufferProducer) = 0;
 
     // set the preview callback flag to affect how the received frames from
-    // preview are handled.
+    // preview are handled. Enabling preview callback flags disables any active
+    // preview callback surface set by setPreviewCallbackTarget().
     virtual void            setPreviewCallbackFlag(int flag) = 0;
+    // set a buffer interface to use for client-received preview frames instead
+    // of preview callback buffers. Passing a valid interface here disables any
+    // active preview callbacks set by setPreviewCallbackFlag(). Passing NULL
+    // disables the use of the callback target.
+    virtual status_t        setPreviewCallbackTarget(
+            const sp<IGraphicBufferProducer>& callbackProducer) = 0;
 
     // start preview mode, must call setPreviewDisplay first
     virtual status_t        startPreview() = 0;
diff --git a/include/camera/ICameraClient.h b/include/camera/ICameraClient.h
index b30aa7a..1584dba 100644
--- a/include/camera/ICameraClient.h
+++ b/include/camera/ICameraClient.h
@@ -28,6 +28,9 @@
 
 class ICameraClient: public IInterface
 {
+    /**
+     * Keep up-to-date with ICameraClient.aidl in frameworks/base
+     */
 public:
     DECLARE_META_INTERFACE(CameraClient);
 
diff --git a/include/camera/ICameraService.h b/include/camera/ICameraService.h
index aaf6eb3..3c2e60a 100644
--- a/include/camera/ICameraService.h
+++ b/include/camera/ICameraService.h
@@ -32,6 +32,9 @@
 class ICameraService : public IInterface
 {
 public:
+    /**
+     * Keep up-to-date with ICameraService.aidl in frameworks/base
+     */
     enum {
         GET_NUMBER_OF_CAMERAS = IBinder::FIRST_CALL_TRANSACTION,
         GET_CAMERA_INFO,
diff --git a/include/camera/ICameraServiceListener.h b/include/camera/ICameraServiceListener.h
index f2a11c2..0a0e43a 100644
--- a/include/camera/ICameraServiceListener.h
+++ b/include/camera/ICameraServiceListener.h
@@ -26,6 +26,9 @@
 
 class ICameraServiceListener : public IInterface
 {
+    /**
+     * Keep up-to-date with ICameraServiceListener.aidl in frameworks/base
+     */
 public:
 
     /**
diff --git a/include/camera/IProCameraCallbacks.h b/include/camera/IProCameraCallbacks.h
index 563ec17..c774698 100644
--- a/include/camera/IProCameraCallbacks.h
+++ b/include/camera/IProCameraCallbacks.h
@@ -30,6 +30,9 @@
 
 class IProCameraCallbacks : public IInterface
 {
+    /**
+     * Keep up-to-date with IProCameraCallbacks.aidl in frameworks/base
+     */
 public:
     DECLARE_META_INTERFACE(ProCameraCallbacks);
 
diff --git a/include/camera/IProCameraUser.h b/include/camera/IProCameraUser.h
index 45b818c..2ccc4d2 100644
--- a/include/camera/IProCameraUser.h
+++ b/include/camera/IProCameraUser.h
@@ -34,6 +34,9 @@
 
 class IProCameraUser: public IInterface
 {
+    /**
+     * Keep up-to-date with IProCameraUser.aidl in frameworks/base
+     */
 public:
     DECLARE_META_INTERFACE(ProCameraUser);
 
diff --git a/include/media/AudioBufferProvider.h b/include/media/AudioBufferProvider.h
index 43e4de7..ef392f0 100644
--- a/include/media/AudioBufferProvider.h
+++ b/include/media/AudioBufferProvider.h
@@ -26,6 +26,8 @@
 {
 public:
 
+    // FIXME merge with AudioTrackShared::Buffer, AudioTrack::Buffer, and AudioRecord::Buffer
+    //       and rename getNextBuffer() to obtainBuffer()
     struct Buffer {
         Buffer() : raw(NULL), frameCount(0) { }
         union {
@@ -44,6 +46,19 @@
     // pts is the local time when the next sample yielded by getNextBuffer
     // will be rendered.
     // Pass kInvalidPTS if the PTS is unknown or not applicable.
+    // On entry:
+    //  buffer              != NULL
+    //  buffer->raw         unused
+    //  buffer->frameCount  maximum number of desired frames
+    // On successful return:
+    //  status              NO_ERROR
+    //  buffer->raw         non-NULL pointer to buffer->frameCount contiguous available frames
+    //  buffer->frameCount  number of contiguous available frames at buffer->raw,
+    //                      0 < buffer->frameCount <= entry value
+    // On error return:
+    //  status              != NO_ERROR
+    //  buffer->raw         NULL
+    //  buffer->frameCount  0
     virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) = 0;
 
     virtual void releaseBuffer(Buffer* buffer) = 0;
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 38c6548..7aa3c24 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -14,26 +14,24 @@
  * limitations under the License.
  */
 
-#ifndef AUDIORECORD_H_
-#define AUDIORECORD_H_
+#ifndef ANDROID_AUDIORECORD_H
+#define ANDROID_AUDIORECORD_H
 
-#include <binder/IMemory.h>
 #include <cutils/sched_policy.h>
 #include <media/AudioSystem.h>
 #include <media/IAudioRecord.h>
-#include <system/audio.h>
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
 #include <utils/threads.h>
 
 namespace android {
 
+// ----------------------------------------------------------------------------
+
 class audio_track_cblk_t;
 class AudioRecordClientProxy;
 
 // ----------------------------------------------------------------------------
 
-class AudioRecord : virtual public RefBase
+class AudioRecord : public RefBase
 {
 public:
 
@@ -49,6 +47,8 @@
                                     // (See setMarkerPosition()).
         EVENT_NEW_POS = 3,          // Record head is at a new position
                                     // (See setPositionUpdatePeriod()).
+        EVENT_NEW_IAUDIORECORD = 4, // IAudioRecord was re-created, either due to re-routing and
+                                    // voluntary invalidation by mediaserver, or mediaserver crash.
     };
 
     /* Client should declare Buffer on the stack and pass address to obtainBuffer()
@@ -58,11 +58,16 @@
     class Buffer
     {
     public:
+        // FIXME use m prefix
         size_t      frameCount;     // number of sample frames corresponding to size;
                                     // on input it is the number of frames available,
                                     // on output is the number of frames actually drained
 
-        size_t      size;           // total size in bytes == frameCount * frameSize
+        size_t      size;           // input/output in bytes == frameCount * frameSize
+                                    // FIXME this is redundant with respect to frameCount,
+                                    // and TRANSFER_OBTAIN mode is broken for 8-bit data
+                                    // since we don't define the frame format
+
         union {
             void*       raw;
             short*      i16;        // signed 16-bit
@@ -84,6 +89,7 @@
      *          - EVENT_OVERRUN: unused.
      *          - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames.
      *          - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames.
+     *          - EVENT_NEW_IAUDIORECORD: unused.
      */
 
     typedef void (*callback_t)(int event, void* user, void *info);
@@ -101,20 +107,28 @@
                                       audio_format_t format,
                                       audio_channel_mask_t channelMask);
 
+    /* How data is transferred from AudioRecord
+     */
+    enum transfer_type {
+        TRANSFER_DEFAULT,   // not specified explicitly; determine from other parameters
+        TRANSFER_CALLBACK,  // callback EVENT_MORE_DATA
+        TRANSFER_OBTAIN,    // FIXME deprecated: call obtainBuffer() and releaseBuffer()
+        TRANSFER_SYNC,      // synchronous read()
+    };
+
     /* Constructs an uninitialized AudioRecord. No connection with
-     * AudioFlinger takes place.
+     * AudioFlinger takes place.  Use set() after this.
      */
                         AudioRecord();
 
     /* Creates an AudioRecord object and registers it with AudioFlinger.
      * Once created, the track needs to be started before it can be used.
-     * Unspecified values are set to the audio hardware's current
-     * values.
+     * Unspecified values are set to appropriate default values.
      *
      * Parameters:
      *
-     * inputSource:        Select the audio input to record to (e.g. AUDIO_SOURCE_DEFAULT).
-     * sampleRate:         Track sampling rate in Hz.
+     * inputSource:        Select the audio input to record from (e.g. AUDIO_SOURCE_DEFAULT).
+     * sampleRate:         Data sink sampling rate in Hz.
      * format:             Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
      *                     16 bits per sample).
      * channelMask:        Channel mask.
@@ -124,11 +138,13 @@
      *                     be larger if the requested size is not compatible with current audio HAL
      *                     latency.  Zero means to use a default value.
      * cbf:                Callback function. If not null, this function is called periodically
-     *                     to consume new PCM data.
+     *                     to consume new PCM data and inform of marker, position updates, etc.
      * user:               Context for use by the callback receiver.
      * notificationFrames: The callback function is called each time notificationFrames PCM
      *                     frames are ready in record track output buffer.
      * sessionId:          Not yet supported.
+     * transferType:       How data is transferred from AudioRecord.
+     * threadCanCallJava:  Not present in parameter list, and so is fixed at false.
      */
 
                         AudioRecord(audio_source_t inputSource,
@@ -139,22 +155,28 @@
                                     callback_t cbf = NULL,
                                     void* user = NULL,
                                     int notificationFrames = 0,
-                                    int sessionId = 0);
-
+                                    int sessionId = 0,
+                                    transfer_type transferType = TRANSFER_DEFAULT);
 
     /* Terminates the AudioRecord and unregisters it from AudioFlinger.
      * Also destroys all resources associated with the AudioRecord.
      */
-                        ~AudioRecord();
+protected:
+                        virtual ~AudioRecord();
+public:
 
-
-    /* Initialize an uninitialized AudioRecord.
+    /* Initialize an AudioRecord that was created using the AudioRecord() constructor.
+     * Don't call set() more than once, or after an AudioRecord() constructor that takes parameters.
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful intialization
-     *  - INVALID_OPERATION: AudioRecord is already intitialized or record device is already in use
+     *  - INVALID_OPERATION: AudioRecord is already initialized or record device is already in use
      *  - BAD_VALUE: invalid parameter (channels, format, sampleRate...)
      *  - NO_INIT: audio server or audio hardware not initialized
      *  - PERMISSION_DENIED: recording is not allowed for the requesting process
+     *
+     * Parameters not listed in the AudioRecord constructors above:
+     *
+     * threadCanCallJava:  Whether callbacks are made from an attached thread and thus can call JNI.
      */
             status_t    set(audio_source_t inputSource = AUDIO_SOURCE_DEFAULT,
                             uint32_t sampleRate = 0,
@@ -165,30 +187,29 @@
                             void* user = NULL,
                             int notificationFrames = 0,
                             bool threadCanCallJava = false,
-                            int sessionId = 0);
-
+                            int sessionId = 0,
+                            transfer_type transferType = TRANSFER_DEFAULT);
 
     /* Result of constructing the AudioRecord. This must be checked
      * before using any AudioRecord API (except for set()), because using
      * an uninitialized AudioRecord produces undefined results.
      * See set() method above for possible return codes.
      */
-            status_t    initCheck() const;
+            status_t    initCheck() const   { return mStatus; }
 
     /* Returns this track's estimated latency in milliseconds.
      * This includes the latency due to AudioRecord buffer size,
      * and audio hardware driver.
      */
-            uint32_t     latency() const;
+            uint32_t    latency() const     { return mLatency; }
 
    /* getters, see constructor and set() */
 
-            audio_format_t format() const;
-            uint32_t    channelCount() const;
-            size_t      frameCount() const;
-            size_t      frameSize() const { return mFrameSize; }
-            audio_source_t inputSource() const;
-
+            audio_format_t format() const   { return mFormat; }
+            uint32_t    channelCount() const    { return mChannelCount; }
+            size_t      frameCount() const  { return mFrameCount; }
+            size_t      frameSize() const   { return mFrameSize; }
+            audio_source_t inputSource() const  { return mInputSource; }
 
     /* After it's created the track is not active. Call start() to
      * make it active. If set, the callback will start being called.
@@ -198,26 +219,29 @@
             status_t    start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
                               int triggerSession = 0);
 
-    /* Stop a track. If set, the callback will cease being called and
-     * obtainBuffer returns STOPPED. Note that obtainBuffer() still works
-     * and will drain buffers until the pool is exhausted.
+    /* Stop a track. If set, the callback will cease being called.  Note that obtainBuffer() still
+     * works and will drain buffers until the pool is exhausted, and then will return WOULD_BLOCK.
      */
             void        stop();
             bool        stopped() const;
 
-    /* Get sample rate for this record track in Hz.
+    /* Return the sink sample rate for this record track in Hz.
+     * Unlike AudioTrack, the sample rate is const after initialization, so doesn't need a lock.
      */
-            uint32_t    getSampleRate() const;
+            uint32_t    getSampleRate() const   { return mSampleRate; }
 
     /* Sets marker position. When record reaches the number of frames specified,
      * a callback with event type EVENT_MARKER is called. Calling setMarkerPosition
      * with marker == 0 cancels marker notification callback.
+     * To set a marker at a position which would compute as 0,
+     * a workaround is to the set the marker at a nearby position such as ~0 or 1.
      * If the AudioRecord has been opened with no callback function associated,
      * the operation will fail.
      *
      * Parameters:
      *
-     * marker:   marker position expressed in frames.
+     * marker:   marker position expressed in wrapping (overflow) frame units,
+     *           like the return value of getPosition().
      *
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful operation
@@ -226,13 +250,13 @@
             status_t    setMarkerPosition(uint32_t marker);
             status_t    getMarkerPosition(uint32_t *marker) const;
 
-
     /* Sets position update period. Every time the number of frames specified has been recorded,
      * a callback with event type EVENT_NEW_POS is called.
      * Calling setPositionUpdatePeriod with updatePeriod == 0 cancels new position notification
      * callback.
      * If the AudioRecord has been opened with no callback function associated,
      * the operation will fail.
+     * Extremely small values may be rounded up to a value the implementation can support.
      *
      * Parameters:
      *
@@ -245,13 +269,13 @@
             status_t    setPositionUpdatePeriod(uint32_t updatePeriod);
             status_t    getPositionUpdatePeriod(uint32_t *updatePeriod) const;
 
-
-    /* Gets record head position. The position is the total number of frames
-     * recorded since record start.
+    /* Return the total number of frames recorded since recording started.
+     * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
+     * It is reset to zero by stop().
      *
      * Parameters:
      *
-     *  position:  Address where to return record head position within AudioRecord buffer.
+     *  position:  Address where to return record head position.
      *
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful operation
@@ -276,38 +300,70 @@
      *
      * Returned value:
      *  AudioRecord session ID.
+     *
+     * No lock needed because session ID doesn't change after first set().
      */
-            int    getSessionId() const;
+            int    getSessionId() const { return mSessionId; }
 
-    /* Obtains a buffer of "frameCount" frames. The buffer must be
-     * drained entirely, and then released with releaseBuffer().
-     * If the track is stopped, obtainBuffer() returns
-     * STOPPED instead of NO_ERROR as long as there are buffers available,
-     * at which point NO_MORE_BUFFERS is returned.
+    /* Obtains a buffer of up to "audioBuffer->frameCount" full frames.
+     * After draining these frames of data, the caller should release them with releaseBuffer().
+     * If the track buffer is not empty, obtainBuffer() returns as many contiguous
+     * full frames as are available immediately.
+     * If the track buffer is empty and track is stopped, obtainBuffer() returns WOULD_BLOCK
+     * regardless of the value of waitCount.
+     * If the track buffer is empty and track is not stopped, obtainBuffer() blocks with a
+     * maximum timeout based on waitCount; see chart below.
      * Buffers will be returned until the pool
      * is exhausted, at which point obtainBuffer() will either block
-     * or return WOULD_BLOCK depending on the value of the "blocking"
+     * or return WOULD_BLOCK depending on the value of the "waitCount"
      * parameter.
      *
+     * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications,
+     * which should use read() or callback EVENT_MORE_DATA instead.
+     *
      * Interpretation of waitCount:
      *  +n  limits wait time to n * WAIT_PERIOD_MS,
      *  -1  causes an (almost) infinite wait time,
      *   0  non-blocking.
+     *
+     * Buffer fields
+     * On entry:
+     *  frameCount  number of frames requested
+     * After error return:
+     *  frameCount  0
+     *  size        0
+     *  raw         undefined
+     * After successful return:
+     *  frameCount  actual number of frames available, <= number requested
+     *  size        actual number of bytes available
+     *  raw         pointer to the buffer
      */
 
-        enum {
-            NO_MORE_BUFFERS = 0x80000001,   // same name in AudioFlinger.h, ok to be different value
-            STOPPED = 1
-        };
+    /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */
+            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
+                                __attribute__((__deprecated__));
 
-            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount);
+private:
+    /* New internal API.
+     * If nonContig is non-NULL, it is an output parameter that will be set to the number of
+     * additional non-contiguous frames that are available immediately.
+     * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
+     * in case the requested amount of frames is in two or more non-contiguous regions.
+     * FIXME requested and elapsed are both relative times.  Consider changing to absolute time.
+     */
+            status_t    obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
+                                     struct timespec *elapsed = NULL, size_t *nonContig = NULL);
+public:
 
-    /* Release an emptied buffer of "frameCount" frames for AudioFlinger to re-fill. */
+    /* Release an emptied buffer of "audioBuffer->frameCount" frames for AudioFlinger to re-fill. */
+    // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed
             void        releaseBuffer(Buffer* audioBuffer);
 
-
     /* As a convenience we provide a read() interface to the audio buffer.
-     * This is implemented on top of obtainBuffer/releaseBuffer.
+     * Input parameter 'size' is in byte units.
+     * This is implemented on top of obtainBuffer/releaseBuffer. For best
+     * performance use callbacks. Returns actual number of bytes read >= 0,
+     * or a negative status code.
      */
             ssize_t     read(void* buffer, size_t size);
 
@@ -336,68 +392,113 @@
 
                 void        pause();    // suspend thread from execution at next loop boundary
                 void        resume();   // allow thread to execute, if not requested to exit
+                void        pauseConditional();
+                                        // like pause(), but only if prior resume() wasn't latched
 
     private:
         friend class AudioRecord;
         virtual bool        threadLoop();
-        AudioRecord& mReceiver;
+        AudioRecord&        mReceiver;
         virtual ~AudioRecordThread();
         Mutex               mMyLock;    // Thread::mLock is private
         Condition           mMyCond;    // Thread::mThreadExitedCondition is private
         bool                mPaused;    // whether thread is currently paused
+        bool                mResumeLatch;   // whether next pauseConditional() will be a nop
     };
 
             // body of AudioRecordThread::threadLoop()
-            bool processAudioBuffer(const sp<AudioRecordThread>& thread);
+            // returns the maximum amount of time before we would like to run again, where:
+            //      0           immediately
+            //      > 0         no later than this many nanoseconds from now
+            //      NS_WHENEVER still active but no particular deadline
+            //      NS_INACTIVE inactive so don't run again until re-started
+            //      NS_NEVER    never again
+            static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
+            nsecs_t processAudioBuffer(const sp<AudioRecordThread>& thread);
 
+            // caller must hold lock on mLock for all _l methods
             status_t openRecord_l(uint32_t sampleRate,
                                 audio_format_t format,
                                 size_t frameCount,
-                                audio_io_handle_t input);
+                                audio_io_handle_t input,
+                                size_t epoch);
+
             audio_io_handle_t getInput_l();
-            status_t restoreRecord_l(audio_track_cblk_t*& cblk);
+
+            // FIXME enum is faster than strcmp() for parameter 'from'
+            status_t restoreRecord_l(const char *from);
 
     sp<AudioRecordThread>   mAudioRecordThread;
     mutable Mutex           mLock;
 
-    bool                    mActive;            // protected by mLock
+    // Current client state:  false = stopped, true = active.  Protected by mLock.  If more states
+    // are added, consider changing this to enum State { ... } mState as in AudioTrack.
+    bool                    mActive;
 
     // for client callback handler
     callback_t              mCbf;               // callback handler for events, or NULL
-    void*                   mUserData;
+    void*                   mUserData;          // for client callback handler
 
     // for notification APIs
-    uint32_t                mNotificationFrames;
-    uint32_t                mRemainingFrames;
-    uint32_t                mMarkerPosition;    // in frames
+    uint32_t                mNotificationFrames; // frames between each notification callback
+    bool                    mRefreshRemaining;  // processAudioBuffer() should refresh next 2
+
+    // These are private to processAudioBuffer(), and are not protected by a lock
+    uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
+    bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
+    int                     mObservedSequence;      // last observed value of mSequence
+
+    uint32_t                mMarkerPosition;    // in wrapping (overflow) frame units
     bool                    mMarkerReached;
     uint32_t                mNewPosition;       // in frames
-    uint32_t                mUpdatePeriod;      // in ms
+    uint32_t                mUpdatePeriod;      // in frames, zero means no EVENT_NEW_POS
+
+    status_t                mStatus;
 
     // constant after constructor or set()
     uint32_t                mSampleRate;
     size_t                  mFrameCount;
     audio_format_t          mFormat;
-    uint8_t                 mChannelCount;
+    uint32_t                mChannelCount;
     size_t                  mFrameSize;         // app-level frame size == AudioFlinger frame size
     audio_source_t          mInputSource;
-    status_t                mStatus;
-    uint32_t                mLatency;
+    uint32_t                mLatency;           // in ms
     audio_channel_mask_t    mChannelMask;
-    audio_io_handle_t       mInput;                     // returned by AudioSystem::getInput()
     int                     mSessionId;
+    transfer_type           mTransfer;
+
+    audio_io_handle_t       mInput;             // returned by AudioSystem::getInput()
 
     // may be changed if IAudioRecord object is re-created
     sp<IAudioRecord>        mAudioRecord;
     sp<IMemory>             mCblkMemory;
-    audio_track_cblk_t*     mCblk;
-    void*                   mBuffers;           // starting address of buffers in shared memory
+    audio_track_cblk_t*     mCblk;              // re-load after mLock.unlock()
 
-    int                     mPreviousPriority;          // before start()
+    int                     mPreviousPriority;  // before start()
     SchedPolicy             mPreviousSchedulingGroup;
-    AudioRecordClientProxy* mProxy;
+
+    // The proxy should only be referenced while a lock is held because the proxy isn't
+    // multi-thread safe.
+    // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock,
+    // provided that the caller also holds an extra reference to the proxy and shared memory to keep them alive.
+    sp<AudioRecordClientProxy> mProxy;
+
+    bool                    mInOverrun;         // whether recorder is currently in overrun state
+
+private:
+    class DeathNotifier : public IBinder::DeathRecipient {
+    public:
+        DeathNotifier(AudioRecord* audioRecord) : mAudioRecord(audioRecord) { }
+    protected:
+        virtual void        binderDied(const wp<IBinder>& who);
+    private:
+        const wp<AudioRecord> mAudioRecord;
+    };
+
+    sp<DeathNotifier>       mDeathNotifier;
+    uint32_t                mSequence;              // incremented for each new IAudioRecord attempt
 };
 
 }; // namespace android
 
-#endif /*AUDIORECORD_H_*/
+#endif // ANDROID_AUDIORECORD_H
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 64f82bb..e9bb76a 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -17,18 +17,9 @@
 #ifndef ANDROID_AUDIOTRACK_H
 #define ANDROID_AUDIOTRACK_H
 
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <media/IAudioFlinger.h>
-#include <media/IAudioTrack.h>
-#include <media/AudioSystem.h>
-
-#include <utils/RefBase.h>
-#include <utils/Errors.h>
-#include <binder/IInterface.h>
-#include <binder/IMemory.h>
 #include <cutils/sched_policy.h>
+#include <media/AudioSystem.h>
+#include <media/IAudioTrack.h>
 #include <utils/threads.h>
 
 namespace android {
@@ -37,10 +28,11 @@
 
 class audio_track_cblk_t;
 class AudioTrackClientProxy;
+class StaticAudioTrackClientProxy;
 
 // ----------------------------------------------------------------------------
 
-class AudioTrack : virtual public RefBase
+class AudioTrack : public RefBase
 {
 public:
     enum channel_index {
@@ -49,7 +41,7 @@
         RIGHT  = 1
     };
 
-    /* Events used by AudioTrack callback function (audio_track_cblk_t).
+    /* Events used by AudioTrack callback function (callback_t).
      * Keep in sync with frameworks/base/media/java/android/media/AudioTrack.java NATIVE_EVENT_*.
      */
     enum event_type {
@@ -64,7 +56,10 @@
                                     // (See setMarkerPosition()).
         EVENT_NEW_POS = 4,          // Playback head is at a new position
                                     // (See setPositionUpdatePeriod()).
-        EVENT_BUFFER_END = 5        // Playback head is at the end of the buffer.
+        EVENT_BUFFER_END = 5,       // Playback head is at the end of the buffer.
+                                    // Not currently used by android.media.AudioTrack.
+        EVENT_NEW_IAUDIOTRACK = 6,  // IAudioTrack was re-created, either due to re-routing and
+                                    // voluntary invalidation by mediaserver, or mediaserver crash.
     };
 
     /* Client should declare Buffer on the stack and pass address to obtainBuffer()
@@ -74,19 +69,23 @@
     class Buffer
     {
     public:
+        // FIXME use m prefix
         size_t      frameCount;   // number of sample frames corresponding to size;
                                   // on input it is the number of frames desired,
                                   // on output is the number of frames actually filled
 
-        size_t      size;         // input/output in byte units
+        size_t      size;         // input/output in bytes == frameCount * frameSize
+                                  // FIXME this is redundant with respect to frameCount,
+                                  // and TRANSFER_OBTAIN mode is broken for 8-bit data
+                                  // since we don't define the frame format
+
         union {
             void*       raw;
-            short*      i16;    // signed 16-bit
-            int8_t*     i8;     // unsigned 8-bit, offset by 0x80
+            short*      i16;      // signed 16-bit
+            int8_t*     i8;       // unsigned 8-bit, offset by 0x80
         };
     };
 
-
     /* As a convenience, if a callback is supplied, a handler thread
      * is automatically created with the appropriate priority. This thread
      * invokes the callback when a new buffer becomes available or various conditions occur.
@@ -100,9 +99,10 @@
      *            written.
      *          - EVENT_UNDERRUN: unused.
      *          - EVENT_LOOP_END: pointer to an int indicating the number of loops remaining.
-     *          - EVENT_MARKER: pointer to an uint32_t containing the marker position in frames.
-     *          - EVENT_NEW_POS: pointer to an uint32_t containing the new position in frames.
+     *          - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames.
+     *          - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames.
      *          - EVENT_BUFFER_END: unused.
+     *          - EVENT_NEW_IAUDIOTRACK: unused.
      */
 
     typedef void (*callback_t)(int event, void* user, void *info);
@@ -114,9 +114,19 @@
      *  - NO_INIT: audio server or audio hardware not initialized
      */
 
-     static status_t getMinFrameCount(size_t* frameCount,
-                                      audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT,
-                                      uint32_t sampleRate = 0);
+    static status_t getMinFrameCount(size_t* frameCount,
+                                     audio_stream_type_t streamType,
+                                     uint32_t sampleRate);
+
+    /* How data is transferred to AudioTrack
+     */
+    enum transfer_type {
+        TRANSFER_DEFAULT,   // not specified explicitly; determine from the other parameters
+        TRANSFER_CALLBACK,  // callback EVENT_MORE_DATA
+        TRANSFER_OBTAIN,    // FIXME deprecated: call obtainBuffer() and releaseBuffer()
+        TRANSFER_SYNC,      // synchronous write()
+        TRANSFER_SHARED,    // shared memory
+    };
 
     /* Constructs an uninitialized AudioTrack. No connection with
      * AudioFlinger takes place.  Use set() after this.
@@ -128,13 +138,13 @@
      * Unspecified values are set to appropriate default values.
      * With this constructor, the track is configured for streaming mode.
      * Data to be rendered is supplied by write() or by the callback EVENT_MORE_DATA.
-     * Intermixing a combination of write() and non-ignored EVENT_MORE_DATA is deprecated.
+     * Intermixing a combination of write() and non-ignored EVENT_MORE_DATA is not allowed.
      *
      * Parameters:
      *
      * streamType:         Select the type of audio stream this track is attached to
      *                     (e.g. AUDIO_STREAM_MUSIC).
-     * sampleRate:         Track sampling rate in Hz.
+     * sampleRate:         Data source sampling rate in Hz.
      * format:             Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
      *                     16 bits per sample).
      * channelMask:        Channel mask.
@@ -149,9 +159,10 @@
      * user:               Context for use by the callback receiver.
      * notificationFrames: The callback function is called each time notificationFrames PCM
      *                     frames have been consumed from track input buffer.
+     *                     This is expressed in units of frames at the initial source sample rate.
      * sessionId:          Specific session ID, or zero to use default.
-     * threadCanCallJava:  Whether callbacks are made from an attached thread and thus can call JNI.
-     *                     If not present in parameter list, then fixed at false.
+     * transferType:       How data is transferred to AudioTrack.
+     * threadCanCallJava:  Not present in parameter list, and so is fixed at false.
      */
 
                         AudioTrack( audio_stream_type_t streamType,
@@ -163,7 +174,8 @@
                                     callback_t cbf       = NULL,
                                     void* user           = NULL,
                                     int notificationFrames = 0,
-                                    int sessionId        = 0);
+                                    int sessionId        = 0,
+                                    transfer_type transferType = TRANSFER_DEFAULT);
 
     /* Creates an audio track and registers it with AudioFlinger.
      * With this constructor, the track is configured for static buffer mode.
@@ -174,7 +186,6 @@
      * The write() method is not supported in this case.
      * It is recommended to pass a callback function to be notified of playback end by an
      * EVENT_UNDERRUN event.
-     * FIXME EVENT_MORE_DATA still occurs; it must be ignored.
      */
 
                         AudioTrack( audio_stream_type_t streamType,
@@ -186,14 +197,18 @@
                                     callback_t cbf      = NULL,
                                     void* user          = NULL,
                                     int notificationFrames = 0,
-                                    int sessionId       = 0);
+                                    int sessionId       = 0,
+                                    transfer_type transferType = TRANSFER_DEFAULT);
 
     /* Terminates the AudioTrack and unregisters it from AudioFlinger.
      * Also destroys all resources associated with the AudioTrack.
      */
-                        ~AudioTrack();
+protected:
+                        virtual ~AudioTrack();
+public:
 
-    /* Initialize an uninitialized AudioTrack.
+    /* Initialize an AudioTrack that was created using the AudioTrack() constructor.
+     * Don't call set() more than once, or after the AudioTrack() constructors that take parameters.
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful initialization
      *  - INVALID_OPERATION: AudioTrack is already initialized
@@ -201,6 +216,10 @@
      *  - NO_INIT: audio server or audio hardware not initialized
      * If sharedBuffer is non-0, the frameCount parameter is ignored and
      * replaced by the shared buffer's total allocated size in frame units.
+     *
+     * Parameters not listed in the AudioTrack constructors above:
+     *
+     * threadCanCallJava:  Whether callbacks are made from an attached thread and thus can call JNI.
      */
             status_t    set(audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT,
                             uint32_t sampleRate = 0,
@@ -213,7 +232,8 @@
                             int notificationFrames = 0,
                             const sp<IMemory>& sharedBuffer = 0,
                             bool threadCanCallJava = false,
-                            int sessionId       = 0);
+                            int sessionId       = 0,
+                            transfer_type transferType = TRANSFER_DEFAULT);
 
     /* Result of constructing the AudioTrack. This must be checked
      * before using any AudioTrack API (except for set()), because using
@@ -233,15 +253,16 @@
             audio_stream_type_t streamType() const { return mStreamType; }
             audio_format_t format() const   { return mFormat; }
 
-    /* Return frame size in bytes, which for linear PCM is channelCount * (bit depth per channel / 8).
+    /* Return frame size in bytes, which for linear PCM is
+     * channelCount * (bit depth per channel / 8).
      * channelCount is determined from channelMask, and bit depth comes from format.
      * For non-linear formats, the frame size is typically 1 byte.
      */
-            uint32_t    channelCount() const { return mChannelCount; }
-
-            uint32_t    frameCount() const  { return mFrameCount; }
             size_t      frameSize() const   { return mFrameSize; }
 
+            uint32_t    channelCount() const { return mChannelCount; }
+            uint32_t    frameCount() const  { return mFrameCount; }
+
     /* Return the static buffer specified in constructor or set(), or 0 for streaming mode */
             sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
 
@@ -253,10 +274,9 @@
 
     /* Stop a track.
      * In static buffer mode, the track is stopped immediately.
-     * In streaming mode, the callback will cease being called and
-     * obtainBuffer returns STOPPED. Note that obtainBuffer() still works
-     * and will fill up buffers until the pool is exhausted.
-     * The stop does not occur immediately: any data remaining in the buffer
+     * In streaming mode, the callback will cease being called.  Note that obtainBuffer() still
+     * works and will fill up buffers until the pool is exhausted, and then will return WOULD_BLOCK.
+     * In streaming mode the stop does not occur immediately: any data remaining in the buffer
      * is first drained, mixed, and output, and only then is the track marked as stopped.
      */
             void        stop();
@@ -270,7 +290,7 @@
             void        flush();
 
     /* Pause a track. After pause, the callback will cease being called and
-     * obtainBuffer returns STOPPED. Note that obtainBuffer() still works
+     * obtainBuffer returns WOULD_BLOCK. Note that obtainBuffer() still works
      * and will fill up buffers until the pool is exhausted.
      * Volume is ramped down over the next mix buffer following the pause request,
      * and then the track is marked as paused.  It can be resumed with ramp up by start().
@@ -294,32 +314,41 @@
             status_t    setAuxEffectSendLevel(float level);
             void        getAuxEffectSendLevel(float* level) const;
 
-    /* Set sample rate for this track in Hz, mostly used for games' sound effects
+    /* Set source sample rate for this track in Hz, mostly used for games' sound effects
      */
             status_t    setSampleRate(uint32_t sampleRate);
 
-    /* Return current sample rate in Hz, or 0 if unknown */
+    /* Return current source sample rate in Hz, or 0 if unknown */
             uint32_t    getSampleRate() const;
 
     /* Enables looping and sets the start and end points of looping.
      * Only supported for static buffer mode.
      *
+     * FIXME The comments below are for the new planned interpretation which is not yet implemented.
+     * Currently the legacy behavior is still implemented, where loopStart and loopEnd
+     * are in wrapping (overflow) frame units like the return value of getPosition().
+     * The plan is to fix all callers to use the new version at the same time the implementation changes.
+     *
      * Parameters:
      *
-     * loopStart:   loop start expressed as the number of PCM frames played since AudioTrack start.
-     * loopEnd:     loop end expressed as the number of PCM frames played since AudioTrack start.
+     * loopStart:   loop start in frames relative to start of buffer.
+     * loopEnd:     loop end in frames relative to start of buffer.
      * loopCount:   number of loops to execute. Calling setLoop() with loopCount == 0 cancels any
-     *              pending or active loop. loopCount = -1 means infinite looping.
+     *              pending or active loop. loopCount == -1 means infinite looping.
      *
      * For proper operation the following condition must be respected:
-     *          (loopEnd-loopStart) <= framecount()
+     *      loopCount != 0 implies 0 <= loopStart < loopEnd <= frameCount().
+     *
+     * If the loop period (loopEnd - loopStart) is too small for the implementation to support,
+     * setLoop() will return BAD_VALUE.  loopCount must be >= -1.
+     *
      */
             status_t    setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount);
 
     /* Sets marker position. When playback reaches the number of frames specified, a callback with
      * event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker
      * notification callback.  To set a marker at a position which would compute as 0,
-     * a workaround is to the set the marker at a nearby position such as -1 or 1.
+     * a workaround is to set the marker at a nearby position such as ~0 or 1.
      * If the AudioTrack has been opened with no callback function associated, the operation will
      * fail.
      *
@@ -354,18 +383,19 @@
             status_t    setPositionUpdatePeriod(uint32_t updatePeriod);
             status_t    getPositionUpdatePeriod(uint32_t *updatePeriod) const;
 
-    /* Sets playback head position within AudioTrack buffer. The new position is specified
-     * in number of frames.
-     * This method must be called with the AudioTrack in paused or stopped state.
-     * Note that the actual position set is <position> modulo the AudioTrack buffer size in frames.
-     * Therefore using this method makes sense only when playing a "static" audio buffer
-     * as opposed to streaming.
-     * The getPosition() method on the other hand returns the total number of frames played since
-     * playback start.
+    /* Sets playback head position.
+     * Only supported for static buffer mode.
+     *
+     * FIXME The comments below are for the new planned interpretation which is not yet implemented.
+     * Currently the legacy behavior is still implemented, where the new position
+     * is in wrapping (overflow) frame units like the return value of getPosition().
+     * The plan is to fix all callers to use the new version at the same time the implementation changes.
      *
      * Parameters:
      *
-     * position:  New playback head position within AudioTrack buffer.
+     * position:  New playback head position in frames relative to start of buffer.
+     *            0 <= position <= frameCount().  Note that end of buffer is permitted,
+     *            but will result in an immediate underrun if started.
      *
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful operation
@@ -378,8 +408,22 @@
     /* Return the total number of frames played since playback start.
      * The counter will wrap (overflow) periodically, e.g. every ~27 hours at 44.1 kHz.
      * It is reset to zero by flush(), reload(), and stop().
+     *
+     * Parameters:
+     *
+     *  position:  Address where to return play head position.
+     *
+     * Returned status (from utils/Errors.h) can be:
+     *  - NO_ERROR: successful operation
+     *  - BAD_VALUE:  position is NULL
      */
-            status_t    getPosition(uint32_t *position);
+            status_t    getPosition(uint32_t *position) const;
+
+    /* For static buffer mode only, this returns the current playback position in frames
+     * relative to start of buffer.  It is analogous to the new API for
+     * setLoop() and setPosition().  After underrun, the position will be at end of buffer.
+     */
+            status_t    getBufferPosition(uint32_t *position);
 
     /* Forces AudioTrack buffer full condition. When playing a static buffer, this method avoids
      * rewriting the buffer before restarting playback after a stop.
@@ -426,15 +470,19 @@
      */
             status_t    attachAuxEffect(int effectId);
 
-    /* Obtains a buffer of "frameCount" frames. The buffer must be
-     * filled entirely, and then released with releaseBuffer().
-     * If the track is stopped, obtainBuffer() returns
-     * STOPPED instead of NO_ERROR as long as there are buffers available,
-     * at which point NO_MORE_BUFFERS is returned.
+    /* Obtains a buffer of up to "audioBuffer->frameCount" empty slots for frames.
+     * After filling these slots with data, the caller should release them with releaseBuffer().
+     * If the track buffer is not full, obtainBuffer() returns as many contiguous
+     * [empty slots for] frames as are available immediately.
+     * If the track buffer is full and track is stopped, obtainBuffer() returns WOULD_BLOCK
+     * regardless of the value of waitCount.
+     * If the track buffer is full and track is not stopped, obtainBuffer() blocks with a
+     * maximum timeout based on waitCount; see chart below.
      * Buffers will be returned until the pool
      * is exhausted, at which point obtainBuffer() will either block
-     * or return WOULD_BLOCK depending on the value of the "blocking"
+     * or return WOULD_BLOCK depending on the value of the "waitCount"
      * parameter.
+     * Each sample is 16-bit signed PCM.
      *
      * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications,
      * which should use write() or callback EVENT_MORE_DATA instead.
@@ -457,24 +505,35 @@
      *  raw         pointer to the buffer
      */
 
-        enum {
-            NO_MORE_BUFFERS = 0x80000001,   // same name in AudioFlinger.h, ok to be different value
-            STOPPED = 1
-        };
+    /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */
+            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
+                                __attribute__((__deprecated__));
 
-            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount);
+private:
+    /* New internal API
+     * If nonContig is non-NULL, it is an output parameter that will be set to the number of
+     * additional non-contiguous frames that are available immediately.
+     * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
+     * in case the requested amount of frames is in two or more non-contiguous regions.
+     * FIXME requested and elapsed are both relative times.  Consider changing to absolute time.
+     */
+            status_t    obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
+                                     struct timespec *elapsed = NULL, size_t *nonContig = NULL);
+public:
 
-    /* Release a filled buffer of "frameCount" frames for AudioFlinger to process. */
+    /* Release a filled buffer of "audioBuffer->frameCount" frames for AudioFlinger to process. */
+    // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed
             void        releaseBuffer(Buffer* audioBuffer);
 
     /* As a convenience we provide a write() interface to the audio buffer.
+     * Input parameter 'size' is in byte units.
      * This is implemented on top of obtainBuffer/releaseBuffer. For best
      * performance use callbacks. Returns actual number of bytes written >= 0,
      * or one of the following negative status codes:
      *      INVALID_OPERATION   AudioTrack is configured for shared buffer mode
      *      BAD_VALUE           size is invalid
-     *      STOPPED             AudioTrack was stopped during the write
-     *      NO_MORE_BUFFERS     when obtainBuffer() returns same
+     *      WOULD_BLOCK         when obtainBuffer() returns same, or
+     *                          AudioTrack was stopped during the write
      *      or any other error code returned by IAudioTrack::start() or restoreTrack_l().
      * Not supported for static buffer mode.
      */
@@ -483,7 +542,13 @@
     /*
      * Dumps the state of an audio track.
      */
-            status_t dump(int fd, const Vector<String16>& args) const;
+            status_t    dump(int fd, const Vector<String16>& args) const;
+
+    /*
+     * Return the total number of frames which AudioFlinger desired but were unavailable,
+     * and thus which resulted in an underrun.  Reset to zero by stop().
+     */
+            uint32_t    getUnderrunFrames() const;
 
 protected:
     /* copying audio tracks is not allowed */
@@ -502,19 +567,29 @@
 
                 void        pause();    // suspend thread from execution at next loop boundary
                 void        resume();   // allow thread to execute, if not requested to exit
+                void        pauseConditional();
+                                        // like pause(), but only if prior resume() wasn't latched
 
     private:
         friend class AudioTrack;
         virtual bool        threadLoop();
-        AudioTrack& mReceiver;
-        ~AudioTrackThread();
+        AudioTrack&         mReceiver;
+        virtual ~AudioTrackThread();
         Mutex               mMyLock;    // Thread::mLock is private
         Condition           mMyCond;    // Thread::mThreadExitedCondition is private
         bool                mPaused;    // whether thread is currently paused
+        bool                mResumeLatch;   // whether next pauseConditional() will be a nop
     };
 
             // body of AudioTrackThread::threadLoop()
-            bool processAudioBuffer(const sp<AudioTrackThread>& thread);
+            // returns the maximum amount of time before we would like to run again, where:
+            //      0           immediately
+            //      > 0         no later than this many nanoseconds from now
+            //      NS_WHENEVER still active but no particular deadline
+            //      NS_INACTIVE inactive so don't run again until re-started
+            //      NS_NEVER    never again
+            static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
+            nsecs_t processAudioBuffer(const sp<AudioTrackThread>& thread);
 
             // caller must hold lock on mLock for all _l methods
             status_t createTrack_l(audio_stream_type_t streamType,
@@ -523,20 +598,24 @@
                                  size_t frameCount,
                                  audio_output_flags_t flags,
                                  const sp<IMemory>& sharedBuffer,
-                                 audio_io_handle_t output);
+                                 audio_io_handle_t output,
+                                 size_t epoch);
 
-            // can only be called when !mActive
+            // can only be called when mState != STATE_ACTIVE
             void flush_l();
 
-            status_t setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount);
+            void setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount);
             audio_io_handle_t getOutput_l();
-            status_t restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart);
-            bool stopped_l() const { return !mActive; }
 
+            // FIXME enum is faster than strcmp() for parameter 'from'
+            status_t restoreTrack_l(const char *from);
+
+    // may be changed if IAudioTrack is re-created
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
-    sp<AudioTrackThread>    mAudioTrackThread;
+    audio_track_cblk_t*     mCblk;                  // re-load after mLock.unlock()
 
+    sp<AudioTrackThread>    mAudioTrackThread;
     float                   mVolume[2];
     float                   mSendLevel;
     uint32_t                mSampleRate;
@@ -544,62 +623,89 @@
     size_t                  mReqFrameCount;         // frame count to request the next time a new
                                                     // IAudioTrack is needed
 
-    audio_track_cblk_t*     mCblk;                  // re-load after mLock.unlock()
 
-            // Starting address of buffers in shared memory.  If there is a shared buffer, mBuffers
-            // is the value of pointer() for the shared buffer, otherwise mBuffers points
-            // immediately after the control block.  This address is for the mapping within client
-            // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
-    void*                   mBuffers;
-
+    // constant after constructor or set()
     audio_format_t          mFormat;                // as requested by client, not forced to 16-bit
     audio_stream_type_t     mStreamType;
     uint32_t                mChannelCount;
     audio_channel_mask_t    mChannelMask;
+    transfer_type           mTransfer;
 
-                // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data.
-                // For 8-bit PCM data, mFrameSizeAF is
-                // twice as large because data is expanded to 16-bit before being stored in buffer.
+    // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data.  For 8-bit PCM data, it's
+    // twice as large as mFrameSize because data is expanded to 16-bit before it's stored in buffer.
     size_t                  mFrameSize;             // app-level frame size
     size_t                  mFrameSizeAF;           // AudioFlinger frame size
 
     status_t                mStatus;
-    uint32_t                mLatency;
 
-    bool                    mActive;                // protected by mLock
+    // can change dynamically when IAudioTrack invalidated
+    uint32_t                mLatency;               // in ms
+
+    // Indicates the current track state.  Protected by mLock.
+    enum State {
+        STATE_ACTIVE,
+        STATE_STOPPED,
+        STATE_PAUSED,
+        STATE_FLUSHED,
+    }                       mState;
 
     callback_t              mCbf;                   // callback handler for events, or NULL
     void*                   mUserData;              // for client callback handler
 
     // for notification APIs
     uint32_t                mNotificationFramesReq; // requested number of frames between each
-                                                    // notification callback
+                                                    // notification callback,
+                                                    // at initial source sample rate
     uint32_t                mNotificationFramesAct; // actual number of frames between each
-                                                    // notification callback
+                                                    // notification callback,
+                                                    // at initial source sample rate
+    bool                    mRefreshRemaining;      // processAudioBuffer() should refresh next 2
+
+    // These are private to processAudioBuffer(), and are not protected by a lock
+    uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
+    bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
+    int                     mObservedSequence;      // last observed value of mSequence
+
     sp<IMemory>             mSharedBuffer;
-    int                     mLoopCount;
-    uint32_t                mRemainingFrames;
+    uint32_t                mLoopPeriod;            // in frames, zero means looping is disabled
     uint32_t                mMarkerPosition;        // in wrapping (overflow) frame units
     bool                    mMarkerReached;
     uint32_t                mNewPosition;           // in frames
-    uint32_t                mUpdatePeriod;          // in frames
+    uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS
 
-    bool                    mFlushed; // FIXME will be made obsolete by making flush() synchronous
     audio_output_flags_t    mFlags;
     int                     mSessionId;
     int                     mAuxEffectId;
 
-    // When locking both mLock and mCblk->lock, must lock in this order to avoid deadlock:
-    //      1. mLock
-    //      2. mCblk->lock
-    // It is OK to lock only mCblk->lock.
     mutable Mutex           mLock;
 
     bool                    mIsTimed;
     int                     mPreviousPriority;          // before start()
     SchedPolicy             mPreviousSchedulingGroup;
-    AudioTrackClientProxy*  mProxy;
     bool                    mAwaitBoost;    // thread should wait for priority boost before running
+
+    // The proxy should only be referenced while a lock is held because the proxy isn't
+    // multi-thread safe, especially the SingleStateQueue part of the proxy.
+    // An exception is that a blocking ClientProxy::obtainBuffer() may be called without a lock,
+    // provided that the caller also holds an extra reference to the proxy and shared memory to keep
+    // them around in case they are replaced during the obtainBuffer().
+    sp<StaticAudioTrackClientProxy> mStaticProxy;   // for type safety only
+    sp<AudioTrackClientProxy>       mProxy;         // primary owner of the memory
+
+    bool                    mInUnderrun;            // whether track is currently in underrun state
+
+private:
+    class DeathNotifier : public IBinder::DeathRecipient {
+    public:
+        DeathNotifier(AudioTrack* audioTrack) : mAudioTrack(audioTrack) { }
+    protected:
+        virtual void        binderDied(const wp<IBinder>& who);
+    private:
+        const wp<AudioTrack> mAudioTrack;
+    };
+
+    sp<DeathNotifier>       mDeathNotifier;
+    uint32_t                mSequence;              // incremented for each new IAudioTrack attempt
 };
 
 class TimedAudioTrack : public AudioTrack
diff --git a/include/media/IHDCP.h b/include/media/IHDCP.h
index 6d27b18..54fefa3 100644
--- a/include/media/IHDCP.h
+++ b/include/media/IHDCP.h
@@ -17,6 +17,7 @@
 #include <binder/IInterface.h>
 #include <media/hardware/HDCPAPI.h>
 #include <media/stagefright/foundation/ABase.h>
+#include <ui/GraphicBuffer.h>
 
 namespace android {
 
@@ -59,6 +60,20 @@
             const void *inData, size_t size, uint32_t streamCTR,
             uint64_t *outInputCTR, void *outData) = 0;
 
+    // Encrypt data according to the HDCP spec. "size" bytes of data starting
+    // at location "offset" are available in "buffer" (buffer handle). "size"
+    // may not be a multiple of 128 bits (16 bytes). An equal number of
+    // encrypted bytes should be written to the buffer at "outData" (virtual
+    // address). This operation is to be synchronous, i.e. this call does not
+    // return until outData contains size bytes of encrypted data.
+    // streamCTR will be assigned by the caller (to 0 for the first PES stream,
+    // 1 for the second and so on)
+    // inputCTR _will_be_maintained_by_the_callee_ for each PES stream.
+    virtual status_t encryptNative(
+            const sp<GraphicBuffer> &graphicBuffer,
+            size_t offset, size_t size, uint32_t streamCTR,
+            uint64_t *outInputCTR, void *outData) = 0;
+
     // DECRYPTION only:
     // Decrypt data according to the HDCP spec.
     // "size" bytes of encrypted data are available at "inData"
diff --git a/include/media/JetPlayer.h b/include/media/JetPlayer.h
index 0616bf0..388f767 100644
--- a/include/media/JetPlayer.h
+++ b/include/media/JetPlayer.h
@@ -88,7 +88,7 @@
     EAS_DATA_HANDLE     mEasData;
     EAS_FILE_LOCATOR    mEasJetFileLoc;
     EAS_PCM*            mAudioBuffer;// EAS renders the MIDI data into this buffer,
-    AudioTrack*         mAudioTrack; // and we play it in this audio track
+    sp<AudioTrack>      mAudioTrack; // and we play it in this audio track
     int                 mTrackBufferSize;
     S_JET_STATUS        mJetStatus;
     S_JET_STATUS        mPreviousJetStatus;
diff --git a/include/media/SoundPool.h b/include/media/SoundPool.h
index 7bf3069..9e5654f 100644
--- a/include/media/SoundPool.h
+++ b/include/media/SoundPool.h
@@ -118,7 +118,7 @@
 class SoundChannel : public SoundEvent {
 public:
     enum state { IDLE, RESUMING, STOPPING, PAUSED, PLAYING };
-    SoundChannel() : mAudioTrack(NULL), mState(IDLE), mNumChannels(1),
+    SoundChannel() : mState(IDLE), mNumChannels(1),
             mPos(0), mToggle(0), mAutoPaused(false) {}
     ~SoundChannel();
     void init(SoundPool* soundPool);
@@ -148,7 +148,7 @@
     bool doStop_l();
 
     SoundPool*          mSoundPool;
-    AudioTrack*         mAudioTrack;
+    sp<AudioTrack>      mAudioTrack;
     SoundEvent          mNextEvent;
     Mutex               mLock;
     int                 mState;
diff --git a/include/media/ToneGenerator.h b/include/media/ToneGenerator.h
index 2183fbe..98c4332 100644
--- a/include/media/ToneGenerator.h
+++ b/include/media/ToneGenerator.h
@@ -160,7 +160,7 @@
     bool isInited() { return (mState == TONE_IDLE)?false:true;}
 
     // returns the audio session this ToneGenerator belongs to or 0 if an error occured.
-    int getSessionId() { return (mpAudioTrack == NULL) ? 0 : mpAudioTrack->getSessionId(); }
+    int getSessionId() { return (mpAudioTrack == 0) ? 0 : mpAudioTrack->getSessionId(); }
 
 private:
 
@@ -264,7 +264,7 @@
     unsigned short mLoopCounter; // Current tone loopback count
 
     uint32_t mSamplingRate;  // AudioFlinger Sampling rate
-    AudioTrack *mpAudioTrack;  // Pointer to audio track used for playback
+    sp<AudioTrack> mpAudioTrack;  // Pointer to audio track used for playback
     Mutex mLock;  // Mutex to control concurent access to ToneGenerator object from audio callback and application API
     Mutex mCbkCondLock; // Mutex associated to mWaitCbkCond
     Condition mWaitCbkCond; // condition enabling interface to wait for audio callback completion after a change is requested
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index df25d7b..8876c9b 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -182,7 +182,7 @@
 
     bool mSentFormat;
     bool mIsEncoder;
-
+    bool mUseMetadataOnEncoderOutput;
     bool mShutdownInProgress;
 
     // If "mKeepComponentAllocated" we only transition back to Loaded state
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index 1dc408f..3bf046d 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -70,7 +70,7 @@
 private:
     friend class VideoEditorAudioPlayer;
     sp<MediaSource> mSource;
-    AudioTrack *mAudioTrack;
+    sp<AudioTrack> mAudioTrack;
 
     MediaBuffer *mInputBuffer;
 
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 99f3c3b..4c9aaad 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -73,7 +73,7 @@
     Condition mFrameAvailableCondition;
     Condition mFrameEncodingCompletionCondition;
 
-    AudioRecord *mRecord;
+    sp<AudioRecord> mRecord;
     status_t mInitCheck;
     bool mStarted;
     int32_t mSampleRate;
diff --git a/include/media/stagefright/SurfaceMediaSource.h b/include/media/stagefright/SurfaceMediaSource.h
index 5f21da9..7d40379 100644
--- a/include/media/stagefright/SurfaceMediaSource.h
+++ b/include/media/stagefright/SurfaceMediaSource.h
@@ -146,9 +146,13 @@
     // this consumer
     sp<BufferQueue> mBufferQueue;
 
-    // mBufferSlot caches GraphicBuffers from the buffer queue
-    sp<GraphicBuffer> mBufferSlot[BufferQueue::NUM_BUFFER_SLOTS];
+    struct SlotData {
+        sp<GraphicBuffer> mGraphicBuffer;
+        uint64_t mFrameNumber;
+    };
 
+    // mSlots caches GraphicBuffers and frameNumbers from the buffer queue
+    SlotData mSlots[BufferQueue::NUM_BUFFER_SLOTS];
 
     // The permenent width and height of SMS buffers
     int mWidth;
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 41e20f8..681f557 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -22,32 +22,46 @@
 
 #include <utils/threads.h>
 #include <utils/Log.h>
+#include <utils/RefBase.h>
+#include <media/nbaio/roundup.h>
+#include <media/SingleStateQueue.h>
+#include <private/media/StaticAudioTrackState.h>
 
 namespace android {
 
 // ----------------------------------------------------------------------------
 
-// Maximum cumulated timeout milliseconds before restarting audioflinger thread
-#define MAX_STARTUP_TIMEOUT_MS  3000    // Longer timeout period at startup to cope with A2DP
-                                        // init time
-#define MAX_RUN_TIMEOUT_MS      1000
-#define WAIT_PERIOD_MS          10
-
-#define CBLK_UNDERRUN   0x01 // set: underrun (out) or overrrun (in), clear: no underrun or overrun
+#define CBLK_UNDERRUN   0x01 // set by server immediately on output underrun, cleared by client
 #define CBLK_FORCEREADY 0x02 // set: track is considered ready immediately by AudioFlinger,
                              // clear: track is ready when buffer full
 #define CBLK_INVALID    0x04 // track buffer invalidated by AudioFlinger, need to re-create
-#define CBLK_DISABLED   0x08 // track disabled by AudioFlinger due to underrun, need to re-start
+#define CBLK_DISABLED   0x08 // output track disabled by AudioFlinger due to underrun,
+                             // need to re-start.  Unlike CBLK_UNDERRUN, this is not set
+                             // immediately, but only after a long string of underruns.
+// 0x10 unused
+#define CBLK_LOOP_CYCLE 0x20 // set by server each time a loop cycle other than final one completes
+#define CBLK_LOOP_FINAL 0x40 // set by server when the final loop cycle completes
+#define CBLK_BUFFER_END 0x80 // set by server when the position reaches end of buffer if not looping
+#define CBLK_OVERRUN   0x100 // set by server immediately on input overrun, cleared by client
+#define CBLK_INTERRUPT 0x200 // set by client on interrupt(), cleared by client in obtainBuffer()
 
 struct AudioTrackSharedStreaming {
     // similar to NBAIO MonoPipe
-    volatile int32_t mFront;
-    volatile int32_t mRear;
+    // in continuously incrementing frame units, take modulo buffer size, which must be a power of 2
+    volatile int32_t mFront;    // read by server
+    volatile int32_t mRear;     // write by client
+    volatile int32_t mFlush;    // incremented by client to indicate a request to flush;
+                                // server notices and discards all data between mFront and mRear
+    volatile uint32_t mUnderrunFrames;  // server increments for each unavailable but desired frame
 };
 
-// future
+typedef SingleStateQueue<StaticAudioTrackState> StaticAudioTrackSingleStateQueue;
+
 struct AudioTrackSharedStatic {
-    int mReserved;
+    StaticAudioTrackSingleStateQueue::Shared
+                    mSingleStateQueue;
+    size_t          mBufferPosition;    // updated asynchronously by server,
+                                        // "for entertainment purposes only"
 };
 
 // ----------------------------------------------------------------------------
@@ -55,65 +69,61 @@
 // Important: do not add any virtual methods, including ~
 struct audio_track_cblk_t
 {
+                // Since the control block is always located in shared memory, this constructor
+                // is only used for placement new().  It is never used for regular new() or stack.
+                            audio_track_cblk_t();
+                /*virtual*/ ~audio_track_cblk_t() { }
+
                 friend class Proxy;
+                friend class ClientProxy;
                 friend class AudioTrackClientProxy;
                 friend class AudioRecordClientProxy;
                 friend class ServerProxy;
+                friend class AudioTrackServerProxy;
+                friend class AudioRecordServerProxy;
 
     // The data members are grouped so that members accessed frequently and in the same context
     // are in the same line of data cache.
-                Mutex       lock;           // sizeof(int)
-                Condition   cv;             // sizeof(int)
 
-                // next 4 are offsets within "buffers"
-    volatile    uint32_t    user;
-    volatile    uint32_t    server;
-                uint32_t    userBase;
-                uint32_t    serverBase;
-
-                int         mPad1;          // unused, but preserves cache line alignment
+    volatile    uint32_t    server;     // updated asynchronously by server,
+                                        // "for entertainment purposes only"
 
                 size_t      frameCount_;    // used during creation to pass actual track buffer size
                                             // from AudioFlinger to client, and not referenced again
-                                            // FIXME remove here and replace by createTrack() in/out parameter
+                                            // FIXME remove here and replace by createTrack() in/out
+                                            // parameter
                                             // renamed to "_" to detect incorrect use
 
-                // Cache line boundary (32 bytes)
+    volatile    int32_t     mFutex;     // semaphore: down (P) by client,
+                                        // up (V) by server or binderDied() or interrupt()
 
-                uint32_t    loopStart;
-                uint32_t    loopEnd;        // read-only for server, read/write for client
-                int         loopCount;      // read/write for client
+private:
+
+                size_t      mMinimum;       // server wakes up client if available >= mMinimum
 
                 // Channel volumes are fixed point U4.12, so 0x1000 means 1.0.
                 // Left channel is in [0:15], right channel is in [16:31].
                 // Always read and write the combined pair atomically.
                 // For AudioTrack only, not used by AudioRecord.
-private:
                 uint32_t    mVolumeLR;
 
                 uint32_t    mSampleRate;    // AudioTrack only: client's requested sample rate in Hz
                                             // or 0 == default. Write-only client, read-only server.
 
+                // client write-only, server read-only
+                uint16_t    mSendLevel;      // Fixed point U4.12 so 0x1000 means 1.0
+
                 uint8_t     mPad2;           // unused
 
 public:
                 // read-only for client, server writes once at initialization and is then read-only
                 uint8_t     mName;           // normal tracks: track name, fast tracks: track index
 
-                // used by client only
-                uint16_t    bufferTimeoutMs; // Maximum cumulated timeout before restarting
-                                             // audioflinger
-
-                uint16_t    waitTimeMs;      // Cumulated wait time, used by client only
-private:
-                // client write-only, server read-only
-                uint16_t    mSendLevel;      // Fixed point U4.12 so 0x1000 means 1.0
-public:
     volatile    int32_t     flags;
 
                 // Cache line boundary (32 bytes)
 
-#if 0
+public:
                 union {
                     AudioTrackSharedStreaming   mStreaming;
                     AudioTrackSharedStatic      mStatic;
@@ -121,25 +131,6 @@
                 } u;
 
                 // Cache line boundary (32 bytes)
-#endif
-
-                // Since the control block is always located in shared memory, this constructor
-                // is only used for placement new().  It is never used for regular new() or stack.
-                            audio_track_cblk_t();
-
-private:
-                // if there is a shared buffer, "buffers" is the value of pointer() for the shared
-                // buffer, otherwise "buffers" points immediately after the control block
-                void*       buffer(void *buffers, uint32_t frameSize, size_t offset) const;
-
-                bool        tryLock();
-
-                // isOut == true means AudioTrack, isOut == false means AudioRecord
-                bool        stepServer(size_t stepCount, size_t frameCount, bool isOut);
-                uint32_t    stepUser(size_t stepCount, size_t frameCount, bool isOut);
-                uint32_t    framesAvailable(size_t frameCount, bool isOut);
-                uint32_t    framesAvailable_l(size_t frameCount, bool isOut);
-                uint32_t    framesReady(bool isOut);
 };
 
 // ----------------------------------------------------------------------------
@@ -147,29 +138,31 @@
 // Proxy for shared memory control block, to isolate callers from needing to know the details.
 // There is exactly one ClientProxy and one ServerProxy per shared memory control block.
 // The proxies are located in normal memory, and are not multi-thread safe within a given side.
-class Proxy {
+class Proxy : public RefBase {
 protected:
-    Proxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize)
-        : mCblk(cblk), mBuffers(buffers), mFrameCount(frameCount), mFrameSize(frameSize) { }
+    Proxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize, bool isOut,
+            bool clientInServer);
     virtual ~Proxy() { }
 
 public:
-    void*   buffer(size_t offset) const {
-        return mCblk->buffer(mBuffers, mFrameSize, offset);
-    }
+    struct Buffer {
+        size_t  mFrameCount;            // number of frames available in this buffer
+        void*   mRaw;                   // pointer to first frame
+        size_t  mNonContig;             // number of additional non-contiguous frames available
+    };
 
 protected:
     // These refer to shared memory, and are virtual addresses with respect to the current process.
     // They may have different virtual addresses within the other process.
-    audio_track_cblk_t* const   mCblk;          // the control block
-    void* const                 mBuffers;       // starting address of buffers
+    audio_track_cblk_t* const   mCblk;  // the control block
+    void* const     mBuffers;           // starting address of buffers
 
-    const size_t                mFrameCount;    // not necessarily a power of 2
-    const size_t                mFrameSize;     // in bytes
-#if 0
-    const size_t                mFrameCountP2;  // mFrameCount rounded to power of 2, streaming mode
-#endif
-
+    const size_t    mFrameCount;        // not necessarily a power of 2
+    const size_t    mFrameSize;         // in bytes
+    const size_t    mFrameCountP2;      // mFrameCount rounded to power of 2, streaming mode
+    const bool      mIsOut;             // true for AudioTrack, false for AudioRecord
+    const bool      mClientInServer;    // true for OutputTrack, false for AudioTrack & AudioRecord
+    bool            mIsShutdown;        // latch set to true when shared memory corruption detected
 };
 
 // ----------------------------------------------------------------------------
@@ -177,9 +170,86 @@
 // Proxy seen by AudioTrack client and AudioRecord client
 class ClientProxy : public Proxy {
 protected:
-    ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize)
-        : Proxy(cblk, buffers, frameCount, frameSize) { }
+    ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize,
+            bool isOut, bool clientInServer);
     virtual ~ClientProxy() { }
+
+public:
+    static const struct timespec kForever;
+    static const struct timespec kNonBlocking;
+
+    // Obtain a buffer with filled frames (reading) or empty frames (writing).
+    // It is permitted to call obtainBuffer() multiple times in succession, without any intervening
+    // calls to releaseBuffer().  In that case, the final obtainBuffer() is the one that effectively
+    // sets or extends the unreleased frame count.
+    // On entry:
+    //  buffer->mFrameCount should be initialized to maximum number of desired frames,
+    //      which must be > 0.
+    //  buffer->mNonContig is unused.
+    //  buffer->mRaw is unused.
+    //  requested is the requested timeout in local monotonic delta time units:
+    //      NULL or &kNonBlocking means non-blocking (zero timeout).
+    //      &kForever means block forever (infinite timeout).
+    //      Other values mean a specific timeout in local monotonic delta time units.
+    //  elapsed is a pointer to a location that will hold the total local monotonic time that
+    //      elapsed while blocked, or NULL if not needed.
+    // On exit:
+    //  buffer->mFrameCount has the actual number of contiguous available frames,
+    //      which is always 0 when the return status != NO_ERROR.
+    //  buffer->mNonContig is the number of additional non-contiguous available frames.
+    //  buffer->mRaw is a pointer to the first available frame,
+    //      or NULL when buffer->mFrameCount == 0.
+    // The return status is one of:
+    //  NO_ERROR    Success, buffer->mFrameCount > 0.
+    //  WOULD_BLOCK Non-blocking mode and no frames are available.
+    //  TIMED_OUT   Timeout occurred before any frames became available.
+    //              This can happen even for infinite timeout, due to a spurious wakeup.
+    //              In this case, the caller should investigate and then re-try as appropriate.
+    //  DEAD_OBJECT Server has died or invalidated, caller should destroy this proxy and re-create.
+    //  -EINTR      Call has been interrupted.  Look around to see why, and then perhaps try again.
+    //  NO_INIT     Shared memory is corrupt.
+    //  BAD_VALUE   On entry buffer == NULL or buffer->mFrameCount == 0.
+    status_t    obtainBuffer(Buffer* buffer, const struct timespec *requested = NULL,
+            struct timespec *elapsed = NULL);
+
+    // Release (some of) the frames last obtained.
+    // On entry, buffer->mFrameCount should have the number of frames to release,
+    // which must (cumulatively) be <= the number of frames last obtained but not yet released.
+    // buffer->mRaw is ignored, but is normally same pointer returned by last obtainBuffer().
+    // It is permitted to call releaseBuffer() multiple times to release the frames in chunks.
+    // On exit:
+    //  buffer->mFrameCount is zero.
+    //  buffer->mRaw is NULL.
+    void        releaseBuffer(Buffer* buffer);
+
+    // Call after detecting server's death
+    void        binderDied();
+
+    // Call to force an obtainBuffer() to return quickly with -EINTR
+    void        interrupt();
+
+    size_t      getPosition() {
+        return mEpoch + mCblk->server;
+    }
+
+    void        setEpoch(size_t epoch) {
+        mEpoch = epoch;
+    }
+
+    void        setMinimum(size_t minimum) {
+        mCblk->mMinimum = minimum;
+    }
+
+    // Return the number of frames that would need to be obtained and released
+    // in order for the client to be aligned at start of buffer
+    virtual size_t  getMisalignment();
+
+    size_t      getEpoch() const {
+        return mEpoch;
+    }
+
+private:
+    size_t      mEpoch;
 };
 
 // ----------------------------------------------------------------------------
@@ -187,8 +257,10 @@
 // Proxy used by AudioTrack client, which also includes AudioFlinger::PlaybackThread::OutputTrack
 class AudioTrackClientProxy : public ClientProxy {
 public:
-    AudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize)
-        : ClientProxy(cblk, buffers, frameCount, frameSize) { }
+    AudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+            size_t frameSize, bool clientInServer = false)
+        : ClientProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/,
+          clientInServer) { }
     virtual ~AudioTrackClientProxy() { }
 
     // No barriers on the following operations, so the ordering of loads/stores
@@ -208,27 +280,36 @@
         mCblk->mSampleRate = sampleRate;
     }
 
-    // called by:
-    //   PlaybackThread::OutputTrack::write
-    //   AudioTrack::createTrack_l
-    //   AudioTrack::releaseBuffer
-    //   AudioTrack::reload
-    //   AudioTrack::restoreTrack_l (2 places)
-    size_t      stepUser(size_t stepCount) {
-        return mCblk->stepUser(stepCount, mFrameCount, true /*isOut*/);
+    virtual void flush();
+
+    virtual uint32_t    getUnderrunFrames() const {
+        return mCblk->u.mStreaming.mUnderrunFrames;
+    }
+};
+
+class StaticAudioTrackClientProxy : public AudioTrackClientProxy {
+public:
+    StaticAudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+            size_t frameSize);
+    virtual ~StaticAudioTrackClientProxy() { }
+
+    virtual void    flush();
+
+#define MIN_LOOP    16  // minimum length of each loop iteration in frames
+            void    setLoop(size_t loopStart, size_t loopEnd, int loopCount);
+            size_t  getBufferPosition();
+
+    virtual size_t  getMisalignment() {
+        return 0;
     }
 
-    // called by AudioTrack::obtainBuffer and AudioTrack::processBuffer
-    size_t      framesAvailable() {
-        return mCblk->framesAvailable(mFrameCount, true /*isOut*/);
+    virtual uint32_t    getUnderrunFrames() const {
+        return 0;
     }
 
-    // called by AudioTrack::obtainBuffer and PlaybackThread::OutputTrack::obtainBuffer
-    // FIXME remove this API since it assumes a lock that should be invisible to caller
-    size_t      framesAvailable_l() {
-        return mCblk->framesAvailable_l(mFrameCount, true /*isOut*/);
-    }
-
+private:
+    StaticAudioTrackSingleStateQueue::Mutator   mMutator;
+    size_t          mBufferPosition;    // so that getBufferPosition() appears to be synchronous
 };
 
 // ----------------------------------------------------------------------------
@@ -236,60 +317,122 @@
 // Proxy used by AudioRecord client
 class AudioRecordClientProxy : public ClientProxy {
 public:
-    AudioRecordClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize)
-        : ClientProxy(cblk, buffers, frameCount, frameSize) { }
+    AudioRecordClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+            size_t frameSize)
+        : ClientProxy(cblk, buffers, frameCount, frameSize,
+            false /*isOut*/, false /*clientInServer*/) { }
     ~AudioRecordClientProxy() { }
-
-    // called by AudioRecord::releaseBuffer
-    size_t      stepUser(size_t stepCount) {
-        return mCblk->stepUser(stepCount, mFrameCount, false /*isOut*/);
-    }
-
-    // called by AudioRecord::processBuffer
-    size_t      framesAvailable() {
-        return mCblk->framesAvailable(mFrameCount, false /*isOut*/);
-    }
-
-    // called by AudioRecord::obtainBuffer
-    size_t      framesReady() {
-        return mCblk->framesReady(false /*isOut*/);
-    }
-
 };
 
 // ----------------------------------------------------------------------------
 
 // Proxy used by AudioFlinger server
 class ServerProxy : public Proxy {
+protected:
+    ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize,
+            bool isOut, bool clientInServer);
 public:
-    ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize, bool isOut)
-        : Proxy(cblk, buffers, frameCount, frameSize), mIsOut(isOut) { }
     virtual ~ServerProxy() { }
 
-    // for AudioTrack and AudioRecord
-    bool        step(size_t stepCount) { return mCblk->stepServer(stepCount, mFrameCount, mIsOut); }
+    // Obtain a buffer with filled frames (writing) or empty frames (reading).
+    // It is permitted to call obtainBuffer() multiple times in succession, without any intervening
+    // calls to releaseBuffer().  In that case, the final obtainBuffer() is the one that effectively
+    // sets or extends the unreleased frame count.
+    // Always non-blocking.
+    // On entry:
+    //  buffer->mFrameCount should be initialized to maximum number of desired frames,
+    //      which must be > 0.
+    //  buffer->mNonContig is unused.
+    //  buffer->mRaw is unused.
+    // On exit:
+    //  buffer->mFrameCount has the actual number of contiguous available frames,
+    //      which is always 0 when the return status != NO_ERROR.
+    //  buffer->mNonContig is the number of additional non-contiguous available frames.
+    //  buffer->mRaw is a pointer to the first available frame,
+    //      or NULL when buffer->mFrameCount == 0.
+    // The return status is one of:
+    //  NO_ERROR    Success, buffer->mFrameCount > 0.
+    //  WOULD_BLOCK No frames are available.
+    //  NO_INIT     Shared memory is corrupt.
+    virtual status_t    obtainBuffer(Buffer* buffer);
 
+    // Release (some of) the frames last obtained.
+    // On entry, buffer->mFrameCount should have the number of frames to release,
+    // which must (cumulatively) be <= the number of frames last obtained but not yet released.
+    // It is permitted to call releaseBuffer() multiple times to release the frames in chunks.
+    // buffer->mRaw is ignored, but is normally same pointer returned by last obtainBuffer().
+    // On exit:
+    //  buffer->mFrameCount is zero.
+    //  buffer->mRaw is NULL.
+    virtual void        releaseBuffer(Buffer* buffer);
+
+protected:
+    size_t      mUnreleased;    // unreleased frames remaining from most recent obtainBuffer()
+    size_t      mAvailToClient; // estimated frames available to client prior to releaseBuffer()
+private:
+    int32_t     mFlush;         // our copy of cblk->u.mStreaming.mFlush, for streaming output only
+    bool        mDeferWake;     // whether another releaseBuffer() is expected soon
+};
+
+// Proxy used by AudioFlinger for servicing AudioTrack
+class AudioTrackServerProxy : public ServerProxy {
+public:
+    AudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+            size_t frameSize, bool clientInServer = false)
+        : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer) { }
+protected:
+    virtual ~AudioTrackServerProxy() { }
+
+public:
     // return value of these methods must be validated by the caller
     uint32_t    getSampleRate() const { return mCblk->mSampleRate; }
     uint16_t    getSendLevel_U4_12() const { return mCblk->mSendLevel; }
     uint32_t    getVolumeLR() const { return mCblk->mVolumeLR; }
 
-    // for AudioTrack only
-    size_t      framesReady() {
-        ALOG_ASSERT(mIsOut);
-        return mCblk->framesReady(true);
-    }
+    // estimated total number of filled frames available to server to read,
+    // which may include non-contiguous frames
+    virtual size_t      framesReady();
 
-    // for AudioRecord only, called by RecordThread::RecordTrack::getNextBuffer
-    // FIXME remove this API since it assumes a lock that should be invisible to caller
-    size_t      framesAvailableIn_l() {
-        ALOG_ASSERT(!mIsOut);
-        return mCblk->framesAvailable_l(mFrameCount, false);
-    }
+    // Currently AudioFlinger will call framesReady() for a fast track from two threads:
+    // FastMixer thread, and normal mixer thread.  This is dangerous, as the proxy is intended
+    // to be called from at most one thread of server, and one thread of client.
+    // As a temporary workaround, this method informs the proxy implementation that it
+    // should avoid doing a state queue poll from within framesReady().
+    // FIXME Change AudioFlinger to not call framesReady() from normal mixer thread.
+    virtual void        framesReadyIsCalledByMultipleThreads() { }
+};
+
+class StaticAudioTrackServerProxy : public AudioTrackServerProxy {
+public:
+    StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+            size_t frameSize);
+protected:
+    virtual ~StaticAudioTrackServerProxy() { }
+
+public:
+    virtual size_t      framesReady();
+    virtual void        framesReadyIsCalledByMultipleThreads();
+    virtual status_t    obtainBuffer(Buffer* buffer);
+    virtual void        releaseBuffer(Buffer* buffer);
 
 private:
-    const bool  mIsOut;     // true for AudioTrack, false for AudioRecord
+    ssize_t             pollPosition(); // poll for state queue update, and return current position
+    StaticAudioTrackSingleStateQueue::Observer  mObserver;
+    size_t              mPosition;  // server's current play position in frames, relative to 0
+    size_t              mEnd;       // cached value computed from mState, safe for asynchronous read
+    bool                mFramesReadyIsCalledByMultipleThreads;
+    StaticAudioTrackState   mState;
+};
 
+// Proxy used by AudioFlinger for servicing AudioRecord
+class AudioRecordServerProxy : public ServerProxy {
+public:
+    AudioRecordServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+            size_t frameSize)
+        : ServerProxy(cblk, buffers, frameCount, frameSize, false /*isOut*/,
+            false /*clientInServer*/) { }
+protected:
+    virtual ~AudioRecordServerProxy() { }
 };
 
 // ----------------------------------------------------------------------------
diff --git a/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp b/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
index c111ba8..3fa8b87 100755
--- a/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
+++ b/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
@@ -35,8 +35,7 @@
 VideoEditorAudioPlayer::VideoEditorAudioPlayer(
         const sp<MediaPlayerBase::AudioSink> &audioSink,
         PreviewPlayer *observer)
-    : mAudioTrack(NULL),
-      mInputBuffer(NULL),
+    : mInputBuffer(NULL),
       mSampleRate(0),
       mLatencyUs(0),
       mFrameSize(0),
@@ -111,8 +110,7 @@
     } else {
         mAudioTrack->stop();
 
-        delete mAudioTrack;
-        mAudioTrack = NULL;
+        mAudioTrack.clear();
     }
 
     // Make sure to release any buffer we hold onto so that the
@@ -538,8 +536,7 @@
                 0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
 
         if ((err = mAudioTrack->initCheck()) != OK) {
-            delete mAudioTrack;
-            mAudioTrack = NULL;
+            mAudioTrack.clear();
 
             if (mFirstBuffer != NULL) {
                 mFirstBuffer->release();
diff --git a/libvideoeditor/lvpp/VideoEditorAudioPlayer.h b/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
index 626df39..a5616c1 100755
--- a/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
+++ b/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
@@ -91,7 +91,7 @@
     int64_t mBGAudioStoryBoardCurrentMediaVolumeVal;
 
     sp<MediaSource> mSource;
-    AudioTrack *mAudioTrack;
+    sp<AudioTrack> mAudioTrack;
 
     MediaBuffer *mInputBuffer;
 
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.cpp b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
index 91a4415..4a14b40 100755
--- a/libvideoeditor/lvpp/VideoEditorPlayer.cpp
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
@@ -310,7 +310,6 @@
 VideoEditorPlayer::VeAudioOutput::VeAudioOutput()
     : mCallback(NULL),
       mCallbackCookie(NULL) {
-    mTrack = 0;
     mStreamType = AUDIO_STREAM_MUSIC;
     mLeftVolume = 1.0;
     mRightVolume = 1.0;
@@ -405,7 +404,7 @@
 
     }
     ALOGV("open(%u, %d, %d, %d)", sampleRate, channelCount, format, bufferCount);
-    if (mTrack) close();
+    if (mTrack != 0) close();
     uint32_t afSampleRate;
     size_t afFrameCount;
     int frameCount;
@@ -434,7 +433,7 @@
         }
     }
 
-    AudioTrack *t;
+    sp<AudioTrack> t;
     if (mCallback != NULL) {
         t = new AudioTrack(
                 mStreamType,
@@ -457,7 +456,6 @@
 
     if ((t == 0) || (t->initCheck() != NO_ERROR)) {
         ALOGE("Unable to create audio track");
-        delete t;
         return NO_INIT;
     }
 
@@ -472,7 +470,7 @@
 void VideoEditorPlayer::VeAudioOutput::start() {
 
     ALOGV("start");
-    if (mTrack) {
+    if (mTrack != 0) {
         mTrack->setVolume(mLeftVolume, mRightVolume);
         mTrack->start();
         mTrack->getPosition(&mNumFramesWritten);
@@ -492,7 +490,7 @@
     LOG_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
 
     //ALOGV("write(%p, %u)", buffer, size);
-    if (mTrack) {
+    if (mTrack != 0) {
         snoopWrite(buffer, size);
         ssize_t ret = mTrack->write(buffer, size);
         mNumFramesWritten += ret / 4; // assume 16 bit stereo
@@ -504,26 +502,25 @@
 void VideoEditorPlayer::VeAudioOutput::stop() {
 
     ALOGV("stop");
-    if (mTrack) mTrack->stop();
+    if (mTrack != 0) mTrack->stop();
 }
 
 void VideoEditorPlayer::VeAudioOutput::flush() {
 
     ALOGV("flush");
-    if (mTrack) mTrack->flush();
+    if (mTrack != 0) mTrack->flush();
 }
 
 void VideoEditorPlayer::VeAudioOutput::pause() {
 
     ALOGV("VeAudioOutput::pause");
-    if (mTrack) mTrack->pause();
+    if (mTrack != 0) mTrack->pause();
 }
 
 void VideoEditorPlayer::VeAudioOutput::close() {
 
     ALOGV("close");
-    delete mTrack;
-    mTrack = 0;
+    mTrack.clear();
 }
 
 void VideoEditorPlayer::VeAudioOutput::setVolume(float left, float right) {
@@ -531,7 +528,7 @@
     ALOGV("setVolume(%f, %f)", left, right);
     mLeftVolume = left;
     mRightVolume = right;
-    if (mTrack) {
+    if (mTrack != 0) {
         mTrack->setVolume(left, right);
     }
 }
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.h b/libvideoeditor/lvpp/VideoEditorPlayer.h
index 77194ab..defc90d 100755
--- a/libvideoeditor/lvpp/VideoEditorPlayer.h
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.h
@@ -71,7 +71,7 @@
         static void             CallbackWrapper(
                 int event, void *me, void *info);
 
-        AudioTrack*             mTrack;
+        sp<AudioTrack>          mTrack;
         AudioCallback           mCallback;
         void *                  mCallbackCookie;
         audio_stream_type_t     mStreamType;
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 2c0c3a5..96755bb 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -53,7 +53,8 @@
     Visualizer.cpp \
     MemoryLeakTrackUtil.cpp \
     SoundPool.cpp \
-    SoundPoolThread.cpp
+    SoundPoolThread.cpp \
+    StringArray.cpp
 
 LOCAL_SRC_FILES += ../libnbaio/roundup.c
 
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 40ff1bf..9faa497 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -19,18 +19,13 @@
 #define LOG_TAG "AudioRecord"
 
 #include <sys/resource.h>
-#include <sys/types.h>
-
 #include <binder/IPCThreadState.h>
-#include <cutils/atomic.h>
-#include <cutils/compiler.h>
 #include <media/AudioRecord.h>
-#include <media/AudioSystem.h>
-#include <system/audio.h>
 #include <utils/Log.h>
-
 #include <private/media/AudioTrackShared.h>
 
+#define WAIT_PERIOD_MS          10
+
 namespace android {
 // ---------------------------------------------------------------------------
 
@@ -41,7 +36,9 @@
         audio_format_t format,
         audio_channel_mask_t channelMask)
 {
-    if (frameCount == NULL) return BAD_VALUE;
+    if (frameCount == NULL) {
+        return BAD_VALUE;
+    }
 
     // default to 0 in case of error
     *frameCount = 0;
@@ -75,8 +72,7 @@
 
 AudioRecord::AudioRecord()
     : mStatus(NO_INIT), mSessionId(0),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
-      mProxy(NULL)
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT)
 {
 }
 
@@ -89,14 +85,15 @@
         callback_t cbf,
         void* user,
         int notificationFrames,
-        int sessionId)
+        int sessionId,
+        transfer_type transferType)
     : mStatus(NO_INIT), mSessionId(0),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
       mProxy(NULL)
 {
-    mStatus = set(inputSource, sampleRate, format, channelMask,
-            frameCount, cbf, user, notificationFrames, false /*threadCanCallJava*/, sessionId);
+    mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
+            notificationFrames, false /*threadCanCallJava*/, sessionId, transferType);
 }
 
 AudioRecord::~AudioRecord()
@@ -111,11 +108,13 @@
             mAudioRecordThread->requestExitAndWait();
             mAudioRecordThread.clear();
         }
-        mAudioRecord.clear();
+        if (mAudioRecord != 0) {
+            mAudioRecord->asBinder()->unlinkToDeath(mDeathNotifier, this);
+            mAudioRecord.clear();
+        }
         IPCThreadState::self()->flushCommands();
         AudioSystem::releaseAudioSessionId(mSessionId);
     }
-    delete mProxy;
 }
 
 status_t AudioRecord::set(
@@ -128,8 +127,32 @@
         void* user,
         int notificationFrames,
         bool threadCanCallJava,
-        int sessionId)
+        int sessionId,
+        transfer_type transferType)
 {
+    switch (transferType) {
+    case TRANSFER_DEFAULT:
+        if (cbf == NULL || threadCanCallJava) {
+            transferType = TRANSFER_SYNC;
+        } else {
+            transferType = TRANSFER_CALLBACK;
+        }
+        break;
+    case TRANSFER_CALLBACK:
+        if (cbf == NULL) {
+            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL");
+            return BAD_VALUE;
+        }
+        break;
+    case TRANSFER_OBTAIN:
+    case TRANSFER_SYNC:
+        break;
+    default:
+        ALOGE("Invalid transfer type %d", transferType);
+        return BAD_VALUE;
+    }
+    mTransfer = transferType;
+
     // FIXME "int" here is legacy and will be replaced by size_t later
     if (frameCountInt < 0) {
         ALOGE("Invalid frame count %d", frameCountInt);
@@ -143,6 +166,7 @@
     AutoMutex lock(mLock);
 
     if (mAudioRecord != 0) {
+        ALOGE("Track already in use");
         return INVALID_OPERATION;
     }
 
@@ -159,14 +183,16 @@
     if (format == AUDIO_FORMAT_DEFAULT) {
         format = AUDIO_FORMAT_PCM_16_BIT;
     }
+
     // validate parameters
     if (!audio_is_valid_format(format)) {
-        ALOGE("Invalid format");
+        ALOGE("Invalid format %d", format);
         return BAD_VALUE;
     }
     mFormat = format;
 
     if (!audio_is_input_channel(channelMask)) {
+        ALOGE("Invalid channel mask %#x", channelMask);
         return BAD_VALUE;
     }
     mChannelMask = channelMask;
@@ -200,6 +226,7 @@
     size_t minFrameCount = 0;
     status_t status = getMinFrameCount(&minFrameCount, sampleRate, format, channelMask);
     if (status != NO_ERROR) {
+        ALOGE("getMinFrameCount() failed; status %d", status);
         return status;
     }
     ALOGV("AudioRecord::set() minFrameCount = %d", minFrameCount);
@@ -207,6 +234,7 @@
     if (frameCount == 0) {
         frameCount = minFrameCount;
     } else if (frameCount < minFrameCount) {
+        ALOGE("frameCount %u < minFrameCount %u", frameCount, minFrameCount);
         return BAD_VALUE;
     }
 
@@ -215,7 +243,7 @@
     }
 
     // create the IAudioRecord
-    status = openRecord_l(sampleRate, format, frameCount, input);
+    status = openRecord_l(sampleRate, format, frameCount, input, 0 /*epoch*/);
     if (status != NO_ERROR) {
         return status;
     }
@@ -233,7 +261,7 @@
     mActive = false;
     mCbf = cbf;
     mNotificationFrames = notificationFrames;
-    mRemainingFrames = notificationFrames;
+    mRefreshRemaining = true;
     mUserData = user;
     // TODO: add audio hardware input latency here
     mLatency = (1000*mFrameCount) / sampleRate;
@@ -244,117 +272,78 @@
     mInputSource = inputSource;
     mInput = input;
     AudioSystem::acquireAudioSessionId(mSessionId);
+    mSequence = 1;
+    mObservedSequence = mSequence;
+    mInOverrun = false;
 
     return NO_ERROR;
 }
 
-status_t AudioRecord::initCheck() const
-{
-    return mStatus;
-}
-
-// -------------------------------------------------------------------------
-
-uint32_t AudioRecord::latency() const
-{
-    return mLatency;
-}
-
-audio_format_t AudioRecord::format() const
-{
-    return mFormat;
-}
-
-uint32_t AudioRecord::channelCount() const
-{
-    return mChannelCount;
-}
-
-size_t AudioRecord::frameCount() const
-{
-    return mFrameCount;
-}
-
-audio_source_t AudioRecord::inputSource() const
-{
-    return mInputSource;
-}
-
 // -------------------------------------------------------------------------
 
 status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
 {
-    status_t ret = NO_ERROR;
-    sp<AudioRecordThread> t = mAudioRecordThread;
-
     ALOGV("start, sync event %d trigger session %d", event, triggerSession);
 
     AutoMutex lock(mLock);
-    // acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
-    // while we are accessing the cblk
-    sp<IAudioRecord> audioRecord = mAudioRecord;
-    sp<IMemory> iMem = mCblkMemory;
-    audio_track_cblk_t* cblk = mCblk;
+    if (mActive) {
+        return NO_ERROR;
+    }
 
-    if (!mActive) {
+    // reset current position as seen by client to 0
+    mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
+
+    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    int32_t flags = android_atomic_acquire_load(&mCblk->flags);
+
+    status_t status = NO_ERROR;
+    if (!(flags & CBLK_INVALID)) {
+        ALOGV("mAudioRecord->start()");
+        status = mAudioRecord->start(event, triggerSession);
+        if (status == DEAD_OBJECT) {
+            flags |= CBLK_INVALID;
+        }
+    }
+    if (flags & CBLK_INVALID) {
+        status = restoreRecord_l("start");
+    }
+
+    if (status != NO_ERROR) {
+        ALOGE("start() status %d", status);
+    } else {
         mActive = true;
-
-        cblk->lock.lock();
-        if (!(cblk->flags & CBLK_INVALID)) {
-            cblk->lock.unlock();
-            ALOGV("mAudioRecord->start()");
-            ret = mAudioRecord->start(event, triggerSession);
-            cblk->lock.lock();
-            if (ret == DEAD_OBJECT) {
-                android_atomic_or(CBLK_INVALID, &cblk->flags);
-            }
-        }
-        if (cblk->flags & CBLK_INVALID) {
-            audio_track_cblk_t* temp = cblk;
-            ret = restoreRecord_l(temp);
-            cblk = temp;
-        }
-        cblk->lock.unlock();
-        if (ret == NO_ERROR) {
-            mNewPosition = cblk->user + mUpdatePeriod;
-            cblk->bufferTimeoutMs = (event == AudioSystem::SYNC_EVENT_NONE) ? MAX_RUN_TIMEOUT_MS :
-                                            AudioSystem::kSyncRecordStartTimeOutMs;
-            cblk->waitTimeMs = 0;
-            if (t != 0) {
-                t->resume();
-            } else {
-                mPreviousPriority = getpriority(PRIO_PROCESS, 0);
-                get_sched_policy(0, &mPreviousSchedulingGroup);
-                androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
-            }
+        sp<AudioRecordThread> t = mAudioRecordThread;
+        if (t != 0) {
+            t->resume();
         } else {
-            mActive = false;
+            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
+            get_sched_policy(0, &mPreviousSchedulingGroup);
+            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
         }
     }
 
-    return ret;
+    return status;
 }
 
 void AudioRecord::stop()
 {
-    sp<AudioRecordThread> t = mAudioRecordThread;
-
-    ALOGV("stop");
-
     AutoMutex lock(mLock);
-    if (mActive) {
-        mActive = false;
-        mCblk->cv.signal();
-        mAudioRecord->stop();
-        // the record head position will reset to 0, so if a marker is set, we need
-        // to activate it again
-        mMarkerReached = false;
-        if (t != 0) {
-            t->pause();
-        } else {
-            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
-            set_sched_policy(0, mPreviousSchedulingGroup);
-        }
+    if (!mActive) {
+        return;
+    }
+
+    mActive = false;
+    mProxy->interrupt();
+    mAudioRecord->stop();
+    // the record head position will reset to 0, so if a marker is set, we need
+    // to activate it again
+    mMarkerReached = false;
+    sp<AudioRecordThread> t = mAudioRecordThread;
+    if (t != 0) {
+        t->pause();
+    } else {
+        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
+        set_sched_policy(0, mPreviousSchedulingGroup);
     }
 }
 
@@ -364,14 +353,11 @@
     return !mActive;
 }
 
-uint32_t AudioRecord::getSampleRate() const
-{
-    return mSampleRate;
-}
-
 status_t AudioRecord::setMarkerPosition(uint32_t marker)
 {
-    if (mCbf == NULL) return INVALID_OPERATION;
+    if (mCbf == NULL) {
+        return INVALID_OPERATION;
+    }
 
     AutoMutex lock(mLock);
     mMarkerPosition = marker;
@@ -382,7 +368,9 @@
 
 status_t AudioRecord::getMarkerPosition(uint32_t *marker) const
 {
-    if (marker == NULL) return BAD_VALUE;
+    if (marker == NULL) {
+        return BAD_VALUE;
+    }
 
     AutoMutex lock(mLock);
     *marker = mMarkerPosition;
@@ -392,13 +380,12 @@
 
 status_t AudioRecord::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
-    if (mCbf == NULL) return INVALID_OPERATION;
-
-    uint32_t curPosition;
-    getPosition(&curPosition);
+    if (mCbf == NULL) {
+        return INVALID_OPERATION;
+    }
 
     AutoMutex lock(mLock);
-    mNewPosition = curPosition + updatePeriod;
+    mNewPosition = mProxy->getPosition() + updatePeriod;
     mUpdatePeriod = updatePeriod;
 
     return NO_ERROR;
@@ -406,7 +393,9 @@
 
 status_t AudioRecord::getPositionUpdatePeriod(uint32_t *updatePeriod) const
 {
-    if (updatePeriod == NULL) return BAD_VALUE;
+    if (updatePeriod == NULL) {
+        return BAD_VALUE;
+    }
 
     AutoMutex lock(mLock);
     *updatePeriod = mUpdatePeriod;
@@ -416,10 +405,12 @@
 
 status_t AudioRecord::getPosition(uint32_t *position) const
 {
-    if (position == NULL) return BAD_VALUE;
+    if (position == NULL) {
+        return BAD_VALUE;
+    }
 
     AutoMutex lock(mLock);
-    *position = mCblk->user;
+    *position = mProxy->getPosition();
 
     return NO_ERROR;
 }
@@ -427,7 +418,7 @@
 unsigned int AudioRecord::getInputFramesLost() const
 {
     // no need to check mActive, because if inactive this will return 0, which is what we want
-    return AudioSystem::getInputFramesLost(mInput);
+    return AudioSystem::getInputFramesLost(getInput());
 }
 
 // -------------------------------------------------------------------------
@@ -437,7 +428,8 @@
         uint32_t sampleRate,
         audio_format_t format,
         size_t frameCount,
-        audio_io_handle_t input)
+        audio_io_handle_t input,
+        size_t epoch)
 {
     status_t status;
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
@@ -447,7 +439,7 @@
     }
 
     pid_t tid = -1;
-    // FIXME see similar logic at AudioTrack
+    // FIXME see similar logic at AudioTrack for tid
 
     int originalSessionId = mSessionId;
     sp<IAudioRecord> record = audioFlinger->openRecord(input,
@@ -470,133 +462,138 @@
         ALOGE("Could not get control block");
         return NO_INIT;
     }
-    mAudioRecord.clear();
+    if (mAudioRecord != 0) {
+        mAudioRecord->asBinder()->unlinkToDeath(mDeathNotifier, this);
+        mDeathNotifier.clear();
+    }
     mAudioRecord = record;
-    mCblkMemory.clear();
     mCblkMemory = iMem;
     audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
     mCblk = cblk;
-    mBuffers = (char*)cblk + sizeof(audio_track_cblk_t);
-    cblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
-    cblk->waitTimeMs = 0;
+
+    // starting address of buffers in shared memory
+    void *buffers = (char*)cblk + sizeof(audio_track_cblk_t);
 
     // update proxy
-    delete mProxy;
-    mProxy = new AudioRecordClientProxy(cblk, mBuffers, frameCount, mFrameSize);
+    mProxy = new AudioRecordClientProxy(cblk, buffers, frameCount, mFrameSize);
+    mProxy->setEpoch(epoch);
+    mProxy->setMinimum(mNotificationFrames);
+
+    mDeathNotifier = new DeathNotifier(this);
+    mAudioRecord->asBinder()->linkToDeath(mDeathNotifier, this);
 
     return NO_ERROR;
 }
 
 status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
 {
-    ALOG_ASSERT(mStatus == NO_ERROR && mProxy != NULL);
+    if (audioBuffer == NULL) {
+        return BAD_VALUE;
+    }
+    if (mTransfer != TRANSFER_OBTAIN) {
+        audioBuffer->frameCount = 0;
+        audioBuffer->size = 0;
+        audioBuffer->raw = NULL;
+        return INVALID_OPERATION;
+    }
 
-    AutoMutex lock(mLock);
-    bool active;
-    status_t result = NO_ERROR;
-    audio_track_cblk_t* cblk = mCblk;
-    uint32_t framesReq = audioBuffer->frameCount;
-    uint32_t waitTimeMs = (waitCount < 0) ? cblk->bufferTimeoutMs : WAIT_PERIOD_MS;
+    const struct timespec *requested;
+    if (waitCount == -1) {
+        requested = &ClientProxy::kForever;
+    } else if (waitCount == 0) {
+        requested = &ClientProxy::kNonBlocking;
+    } else if (waitCount > 0) {
+        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
+        struct timespec timeout;
+        timeout.tv_sec = ms / 1000;
+        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
+        requested = &timeout;
+    } else {
+        ALOGE("%s invalid waitCount %d", __func__, waitCount);
+        requested = NULL;
+    }
+    return obtainBuffer(audioBuffer, requested);
+}
 
-    audioBuffer->frameCount  = 0;
-    audioBuffer->size        = 0;
+status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
+        struct timespec *elapsed, size_t *nonContig)
+{
+    // previous and new IAudioRecord sequence numbers are used to detect track re-creation
+    uint32_t oldSequence = 0;
+    uint32_t newSequence;
 
-    size_t framesReady = mProxy->framesReady();
+    Proxy::Buffer buffer;
+    status_t status = NO_ERROR;
 
-    if (framesReady == 0) {
-        cblk->lock.lock();
-        goto start_loop_here;
-        while (framesReady == 0) {
-            active = mActive;
-            if (CC_UNLIKELY(!active)) {
-                cblk->lock.unlock();
-                return NO_MORE_BUFFERS;
-            }
-            if (CC_UNLIKELY(!waitCount)) {
-                cblk->lock.unlock();
-                return WOULD_BLOCK;
-            }
-            if (!(cblk->flags & CBLK_INVALID)) {
-                mLock.unlock();
-                // this condition is in shared memory, so if IAudioRecord and control block
-                // are replaced due to mediaserver death or IAudioRecord invalidation then
-                // cv won't be signalled, but fortunately the timeout will limit the wait
-                result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
-                cblk->lock.unlock();
-                mLock.lock();
-                if (!mActive) {
-                    return status_t(STOPPED);
-                }
-                // IAudioRecord may have been re-created while mLock was unlocked
-                cblk = mCblk;
-                cblk->lock.lock();
-            }
-            if (cblk->flags & CBLK_INVALID) {
-                goto create_new_record;
-            }
-            if (CC_UNLIKELY(result != NO_ERROR)) {
-                cblk->waitTimeMs += waitTimeMs;
-                if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
-                    ALOGW(   "obtainBuffer timed out (is the CPU pegged?) "
-                            "user=%08x, server=%08x", cblk->user, cblk->server);
-                    cblk->lock.unlock();
-                    // callback thread or sync event hasn't changed
-                    result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0);
-                    cblk->lock.lock();
-                    if (result == DEAD_OBJECT) {
-                        android_atomic_or(CBLK_INVALID, &cblk->flags);
-create_new_record:
-                        audio_track_cblk_t* temp = cblk;
-                        result = AudioRecord::restoreRecord_l(temp);
-                        cblk = temp;
+    static const int32_t kMaxTries = 5;
+    int32_t tryCounter = kMaxTries;
+
+    do {
+        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
+        // keep them from going away if another thread re-creates the track during obtainBuffer()
+        sp<AudioRecordClientProxy> proxy;
+        sp<IMemory> iMem;
+        {
+            // start of lock scope
+            AutoMutex lock(mLock);
+
+            newSequence = mSequence;
+            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
+            if (status == DEAD_OBJECT) {
+                // re-create track, unless someone else has already done so
+                if (newSequence == oldSequence) {
+                    status = restoreRecord_l("obtainBuffer");
+                    if (status != NO_ERROR) {
+                        break;
                     }
-                    if (result != NO_ERROR) {
-                        ALOGW("obtainBuffer create Track error %d", result);
-                        cblk->lock.unlock();
-                        return result;
-                    }
-                    cblk->waitTimeMs = 0;
-                }
-                if (--waitCount == 0) {
-                    cblk->lock.unlock();
-                    return TIMED_OUT;
                 }
             }
-            // read the server count again
-        start_loop_here:
-            framesReady = mProxy->framesReady();
-        }
-        cblk->lock.unlock();
+            oldSequence = newSequence;
+
+            // Keep the extra references
+            proxy = mProxy;
+            iMem = mCblkMemory;
+
+            // Non-blocking if track is stopped
+            if (!mActive) {
+                requested = &ClientProxy::kNonBlocking;
+            }
+
+        }   // end of lock scope
+
+        buffer.mFrameCount = audioBuffer->frameCount;
+        // FIXME starts the requested timeout and elapsed over from scratch
+        status = proxy->obtainBuffer(&buffer, requested, elapsed);
+
+    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
+
+    audioBuffer->frameCount = buffer.mFrameCount;
+    audioBuffer->size = buffer.mFrameCount * mFrameSize;
+    audioBuffer->raw = buffer.mRaw;
+    if (nonContig != NULL) {
+        *nonContig = buffer.mNonContig;
     }
-
-    cblk->waitTimeMs = 0;
-    // reset time out to running value after obtaining a buffer
-    cblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
-
-    if (framesReq > framesReady) {
-        framesReq = framesReady;
-    }
-
-    uint32_t u = cblk->user;
-    uint32_t bufferEnd = cblk->userBase + mFrameCount;
-
-    if (framesReq > bufferEnd - u) {
-        framesReq = bufferEnd - u;
-    }
-
-    audioBuffer->frameCount  = framesReq;
-    audioBuffer->size        = framesReq * mFrameSize;
-    audioBuffer->raw         = mProxy->buffer(u);
-    active = mActive;
-    return active ? status_t(NO_ERROR) : status_t(STOPPED);
+    return status;
 }
 
 void AudioRecord::releaseBuffer(Buffer* audioBuffer)
 {
-    ALOG_ASSERT(mStatus == NO_ERROR && mProxy != NULL);
+    // all TRANSFER_* are valid
+
+    size_t stepCount = audioBuffer->size / mFrameSize;
+    if (stepCount == 0) {
+        return;
+    }
+
+    Proxy::Buffer buffer;
+    buffer.mFrameCount = stepCount;
+    buffer.mRaw = audioBuffer->raw;
 
     AutoMutex lock(mLock);
-    (void) mProxy->stepUser(audioBuffer->frameCount);
+    mInOverrun = false;
+    mProxy->releaseBuffer(&buffer);
+
+    // the server does not automatically disable recorder on overrun, so no need to restart
 }
 
 audio_io_handle_t AudioRecord::getInput() const
@@ -616,215 +613,304 @@
     return mInput;
 }
 
-int AudioRecord::getSessionId() const
-{
-    // no lock needed because session ID doesn't change after first set()
-    return mSessionId;
-}
-
 // -------------------------------------------------------------------------
 
 ssize_t AudioRecord::read(void* buffer, size_t userSize)
 {
-    ssize_t read = 0;
-    Buffer audioBuffer;
-    int8_t *dst = static_cast<int8_t*>(buffer);
+    if (mTransfer != TRANSFER_SYNC) {
+        return INVALID_OPERATION;
+    }
 
-    if (ssize_t(userSize) < 0) {
-        // sanity-check. user is most-likely passing an error code.
-        ALOGE("AudioRecord::read(buffer=%p, size=%u (%d)",
-                buffer, userSize, userSize);
+    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
+        // sanity-check. user is most-likely passing an error code, and it would
+        // make the return value ambiguous (actualSize vs error).
+        ALOGE("AudioRecord::read(buffer=%p, size=%u (%d)", buffer, userSize, userSize);
         return BAD_VALUE;
     }
 
-    mLock.lock();
-    // acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
-    // while we are accessing the cblk
-    sp<IAudioRecord> audioRecord = mAudioRecord;
-    sp<IMemory> iMem = mCblkMemory;
-    mLock.unlock();
+    ssize_t read = 0;
+    Buffer audioBuffer;
 
-    do {
+    while (userSize >= mFrameSize) {
+        audioBuffer.frameCount = userSize / mFrameSize;
 
-        audioBuffer.frameCount = userSize/frameSize();
-
-        // By using a wait count corresponding to twice the timeout period in
-        // obtainBuffer() we give a chance to recover once for a read timeout
-        // (if media_server crashed for instance) before returning a length of
-        // 0 bytes read to the client
-        status_t err = obtainBuffer(&audioBuffer, ((2 * MAX_RUN_TIMEOUT_MS) / WAIT_PERIOD_MS));
+        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
         if (err < 0) {
-            // out of buffers, return #bytes written
-            if (err == status_t(NO_MORE_BUFFERS)) {
+            if (read > 0) {
                 break;
             }
-            if (err == status_t(TIMED_OUT)) {
-                // return partial transfer count
-                return read;
-            }
             return ssize_t(err);
         }
 
         size_t bytesRead = audioBuffer.size;
-        memcpy(dst, audioBuffer.i8, bytesRead);
-
-        dst += bytesRead;
+        memcpy(buffer, audioBuffer.i8, bytesRead);
+        buffer = ((char *) buffer) + bytesRead;
         userSize -= bytesRead;
         read += bytesRead;
 
         releaseBuffer(&audioBuffer);
-    } while (userSize);
+    }
 
     return read;
 }
 
 // -------------------------------------------------------------------------
 
-bool AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread)
+nsecs_t AudioRecord::processAudioBuffer(const sp<AudioRecordThread>& thread)
 {
-    Buffer audioBuffer;
-    uint32_t frames = mRemainingFrames;
-    size_t readSize;
-
     mLock.lock();
-    // acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
-    // while we are accessing the cblk
-    sp<IAudioRecord> audioRecord = mAudioRecord;
-    sp<IMemory> iMem = mCblkMemory;
-    audio_track_cblk_t* cblk = mCblk;
-    bool active = mActive;
-    uint32_t markerPosition = mMarkerPosition;
-    uint32_t newPosition = mNewPosition;
-    uint32_t user = cblk->user;
-    // determine whether a marker callback will be needed, while locked
-    bool needMarker = !mMarkerReached && (mMarkerPosition > 0) && (user >= mMarkerPosition);
-    if (needMarker) {
-        mMarkerReached = true;
+
+    // Can only reference mCblk while locked
+    int32_t flags = android_atomic_and(~CBLK_OVERRUN, &mCblk->flags);
+
+    // Check for track invalidation
+    if (flags & CBLK_INVALID) {
+        (void) restoreRecord_l("processAudioBuffer");
+        mLock.unlock();
+        // Run again immediately, but with a new IAudioRecord
+        return 0;
     }
-    // determine the number of new position callback(s) that will be needed, while locked
+
+    bool active = mActive;
+
+    // Manage overrun callback, must be done under lock to avoid race with releaseBuffer()
+    bool newOverrun = false;
+    if (flags & CBLK_OVERRUN) {
+        if (!mInOverrun) {
+            mInOverrun = true;
+            newOverrun = true;
+        }
+    }
+
+    // Get current position of server
+    size_t position = mProxy->getPosition();
+
+    // Manage marker callback
+    bool markerReached = false;
+    size_t markerPosition = mMarkerPosition;
+    // FIXME fails for wraparound, need 64 bits
+    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
+        mMarkerReached = markerReached = true;
+    }
+
+    // Determine the number of new position callback(s) that will be needed, while locked
+    size_t newPosCount = 0;
+    size_t newPosition = mNewPosition;
     uint32_t updatePeriod = mUpdatePeriod;
-    uint32_t needNewPos = updatePeriod > 0 && user >= newPosition ?
-            ((user - newPosition) / updatePeriod) + 1 : 0;
-    mNewPosition = newPosition + updatePeriod * needNewPos;
+    // FIXME fails for wraparound, need 64 bits
+    if (updatePeriod > 0 && position >= newPosition) {
+        newPosCount = ((position - newPosition) / updatePeriod) + 1;
+        mNewPosition += updatePeriod * newPosCount;
+    }
+
+    // Cache other fields that will be needed soon
+    size_t notificationFrames = mNotificationFrames;
+    if (mRefreshRemaining) {
+        mRefreshRemaining = false;
+        mRemainingFrames = notificationFrames;
+        mRetryOnPartialBuffer = false;
+    }
+    size_t misalignment = mProxy->getMisalignment();
+    int32_t sequence = mSequence;
+
+    // These fields don't need to be cached, because they are assigned only by set():
+    //      mTransfer, mCbf, mUserData, mSampleRate
+
     mLock.unlock();
 
-    // perform marker callback, while unlocked
-    if (needMarker) {
+    // perform callbacks while unlocked
+    if (newOverrun) {
+        mCbf(EVENT_OVERRUN, mUserData, NULL);
+    }
+    if (markerReached) {
         mCbf(EVENT_MARKER, mUserData, &markerPosition);
     }
-
-    // perform new position callback(s), while unlocked
-    for (; needNewPos > 0; --needNewPos) {
-        uint32_t temp = newPosition;
+    while (newPosCount > 0) {
+        size_t temp = newPosition;
         mCbf(EVENT_NEW_POS, mUserData, &temp);
         newPosition += updatePeriod;
+        newPosCount--;
+    }
+    if (mObservedSequence != sequence) {
+        mObservedSequence = sequence;
+        mCbf(EVENT_NEW_IAUDIORECORD, mUserData, NULL);
     }
 
-    do {
-        audioBuffer.frameCount = frames;
-        // Calling obtainBuffer() with a wait count of 1
-        // limits wait time to WAIT_PERIOD_MS. This prevents from being
-        // stuck here not being able to handle timed events (position, markers).
-        status_t err = obtainBuffer(&audioBuffer, 1);
-        if (err < NO_ERROR) {
-            if (err != TIMED_OUT) {
-                ALOGE_IF(err != status_t(NO_MORE_BUFFERS),
-                        "Error obtaining an audio buffer, giving up.");
-                return false;
+    // if inactive, then don't run me again until re-started
+    if (!active) {
+        return NS_INACTIVE;
+    }
+
+    // Compute the estimated time until the next timed event (position, markers)
+    uint32_t minFrames = ~0;
+    if (!markerReached && position < markerPosition) {
+        minFrames = markerPosition - position;
+    }
+    if (updatePeriod > 0 && updatePeriod < minFrames) {
+        minFrames = updatePeriod;
+    }
+
+    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
+    static const uint32_t kPoll = 0;
+    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
+        minFrames = kPoll * notificationFrames;
+    }
+
+    // Convert frame units to time units
+    nsecs_t ns = NS_WHENEVER;
+    if (minFrames != (uint32_t) ~0) {
+        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
+        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
+        ns = ((minFrames * 1000000000LL) / mSampleRate) + kFudgeNs;
+    }
+
+    // If not supplying data by EVENT_MORE_DATA, then we're done
+    if (mTransfer != TRANSFER_CALLBACK) {
+        return ns;
+    }
+
+    struct timespec timeout;
+    const struct timespec *requested = &ClientProxy::kForever;
+    if (ns != NS_WHENEVER) {
+        timeout.tv_sec = ns / 1000000000LL;
+        timeout.tv_nsec = ns % 1000000000LL;
+        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
+        requested = &timeout;
+    }
+
+    while (mRemainingFrames > 0) {
+
+        Buffer audioBuffer;
+        audioBuffer.frameCount = mRemainingFrames;
+        size_t nonContig;
+        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
+        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
+                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
+        requested = &ClientProxy::kNonBlocking;
+        size_t avail = audioBuffer.frameCount + nonContig;
+        ALOGV("obtainBuffer(%u) returned %u = %u + %u",
+                mRemainingFrames, avail, audioBuffer.frameCount, nonContig);
+        if (err != NO_ERROR) {
+            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR) {
+                break;
             }
-            break;
+            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
+            return NS_NEVER;
         }
-        if (err == status_t(STOPPED)) return false;
+
+        if (mRetryOnPartialBuffer) {
+            mRetryOnPartialBuffer = false;
+            if (avail < mRemainingFrames) {
+                int64_t myns = ((mRemainingFrames - avail) *
+                        1100000000LL) / mSampleRate;
+                if (ns < 0 || myns < ns) {
+                    ns = myns;
+                }
+                return ns;
+            }
+        }
 
         size_t reqSize = audioBuffer.size;
         mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
-        readSize = audioBuffer.size;
+        size_t readSize = audioBuffer.size;
 
         // Sanity check on returned size
-        if (ssize_t(readSize) <= 0) {
-            // The callback is done filling buffers
-            // Keep this thread going to handle timed events and
-            // still try to get more data in intervals of WAIT_PERIOD_MS
-            // but don't just loop and block the CPU, so wait
-            usleep(WAIT_PERIOD_MS*1000);
-            break;
+        if (ssize_t(readSize) < 0 || readSize > reqSize) {
+            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
+                    reqSize, (int) readSize);
+            return NS_NEVER;
         }
-        if (readSize > reqSize) readSize = reqSize;
 
-        audioBuffer.size = readSize;
-        audioBuffer.frameCount = readSize/frameSize();
-        frames -= audioBuffer.frameCount;
+        if (readSize == 0) {
+            // The callback is done consuming buffers
+            // Keep this thread going to handle timed events and
+            // still try to provide more data in intervals of WAIT_PERIOD_MS
+            // but don't just loop and block the CPU, so wait
+            return WAIT_PERIOD_MS * 1000000LL;
+        }
+
+        size_t releasedFrames = readSize / mFrameSize;
+        audioBuffer.frameCount = releasedFrames;
+        mRemainingFrames -= releasedFrames;
+        if (misalignment >= releasedFrames) {
+            misalignment -= releasedFrames;
+        } else {
+            misalignment = 0;
+        }
 
         releaseBuffer(&audioBuffer);
 
-    } while (frames);
-
-
-    // Manage overrun callback
-    if (active && (mProxy->framesAvailable() == 0)) {
-        // The value of active is stale, but we are almost sure to be active here because
-        // otherwise we would have exited when obtainBuffer returned STOPPED earlier.
-        ALOGV("Overrun user: %x, server: %x, flags %04x", cblk->user, cblk->server, cblk->flags);
-        if (!(android_atomic_or(CBLK_UNDERRUN, &cblk->flags) & CBLK_UNDERRUN)) {
-            mCbf(EVENT_OVERRUN, mUserData, NULL);
+        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
+        // if callback doesn't like to accept the full chunk
+        if (readSize < reqSize) {
+            continue;
         }
-    }
 
-    if (frames == 0) {
-        mRemainingFrames = mNotificationFrames;
-    } else {
-        mRemainingFrames = frames;
+        // There could be enough non-contiguous frames available to satisfy the remaining request
+        if (mRemainingFrames <= nonContig) {
+            continue;
+        }
+
+#if 0
+        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
+        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
+        // that total to a sum == notificationFrames.
+        if (0 < misalignment && misalignment <= mRemainingFrames) {
+            mRemainingFrames = misalignment;
+            return (mRemainingFrames * 1100000000LL) / mSampleRate;
+        }
+#endif
+
     }
-    return true;
+    mRemainingFrames = notificationFrames;
+    mRetryOnPartialBuffer = true;
+
+    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
+    return 0;
 }
 
-// must be called with mLock and cblk.lock held. Callers must also hold strong references on
-// the IAudioRecord and IMemory in case they are recreated here.
-// If the IAudioRecord is successfully restored, the cblk pointer is updated
-status_t AudioRecord::restoreRecord_l(audio_track_cblk_t*& refCblk)
+status_t AudioRecord::restoreRecord_l(const char *from)
 {
+    ALOGW("dead IAudioRecord, creating a new one from %s()", from);
+    ++mSequence;
     status_t result;
 
-    audio_track_cblk_t* cblk = refCblk;
-    audio_track_cblk_t* newCblk = cblk;
-    ALOGW("dead IAudioRecord, creating a new one");
-
-    // signal old cblk condition so that other threads waiting for available buffers stop
-    // waiting now
-    cblk->cv.broadcast();
-    cblk->lock.unlock();
-
     // if the new IAudioRecord is created, openRecord_l() will modify the
     // following member variables: mAudioRecord, mCblkMemory and mCblk.
     // It will also delete the strong references on previous IAudioRecord and IMemory
-    result = openRecord_l(mSampleRate, mFormat, mFrameCount, getInput_l());
+    size_t position = mProxy->getPosition();
+    mNewPosition = position + mUpdatePeriod;
+    result = openRecord_l(mSampleRate, mFormat, mFrameCount, getInput_l(), position);
     if (result == NO_ERROR) {
-        newCblk = mCblk;
-        // callback thread or sync event hasn't changed
-        result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0);
+        if (mActive) {
+            // callback thread or sync event hasn't changed
+            // FIXME this fails if we have a new AudioFlinger instance
+            result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0);
+        }
     }
     if (result != NO_ERROR) {
+        ALOGW("restoreRecord_l() failed status %d", result);
         mActive = false;
     }
 
-    ALOGV("restoreRecord_l() status %d mActive %d cblk %p, old cblk %p flags %08x old flags %08x",
-        result, mActive, newCblk, cblk, newCblk->flags, cblk->flags);
-
-    if (result == NO_ERROR) {
-        // from now on we switch to the newly created cblk
-        refCblk = newCblk;
-    }
-    newCblk->lock.lock();
-
-    ALOGW_IF(result != NO_ERROR, "restoreRecord_l() error %d", result);
-
     return result;
 }
 
 // =========================================================================
 
+void AudioRecord::DeathNotifier::binderDied(const wp<IBinder>& who)
+{
+    sp<AudioRecord> audioRecord = mAudioRecord.promote();
+    if (audioRecord != 0) {
+        AutoMutex lock(audioRecord->mLock);
+        audioRecord->mProxy->binderDied();
+    }
+}
+
+// =========================================================================
+
 AudioRecord::AudioRecordThread::AudioRecordThread(AudioRecord& receiver, bool bCanCallJava)
-    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true)
+    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mResumeLatch(false)
 {
 }
 
@@ -842,10 +928,26 @@
             return true;
         }
     }
-    if (!mReceiver.processAudioBuffer(this)) {
-        pause();
+    nsecs_t ns =  mReceiver.processAudioBuffer(this);
+    switch (ns) {
+    case 0:
+        return true;
+    case NS_WHENEVER:
+        sleep(1);
+        return true;
+    case NS_INACTIVE:
+        pauseConditional();
+        return true;
+    case NS_NEVER:
+        return false;
+    default:
+        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
+        struct timespec req;
+        req.tv_sec = ns / 1000000000LL;
+        req.tv_nsec = ns % 1000000000LL;
+        nanosleep(&req, NULL /*rem*/);
+        return true;
     }
-    return true;
 }
 
 void AudioRecord::AudioRecordThread::requestExit()
@@ -859,6 +961,17 @@
 {
     AutoMutex _l(mMyLock);
     mPaused = true;
+    mResumeLatch = false;
+}
+
+void AudioRecord::AudioRecordThread::pauseConditional()
+{
+    AutoMutex _l(mMyLock);
+    if (mResumeLatch) {
+        mResumeLatch = false;
+    } else {
+        mPaused = true;
+    }
 }
 
 void AudioRecord::AudioRecordThread::resume()
@@ -866,7 +979,10 @@
     AutoMutex _l(mMyLock);
     if (mPaused) {
         mPaused = false;
+        mResumeLatch = false;
         mMyCond.signal();
+    } else {
+        mResumeLatch = true;
     }
 }
 
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 7eeb4f8..faca054 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -19,31 +19,14 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AudioTrack"
 
-#include <stdint.h>
-#include <sys/types.h>
-#include <limits.h>
-
-#include <sched.h>
 #include <sys/resource.h>
-
+#include <audio_utils/primitives.h>
+#include <binder/IPCThreadState.h>
+#include <media/AudioTrack.h>
+#include <utils/Log.h>
 #include <private/media/AudioTrackShared.h>
 
-#include <media/AudioSystem.h>
-#include <media/AudioTrack.h>
-
-#include <utils/Log.h>
-#include <binder/Parcel.h>
-#include <binder/IPCThreadState.h>
-#include <utils/Timers.h>
-#include <utils/Atomic.h>
-
-#include <cutils/bitops.h>
-#include <cutils/compiler.h>
-
-#include <system/audio.h>
-#include <system/audio_policy.h>
-
-#include <audio_utils/primitives.h>
+#define WAIT_PERIOD_MS          10
 
 namespace android {
 // ---------------------------------------------------------------------------
@@ -82,7 +65,9 @@
 
     // Ensure that buffer depth covers at least audio hardware latency
     uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
-    if (minBufCount < 2) minBufCount = 2;
+    if (minBufCount < 2) {
+        minBufCount = 2;
+    }
 
     *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
             afFrameCount * minBufCount * sampleRate / afSampleRate;
@@ -97,8 +82,7 @@
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT),
-      mProxy(NULL)
+      mPreviousSchedulingGroup(SP_DEFAULT)
 {
 }
 
@@ -112,16 +96,16 @@
         callback_t cbf,
         void* user,
         int notificationFrames,
-        int sessionId)
+        int sessionId,
+        transfer_type transferType)
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT),
-      mProxy(NULL)
+      mPreviousSchedulingGroup(SP_DEFAULT)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames,
-            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId);
+            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType);
 }
 
 AudioTrack::AudioTrack(
@@ -134,27 +118,20 @@
         callback_t cbf,
         void* user,
         int notificationFrames,
-        int sessionId)
+        int sessionId,
+        transfer_type transferType)
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
-      mPreviousSchedulingGroup(SP_DEFAULT),
-      mProxy(NULL)
+      mPreviousSchedulingGroup(SP_DEFAULT)
 {
-    if (sharedBuffer == 0) {
-        ALOGE("sharedBuffer must be non-0");
-        mStatus = BAD_VALUE;
-        return;
-    }
     mStatus = set(streamType, sampleRate, format, channelMask,
             0 /*frameCount*/, flags, cbf, user, notificationFrames,
-            sharedBuffer, false /*threadCanCallJava*/, sessionId);
+            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType);
 }
 
 AudioTrack::~AudioTrack()
 {
-    ALOGV_IF(mSharedBuffer != 0, "Destructor sharedBuffer: %p", mSharedBuffer->pointer());
-
     if (mStatus == NO_ERROR) {
         // Make sure that callback function exits in the case where
         // it is looping on buffer full condition in obtainBuffer().
@@ -165,11 +142,13 @@
             mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
         }
-        mAudioTrack.clear();
+        if (mAudioTrack != 0) {
+            mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
+            mAudioTrack.clear();
+        }
         IPCThreadState::self()->flushCommands();
         AudioSystem::releaseAudioSessionId(mSessionId);
     }
-    delete mProxy;
 }
 
 status_t AudioTrack::set(
@@ -184,8 +163,44 @@
         int notificationFrames,
         const sp<IMemory>& sharedBuffer,
         bool threadCanCallJava,
-        int sessionId)
+        int sessionId,
+        transfer_type transferType)
 {
+    switch (transferType) {
+    case TRANSFER_DEFAULT:
+        if (sharedBuffer != 0) {
+            transferType = TRANSFER_SHARED;
+        } else if (cbf == NULL || threadCanCallJava) {
+            transferType = TRANSFER_SYNC;
+        } else {
+            transferType = TRANSFER_CALLBACK;
+        }
+        break;
+    case TRANSFER_CALLBACK:
+        if (cbf == NULL || sharedBuffer != 0) {
+            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
+            return BAD_VALUE;
+        }
+        break;
+    case TRANSFER_OBTAIN:
+    case TRANSFER_SYNC:
+        if (sharedBuffer != 0) {
+            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
+            return BAD_VALUE;
+        }
+        break;
+    case TRANSFER_SHARED:
+        if (sharedBuffer == 0) {
+            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
+            return BAD_VALUE;
+        }
+        break;
+    default:
+        ALOGE("Invalid transfer type %d", transferType);
+        return BAD_VALUE;
+    }
+    mTransfer = transferType;
+
     // FIXME "int" here is legacy and will be replaced by size_t later
     if (frameCountInt < 0) {
         ALOGE("Invalid frame count %d", frameCountInt);
@@ -199,6 +214,7 @@
     ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
 
     AutoMutex lock(mLock);
+
     if (mAudioTrack != 0) {
         ALOGE("Track already in use");
         return INVALID_OPERATION;
@@ -228,7 +244,7 @@
 
     // validate parameters
     if (!audio_is_valid_format(format)) {
-        ALOGE("Invalid format");
+        ALOGE("Invalid format %d", format);
         return BAD_VALUE;
     }
 
@@ -281,6 +297,7 @@
     mFrameCount = frameCount;
     mReqFrameCount = frameCount;
     mNotificationFramesReq = notificationFrames;
+    mNotificationFramesAct = 0;
     mSessionId = sessionId;
     mAuxEffectId = 0;
     mFlags = flags;
@@ -298,7 +315,8 @@
                                   frameCount,
                                   flags,
                                   sharedBuffer,
-                                  output);
+                                  output,
+                                  0 /*epoch*/);
 
     if (status != NO_ERROR) {
         if (mAudioTrackThread != 0) {
@@ -309,20 +327,21 @@
     }
 
     mStatus = NO_ERROR;
-
     mStreamType = streamType;
     mFormat = format;
-
     mSharedBuffer = sharedBuffer;
-    mActive = false;
+    mState = STATE_STOPPED;
     mUserData = user;
-    mLoopCount = 0;
+    mLoopPeriod = 0;
     mMarkerPosition = 0;
     mMarkerReached = false;
     mNewPosition = 0;
     mUpdatePeriod = 0;
-    mFlushed = false;
     AudioSystem::acquireAudioSessionId(mSessionId);
+    mSequence = 1;
+    mObservedSequence = mSequence;
+    mInUnderrun = false;
+
     return NO_ERROR;
 }
 
@@ -330,87 +349,45 @@
 
 void AudioTrack::start()
 {
-    sp<AudioTrackThread> t = mAudioTrackThread;
-
-    ALOGV("start %p", this);
-
     AutoMutex lock(mLock);
-    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
-    // while we are accessing the cblk
-    sp<IAudioTrack> audioTrack = mAudioTrack;
-    sp<IMemory> iMem = mCblkMemory;
-    audio_track_cblk_t* cblk = mCblk;
-
-    if (!mActive) {
-        mFlushed = false;
-        mActive = true;
-        mNewPosition = cblk->server + mUpdatePeriod;
-        cblk->lock.lock();
-        cblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
-        cblk->waitTimeMs = 0;
-        android_atomic_and(~CBLK_DISABLED, &cblk->flags);
-        if (t != 0) {
-            t->resume();
-        } else {
-            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
-            get_sched_policy(0, &mPreviousSchedulingGroup);
-            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
-        }
-
-        ALOGV("start %p before lock cblk %p", this, cblk);
-        status_t status = NO_ERROR;
-        if (!(cblk->flags & CBLK_INVALID)) {
-            cblk->lock.unlock();
-            ALOGV("mAudioTrack->start()");
-            status = mAudioTrack->start();
-            cblk->lock.lock();
-            if (status == DEAD_OBJECT) {
-                android_atomic_or(CBLK_INVALID, &cblk->flags);
-            }
-        }
-        if (cblk->flags & CBLK_INVALID) {
-            audio_track_cblk_t* temp = cblk;
-            status = restoreTrack_l(temp, true /*fromStart*/);
-            cblk = temp;
-        }
-        cblk->lock.unlock();
-        if (status != NO_ERROR) {
-            ALOGV("start() failed");
-            mActive = false;
-            if (t != 0) {
-                t->pause();
-            } else {
-                setpriority(PRIO_PROCESS, 0, mPreviousPriority);
-                set_sched_policy(0, mPreviousSchedulingGroup);
-            }
-        }
+    if (mState == STATE_ACTIVE) {
+        return;
     }
 
-}
+    mInUnderrun = true;
 
-void AudioTrack::stop()
-{
+    State previousState = mState;
+    mState = STATE_ACTIVE;
+    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
+        // reset current position as seen by client to 0
+        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
+    }
+    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->flags);
+
     sp<AudioTrackThread> t = mAudioTrackThread;
+    if (t != 0) {
+        t->resume();
+    } else {
+        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
+        get_sched_policy(0, &mPreviousSchedulingGroup);
+        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
+    }
 
-    ALOGV("stop %p", this);
-
-    AutoMutex lock(mLock);
-    if (mActive) {
-        mActive = false;
-        mCblk->cv.signal();
-        mAudioTrack->stop();
-        // Cancel loops (If we are in the middle of a loop, playback
-        // would not stop until loopCount reaches 0).
-        setLoop_l(0, 0, 0);
-        // the playback head position will reset to 0, so if a marker is set, we need
-        // to activate it again
-        mMarkerReached = false;
-        // Force flush if a shared buffer is used otherwise audioflinger
-        // will not stop before end of buffer is reached.
-        // It may be needed to make sure that we stop playback, likely in case looping is on.
-        if (mSharedBuffer != 0) {
-            flush_l();
+    status_t status = NO_ERROR;
+    if (!(flags & CBLK_INVALID)) {
+        status = mAudioTrack->start();
+        if (status == DEAD_OBJECT) {
+            flags |= CBLK_INVALID;
         }
+    }
+    if (flags & CBLK_INVALID) {
+        status = restoreTrack_l("start");
+    }
+
+    if (status != NO_ERROR) {
+        ALOGE("start() status %d", status);
+        mState = previousState;
         if (t != 0) {
             t->pause();
         } else {
@@ -419,57 +396,85 @@
         }
     }
 
+    // FIXME discarding status
+}
+
+void AudioTrack::stop()
+{
+    AutoMutex lock(mLock);
+    // FIXME pause then stop should not be a nop
+    if (mState != STATE_ACTIVE) {
+        return;
+    }
+
+    mState = STATE_STOPPED;
+    mProxy->interrupt();
+    mAudioTrack->stop();
+    // the playback head position will reset to 0, so if a marker is set, we need
+    // to activate it again
+    mMarkerReached = false;
+#if 0
+    // Force flush if a shared buffer is used otherwise audioflinger
+    // will not stop before end of buffer is reached.
+    // It may be needed to make sure that we stop playback, likely in case looping is on.
+    if (mSharedBuffer != 0) {
+        flush_l();
+    }
+#endif
+    sp<AudioTrackThread> t = mAudioTrackThread;
+    if (t != 0) {
+        t->pause();
+    } else {
+        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
+        set_sched_policy(0, mPreviousSchedulingGroup);
+    }
 }
 
 bool AudioTrack::stopped() const
 {
     AutoMutex lock(mLock);
-    return stopped_l();
+    return mState != STATE_ACTIVE;
 }
 
 void AudioTrack::flush()
 {
-    AutoMutex lock(mLock);
-    if (!mActive && mSharedBuffer == 0) {
-        flush_l();
+    if (mSharedBuffer != 0) {
+        return;
     }
+    AutoMutex lock(mLock);
+    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
+        return;
+    }
+    flush_l();
 }
 
 void AudioTrack::flush_l()
 {
-    ALOGV("flush");
-    ALOG_ASSERT(!mActive);
+    ALOG_ASSERT(mState != STATE_ACTIVE);
 
     // clear playback marker and periodic update counter
     mMarkerPosition = 0;
     mMarkerReached = false;
     mUpdatePeriod = 0;
 
-    mFlushed = true;
+    mState = STATE_FLUSHED;
+    mProxy->flush();
     mAudioTrack->flush();
-    // Release AudioTrack callback thread in case it was waiting for new buffers
-    // in AudioTrack::obtainBuffer()
-    mCblk->cv.signal();
 }
 
 void AudioTrack::pause()
 {
-    ALOGV("pause");
     AutoMutex lock(mLock);
-    if (mActive) {
-        mActive = false;
-        mCblk->cv.signal();
-        mAudioTrack->pause();
+    if (mState != STATE_ACTIVE) {
+        return;
     }
+    mState = STATE_PAUSED;
+    mProxy->interrupt();
+    mAudioTrack->pause();
 }
 
 status_t AudioTrack::setVolume(float left, float right)
 {
-    if (mStatus != NO_ERROR) {
-        return mStatus;
-    }
-    ALOG_ASSERT(mProxy != NULL);
-
     if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
         return BAD_VALUE;
     }
@@ -490,18 +495,11 @@
 
 status_t AudioTrack::setAuxEffectSendLevel(float level)
 {
-    ALOGV("setAuxEffectSendLevel(%f)", level);
-
-    if (mStatus != NO_ERROR) {
-        return mStatus;
-    }
-    ALOG_ASSERT(mProxy != NULL);
-
     if (level < 0.0f || level > 1.0f) {
         return BAD_VALUE;
     }
-    AutoMutex lock(mLock);
 
+    AutoMutex lock(mLock);
     mSendLevel = level;
     mProxy->setSendLevel(level);
 
@@ -511,18 +509,17 @@
 void AudioTrack::getAuxEffectSendLevel(float* level) const
 {
     if (level != NULL) {
-        *level  = mSendLevel;
+        *level = mSendLevel;
     }
 }
 
 status_t AudioTrack::setSampleRate(uint32_t rate)
 {
-    uint32_t afSamplingRate;
-
     if (mIsTimed) {
         return INVALID_OPERATION;
     }
 
+    uint32_t afSamplingRate;
     if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
         return NO_INIT;
     }
@@ -550,58 +547,44 @@
 
 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
 {
-    AutoMutex lock(mLock);
-    return setLoop_l(loopStart, loopEnd, loopCount);
-}
-
-// must be called with mLock held
-status_t AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
-{
     if (mSharedBuffer == 0 || mIsTimed) {
         return INVALID_OPERATION;
     }
 
-    audio_track_cblk_t* cblk = mCblk;
-
-    Mutex::Autolock _l(cblk->lock);
-
     if (loopCount == 0) {
-        cblk->loopStart = UINT_MAX;
-        cblk->loopEnd = UINT_MAX;
-        cblk->loopCount = 0;
-        mLoopCount = 0;
-        return NO_ERROR;
-    }
-
-    if (loopStart >= loopEnd ||
-        loopEnd - loopStart > mFrameCount ||
-        cblk->server > loopStart) {
-        ALOGE("setLoop invalid value: loopStart %d, loopEnd %d, loopCount %d, framecount %d, "
-              "user %d", loopStart, loopEnd, loopCount, mFrameCount, cblk->user);
+        ;
+    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
+            loopEnd - loopStart >= MIN_LOOP) {
+        ;
+    } else {
         return BAD_VALUE;
     }
 
-    if ((mSharedBuffer != 0) && (loopEnd > mFrameCount)) {
-        ALOGE("setLoop invalid value: loop markers beyond data: loopStart %d, loopEnd %d, "
-            "framecount %d",
-            loopStart, loopEnd, mFrameCount);
-        return BAD_VALUE;
+    AutoMutex lock(mLock);
+    // See setPosition() regarding setting parameters such as loop points or position while active
+    if (mState == STATE_ACTIVE) {
+        return INVALID_OPERATION;
     }
-
-    cblk->loopStart = loopStart;
-    cblk->loopEnd = loopEnd;
-    cblk->loopCount = loopCount;
-    mLoopCount = loopCount;
-
+    setLoop_l(loopStart, loopEnd, loopCount);
     return NO_ERROR;
 }
 
+void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
+{
+    // FIXME If setting a loop also sets position to start of loop, then
+    //       this is correct.  Otherwise it should be removed.
+    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
+    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
+}
+
 status_t AudioTrack::setMarkerPosition(uint32_t marker)
 {
     if (mCbf == NULL) {
         return INVALID_OPERATION;
     }
 
+    AutoMutex lock(mLock);
     mMarkerPosition = marker;
     mMarkerReached = false;
 
@@ -614,6 +597,7 @@
         return BAD_VALUE;
     }
 
+    AutoMutex lock(mLock);
     *marker = mMarkerPosition;
 
     return NO_ERROR;
@@ -625,9 +609,8 @@
         return INVALID_OPERATION;
     }
 
-    uint32_t curPosition;
-    getPosition(&curPosition);
-    mNewPosition = curPosition + updatePeriod;
+    AutoMutex lock(mLock);
+    mNewPosition = mProxy->getPosition() + updatePeriod;
     mUpdatePeriod = updatePeriod;
 
     return NO_ERROR;
@@ -639,6 +622,7 @@
         return BAD_VALUE;
     }
 
+    AutoMutex lock(mLock);
     *updatePeriod = mUpdatePeriod;
 
     return NO_ERROR;
@@ -649,58 +633,73 @@
     if (mSharedBuffer == 0 || mIsTimed) {
         return INVALID_OPERATION;
     }
-
-    AutoMutex lock(mLock);
-
-    if (!stopped_l()) {
-        return INVALID_OPERATION;
-    }
-
-    audio_track_cblk_t* cblk = mCblk;
-    Mutex::Autolock _l(cblk->lock);
-
-    if (position > cblk->user) {
+    if (position > mFrameCount) {
         return BAD_VALUE;
     }
 
-    cblk->server = position;
-    android_atomic_or(CBLK_FORCEREADY, &cblk->flags);
+    AutoMutex lock(mLock);
+    // Currently we require that the player is inactive before setting parameters such as position
+    // or loop points.  Otherwise, there could be a race condition: the application could read the
+    // current position, compute a new position or loop parameters, and then set that position or
+    // loop parameters but it would do the "wrong" thing since the position has continued to advance
+    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
+    // to specify how it wants to handle such scenarios.
+    if (mState == STATE_ACTIVE) {
+        return INVALID_OPERATION;
+    }
+    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
+    mLoopPeriod = 0;
+    // FIXME Check whether loops and setting position are incompatible in old code.
+    // If we use setLoop for both purposes we lose the capability to set the position while looping.
+    mStaticProxy->setLoop(position, mFrameCount, 0);
 
     return NO_ERROR;
 }
 
-status_t AudioTrack::getPosition(uint32_t *position)
+status_t AudioTrack::getPosition(uint32_t *position) const
 {
     if (position == NULL) {
         return BAD_VALUE;
     }
-    AutoMutex lock(mLock);
-    *position = mFlushed ? 0 : mCblk->server;
 
+    AutoMutex lock(mLock);
+    // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
+    *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
+            mProxy->getPosition();
+
+    return NO_ERROR;
+}
+
+status_t AudioTrack::getBufferPosition(size_t *position)
+{
+    if (mSharedBuffer == 0 || mIsTimed) {
+        return INVALID_OPERATION;
+    }
+    if (position == NULL) {
+        return BAD_VALUE;
+    }
+
+    AutoMutex lock(mLock);
+    *position = mStaticProxy->getBufferPosition();
     return NO_ERROR;
 }
 
 status_t AudioTrack::reload()
 {
-    if (mStatus != NO_ERROR) {
-        return mStatus;
-    }
-    ALOG_ASSERT(mProxy != NULL);
-
     if (mSharedBuffer == 0 || mIsTimed) {
         return INVALID_OPERATION;
     }
 
     AutoMutex lock(mLock);
-
-    if (!stopped_l()) {
+    // See setPosition() regarding setting parameters such as loop points or position while active
+    if (mState == STATE_ACTIVE) {
         return INVALID_OPERATION;
     }
-
-    flush_l();
-
-    (void) mProxy->stepUser(mFrameCount);
-
+    mNewPosition = mUpdatePeriod;
+    mLoopPeriod = 0;
+    // FIXME The new code cannot reload while keeping a loop specified.
+    // Need to check how the old code handled this, and whether it's a significant change.
+    mStaticProxy->setLoop(0, mFrameCount, 0);
     return NO_ERROR;
 }
 
@@ -719,7 +718,7 @@
 
 status_t AudioTrack::attachAuxEffect(int effectId)
 {
-    ALOGV("attachAuxEffect(%d)", effectId);
+    AutoMutex lock(mLock);
     status_t status = mAudioTrack->attachAuxEffect(effectId);
     if (status == NO_ERROR) {
         mAuxEffectId = effectId;
@@ -737,7 +736,8 @@
         size_t frameCount,
         audio_output_flags_t flags,
         const sp<IMemory>& sharedBuffer,
-        audio_io_handle_t output)
+        audio_io_handle_t output,
+        size_t epoch)
 {
     status_t status;
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
@@ -747,7 +747,8 @@
     }
 
     uint32_t afLatency;
-    if (AudioSystem::getLatency(output, streamType, &afLatency) != NO_ERROR) {
+    if ((status = AudioSystem::getLatency(output, streamType, &afLatency)) != NO_ERROR) {
+        ALOGE("getLatency(%d) failed status %d", output, status);
         return NO_INIT;
     }
 
@@ -775,7 +776,10 @@
             frameCount = sharedBuffer->size();
         } else if (frameCount == 0) {
             size_t afFrameCount;
-            if (AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR) {
+            status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
+            if (status != NO_ERROR) {
+                ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType,
+                        status);
                 return NO_INIT;
             }
             frameCount = afFrameCount;
@@ -806,17 +810,26 @@
 
         // FIXME move these calculations and associated checks to server
         uint32_t afSampleRate;
-        if (AudioSystem::getSamplingRate(output, streamType, &afSampleRate) != NO_ERROR) {
+        status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
+        if (status != NO_ERROR) {
+            ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType,
+                    status);
             return NO_INIT;
         }
         size_t afFrameCount;
-        if (AudioSystem::getFrameCount(output, streamType, &afFrameCount) != NO_ERROR) {
+        status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
+        if (status != NO_ERROR) {
+            ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
             return NO_INIT;
         }
 
         // Ensure that buffer depth covers at least audio hardware latency
         uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
-        if (minBufCount < 2) minBufCount = 2;
+        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
+                afFrameCount, minBufCount, afSampleRate, afLatency);
+        if (minBufCount <= 2) {
+            minBufCount = sampleRate == afSampleRate ? 2 : 3;
+        }
 
         size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
         ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
@@ -826,12 +839,9 @@
         if (frameCount == 0) {
             frameCount = minFrameCount;
         }
-        if (mNotificationFramesAct == 0) {
-            mNotificationFramesAct = frameCount/2;
-        }
         // Make sure that application is notified with sufficient margin
         // before underrun
-        if (mNotificationFramesAct > frameCount/2) {
+        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/2) {
             mNotificationFramesAct = frameCount/2;
         }
         if (frameCount < minFrameCount) {
@@ -881,6 +891,10 @@
         ALOGE("Could not get control block");
         return NO_INIT;
     }
+    if (mAudioTrack != 0) {
+        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
+        mDeathNotifier.clear();
+    }
     mAudioTrack = track;
     mCblkMemory = iMem;
     audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
@@ -898,26 +912,38 @@
         if (trackFlags & IAudioFlinger::TRACK_FAST) {
             ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
             mAwaitBoost = true;
+            if (sharedBuffer == 0) {
+                // double-buffering is not required for fast tracks, due to tighter scheduling
+                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount) {
+                    mNotificationFramesAct = frameCount;
+                }
+            }
         } else {
             ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
             // once denied, do not request again if IAudioTrack is re-created
             flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
             mFlags = flags;
-        }
-        if (sharedBuffer == 0) {
-            mNotificationFramesAct = frameCount/2;
+            if (sharedBuffer == 0) {
+                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/2) {
+                    mNotificationFramesAct = frameCount/2;
+                }
+            }
         }
     }
+    mRefreshRemaining = true;
+
+    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
+    // is the value of pointer() for the shared buffer, otherwise buffers points
+    // immediately after the control block.  This address is for the mapping within client
+    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
+    void* buffers;
     if (sharedBuffer == 0) {
-        mBuffers = (char*)cblk + sizeof(audio_track_cblk_t);
+        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
     } else {
-        mBuffers = sharedBuffer->pointer();
+        buffers = sharedBuffer->pointer();
     }
 
     mAudioTrack->attachAuxEffect(mAuxEffectId);
-    cblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
-    cblk->waitTimeMs = 0;
-    mRemainingFrames = mNotificationFramesAct;
     // FIXME don't believe this lie
     mLatency = afLatency + (1000*frameCount) / sampleRate;
     mFrameCount = frameCount;
@@ -928,147 +954,143 @@
     }
 
     // update proxy
-    delete mProxy;
-    mProxy = new AudioTrackClientProxy(cblk, mBuffers, frameCount, mFrameSizeAF);
+    if (sharedBuffer == 0) {
+        mStaticProxy.clear();
+        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
+    } else {
+        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
+        mProxy = mStaticProxy;
+    }
     mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
             uint16_t(mVolume[LEFT] * 0x1000));
     mProxy->setSendLevel(mSendLevel);
     mProxy->setSampleRate(mSampleRate);
-    if (sharedBuffer != 0) {
-        // Force buffer full condition as data is already present in shared memory
-        mProxy->stepUser(frameCount);
-    }
+    mProxy->setEpoch(epoch);
+    mProxy->setMinimum(mNotificationFramesAct);
+
+    mDeathNotifier = new DeathNotifier(this);
+    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
 
     return NO_ERROR;
 }
 
 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
 {
-    ALOG_ASSERT(mStatus == NO_ERROR && mProxy != NULL);
-
-    AutoMutex lock(mLock);
-    bool active;
-    status_t result = NO_ERROR;
-    audio_track_cblk_t* cblk = mCblk;
-    uint32_t framesReq = audioBuffer->frameCount;
-    uint32_t waitTimeMs = (waitCount < 0) ? cblk->bufferTimeoutMs : WAIT_PERIOD_MS;
-
-    audioBuffer->frameCount  = 0;
-    audioBuffer->size = 0;
-
-    size_t framesAvail = mProxy->framesAvailable();
-
-    cblk->lock.lock();
-    if (cblk->flags & CBLK_INVALID) {
-        goto create_new_track;
+    if (audioBuffer == NULL) {
+        return BAD_VALUE;
     }
-    cblk->lock.unlock();
+    if (mTransfer != TRANSFER_OBTAIN) {
+        audioBuffer->frameCount = 0;
+        audioBuffer->size = 0;
+        audioBuffer->raw = NULL;
+        return INVALID_OPERATION;
+    }
 
-    if (framesAvail == 0) {
-        cblk->lock.lock();
-        goto start_loop_here;
-        while (framesAvail == 0) {
-            active = mActive;
-            if (CC_UNLIKELY(!active)) {
-                ALOGV("Not active and NO_MORE_BUFFERS");
-                cblk->lock.unlock();
-                return NO_MORE_BUFFERS;
-            }
-            if (CC_UNLIKELY(!waitCount)) {
-                cblk->lock.unlock();
-                return WOULD_BLOCK;
-            }
-            if (!(cblk->flags & CBLK_INVALID)) {
-                mLock.unlock();
-                // this condition is in shared memory, so if IAudioTrack and control block
-                // are replaced due to mediaserver death or IAudioTrack invalidation then
-                // cv won't be signalled, but fortunately the timeout will limit the wait
-                result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
-                cblk->lock.unlock();
-                mLock.lock();
-                if (!mActive) {
-                    return status_t(STOPPED);
-                }
-                // IAudioTrack may have been re-created while mLock was unlocked
-                cblk = mCblk;
-                cblk->lock.lock();
-            }
+    const struct timespec *requested;
+    if (waitCount == -1) {
+        requested = &ClientProxy::kForever;
+    } else if (waitCount == 0) {
+        requested = &ClientProxy::kNonBlocking;
+    } else if (waitCount > 0) {
+        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
+        struct timespec timeout;
+        timeout.tv_sec = ms / 1000;
+        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
+        requested = &timeout;
+    } else {
+        ALOGE("%s invalid waitCount %d", __func__, waitCount);
+        requested = NULL;
+    }
+    return obtainBuffer(audioBuffer, requested);
+}
 
-            if (cblk->flags & CBLK_INVALID) {
-                goto create_new_track;
-            }
-            if (CC_UNLIKELY(result != NO_ERROR)) {
-                cblk->waitTimeMs += waitTimeMs;
-                if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
-                    // timing out when a loop has been set and we have already written upto loop end
-                    // is a normal condition: no need to wake AudioFlinger up.
-                    if (cblk->user < cblk->loopEnd) {
-                        ALOGW("obtainBuffer timed out (is the CPU pegged?) %p name=%#x user=%08x, "
-                              "server=%08x", this, cblk->mName, cblk->user, cblk->server);
-                        //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140)
-                        cblk->lock.unlock();
-                        result = mAudioTrack->start();
-                        cblk->lock.lock();
-                        if (result == DEAD_OBJECT) {
-                            android_atomic_or(CBLK_INVALID, &cblk->flags);
-create_new_track:
-                            audio_track_cblk_t* temp = cblk;
-                            result = restoreTrack_l(temp, false /*fromStart*/);
-                            cblk = temp;
-                        }
-                        if (result != NO_ERROR) {
-                            ALOGW("obtainBuffer create Track error %d", result);
-                            cblk->lock.unlock();
-                            return result;
-                        }
+status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
+        struct timespec *elapsed, size_t *nonContig)
+{
+    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
+    uint32_t oldSequence = 0;
+    uint32_t newSequence;
+
+    Proxy::Buffer buffer;
+    status_t status = NO_ERROR;
+
+    static const int32_t kMaxTries = 5;
+    int32_t tryCounter = kMaxTries;
+
+    do {
+        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
+        // keep them from going away if another thread re-creates the track during obtainBuffer()
+        sp<AudioTrackClientProxy> proxy;
+        sp<IMemory> iMem;
+
+        {   // start of lock scope
+            AutoMutex lock(mLock);
+
+            newSequence = mSequence;
+            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
+            if (status == DEAD_OBJECT) {
+                // re-create track, unless someone else has already done so
+                if (newSequence == oldSequence) {
+                    status = restoreTrack_l("obtainBuffer");
+                    if (status != NO_ERROR) {
+                        break;
                     }
-                    cblk->waitTimeMs = 0;
-                }
-
-                if (--waitCount == 0) {
-                    cblk->lock.unlock();
-                    return TIMED_OUT;
                 }
             }
-            // read the server count again
-        start_loop_here:
-            framesAvail = mProxy->framesAvailable_l();
-        }
-        cblk->lock.unlock();
+            oldSequence = newSequence;
+
+            // Keep the extra references
+            proxy = mProxy;
+            iMem = mCblkMemory;
+
+            // Non-blocking if track is stopped or paused
+            if (mState != STATE_ACTIVE) {
+                requested = &ClientProxy::kNonBlocking;
+            }
+
+        }   // end of lock scope
+
+        buffer.mFrameCount = audioBuffer->frameCount;
+        // FIXME starts the requested timeout and elapsed over from scratch
+        status = proxy->obtainBuffer(&buffer, requested, elapsed);
+
+    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
+
+    audioBuffer->frameCount = buffer.mFrameCount;
+    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
+    audioBuffer->raw = buffer.mRaw;
+    if (nonContig != NULL) {
+        *nonContig = buffer.mNonContig;
     }
-
-    cblk->waitTimeMs = 0;
-
-    if (framesReq > framesAvail) {
-        framesReq = framesAvail;
-    }
-
-    uint32_t u = cblk->user;
-    uint32_t bufferEnd = cblk->userBase + mFrameCount;
-
-    if (framesReq > bufferEnd - u) {
-        framesReq = bufferEnd - u;
-    }
-
-    audioBuffer->frameCount = framesReq;
-    audioBuffer->size = framesReq * mFrameSizeAF;
-    audioBuffer->raw = mProxy->buffer(u);
-    active = mActive;
-    return active ? status_t(NO_ERROR) : status_t(STOPPED);
+    return status;
 }
 
 void AudioTrack::releaseBuffer(Buffer* audioBuffer)
 {
-    ALOG_ASSERT(mStatus == NO_ERROR && mProxy != NULL);
+    if (mTransfer == TRANSFER_SHARED) {
+        return;
+    }
+
+    size_t stepCount = audioBuffer->size / mFrameSizeAF;
+    if (stepCount == 0) {
+        return;
+    }
+
+    Proxy::Buffer buffer;
+    buffer.mFrameCount = stepCount;
+    buffer.mRaw = audioBuffer->raw;
 
     AutoMutex lock(mLock);
-    audio_track_cblk_t* cblk = mCblk;
-    (void) mProxy->stepUser(audioBuffer->frameCount);
-    if (audioBuffer->frameCount > 0) {
-        // restart track if it was disabled by audioflinger due to previous underrun
-        if (mActive && (cblk->flags & CBLK_DISABLED)) {
-            android_atomic_and(~CBLK_DISABLED, &cblk->flags);
-            ALOGW("releaseBuffer() track %p name=%#x disabled, restarting", this, cblk->mName);
+    mInUnderrun = false;
+    mProxy->releaseBuffer(&buffer);
+
+    // restart track if it was disabled by audioflinger due to previous underrun
+    if (mState == STATE_ACTIVE) {
+        audio_track_cblk_t* cblk = mCblk;
+        if (android_atomic_and(~CBLK_DISABLED, &cblk->flags) & CBLK_DISABLED) {
+            ALOGW("releaseBuffer() track %p name=%#x disabled due to previous underrun, restarting",
+                    this, cblk->mName);
+            // FIXME ignoring status
             mAudioTrack->start();
         }
     }
@@ -1078,68 +1100,46 @@
 
 ssize_t AudioTrack::write(const void* buffer, size_t userSize)
 {
-
-    if (mSharedBuffer != 0 || mIsTimed) {
+    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
         return INVALID_OPERATION;
     }
 
-    if (ssize_t(userSize) < 0) {
+    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
         // Sanity-check: user is most-likely passing an error code, and it would
         // make the return value ambiguous (actualSize vs error).
-        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d)",
-                buffer, userSize, userSize);
+        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d)", buffer, userSize, userSize);
         return BAD_VALUE;
     }
 
-    ALOGV("write %p: %d bytes, mActive=%d", this, userSize, mActive);
-
-    if (userSize == 0) {
-        return 0;
-    }
-
-    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
-    // while we are accessing the cblk
-    mLock.lock();
-    sp<IAudioTrack> audioTrack = mAudioTrack;
-    sp<IMemory> iMem = mCblkMemory;
-    mLock.unlock();
-
-    // since mLock is unlocked the IAudioTrack and shared memory may be re-created,
-    // so all cblk references might still refer to old shared memory, but that should be benign
-
-    ssize_t written = 0;
-    const int8_t *src = (const int8_t *)buffer;
+    size_t written = 0;
     Buffer audioBuffer;
-    size_t frameSz = frameSize();
 
-    do {
-        audioBuffer.frameCount = userSize/frameSz;
+    while (userSize >= mFrameSize) {
+        audioBuffer.frameCount = userSize / mFrameSize;
 
-        status_t err = obtainBuffer(&audioBuffer, -1);
+        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
         if (err < 0) {
-            // out of buffers, return #bytes written
-            if (err == status_t(NO_MORE_BUFFERS)) {
+            if (written > 0) {
                 break;
             }
             return ssize_t(err);
         }
 
         size_t toWrite;
-
         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
             // Divide capacity by 2 to take expansion into account
-            toWrite = audioBuffer.size>>1;
-            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) src, toWrite);
+            toWrite = audioBuffer.size >> 1;
+            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
         } else {
             toWrite = audioBuffer.size;
-            memcpy(audioBuffer.i8, src, toWrite);
+            memcpy(audioBuffer.i8, buffer, toWrite);
         }
-        src += toWrite;
+        buffer = ((const char *) buffer) + toWrite;
         userSize -= toWrite;
         written += toWrite;
 
         releaseBuffer(&audioBuffer);
-    } while (userSize >= frameSz);
+    }
 
     return written;
 }
@@ -1155,10 +1155,12 @@
     AutoMutex lock(mLock);
     status_t result = UNKNOWN_ERROR;
 
+#if 1
     // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
     // while we are accessing the cblk
     sp<IAudioTrack> audioTrack = mAudioTrack;
     sp<IMemory> iMem = mCblkMemory;
+#endif
 
     // If the track is not invalid already, try to allocate a buffer.  alloc
     // fails indicating that the server is dead, flag the track as invalid so
@@ -1174,13 +1176,9 @@
     // If the track is invalid at this point, attempt to restore it. and try the
     // allocation one more time.
     if (cblk->flags & CBLK_INVALID) {
-        cblk->lock.lock();
-        audio_track_cblk_t* temp = cblk;
-        result = restoreTrack_l(temp, false /*fromStart*/);
-        cblk = temp;
-        cblk->lock.unlock();
+        result = restoreTrack_l("allocateTimedBuffer");
 
-        if (result == OK) {
+        if (result == NO_ERROR) {
             result = mAudioTrack->allocateTimedBuffer(size, buffer);
         }
     }
@@ -1197,9 +1195,10 @@
         audio_track_cblk_t* cblk = mCblk;
         // restart track if it was disabled by audioflinger due to previous underrun
         if (buffer->size() != 0 && status == NO_ERROR &&
-                mActive && (cblk->flags & CBLK_DISABLED)) {
+                (mState == STATE_ACTIVE) && (cblk->flags & CBLK_DISABLED)) {
             android_atomic_and(~CBLK_DISABLED, &cblk->flags);
             ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
+            // FIXME ignoring status
             mAudioTrack->start();
         }
     }
@@ -1214,12 +1213,8 @@
 
 // -------------------------------------------------------------------------
 
-bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
+nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
 {
-    Buffer audioBuffer;
-    uint32_t frames;
-    size_t writtenSize;
-
     mLock.lock();
     if (mAwaitBoost) {
         mAwaitBoost = false;
@@ -1240,86 +1235,181 @@
         }
         return true;
     }
-    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
-    // while we are accessing the cblk
-    sp<IAudioTrack> audioTrack = mAudioTrack;
-    sp<IMemory> iMem = mCblkMemory;
-    audio_track_cblk_t* cblk = mCblk;
-    bool active = mActive;
-    mLock.unlock();
 
-    // since mLock is unlocked the IAudioTrack and shared memory may be re-created,
-    // so all cblk references might still refer to old shared memory, but that should be benign
+    // Can only reference mCblk while locked
+    int32_t flags = android_atomic_and(
+        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->flags);
 
-    // Manage underrun callback
-    if (active && (mProxy->framesAvailable() == mFrameCount)) {
-        ALOGV("Underrun user: %x, server: %x, flags %04x", cblk->user, cblk->server, cblk->flags);
-        if (!(android_atomic_or(CBLK_UNDERRUN, &cblk->flags) & CBLK_UNDERRUN)) {
-            mCbf(EVENT_UNDERRUN, mUserData, 0);
-            if (cblk->server == mFrameCount) {
-                mCbf(EVENT_BUFFER_END, mUserData, 0);
-            }
-            if (mSharedBuffer != 0) {
-                return false;
-            }
+    // Check for track invalidation
+    if (flags & CBLK_INVALID) {
+        (void) restoreTrack_l("processAudioBuffer");
+        mLock.unlock();
+        // Run again immediately, but with a new IAudioTrack
+        return 0;
+    }
+
+    bool active = mState == STATE_ACTIVE;
+
+    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
+    bool newUnderrun = false;
+    if (flags & CBLK_UNDERRUN) {
+#if 0
+        // Currently in shared buffer mode, when the server reaches the end of buffer,
+        // the track stays active in continuous underrun state.  It's up to the application
+        // to pause or stop the track, or set the position to a new offset within buffer.
+        // This was some experimental code to auto-pause on underrun.   Keeping it here
+        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
+        if (mTransfer == TRANSFER_SHARED) {
+            mState = STATE_PAUSED;
+            active = false;
+        }
+#endif
+        if (!mInUnderrun) {
+            mInUnderrun = true;
+            newUnderrun = true;
         }
     }
 
-    // Manage loop end callback
-    while (mLoopCount > cblk->loopCount) {
-        int loopCount = -1;
-        mLoopCount--;
-        if (mLoopCount >= 0) loopCount = mLoopCount;
-
-        mCbf(EVENT_LOOP_END, mUserData, (void *)&loopCount);
-    }
+    // Get current position of server
+    size_t position = mProxy->getPosition();
 
     // Manage marker callback
-    if (!mMarkerReached && (mMarkerPosition > 0)) {
-        if (cblk->server >= mMarkerPosition) {
-            mCbf(EVENT_MARKER, mUserData, (void *)&mMarkerPosition);
-            mMarkerReached = true;
-        }
+    bool markerReached = false;
+    size_t markerPosition = mMarkerPosition;
+    // FIXME fails for wraparound, need 64 bits
+    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
+        mMarkerReached = markerReached = true;
     }
 
-    // Manage new position callback
-    if (mUpdatePeriod > 0) {
-        while (cblk->server >= mNewPosition) {
-            mCbf(EVENT_NEW_POS, mUserData, (void *)&mNewPosition);
-            mNewPosition += mUpdatePeriod;
-        }
+    // Determine number of new position callback(s) that will be needed, while locked
+    size_t newPosCount = 0;
+    size_t newPosition = mNewPosition;
+    size_t updatePeriod = mUpdatePeriod;
+    // FIXME fails for wraparound, need 64 bits
+    if (updatePeriod > 0 && position >= newPosition) {
+        newPosCount = ((position - newPosition) / updatePeriod) + 1;
+        mNewPosition += updatePeriod * newPosCount;
     }
 
-    // If Shared buffer is used, no data is requested from client.
-    if (mSharedBuffer != 0) {
-        frames = 0;
-    } else {
-        frames = mRemainingFrames;
+    // Cache other fields that will be needed soon
+    uint32_t loopPeriod = mLoopPeriod;
+    uint32_t sampleRate = mSampleRate;
+    size_t notificationFrames = mNotificationFramesAct;
+    if (mRefreshRemaining) {
+        mRefreshRemaining = false;
+        mRemainingFrames = notificationFrames;
+        mRetryOnPartialBuffer = false;
+    }
+    size_t misalignment = mProxy->getMisalignment();
+    int32_t sequence = mSequence;
+
+    // These fields don't need to be cached, because they are assigned only by set():
+    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
+    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
+
+    mLock.unlock();
+
+    // perform callbacks while unlocked
+    if (newUnderrun) {
+        mCbf(EVENT_UNDERRUN, mUserData, NULL);
+    }
+    // FIXME we will miss loops if loop cycle was signaled several times since last call
+    //       to processAudioBuffer()
+    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
+        mCbf(EVENT_LOOP_END, mUserData, NULL);
+    }
+    if (flags & CBLK_BUFFER_END) {
+        mCbf(EVENT_BUFFER_END, mUserData, NULL);
+    }
+    if (markerReached) {
+        mCbf(EVENT_MARKER, mUserData, &markerPosition);
+    }
+    while (newPosCount > 0) {
+        size_t temp = newPosition;
+        mCbf(EVENT_NEW_POS, mUserData, &temp);
+        newPosition += updatePeriod;
+        newPosCount--;
+    }
+    if (mObservedSequence != sequence) {
+        mObservedSequence = sequence;
+        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
     }
 
-    // See description of waitCount parameter at declaration of obtainBuffer().
-    // The logic below prevents us from being stuck below at obtainBuffer()
-    // not being able to handle timed events (position, markers, loops).
-    int32_t waitCount = -1;
-    if (mUpdatePeriod || (!mMarkerReached && mMarkerPosition) || mLoopCount) {
-        waitCount = 1;
+    // if inactive, then don't run me again until re-started
+    if (!active) {
+        return NS_INACTIVE;
     }
 
-    do {
+    // Compute the estimated time until the next timed event (position, markers, loops)
+    // FIXME only for non-compressed audio
+    uint32_t minFrames = ~0;
+    if (!markerReached && position < markerPosition) {
+        minFrames = markerPosition - position;
+    }
+    if (loopPeriod > 0 && loopPeriod < minFrames) {
+        minFrames = loopPeriod;
+    }
+    if (updatePeriod > 0 && updatePeriod < minFrames) {
+        minFrames = updatePeriod;
+    }
 
-        audioBuffer.frameCount = frames;
+    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
+    static const uint32_t kPoll = 0;
+    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
+        minFrames = kPoll * notificationFrames;
+    }
 
-        status_t err = obtainBuffer(&audioBuffer, waitCount);
-        if (err < NO_ERROR) {
-            if (err != TIMED_OUT) {
-                ALOGE_IF(err != status_t(NO_MORE_BUFFERS),
-                        "Error obtaining an audio buffer, giving up.");
-                return false;
+    // Convert frame units to time units
+    nsecs_t ns = NS_WHENEVER;
+    if (minFrames != (uint32_t) ~0) {
+        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
+        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
+        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
+    }
+
+    // If not supplying data by EVENT_MORE_DATA, then we're done
+    if (mTransfer != TRANSFER_CALLBACK) {
+        return ns;
+    }
+
+    struct timespec timeout;
+    const struct timespec *requested = &ClientProxy::kForever;
+    if (ns != NS_WHENEVER) {
+        timeout.tv_sec = ns / 1000000000LL;
+        timeout.tv_nsec = ns % 1000000000LL;
+        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
+        requested = &timeout;
+    }
+
+    while (mRemainingFrames > 0) {
+
+        Buffer audioBuffer;
+        audioBuffer.frameCount = mRemainingFrames;
+        size_t nonContig;
+        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
+        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
+                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
+        requested = &ClientProxy::kNonBlocking;
+        size_t avail = audioBuffer.frameCount + nonContig;
+        ALOGV("obtainBuffer(%u) returned %u = %u + %u",
+                mRemainingFrames, avail, audioBuffer.frameCount, nonContig);
+        if (err != NO_ERROR) {
+            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR) {
+                return 0;
             }
-            break;
+            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
+            return NS_NEVER;
         }
-        if (err == status_t(STOPPED)) {
-            return false;
+
+        if (mRetryOnPartialBuffer) {
+            mRetryOnPartialBuffer = false;
+            if (avail < mRemainingFrames) {
+                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
+                if (ns < 0 || myns < ns) {
+                    ns = myns;
+                }
+                return ns;
+            }
         }
 
         // Divide buffer size by 2 to take into account the expansion
@@ -1331,66 +1421,76 @@
 
         size_t reqSize = audioBuffer.size;
         mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
-        writtenSize = audioBuffer.size;
+        size_t writtenSize = audioBuffer.size;
+        size_t writtenFrames = writtenSize / mFrameSize;
 
         // Sanity check on returned size
-        if (ssize_t(writtenSize) <= 0) {
+        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
+            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
+                    reqSize, (int) writtenSize);
+            return NS_NEVER;
+        }
+
+        if (writtenSize == 0) {
             // The callback is done filling buffers
             // Keep this thread going to handle timed events and
             // still try to get more data in intervals of WAIT_PERIOD_MS
             // but don't just loop and block the CPU, so wait
-            usleep(WAIT_PERIOD_MS*1000);
-            break;
-        }
-
-        if (writtenSize > reqSize) {
-            writtenSize = reqSize;
+            return WAIT_PERIOD_MS * 1000000LL;
         }
 
         if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
             // 8 to 16 bit conversion, note that source and destination are the same address
             memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
-            writtenSize <<= 1;
+            audioBuffer.size <<= 1;
         }
 
-        audioBuffer.size = writtenSize;
-        // NOTE: cblk->frameSize is not equal to AudioTrack::frameSize() for
-        // 8 bit PCM data: in this case,  cblk->frameSize is based on a sample size of
-        // 16 bit.
-        audioBuffer.frameCount = writtenSize / mFrameSizeAF;
-
-        frames -= audioBuffer.frameCount;
+        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
+        audioBuffer.frameCount = releasedFrames;
+        mRemainingFrames -= releasedFrames;
+        if (misalignment >= releasedFrames) {
+            misalignment -= releasedFrames;
+        } else {
+            misalignment = 0;
+        }
 
         releaseBuffer(&audioBuffer);
-    }
-    while (frames);
 
-    if (frames == 0) {
-        mRemainingFrames = mNotificationFramesAct;
-    } else {
-        mRemainingFrames = frames;
+        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
+        // if callback doesn't like to accept the full chunk
+        if (writtenSize < reqSize) {
+            continue;
+        }
+
+        // There could be enough non-contiguous frames available to satisfy the remaining request
+        if (mRemainingFrames <= nonContig) {
+            continue;
+        }
+
+#if 0
+        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
+        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
+        // that total to a sum == notificationFrames.
+        if (0 < misalignment && misalignment <= mRemainingFrames) {
+            mRemainingFrames = misalignment;
+            return (mRemainingFrames * 1100000000LL) / sampleRate;
+        }
+#endif
+
     }
-    return true;
+    mRemainingFrames = notificationFrames;
+    mRetryOnPartialBuffer = true;
+
+    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
+    return 0;
 }
 
-// must be called with mLock and refCblk.lock held. Callers must also hold strong references on
-// the IAudioTrack and IMemory in case they are recreated here.
-// If the IAudioTrack is successfully restored, the refCblk pointer is updated
-// FIXME Don't depend on caller to hold strong references.
-status_t AudioTrack::restoreTrack_l(audio_track_cblk_t*& refCblk, bool fromStart)
+status_t AudioTrack::restoreTrack_l(const char *from)
 {
+    ALOGW("dead IAudioTrack, creating a new one from %s()", from);
+    ++mSequence;
     status_t result;
 
-    audio_track_cblk_t* cblk = refCblk;
-    audio_track_cblk_t* newCblk = cblk;
-    ALOGW("dead IAudioTrack, creating a new one from %s",
-        fromStart ? "start()" : "obtainBuffer()");
-
-    // signal old cblk condition so that other threads waiting for available buffers stop
-    // waiting now
-    cblk->cv.broadcast();
-    cblk->lock.unlock();
-
     // refresh the audio configuration cache in this process to make sure we get new
     // output parameters in getOutput_l() and createTrack_l()
     AudioSystem::clearAudioConfigCache();
@@ -1398,68 +1498,47 @@
     // if the new IAudioTrack is created, createTrack_l() will modify the
     // following member variables: mAudioTrack, mCblkMemory and mCblk.
     // It will also delete the strong references on previous IAudioTrack and IMemory
+    size_t position = mProxy->getPosition();
+    mNewPosition = position + mUpdatePeriod;
+    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
     result = createTrack_l(mStreamType,
                            mSampleRate,
                            mFormat,
                            mReqFrameCount,  // so that frame count never goes down
                            mFlags,
                            mSharedBuffer,
-                           getOutput_l());
+                           getOutput_l(),
+                           position /*epoch*/);
 
     if (result == NO_ERROR) {
-        uint32_t user = cblk->user;
-        uint32_t server = cblk->server;
+        // continue playback from last known position, but
+        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
+        if (mStaticProxy != NULL) {
+            mLoopPeriod = 0;
+            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
+        }
+        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
+        //       track destruction have been played? This is critical for SoundPool implementation
+        //       This must be broken, and needs to be tested/debugged.
+#if 0
         // restore write index and set other indexes to reflect empty buffer status
-        newCblk = mCblk;
-        newCblk->user = user;
-        newCblk->server = user;
-        newCblk->userBase = user;
-        newCblk->serverBase = user;
-        // restore loop: this is not guaranteed to succeed if new frame count is not
-        // compatible with loop length
-        setLoop_l(cblk->loopStart, cblk->loopEnd, cblk->loopCount);
-        size_t frames = 0;
-        if (!fromStart) {
-            newCblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
+        if (!strcmp(from, "start")) {
             // Make sure that a client relying on callback events indicating underrun or
             // the actual amount of audio frames played (e.g SoundPool) receives them.
             if (mSharedBuffer == 0) {
-                if (user > server) {
-                    frames = ((user - server) > mFrameCount) ?
-                            mFrameCount : (user - server);
-                    memset(mBuffers, 0, frames * mFrameSizeAF);
-                }
                 // restart playback even if buffer is not completely filled.
-                android_atomic_or(CBLK_FORCEREADY, &newCblk->flags);
+                android_atomic_or(CBLK_FORCEREADY, &mCblk->flags);
             }
         }
-        if (mSharedBuffer != 0) {
-            frames = mFrameCount;
-        }
-        if (frames > 0) {
-            // stepUser() clears CBLK_UNDERRUN flag enabling underrun callbacks to
-            // the client
-            mProxy->stepUser(frames);
-        }
-        if (mActive) {
+#endif
+        if (mState == STATE_ACTIVE) {
             result = mAudioTrack->start();
-            ALOGW_IF(result != NO_ERROR, "restoreTrack_l() start() failed status %d", result);
-        }
-        if (fromStart && result == NO_ERROR) {
-            mNewPosition = newCblk->server + mUpdatePeriod;
         }
     }
-    ALOGW_IF(result != NO_ERROR, "restoreTrack_l() failed status %d", result);
-    ALOGV("restoreTrack_l() status %d mActive %d cblk %p, old cblk %p flags %08x old flags %08x",
-        result, mActive, newCblk, cblk, newCblk->flags, cblk->flags);
-
-    if (result == NO_ERROR) {
-        // from now on we switch to the newly created cblk
-        refCblk = newCblk;
+    if (result != NO_ERROR) {
+        ALOGW("restoreTrack_l() failed status %d", result);
+        mState = STATE_STOPPED;
     }
-    newCblk->lock.lock();
-
-    ALOGW_IF(result != NO_ERROR, "restoreTrack_l() error %d", result);
 
     return result;
 }
@@ -1480,16 +1559,33 @@
     result.append(buffer);
     snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
     result.append(buffer);
-    snprintf(buffer, 255, "  active(%d), latency (%d)\n", mActive, mLatency);
+    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
     result.append(buffer);
     ::write(fd, result.string(), result.size());
     return NO_ERROR;
 }
 
+uint32_t AudioTrack::getUnderrunFrames() const
+{
+    AutoMutex lock(mLock);
+    return mProxy->getUnderrunFrames();
+}
+
+// =========================================================================
+
+void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who)
+{
+    sp<AudioTrack> audioTrack = mAudioTrack.promote();
+    if (audioTrack != 0) {
+        AutoMutex lock(audioTrack->mLock);
+        audioTrack->mProxy->binderDied();
+    }
+}
+
 // =========================================================================
 
 AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
-    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true)
+    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mResumeLatch(false)
 {
 }
 
@@ -1507,10 +1603,26 @@
             return true;
         }
     }
-    if (!mReceiver.processAudioBuffer(this)) {
-        pause();
+    nsecs_t ns = mReceiver.processAudioBuffer(this);
+    switch (ns) {
+    case 0:
+        return true;
+    case NS_WHENEVER:
+        sleep(1);
+        return true;
+    case NS_INACTIVE:
+        pauseConditional();
+        return true;
+    case NS_NEVER:
+        return false;
+    default:
+        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
+        struct timespec req;
+        req.tv_sec = ns / 1000000000LL;
+        req.tv_nsec = ns % 1000000000LL;
+        nanosleep(&req, NULL /*rem*/);
+        return true;
     }
-    return true;
 }
 
 void AudioTrack::AudioTrackThread::requestExit()
@@ -1524,6 +1636,17 @@
 {
     AutoMutex _l(mMyLock);
     mPaused = true;
+    mResumeLatch = false;
+}
+
+void AudioTrack::AudioTrackThread::pauseConditional()
+{
+    AutoMutex _l(mMyLock);
+    if (mResumeLatch) {
+        mResumeLatch = false;
+    } else {
+        mPaused = true;
+    }
 }
 
 void AudioTrack::AudioTrackThread::resume()
@@ -1531,7 +1654,10 @@
     AutoMutex _l(mMyLock);
     if (mPaused) {
         mPaused = false;
+        mResumeLatch = false;
         mMyCond.signal();
+    } else {
+        mResumeLatch = true;
     }
 }
 
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 13d47c9..f034164 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -19,178 +19,664 @@
 
 #include <private/media/AudioTrackShared.h>
 #include <utils/Log.h>
+extern "C" {
+#include "../private/bionic_futex.h"
+}
 
 namespace android {
 
 audio_track_cblk_t::audio_track_cblk_t()
-    : lock(Mutex::SHARED), cv(Condition::SHARED), user(0), server(0),
-    userBase(0), serverBase(0), frameCount_(0),
-    loopStart(UINT_MAX), loopEnd(UINT_MAX), loopCount(0), mVolumeLR(0x10001000),
-    mSampleRate(0), mSendLevel(0), flags(0)
+    : server(0), frameCount_(0), mFutex(0), mMinimum(0),
+    mVolumeLR(0x10001000), mSampleRate(0), mSendLevel(0), mName(0), flags(0)
+{
+    memset(&u, 0, sizeof(u));
+}
+
+// ---------------------------------------------------------------------------
+
+Proxy::Proxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount, size_t frameSize,
+        bool isOut, bool clientInServer)
+    : mCblk(cblk), mBuffers(buffers), mFrameCount(frameCount), mFrameSize(frameSize),
+      mFrameCountP2(roundup(frameCount)), mIsOut(isOut), mClientInServer(clientInServer),
+      mIsShutdown(false)
 {
 }
 
-uint32_t audio_track_cblk_t::stepUser(size_t stepCount, size_t frameCount, bool isOut)
+// ---------------------------------------------------------------------------
+
+ClientProxy::ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+        size_t frameSize, bool isOut, bool clientInServer)
+    : Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer), mEpoch(0)
 {
-    ALOGV("stepuser %08x %08x %d", user, server, stepCount);
-
-    uint32_t u = user;
-    u += stepCount;
-    // Ensure that user is never ahead of server for AudioRecord
-    if (isOut) {
-        // If stepServer() has been called once, switch to normal obtainBuffer() timeout period
-        if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS-1) {
-            bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
-        }
-    } else if (u > server) {
-        ALOGW("stepUser occurred after track reset");
-        u = server;
-    }
-
-    if (u >= frameCount) {
-        // common case, user didn't just wrap
-        if (u - frameCount >= userBase ) {
-            userBase += frameCount;
-        }
-    } else if (u >= userBase + frameCount) {
-        // user just wrapped
-        userBase += frameCount;
-    }
-
-    user = u;
-
-    // Clear flow control error condition as new data has been written/read to/from buffer.
-    if (flags & CBLK_UNDERRUN) {
-        android_atomic_and(~CBLK_UNDERRUN, &flags);
-    }
-
-    return u;
 }
 
-bool audio_track_cblk_t::stepServer(size_t stepCount, size_t frameCount, bool isOut)
+const struct timespec ClientProxy::kForever = {INT_MAX /*tv_sec*/, 0 /*tv_nsec*/};
+const struct timespec ClientProxy::kNonBlocking = {0 /*tv_sec*/, 0 /*tv_nsec*/};
+
+#define MEASURE_NS 10000000 // attempt to provide accurate timeouts if requested >= MEASURE_NS
+
+// To facilitate quicker recovery from server failure, this value limits the timeout per each futex
+// wait.  However it does not protect infinite timeouts.  If defined to be zero, there is no limit.
+// FIXME May not be compatible with audio tunneling requirements where timeout should be in the
+// order of minutes.
+#define MAX_SEC    5
+
+status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *requested,
+        struct timespec *elapsed)
 {
-    ALOGV("stepserver %08x %08x %d", user, server, stepCount);
-
-    if (!tryLock()) {
-        ALOGW("stepServer() could not lock cblk");
-        return false;
+    if (buffer == NULL || buffer->mFrameCount == 0) {
+        ALOGE("%s BAD_VALUE", __func__);
+        return BAD_VALUE;
     }
+    struct timespec total;          // total elapsed time spent waiting
+    total.tv_sec = 0;
+    total.tv_nsec = 0;
+    bool measure = elapsed != NULL; // whether to measure total elapsed time spent waiting
 
-    uint32_t s = server;
-    bool flushed = (s == user);
-
-    s += stepCount;
-    if (isOut) {
-        // Mark that we have read the first buffer so that next time stepUser() is called
-        // we switch to normal obtainBuffer() timeout period
-        if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS) {
-            bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS - 1;
-        }
-        // It is possible that we receive a flush()
-        // while the mixer is processing a block: in this case,
-        // stepServer() is called After the flush() has reset u & s and
-        // we have s > u
-        if (flushed) {
-            ALOGW("stepServer occurred after track reset");
-            s = user;
-        }
-    }
-
-    if (s >= loopEnd) {
-        ALOGW_IF(s > loopEnd, "stepServer: s %u > loopEnd %u", s, loopEnd);
-        s = loopStart;
-        if (--loopCount == 0) {
-            loopEnd = UINT_MAX;
-            loopStart = UINT_MAX;
-        }
-    }
-
-    if (s >= frameCount) {
-        // common case, server didn't just wrap
-        if (s - frameCount >= serverBase ) {
-            serverBase += frameCount;
-        }
-    } else if (s >= serverBase + frameCount) {
-        // server just wrapped
-        serverBase += frameCount;
-    }
-
-    server = s;
-
-    if (!(flags & CBLK_INVALID)) {
-        cv.signal();
-    }
-    lock.unlock();
-    return true;
-}
-
-void* audio_track_cblk_t::buffer(void *buffers, size_t frameSize, uint32_t offset) const
-{
-    return (int8_t *)buffers + (offset - userBase) * frameSize;
-}
-
-uint32_t audio_track_cblk_t::framesAvailable(size_t frameCount, bool isOut)
-{
-    Mutex::Autolock _l(lock);
-    return framesAvailable_l(frameCount, isOut);
-}
-
-uint32_t audio_track_cblk_t::framesAvailable_l(size_t frameCount, bool isOut)
-{
-    uint32_t u = user;
-    uint32_t s = server;
-
-    if (isOut) {
-        uint32_t limit = (s < loopStart) ? s : loopStart;
-        return limit + frameCount - u;
+    status_t status;
+    enum {
+        TIMEOUT_ZERO,       // requested == NULL || *requested == 0
+        TIMEOUT_INFINITE,   // *requested == infinity
+        TIMEOUT_FINITE,     // 0 < *requested < infinity
+        TIMEOUT_CONTINUE,   // additional chances after TIMEOUT_FINITE
+    } timeout;
+    if (requested == NULL) {
+        timeout = TIMEOUT_ZERO;
+    } else if (requested->tv_sec == 0 && requested->tv_nsec == 0) {
+        timeout = TIMEOUT_ZERO;
+    } else if (requested->tv_sec == INT_MAX) {
+        timeout = TIMEOUT_INFINITE;
     } else {
-        return frameCount + u - s;
+        timeout = TIMEOUT_FINITE;
+        if (requested->tv_sec > 0 || requested->tv_nsec >= MEASURE_NS) {
+            measure = true;
+        }
     }
-}
-
-uint32_t audio_track_cblk_t::framesReady(bool isOut)
-{
-    uint32_t u = user;
-    uint32_t s = server;
-
-    if (isOut) {
-        if (u < loopEnd) {
-            return u - s;
+    struct timespec before;
+    bool beforeIsValid = false;
+    audio_track_cblk_t* cblk = mCblk;
+    bool ignoreInitialPendingInterrupt = true;
+    // check for shared memory corruption
+    if (mIsShutdown) {
+        status = NO_INIT;
+        goto end;
+    }
+    for (;;) {
+        int32_t flags = android_atomic_and(~CBLK_INTERRUPT, &cblk->flags);
+        // check for track invalidation by server, or server death detection
+        if (flags & CBLK_INVALID) {
+            ALOGV("Track invalidated");
+            status = DEAD_OBJECT;
+            goto end;
+        }
+        // check for obtainBuffer interrupted by client
+        if (!ignoreInitialPendingInterrupt && (flags & CBLK_INTERRUPT)) {
+            ALOGV("obtainBuffer() interrupted by client");
+            status = -EINTR;
+            goto end;
+        }
+        ignoreInitialPendingInterrupt = false;
+        // compute number of frames available to write (AudioTrack) or read (AudioRecord)
+        int32_t front;
+        int32_t rear;
+        if (mIsOut) {
+            // The barrier following the read of mFront is probably redundant.
+            // We're about to perform a conditional branch based on 'filled',
+            // which will force the processor to observe the read of mFront
+            // prior to allowing data writes starting at mRaw.
+            // However, the processor may support speculative execution,
+            // and be unable to undo speculative writes into shared memory.
+            // The barrier will prevent such speculative execution.
+            front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
+            rear = cblk->u.mStreaming.mRear;
         } else {
-            // do not block on mutex shared with client on AudioFlinger side
-            if (!tryLock()) {
-                ALOGW("framesReady() could not lock cblk");
-                return 0;
+            // On the other hand, this barrier is required.
+            rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+            front = cblk->u.mStreaming.mFront;
+        }
+        ssize_t filled = rear - front;
+        // pipe should not be overfull
+        if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
+            ALOGE("Shared memory control block is corrupt (filled=%d); shutting down", filled);
+            mIsShutdown = true;
+            status = NO_INIT;
+            goto end;
+        }
+        // don't allow filling pipe beyond the nominal size
+        size_t avail = mIsOut ? mFrameCount - filled : filled;
+        if (avail > 0) {
+            // 'avail' may be non-contiguous, so return only the first contiguous chunk
+            size_t part1;
+            if (mIsOut) {
+                rear &= mFrameCountP2 - 1;
+                part1 = mFrameCountP2 - rear;
+            } else {
+                front &= mFrameCountP2 - 1;
+                part1 = mFrameCountP2 - front;
             }
-            uint32_t frames = UINT_MAX;
-            if (loopCount >= 0) {
-                frames = (loopEnd - loopStart)*loopCount + u - s;
+            if (part1 > avail) {
+                part1 = avail;
             }
-            lock.unlock();
-            return frames;
+            if (part1 > buffer->mFrameCount) {
+                part1 = buffer->mFrameCount;
+            }
+            buffer->mFrameCount = part1;
+            buffer->mRaw = part1 > 0 ?
+                    &((char *) mBuffers)[(mIsOut ? rear : front) * mFrameSize] : NULL;
+            buffer->mNonContig = avail - part1;
+            // mUnreleased = part1;
+            status = NO_ERROR;
+            break;
+        }
+        struct timespec remaining;
+        const struct timespec *ts;
+        switch (timeout) {
+        case TIMEOUT_ZERO:
+            status = WOULD_BLOCK;
+            goto end;
+        case TIMEOUT_INFINITE:
+            ts = NULL;
+            break;
+        case TIMEOUT_FINITE:
+            timeout = TIMEOUT_CONTINUE;
+            if (MAX_SEC == 0) {
+                ts = requested;
+                break;
+            }
+            // fall through
+        case TIMEOUT_CONTINUE:
+            // FIXME we do not retry if requested < 10ms? needs documentation on this state machine
+            if (!measure || requested->tv_sec < total.tv_sec ||
+                    (requested->tv_sec == total.tv_sec && requested->tv_nsec <= total.tv_nsec)) {
+                status = TIMED_OUT;
+                goto end;
+            }
+            remaining.tv_sec = requested->tv_sec - total.tv_sec;
+            if ((remaining.tv_nsec = requested->tv_nsec - total.tv_nsec) < 0) {
+                remaining.tv_nsec += 1000000000;
+                remaining.tv_sec++;
+            }
+            if (0 < MAX_SEC && MAX_SEC < remaining.tv_sec) {
+                remaining.tv_sec = MAX_SEC;
+                remaining.tv_nsec = 0;
+            }
+            ts = &remaining;
+            break;
+        default:
+            LOG_FATAL("%s timeout=%d", __func__, timeout);
+            ts = NULL;
+            break;
+        }
+        int32_t old = android_atomic_dec(&cblk->mFutex);
+        if (old <= 0) {
+            int rc;
+            if (measure && !beforeIsValid) {
+                clock_gettime(CLOCK_MONOTONIC, &before);
+                beforeIsValid = true;
+            }
+            int ret = __futex_syscall4(&cblk->mFutex,
+                    mClientInServer ? FUTEX_WAIT_PRIVATE : FUTEX_WAIT, old - 1, ts);
+            // update total elapsed time spent waiting
+            if (measure) {
+                struct timespec after;
+                clock_gettime(CLOCK_MONOTONIC, &after);
+                total.tv_sec += after.tv_sec - before.tv_sec;
+                long deltaNs = after.tv_nsec - before.tv_nsec;
+                if (deltaNs < 0) {
+                    deltaNs += 1000000000;
+                    total.tv_sec--;
+                }
+                if ((total.tv_nsec += deltaNs) >= 1000000000) {
+                    total.tv_nsec -= 1000000000;
+                    total.tv_sec++;
+                }
+                before = after;
+                beforeIsValid = true;
+            }
+            switch (ret) {
+            case 0:             // normal wakeup by server, or by binderDied()
+            case -EWOULDBLOCK:  // benign race condition with server
+            case -EINTR:        // wait was interrupted by signal or other spurious wakeup
+            case -ETIMEDOUT:    // time-out expired
+                break;
+            default:
+                ALOGE("%s unexpected error %d", __func__, ret);
+                status = -ret;
+                goto end;
+            }
+        }
+    }
+
+end:
+    if (status != NO_ERROR) {
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+    }
+    if (elapsed != NULL) {
+        *elapsed = total;
+    }
+    if (requested == NULL) {
+        requested = &kNonBlocking;
+    }
+    if (measure) {
+        ALOGV("requested %d.%03d elapsed %d.%03d", (int) requested->tv_sec,
+                (int) (requested->tv_nsec / 1000000), (int) total.tv_sec, (int) (total.tv_nsec / 1000000));
+    }
+    return status;
+}
+
+void ClientProxy::releaseBuffer(Buffer* buffer)
+{
+    size_t stepCount = buffer->mFrameCount;
+    // FIXME
+    //  check mUnreleased
+    //  verify that stepCount <= frameCount returned by the last obtainBuffer()
+    //  verify stepCount not > total frame count of pipe
+    if (stepCount == 0) {
+        return;
+    }
+    audio_track_cblk_t* cblk = mCblk;
+    // Both of these barriers are required
+    if (mIsOut) {
+        int32_t rear = cblk->u.mStreaming.mRear;
+        android_atomic_release_store(stepCount + rear, &cblk->u.mStreaming.mRear);
+    } else {
+        int32_t front = cblk->u.mStreaming.mFront;
+        android_atomic_release_store(stepCount + front, &cblk->u.mStreaming.mFront);
+    }
+}
+
+void ClientProxy::binderDied()
+{
+    audio_track_cblk_t* cblk = mCblk;
+    if (!(android_atomic_or(CBLK_INVALID, &cblk->flags) & CBLK_INVALID)) {
+        // it seems that a FUTEX_WAKE_PRIVATE will not wake a FUTEX_WAIT, even within same process
+        (void) __futex_syscall3(&cblk->mFutex, mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE,
+                1);
+    }
+}
+
+void ClientProxy::interrupt()
+{
+    audio_track_cblk_t* cblk = mCblk;
+    if (!(android_atomic_or(CBLK_INTERRUPT, &cblk->flags) & CBLK_INTERRUPT)) {
+        (void) __futex_syscall3(&cblk->mFutex, mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE,
+                1);
+    }
+}
+
+size_t ClientProxy::getMisalignment()
+{
+    audio_track_cblk_t* cblk = mCblk;
+    return (mFrameCountP2 - (mIsOut ? cblk->u.mStreaming.mRear : cblk->u.mStreaming.mFront)) &
+            (mFrameCountP2 - 1);
+}
+
+// ---------------------------------------------------------------------------
+
+void AudioTrackClientProxy::flush()
+{
+    mCblk->u.mStreaming.mFlush++;
+}
+
+// ---------------------------------------------------------------------------
+
+StaticAudioTrackClientProxy::StaticAudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers,
+        size_t frameCount, size_t frameSize)
+    : AudioTrackClientProxy(cblk, buffers, frameCount, frameSize),
+      mMutator(&cblk->u.mStatic.mSingleStateQueue), mBufferPosition(0)
+{
+}
+
+void StaticAudioTrackClientProxy::flush()
+{
+    LOG_FATAL("static flush");
+}
+
+void StaticAudioTrackClientProxy::setLoop(size_t loopStart, size_t loopEnd, int loopCount)
+{
+    StaticAudioTrackState newState;
+    newState.mLoopStart = loopStart;
+    newState.mLoopEnd = loopEnd;
+    newState.mLoopCount = loopCount;
+    mBufferPosition = loopStart;
+    (void) mMutator.push(newState);
+}
+
+size_t StaticAudioTrackClientProxy::getBufferPosition()
+{
+    size_t bufferPosition;
+    if (mMutator.ack()) {
+        bufferPosition = mCblk->u.mStatic.mBufferPosition;
+        if (bufferPosition > mFrameCount) {
+            bufferPosition = mFrameCount;
         }
     } else {
-        return s - u;
+        bufferPosition = mBufferPosition;
     }
+    return bufferPosition;
 }
 
-bool audio_track_cblk_t::tryLock()
+// ---------------------------------------------------------------------------
+
+ServerProxy::ServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
+        size_t frameSize, bool isOut, bool clientInServer)
+    : Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer), mUnreleased(0),
+      mAvailToClient(0), mFlush(0), mDeferWake(false)
 {
-    // the code below simulates lock-with-timeout
-    // we MUST do this to protect the AudioFlinger server
-    // as this lock is shared with the client.
-    status_t err;
-
-    err = lock.tryLock();
-    if (err == -EBUSY) { // just wait a bit
-        usleep(1000);
-        err = lock.tryLock();
-    }
-    if (err != NO_ERROR) {
-        // probably, the client just died.
-        return false;
-    }
-    return true;
 }
 
+status_t ServerProxy::obtainBuffer(Buffer* buffer)
+{
+    if (mIsShutdown) {
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+        mUnreleased = 0;
+        return NO_INIT;
+    }
+    audio_track_cblk_t* cblk = mCblk;
+    // compute number of frames available to write (AudioTrack) or read (AudioRecord),
+    // or use previous cached value from framesReady(), with added barrier if it omits.
+    int32_t front;
+    int32_t rear;
+    // See notes on barriers at ClientProxy::obtainBuffer()
+    if (mIsOut) {
+        int32_t flush = cblk->u.mStreaming.mFlush;
+        rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+        if (flush != mFlush) {
+            front = rear;
+            mFlush = flush;
+        } else {
+            front = cblk->u.mStreaming.mFront;
+        }
+    } else {
+        front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
+        rear = cblk->u.mStreaming.mRear;
+    }
+    ssize_t filled = rear - front;
+    // pipe should not already be overfull
+    if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
+        ALOGE("Shared memory control block is corrupt (filled=%d); shutting down", filled);
+        mIsShutdown = true;
+    }
+    if (mIsShutdown) {
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+        mUnreleased = 0;
+        return NO_INIT;
+    }
+    // don't allow filling pipe beyond the nominal size
+    size_t availToServer;
+    if (mIsOut) {
+        availToServer = filled;
+        mAvailToClient = mFrameCount - filled;
+    } else {
+        availToServer = mFrameCount - filled;
+        mAvailToClient = filled;
+    }
+    // 'availToServer' may be non-contiguous, so return only the first contiguous chunk
+    size_t part1;
+    if (mIsOut) {
+        front &= mFrameCountP2 - 1;
+        part1 = mFrameCountP2 - front;
+    } else {
+        rear &= mFrameCountP2 - 1;
+        part1 = mFrameCountP2 - rear;
+    }
+    if (part1 > availToServer) {
+        part1 = availToServer;
+    }
+    size_t ask = buffer->mFrameCount;
+    if (part1 > ask) {
+        part1 = ask;
+    }
+    // is assignment redundant in some cases?
+    buffer->mFrameCount = part1;
+    buffer->mRaw = part1 > 0 ?
+            &((char *) mBuffers)[(mIsOut ? front : rear) * mFrameSize] : NULL;
+    buffer->mNonContig = availToServer - part1;
+    mUnreleased = part1;
+    // optimization to avoid waking up the client too early
+    // FIXME need to test for recording
+    mDeferWake = part1 < ask && availToServer >= ask;
+    return part1 > 0 ? NO_ERROR : WOULD_BLOCK;
+}
+
+void ServerProxy::releaseBuffer(Buffer* buffer)
+{
+    if (mIsShutdown) {
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+        return;
+    }
+    size_t stepCount = buffer->mFrameCount;
+    LOG_ALWAYS_FATAL_IF(stepCount > mUnreleased);
+    if (stepCount == 0) {
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+        return;
+    }
+    mUnreleased -= stepCount;
+    audio_track_cblk_t* cblk = mCblk;
+    if (mIsOut) {
+        int32_t front = cblk->u.mStreaming.mFront;
+        android_atomic_release_store(stepCount + front, &cblk->u.mStreaming.mFront);
+    } else {
+        int32_t rear = cblk->u.mStreaming.mRear;
+        android_atomic_release_store(stepCount + rear, &cblk->u.mStreaming.mRear);
+    }
+
+    mCblk->server += stepCount;
+
+    size_t half = mFrameCount / 2;
+    if (half == 0) {
+        half = 1;
+    }
+    size_t minimum = cblk->mMinimum;
+    if (minimum == 0) {
+        minimum = mIsOut ? half : 1;
+    } else if (minimum > half) {
+        minimum = half;
+    }
+    if (!mDeferWake && mAvailToClient + stepCount >= minimum) {
+        ALOGV("mAvailToClient=%u stepCount=%u minimum=%u", mAvailToClient, stepCount, minimum);
+        // could client be sleeping, or not need this increment and counter overflows?
+        int32_t old = android_atomic_inc(&cblk->mFutex);
+        if (old == -1) {
+            (void) __futex_syscall3(&cblk->mFutex,
+                    mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
+        }
+    }
+
+    buffer->mFrameCount = 0;
+    buffer->mRaw = NULL;
+    buffer->mNonContig = 0;
+}
+
+// ---------------------------------------------------------------------------
+
+size_t AudioTrackServerProxy::framesReady()
+{
+    LOG_ALWAYS_FATAL_IF(!mIsOut);
+
+    if (mIsShutdown) {
+        return 0;
+    }
+    audio_track_cblk_t* cblk = mCblk;
+    // the acquire might not be necessary since not doing a subsequent read
+    int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+    ssize_t filled = rear - cblk->u.mStreaming.mFront;
+    // pipe should not already be overfull
+    if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
+        ALOGE("Shared memory control block is corrupt (filled=%d); shutting down", filled);
+        mIsShutdown = true;
+        return 0;
+    }
+    //  cache this value for later use by obtainBuffer(), with added barrier
+    //  and racy if called by normal mixer thread
+    // ignores flush(), so framesReady() may report a larger mFrameCount than obtainBuffer()
+    return filled;
+}
+
+// ---------------------------------------------------------------------------
+
+StaticAudioTrackServerProxy::StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers,
+        size_t frameCount, size_t frameSize)
+    : AudioTrackServerProxy(cblk, buffers, frameCount, frameSize),
+      mObserver(&cblk->u.mStatic.mSingleStateQueue), mPosition(0),
+      mEnd(frameCount), mFramesReadyIsCalledByMultipleThreads(false)
+{
+    mState.mLoopStart = 0;
+    mState.mLoopEnd = 0;
+    mState.mLoopCount = 0;
+}
+
+void StaticAudioTrackServerProxy::framesReadyIsCalledByMultipleThreads()
+{
+    mFramesReadyIsCalledByMultipleThreads = true;
+}
+
+size_t StaticAudioTrackServerProxy::framesReady()
+{
+    // FIXME
+    // This is racy if called by normal mixer thread,
+    // as we're reading 2 independent variables without a lock.
+    // Can't call mObserver.poll(), as we might be called from wrong thread.
+    // If looping is enabled, should return a higher number (since includes non-contiguous).
+    size_t position = mPosition;
+    if (!mFramesReadyIsCalledByMultipleThreads) {
+        ssize_t positionOrStatus = pollPosition();
+        if (positionOrStatus >= 0) {
+            position = (size_t) positionOrStatus;
+        }
+    }
+    size_t end = mEnd;
+    return position < end ? end - position : 0;
+}
+
+ssize_t StaticAudioTrackServerProxy::pollPosition()
+{
+    size_t position = mPosition;
+    StaticAudioTrackState state;
+    if (mObserver.poll(state)) {
+        bool valid = false;
+        size_t loopStart = state.mLoopStart;
+        size_t loopEnd = state.mLoopEnd;
+        if (state.mLoopCount == 0) {
+            if (loopStart > mFrameCount) {
+                loopStart = mFrameCount;
+            }
+            // ignore loopEnd
+            mPosition = position = loopStart;
+            mEnd = mFrameCount;
+            mState.mLoopCount = 0;
+            valid = true;
+        } else {
+            if (loopStart < loopEnd && loopEnd <= mFrameCount &&
+                    loopEnd - loopStart >= MIN_LOOP) {
+                if (!(loopStart <= position && position < loopEnd)) {
+                    mPosition = position = loopStart;
+                }
+                mEnd = loopEnd;
+                mState = state;
+                valid = true;
+            }
+        }
+        if (!valid) {
+            ALOGE("%s client pushed an invalid state, shutting down", __func__);
+            mIsShutdown = true;
+            return (ssize_t) NO_INIT;
+        }
+        mCblk->u.mStatic.mBufferPosition = position;
+    }
+    return (ssize_t) position;
+}
+
+status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer)
+{
+    if (mIsShutdown) {
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+        mUnreleased = 0;
+        return NO_INIT;
+    }
+    ssize_t positionOrStatus = pollPosition();
+    if (positionOrStatus < 0) {
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+        mUnreleased = 0;
+        return (status_t) positionOrStatus;
+    }
+    size_t position = (size_t) positionOrStatus;
+    size_t avail;
+    if (position < mEnd) {
+        avail = mEnd - position;
+        size_t wanted = buffer->mFrameCount;
+        if (avail < wanted) {
+            buffer->mFrameCount = avail;
+        } else {
+            avail = wanted;
+        }
+        buffer->mRaw = &((char *) mBuffers)[position * mFrameSize];
+    } else {
+        avail = 0;
+        buffer->mFrameCount = 0;
+        buffer->mRaw = NULL;
+    }
+    buffer->mNonContig = 0;     // FIXME should be > 0 for looping
+    mUnreleased = avail;
+    return NO_ERROR;
+}
+
+void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)
+{
+    size_t stepCount = buffer->mFrameCount;
+    LOG_ALWAYS_FATAL_IF(stepCount > mUnreleased);
+    if (stepCount == 0) {
+        buffer->mRaw = NULL;
+        buffer->mNonContig = 0;
+        return;
+    }
+    mUnreleased -= stepCount;
+    audio_track_cblk_t* cblk = mCblk;
+    size_t position = mPosition;
+    size_t newPosition = position + stepCount;
+    int32_t setFlags = 0;
+    if (!(position <= newPosition && newPosition <= mFrameCount)) {
+        ALOGW("%s newPosition %u outside [%u, %u]", __func__, newPosition, position, mFrameCount);
+        newPosition = mFrameCount;
+    } else if (mState.mLoopCount != 0 && newPosition == mState.mLoopEnd) {
+        if (mState.mLoopCount == -1 || --mState.mLoopCount != 0) {
+            newPosition = mState.mLoopStart;
+            setFlags = CBLK_LOOP_CYCLE;
+        } else {
+            mEnd = mFrameCount;     // this is what allows playback to continue after the loop
+            setFlags = CBLK_LOOP_FINAL;
+        }
+    }
+    if (newPosition == mFrameCount) {
+        setFlags |= CBLK_BUFFER_END;
+    }
+    mPosition = newPosition;
+
+    cblk->server += stepCount;
+    cblk->u.mStatic.mBufferPosition = newPosition;
+    if (setFlags != 0) {
+        (void) android_atomic_or(setFlags, &cblk->flags);
+        // this would be a good place to wake a futex
+    }
+
+    buffer->mFrameCount = 0;
+    buffer->mRaw = NULL;
+    buffer->mNonContig = 0;
+}
+
+// ---------------------------------------------------------------------------
+
 }   // namespace android
diff --git a/media/libmedia/IHDCP.cpp b/media/libmedia/IHDCP.cpp
index f13addc..a46ff91 100644
--- a/media/libmedia/IHDCP.cpp
+++ b/media/libmedia/IHDCP.cpp
@@ -31,6 +31,7 @@
     HDCP_INIT_ASYNC,
     HDCP_SHUTDOWN_ASYNC,
     HDCP_ENCRYPT,
+    HDCP_ENCRYPT_NATIVE,
     HDCP_DECRYPT,
 };
 
@@ -108,6 +109,31 @@
         return err;
     }
 
+    virtual status_t encryptNative(
+            const sp<GraphicBuffer> &graphicBuffer,
+            size_t offset, size_t size, uint32_t streamCTR,
+            uint64_t *outInputCTR, void *outData) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IHDCP::getInterfaceDescriptor());
+        data.write(*graphicBuffer);
+        data.writeInt32(offset);
+        data.writeInt32(size);
+        data.writeInt32(streamCTR);
+        remote()->transact(HDCP_ENCRYPT_NATIVE, data, &reply);
+
+        status_t err = reply.readInt32();
+
+        if (err != OK) {
+            *outInputCTR = 0;
+            return err;
+        }
+
+        *outInputCTR = reply.readInt64();
+        reply.read(outData, size);
+
+        return err;
+    }
+
     virtual status_t decrypt(
             const void *inData, size_t size,
             uint32_t streamCTR, uint64_t inputCTR,
@@ -222,6 +248,34 @@
             return OK;
         }
 
+        case HDCP_ENCRYPT_NATIVE:
+        {
+            CHECK_INTERFACE(IHDCP, data, reply);
+
+            sp<GraphicBuffer> graphicBuffer = new GraphicBuffer();
+            data.read(*graphicBuffer);
+            size_t offset = data.readInt32();
+            size_t size = data.readInt32();
+            uint32_t streamCTR = data.readInt32();
+            void *outData = malloc(size);
+            uint64_t inputCTR;
+
+            status_t err = encryptNative(graphicBuffer, offset, size,
+                                         streamCTR, &inputCTR, outData);
+
+            reply->writeInt32(err);
+
+            if (err == OK) {
+                reply->writeInt64(inputCTR);
+                reply->write(outData, size);
+            }
+
+            free(outData);
+            outData = NULL;
+
+            return OK;
+        }
+
         case HDCP_DECRYPT:
         {
             size_t size = data.readInt32();
diff --git a/media/libmedia/IMediaDeathNotifier.cpp b/media/libmedia/IMediaDeathNotifier.cpp
index 9199db6..9db5b1b 100644
--- a/media/libmedia/IMediaDeathNotifier.cpp
+++ b/media/libmedia/IMediaDeathNotifier.cpp
@@ -49,10 +49,10 @@
         } while (true);
 
         if (sDeathNotifier == NULL) {
-        sDeathNotifier = new DeathNotifier();
-    }
-    binder->linkToDeath(sDeathNotifier);
-    sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);
+            sDeathNotifier = new DeathNotifier();
+        }
+        binder->linkToDeath(sDeathNotifier);
+        sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);
     }
     ALOGE_IF(sMediaPlayerService == 0, "no media player service!?");
     return sMediaPlayerService;
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index 59e538f..8fe5bb3 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -39,7 +39,6 @@
         mMaxTracks(maxTracks),
         mEasData(NULL),
         mEasJetFileLoc(NULL),
-        mAudioTrack(NULL),
         mTrackBufferSize(trackBufferSize)
 {
     ALOGV("JetPlayer constructor");
@@ -140,11 +139,10 @@
         free(mEasJetFileLoc);
         mEasJetFileLoc = NULL;
     }
-    if (mAudioTrack) {
+    if (mAudioTrack != 0) {
         mAudioTrack->stop();
         mAudioTrack->flush();
-        delete mAudioTrack;
-        mAudioTrack = NULL;
+        mAudioTrack.clear();
     }
     if (mAudioBuffer) {
         delete mAudioBuffer;
diff --git a/media/libmedia/MediaScannerClient.cpp b/media/libmedia/MediaScannerClient.cpp
index e1e3348..93a4a4c 100644
--- a/media/libmedia/MediaScannerClient.cpp
+++ b/media/libmedia/MediaScannerClient.cpp
@@ -16,7 +16,7 @@
 
 #include <media/mediascanner.h>
 
-#include <utils/StringArray.h>
+#include "StringArray.h"
 
 #include "autodetect.h"
 #include "unicode/ucnv.h"
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index ee70ef7..e1e88ec 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -547,8 +547,8 @@
 void SoundChannel::play(const sp<Sample>& sample, int nextChannelID, float leftVolume,
         float rightVolume, int priority, int loop, float rate)
 {
-    AudioTrack* oldTrack;
-    AudioTrack* newTrack;
+    sp<AudioTrack> oldTrack;
+    sp<AudioTrack> newTrack;
     status_t status;
 
     { // scope for the lock
@@ -620,7 +620,7 @@
             ALOGE("Error creating AudioTrack");
             goto exit;
         }
-        ALOGV("setVolume %p", newTrack);
+        ALOGV("setVolume %p", newTrack.get());
         newTrack->setVolume(leftVolume, rightVolume);
         newTrack->setLoop(0, frameCount, loop);
 
@@ -643,11 +643,9 @@
     }
 
 exit:
-    ALOGV("delete oldTrack %p", oldTrack);
-    delete oldTrack;
+    ALOGV("delete oldTrack %p", oldTrack.get());
     if (status != NO_ERROR) {
-        delete newTrack;
-        mAudioTrack = NULL;
+        mAudioTrack.clear();
     }
 }
 
@@ -884,7 +882,7 @@
     }
     // do not call AudioTrack destructor with mLock held as it will wait for the AudioTrack
     // callback thread to exit which may need to execute process() and acquire the mLock.
-    delete mAudioTrack;
+    mAudioTrack.clear();
 }
 
 void SoundChannel::dump()
diff --git a/media/libmedia/StringArray.cpp b/media/libmedia/StringArray.cpp
new file mode 100644
index 0000000..5f5b57a
--- /dev/null
+++ b/media/libmedia/StringArray.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Sortable array of strings.  STL-ish, but STL-free.
+//
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "StringArray.h"
+
+namespace android {
+
+//
+// An expanding array of strings.  Add, get, sort, delete.
+//
+StringArray::StringArray()
+    : mMax(0), mCurrent(0), mArray(NULL)
+{
+}
+
+StringArray::~StringArray() {
+    for (int i = 0; i < mCurrent; i++)
+        delete[] mArray[i];
+    delete[] mArray;
+}
+
+//
+// Add a string.  A copy of the string is made.
+//
+bool StringArray::push_back(const char* str) {
+    if (mCurrent >= mMax) {
+        char** tmp;
+
+        if (mMax == 0)
+            mMax = 16;      // initial storage
+        else
+            mMax *= 2;
+
+        tmp = new char*[mMax];
+        if (tmp == NULL)
+            return false;
+
+        memcpy(tmp, mArray, mCurrent * sizeof(char*));
+        delete[] mArray;
+        mArray = tmp;
+    }
+
+    int len = strlen(str);
+    mArray[mCurrent] = new char[len+1];
+    memcpy(mArray[mCurrent], str, len+1);
+    mCurrent++;
+
+    return true;
+}
+
+//
+// Delete an entry.
+//
+void StringArray::erase(int idx) {
+    if (idx < 0 || idx >= mCurrent)
+        return;
+    delete[] mArray[idx];
+    if (idx < mCurrent-1) {
+        memmove(&mArray[idx], &mArray[idx+1],
+                (mCurrent-1 - idx) * sizeof(char*));
+    }
+    mCurrent--;
+}
+
+//
+// Sort the array.
+//
+void StringArray::sort(int (*compare)(const void*, const void*)) {
+    qsort(mArray, mCurrent, sizeof(char*), compare);
+}
+
+//
+// Pass this to the sort routine to do an ascending alphabetical sort.
+//
+int StringArray::cmpAscendingAlpha(const void* pstr1, const void* pstr2) {
+    return strcmp(*(const char**)pstr1, *(const char**)pstr2);
+}
+
+//
+// Set entry N to specified string.
+// [should use operator[] here]
+//
+void StringArray::setEntry(int idx, const char* str) {
+    if (idx < 0 || idx >= mCurrent)
+        return;
+    delete[] mArray[idx];
+    int len = strlen(str);
+    mArray[idx] = new char[len+1];
+    memcpy(mArray[idx], str, len+1);
+}
+
+
+}; // namespace android
diff --git a/media/libmedia/StringArray.h b/media/libmedia/StringArray.h
new file mode 100644
index 0000000..ae47085
--- /dev/null
+++ b/media/libmedia/StringArray.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Sortable array of strings.  STL-ish, but STL-free.
+//
+#ifndef _LIBS_MEDIA_STRING_ARRAY_H
+#define _LIBS_MEDIA_STRING_ARRAY_H
+
+#include <stdlib.h>
+#include <string.h>
+
+namespace android {
+
+//
+// An expanding array of strings.  Add, get, sort, delete.
+//
+class StringArray {
+public:
+    StringArray();
+    virtual ~StringArray();
+
+    //
+    // Add a string.  A copy of the string is made.
+    //
+    bool push_back(const char* str);
+
+    //
+    // Delete an entry.
+    //
+    void erase(int idx);
+
+    //
+    // Sort the array.
+    //
+    void sort(int (*compare)(const void*, const void*));
+
+    //
+    // Pass this to the sort routine to do an ascending alphabetical sort.
+    //
+    static int cmpAscendingAlpha(const void* pstr1, const void* pstr2);
+
+    //
+    // Get the #of items in the array.
+    //
+    inline int size(void) const { return mCurrent; }
+
+    //
+    // Return entry N.
+    // [should use operator[] here]
+    //
+    const char* getEntry(int idx) const {
+        return (unsigned(idx) >= unsigned(mCurrent)) ? NULL : mArray[idx];
+    }
+
+    //
+    // Set entry N to specified string.
+    // [should use operator[] here]
+    //
+    void setEntry(int idx, const char* str);
+
+private:
+    int     mMax;
+    int     mCurrent;
+    char**  mArray;
+};
+
+}; // namespace android
+
+#endif // _LIBS_MEDIA_STRING_ARRAY_H
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index f55b697..f9ad31d 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -803,7 +803,6 @@
     ALOGV("ToneGenerator constructor: streamType=%d, volume=%f", streamType, volume);
 
     mState = TONE_IDLE;
-    mpAudioTrack = NULL;
 
     if (AudioSystem::getOutputSamplingRate(&mSamplingRate, streamType) != NO_ERROR) {
         ALOGE("Unable to marshal AudioFlinger");
@@ -855,10 +854,10 @@
 ToneGenerator::~ToneGenerator() {
     ALOGV("ToneGenerator destructor");
 
-    if (mpAudioTrack != NULL) {
+    if (mpAudioTrack != 0) {
         stopTone();
-        ALOGV("Delete Track: %p", mpAudioTrack);
-        delete mpAudioTrack;
+        ALOGV("Delete Track: %p", mpAudioTrack.get());
+        mpAudioTrack.clear();
     }
 }
 
@@ -1047,14 +1046,9 @@
 ////////////////////////////////////////////////////////////////////////////////
 bool ToneGenerator::initAudioTrack() {
 
-    if (mpAudioTrack) {
-        delete mpAudioTrack;
-        mpAudioTrack = NULL;
-    }
-
     // Open audio track in mono, PCM 16bit, default sampling rate, default buffer size
     mpAudioTrack = new AudioTrack();
-    ALOGV("Create Track: %p", mpAudioTrack);
+    ALOGV("Create Track: %p", mpAudioTrack.get());
 
     mpAudioTrack->set(mStreamType,
                       0,    // sampleRate
@@ -1066,7 +1060,9 @@
                       this, // user
                       0,    // notificationFrames
                       0,    // sharedBuffer
-                      mThreadCanCallJava);
+                      mThreadCanCallJava,
+                      0,    // sessionId
+                      AudioTrack::TRANSFER_CALLBACK);
 
     if (mpAudioTrack->initCheck() != NO_ERROR) {
         ALOGE("AudioTrack->initCheck failed");
@@ -1081,12 +1077,10 @@
 
 initAudioTrack_exit:
 
+    ALOGV("Init failed: %p", mpAudioTrack.get());
+
     // Cleanup
-    if (mpAudioTrack != NULL) {
-        ALOGV("Delete Track I: %p", mpAudioTrack);
-        delete mpAudioTrack;
-        mpAudioTrack = NULL;
-    }
+    mpAudioTrack.clear();
 
     return false;
 }
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index d87bc7f..8f21632 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -34,6 +34,7 @@
     libsonivox                  \
     libstagefright              \
     libstagefright_foundation   \
+    libstagefright_httplive     \
     libstagefright_omx          \
     libstagefright_wfd          \
     libutils                    \
diff --git a/media/libmediaplayerservice/HDCP.cpp b/media/libmediaplayerservice/HDCP.cpp
index 469a02e..8a3188c 100644
--- a/media/libmediaplayerservice/HDCP.cpp
+++ b/media/libmediaplayerservice/HDCP.cpp
@@ -116,6 +116,24 @@
     return mHDCPModule->encrypt(inData, size, streamCTR, outInputCTR, outData);
 }
 
+status_t HDCP::encryptNative(
+        const sp<GraphicBuffer> &graphicBuffer,
+        size_t offset, size_t size, uint32_t streamCTR,
+        uint64_t *outInputCTR, void *outData) {
+    Mutex::Autolock autoLock(mLock);
+
+    CHECK(mIsEncryptionModule);
+
+    if (mHDCPModule == NULL) {
+        *outInputCTR = 0;
+
+        return NO_INIT;
+    }
+
+    return mHDCPModule->encryptNative(graphicBuffer->handle,
+                    offset, size, streamCTR, outInputCTR, outData);
+}
+
 status_t HDCP::decrypt(
         const void *inData, size_t size,
         uint32_t streamCTR, uint64_t outInputCTR, void *outData) {
diff --git a/media/libmediaplayerservice/HDCP.h b/media/libmediaplayerservice/HDCP.h
index 42e6467..c60c2e0 100644
--- a/media/libmediaplayerservice/HDCP.h
+++ b/media/libmediaplayerservice/HDCP.h
@@ -35,6 +35,11 @@
             const void *inData, size_t size, uint32_t streamCTR,
             uint64_t *outInputCTR, void *outData);
 
+    virtual status_t encryptNative(
+            const sp<GraphicBuffer> &graphicBuffer,
+            size_t offset, size_t size, uint32_t streamCTR,
+            uint64_t *outInputCTR, void *outData);
+
     virtual status_t decrypt(
             const void *inData, size_t size,
             uint32_t streamCTR, uint64_t outInputCTR, void *outData);
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index e600a3f..fa1ff36 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1295,8 +1295,6 @@
       mSessionId(sessionId),
       mFlags(AUDIO_OUTPUT_FLAG_NONE) {
     ALOGV("AudioOutput(%d)", sessionId);
-    mTrack = 0;
-    mRecycledTrack = 0;
     mStreamType = AUDIO_STREAM_MUSIC;
     mLeftVolume = 1.0;
     mRightVolume = 1.0;
@@ -1311,7 +1309,6 @@
 MediaPlayerService::AudioOutput::~AudioOutput()
 {
     close();
-    delete mRecycledTrack;
     delete mCallbackData;
 }
 
@@ -1422,7 +1419,7 @@
         }
     }
 
-    AudioTrack *t;
+    sp<AudioTrack> t;
     CallbackData *newcbd = NULL;
     if (mCallback != NULL) {
         newcbd = new CallbackData(this);
@@ -1453,13 +1450,12 @@
 
     if ((t == 0) || (t->initCheck() != NO_ERROR)) {
         ALOGE("Unable to create audio track");
-        delete t;
         delete newcbd;
         return NO_INIT;
     }
 
 
-    if (mRecycledTrack) {
+    if (mRecycledTrack != 0) {
         // check if the existing track can be reused as-is, or if a new track needs to be created.
 
         bool reuse = true;
@@ -1484,11 +1480,10 @@
             ALOGV("chaining to next output");
             close();
             mTrack = mRecycledTrack;
-            mRecycledTrack = NULL;
+            mRecycledTrack.clear();
             if (mCallbackData != NULL) {
                 mCallbackData->setOutput(this);
             }
-            delete t;
             delete newcbd;
             return OK;
         }
@@ -1499,8 +1494,7 @@
             mCallbackData->endTrackSwitch();
         }
         mRecycledTrack->flush();
-        delete mRecycledTrack;
-        mRecycledTrack = NULL;
+        mRecycledTrack.clear();
         delete mCallbackData;
         mCallbackData = NULL;
         close();
@@ -1533,7 +1527,7 @@
     if (mCallbackData != NULL) {
         mCallbackData->endTrackSwitch();
     }
-    if (mTrack) {
+    if (mTrack != 0) {
         mTrack->setVolume(mLeftVolume, mRightVolume);
         mTrack->setAuxEffectSendLevel(mSendLevel);
         mTrack->start();
@@ -1555,7 +1549,7 @@
         mNextOutput->mCallbackData = mCallbackData;
         mCallbackData = NULL;
         mNextOutput->mRecycledTrack = mTrack;
-        mTrack = NULL;
+        mTrack.clear();
         mNextOutput->mSampleRateHz = mSampleRateHz;
         mNextOutput->mMsecsPerFrame = mMsecsPerFrame;
         mNextOutput->mBytesWritten = mBytesWritten;
@@ -1568,7 +1562,7 @@
     LOG_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
 
     //ALOGV("write(%p, %u)", buffer, size);
-    if (mTrack) {
+    if (mTrack != 0) {
         ssize_t ret = mTrack->write(buffer, size);
         mBytesWritten += ret;
         return ret;
@@ -1579,26 +1573,25 @@
 void MediaPlayerService::AudioOutput::stop()
 {
     ALOGV("stop");
-    if (mTrack) mTrack->stop();
+    if (mTrack != 0) mTrack->stop();
 }
 
 void MediaPlayerService::AudioOutput::flush()
 {
     ALOGV("flush");
-    if (mTrack) mTrack->flush();
+    if (mTrack != 0) mTrack->flush();
 }
 
 void MediaPlayerService::AudioOutput::pause()
 {
     ALOGV("pause");
-    if (mTrack) mTrack->pause();
+    if (mTrack != 0) mTrack->pause();
 }
 
 void MediaPlayerService::AudioOutput::close()
 {
     ALOGV("close");
-    delete mTrack;
-    mTrack = 0;
+    mTrack.clear();
 }
 
 void MediaPlayerService::AudioOutput::setVolume(float left, float right)
@@ -1606,7 +1599,7 @@
     ALOGV("setVolume(%f, %f)", left, right);
     mLeftVolume = left;
     mRightVolume = right;
-    if (mTrack) {
+    if (mTrack != 0) {
         mTrack->setVolume(left, right);
     }
 }
@@ -1615,7 +1608,7 @@
 {
     ALOGV("setPlaybackRatePermille(%d)", ratePermille);
     status_t res = NO_ERROR;
-    if (mTrack) {
+    if (mTrack != 0) {
         res = mTrack->setSampleRate(ratePermille * mSampleRateHz / 1000);
     } else {
         res = NO_INIT;
@@ -1631,7 +1624,7 @@
 {
     ALOGV("setAuxEffectSendLevel(%f)", level);
     mSendLevel = level;
-    if (mTrack) {
+    if (mTrack != 0) {
         return mTrack->setAuxEffectSendLevel(level);
     }
     return NO_ERROR;
@@ -1641,7 +1634,7 @@
 {
     ALOGV("attachAuxEffect(%d)", effectId);
     mAuxEffectId = effectId;
-    if (mTrack) {
+    if (mTrack != 0) {
         return mTrack->attachAuxEffect(effectId);
     }
     return NO_ERROR;
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index b33805d..e586156 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -78,7 +78,7 @@
                                 AudioOutput(int sessionId);
         virtual                 ~AudioOutput();
 
-        virtual bool            ready() const { return mTrack != NULL; }
+        virtual bool            ready() const { return mTrack != 0; }
         virtual bool            realtime() const { return true; }
         virtual ssize_t         bufferSize() const;
         virtual ssize_t         frameCount() const;
@@ -120,8 +120,8 @@
         static void             CallbackWrapper(
                 int event, void *me, void *info);
 
-        AudioTrack*             mTrack;
-        AudioTrack*             mRecycledTrack;
+        sp<AudioTrack>          mTrack;
+        sp<AudioTrack>          mRecycledTrack;
         sp<AudioOutput>         mNextOutput;
         AudioCallback           mCallback;
         void *                  mCallbackCookie;
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index 655ee55..c8901ce 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -20,7 +20,6 @@
 
 #include "HTTPLiveSource.h"
 
-#include "ATSParser.h"
 #include "AnotherPacketSource.h"
 #include "LiveDataSource.h"
 #include "LiveSession.h"
@@ -62,7 +61,10 @@
 NuPlayer::HTTPLiveSource::~HTTPLiveSource() {
     if (mLiveSession != NULL) {
         mLiveSession->disconnect();
+        mLiveSession.clear();
+
         mLiveLooper->stop();
+        mLiveLooper.clear();
     }
 }
 
@@ -76,112 +78,42 @@
     mLiveSession = new LiveSession(
             notify,
             (mFlags & kFlagIncognito) ? LiveSession::kFlagIncognito : 0,
-            mUIDValid, mUID);
+            mUIDValid,
+            mUID);
 
     mLiveLooper->registerHandler(mLiveSession);
 
-    mLiveSession->connect(
+    mLiveSession->connectAsync(
             mURL.c_str(), mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
-
-    mTSParser = new ATSParser;
 }
 
 void NuPlayer::HTTPLiveSource::start() {
 }
 
-sp<MetaData> NuPlayer::HTTPLiveSource::getFormatMeta(bool audio) {
-    ATSParser::SourceType type =
-        audio ? ATSParser::AUDIO : ATSParser::VIDEO;
+sp<AMessage> NuPlayer::HTTPLiveSource::getFormat(bool audio) {
+    sp<AMessage> format;
+    status_t err = mLiveSession->getStreamFormat(
+            audio ? LiveSession::STREAMTYPE_AUDIO
+                  : LiveSession::STREAMTYPE_VIDEO,
+            &format);
 
-    sp<AnotherPacketSource> source =
-        static_cast<AnotherPacketSource *>(mTSParser->getSource(type).get());
-
-    if (source == NULL) {
+    if (err != OK) {
         return NULL;
     }
 
-    return source->getFormat();
+    return format;
 }
 
 status_t NuPlayer::HTTPLiveSource::feedMoreTSData() {
-    if (mFinalResult != OK) {
-        return mFinalResult;
-    }
-
-    sp<LiveDataSource> source =
-        static_cast<LiveDataSource *>(mLiveSession->getDataSource().get());
-
-    for (int32_t i = 0; i < 50; ++i) {
-        char buffer[188];
-        ssize_t n = source->readAtNonBlocking(mOffset, buffer, sizeof(buffer));
-
-        if (n == -EWOULDBLOCK) {
-            break;
-        } else if (n < 0) {
-            if (n != ERROR_END_OF_STREAM) {
-                ALOGI("input data EOS reached, error %ld", n);
-            } else {
-                ALOGI("input data EOS reached.");
-            }
-            mTSParser->signalEOS(n);
-            mFinalResult = n;
-            break;
-        } else {
-            if (buffer[0] == 0x00) {
-                // XXX legacy
-
-                uint8_t type = buffer[1];
-
-                sp<AMessage> extra = new AMessage;
-
-                if (type & 2) {
-                    int64_t mediaTimeUs;
-                    memcpy(&mediaTimeUs, &buffer[2], sizeof(mediaTimeUs));
-
-                    extra->setInt64(IStreamListener::kKeyMediaTimeUs, mediaTimeUs);
-                }
-
-                mTSParser->signalDiscontinuity(
-                        ((type & 1) == 0)
-                            ? ATSParser::DISCONTINUITY_SEEK
-                            : ATSParser::DISCONTINUITY_FORMATCHANGE,
-                        extra);
-            } else {
-                status_t err = mTSParser->feedTSPacket(buffer, sizeof(buffer));
-
-                if (err != OK) {
-                    ALOGE("TS Parser returned error %d", err);
-                    mTSParser->signalEOS(err);
-                    mFinalResult = err;
-                    break;
-                }
-            }
-
-            mOffset += n;
-        }
-    }
-
     return OK;
 }
 
 status_t NuPlayer::HTTPLiveSource::dequeueAccessUnit(
         bool audio, sp<ABuffer> *accessUnit) {
-    ATSParser::SourceType type =
-        audio ? ATSParser::AUDIO : ATSParser::VIDEO;
-
-    sp<AnotherPacketSource> source =
-        static_cast<AnotherPacketSource *>(mTSParser->getSource(type).get());
-
-    if (source == NULL) {
-        return -EWOULDBLOCK;
-    }
-
-    status_t finalResult;
-    if (!source->hasBufferAvailable(&finalResult)) {
-        return finalResult == OK ? -EWOULDBLOCK : finalResult;
-    }
-
-    return source->dequeueAccessUnit(accessUnit);
+    return mLiveSession->dequeueAccessUnit(
+            audio ? LiveSession::STREAMTYPE_AUDIO
+                  : LiveSession::STREAMTYPE_VIDEO,
+            accessUnit);
 }
 
 status_t NuPlayer::HTTPLiveSource::getDuration(int64_t *durationUs) {
@@ -189,15 +121,7 @@
 }
 
 status_t NuPlayer::HTTPLiveSource::seekTo(int64_t seekTimeUs) {
-    // We need to make sure we're not seeking until we have seen the very first
-    // PTS timestamp in the whole stream (from the beginning of the stream).
-    while (!mTSParser->PTSTimeDeltaEstablished() && feedMoreTSData() == OK) {
-        usleep(100000);
-    }
-
-    mLiveSession->seekTo(seekTimeUs);
-
-    return OK;
+    return mLiveSession->seekTo(seekTimeUs);
 }
 
 void NuPlayer::HTTPLiveSource::onMessageReceived(const sp<AMessage> &msg) {
@@ -249,6 +173,32 @@
             break;
         }
 
+        case LiveSession::kWhatStreamsChanged:
+        {
+            uint32_t changedMask;
+            CHECK(msg->findInt32(
+                        "changedMask", (int32_t *)&changedMask));
+
+            bool audio = changedMask & LiveSession::STREAMTYPE_AUDIO;
+            bool video = changedMask & LiveSession::STREAMTYPE_VIDEO;
+
+            sp<AMessage> reply;
+            CHECK(msg->findMessage("reply", &reply));
+
+            sp<AMessage> notify = dupNotify();
+            notify->setInt32("what", kWhatQueueDecoderShutdown);
+            notify->setInt32("audio", audio);
+            notify->setInt32("video", video);
+            notify->setMessage("reply", reply);
+            notify->post();
+            break;
+        }
+
+        case LiveSession::kWhatError:
+        {
+            break;
+        }
+
         default:
             TRESPASS();
     }
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
index 067d1da..aa9434b 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
@@ -23,7 +23,6 @@
 
 namespace android {
 
-struct ATSParser;
 struct LiveSession;
 
 struct NuPlayer::HTTPLiveSource : public NuPlayer::Source {
@@ -37,18 +36,16 @@
     virtual void prepareAsync();
     virtual void start();
 
-    virtual status_t feedMoreTSData();
-
     virtual status_t dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit);
+    virtual sp<AMessage> getFormat(bool audio);
 
+    virtual status_t feedMoreTSData();
     virtual status_t getDuration(int64_t *durationUs);
     virtual status_t seekTo(int64_t seekTimeUs);
 
 protected:
     virtual ~HTTPLiveSource();
 
-    virtual sp<MetaData> getFormatMeta(bool audio);
-
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
 private:
@@ -70,7 +67,6 @@
     off64_t mOffset;
     sp<ALooper> mLiveLooper;
     sp<LiveSession> mLiveSession;
-    sp<ATSParser> mTSParser;
 
     void onSessionNotify(const sp<AMessage> &msg);
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index b89b1c8..7e81035 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -89,6 +89,38 @@
     DISALLOW_EVIL_CONSTRUCTORS(SetSurfaceAction);
 };
 
+struct NuPlayer::ShutdownDecoderAction : public Action {
+    ShutdownDecoderAction(bool audio, bool video)
+        : mAudio(audio),
+          mVideo(video) {
+    }
+
+    virtual void execute(NuPlayer *player) {
+        player->performDecoderShutdown(mAudio, mVideo);
+    }
+
+private:
+    bool mAudio;
+    bool mVideo;
+
+    DISALLOW_EVIL_CONSTRUCTORS(ShutdownDecoderAction);
+};
+
+struct NuPlayer::PostMessageAction : public Action {
+    PostMessageAction(const sp<AMessage> &msg)
+        : mMessage(msg) {
+    }
+
+    virtual void execute(NuPlayer *) {
+        mMessage->post();
+    }
+
+private:
+    sp<AMessage> mMessage;
+
+    DISALLOW_EVIL_CONSTRUCTORS(PostMessageAction);
+};
+
 // Use this if there's no state necessary to save in order to execute
 // the action.
 struct NuPlayer::SimpleAction : public Action {
@@ -335,7 +367,8 @@
             ALOGV("kWhatSetVideoNativeWindow");
 
             mDeferredActions.push_back(
-                    new SimpleAction(&NuPlayer::performDecoderShutdown));
+                    new ShutdownDecoderAction(
+                        false /* audio */, true /* video */));
 
             sp<RefBase> obj;
             CHECK(msg->findObject("native-window", &obj));
@@ -712,7 +745,8 @@
             ALOGV("kWhatReset");
 
             mDeferredActions.push_back(
-                    new SimpleAction(&NuPlayer::performDecoderShutdown));
+                    new ShutdownDecoderAction(
+                        true /* audio */, true /* video */));
 
             mDeferredActions.push_back(
                     new SimpleAction(&NuPlayer::performReset));
@@ -1023,6 +1057,9 @@
 }
 
 void NuPlayer::flushDecoder(bool audio, bool needShutdown) {
+    ALOGV("[%s] flushDecoder needShutdown=%d",
+          audio ? "audio" : "video", needShutdown);
+
     if ((audio && mAudioDecoder == NULL) || (!audio && mVideoDecoder == NULL)) {
         ALOGI("flushDecoder %s without decoder present",
              audio ? "audio" : "video");
@@ -1173,20 +1210,29 @@
     }
 }
 
-void NuPlayer::performDecoderShutdown() {
-    ALOGV("performDecoderShutdown");
+void NuPlayer::performDecoderShutdown(bool audio, bool video) {
+    ALOGV("performDecoderShutdown audio=%d, video=%d", audio, video);
 
-    if (mAudioDecoder == NULL && mVideoDecoder == NULL) {
+    if ((!audio || mAudioDecoder == NULL)
+            && (!video || mVideoDecoder == NULL)) {
         return;
     }
 
     mTimeDiscontinuityPending = true;
 
-    if (mAudioDecoder != NULL) {
+    if (mFlushingAudio == NONE && (!audio || mAudioDecoder == NULL)) {
+        mFlushingAudio = FLUSHED;
+    }
+
+    if (mFlushingVideo == NONE && (!video || mVideoDecoder == NULL)) {
+        mFlushingVideo = FLUSHED;
+    }
+
+    if (audio && mAudioDecoder != NULL) {
         flushDecoder(true /* audio */, true /* needShutdown */);
     }
 
-    if (mVideoDecoder != NULL) {
+    if (video && mVideoDecoder != NULL) {
         flushDecoder(false /* audio */, true /* needShutdown */);
     }
 }
@@ -1322,6 +1368,19 @@
             break;
         }
 
+        case Source::kWhatQueueDecoderShutdown:
+        {
+            int32_t audio, video;
+            CHECK(msg->findInt32("audio", &audio));
+            CHECK(msg->findInt32("video", &video));
+
+            sp<AMessage> reply;
+            CHECK(msg->findMessage("reply", &reply));
+
+            queueDecoderShutdown(audio, video, reply);
+            break;
+        }
+
         default:
             TRESPASS();
     }
@@ -1355,4 +1414,19 @@
     TRESPASS();
 }
 
+void NuPlayer::queueDecoderShutdown(
+        bool audio, bool video, const sp<AMessage> &reply) {
+    ALOGI("queueDecoderShutdown audio=%d, video=%d", audio, video);
+
+    mDeferredActions.push_back(
+            new ShutdownDecoderAction(audio, video));
+
+    mDeferredActions.push_back(
+            new SimpleAction(&NuPlayer::performScanSources));
+
+    mDeferredActions.push_back(new PostMessageAction(reply));
+
+    processDeferredActions();
+}
+
 }  // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 50d0462..8b6c8c1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -80,6 +80,8 @@
     struct Action;
     struct SeekAction;
     struct SetSurfaceAction;
+    struct ShutdownDecoderAction;
+    struct PostMessageAction;
     struct SimpleAction;
 
     enum {
@@ -172,13 +174,16 @@
 
     void performSeek(int64_t seekTimeUs);
     void performDecoderFlush();
-    void performDecoderShutdown();
+    void performDecoderShutdown(bool audio, bool video);
     void performReset();
     void performScanSources();
     void performSetSurface(const sp<NativeWindowWrapper> &wrapper);
 
     void onSourceNotify(const sp<AMessage> &msg);
 
+    void queueDecoderShutdown(
+            bool audio, bool video, const sp<AMessage> &reply);
+
     DISALLOW_EVIL_CONSTRUCTORS(NuPlayer);
 };
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 404b56f..b543d9d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -95,11 +95,11 @@
 }
 
 void NuPlayer::Renderer::signalTimeDiscontinuity() {
-    CHECK(mAudioQueue.empty());
-    CHECK(mVideoQueue.empty());
+    // CHECK(mAudioQueue.empty());
+    // CHECK(mVideoQueue.empty());
     mAnchorTimeMediaUs = -1;
     mAnchorTimeRealUs = -1;
-    mSyncQueues = mHasAudio && mHasVideo;
+    mSyncQueues = false;
 }
 
 void NuPlayer::Renderer::pause() {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index 1cbf575..81ffd21 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -42,6 +42,7 @@
         kWhatVideoSizeChanged,
         kWhatBufferingStart,
         kWhatBufferingEnd,
+        kWhatQueueDecoderShutdown,
     };
 
     // The provides message is used to notify the player about various
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index cf41cf2..bf650b4 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -359,6 +359,7 @@
       mNode(NULL),
       mSentFormat(false),
       mIsEncoder(false),
+      mUseMetadataOnEncoderOutput(false),
       mShutdownInProgress(false),
       mEncoderDelay(0),
       mEncoderPadding(0),
@@ -483,7 +484,8 @@
                         ? OMXCodec::kRequiresAllocateBufferOnInputPorts
                         : OMXCodec::kRequiresAllocateBufferOnOutputPorts;
 
-                if (portIndex == kPortIndexInput && (mFlags & kFlagIsSecure)) {
+                if ((portIndex == kPortIndexInput && (mFlags & kFlagIsSecure))
+                        || mUseMetadataOnEncoderOutput) {
                     mem.clear();
 
                     void *ptr;
@@ -491,7 +493,10 @@
                             mNode, portIndex, def.nBufferSize, &info.mBufferID,
                             &ptr);
 
-                    info.mData = new ABuffer(ptr, def.nBufferSize);
+                    int32_t bufSize = mUseMetadataOnEncoderOutput ?
+                            (4 + sizeof(buffer_handle_t)) : def.nBufferSize;
+
+                    info.mData = new ABuffer(ptr, bufSize);
                 } else if (mQuirks & requiresAllocateBufferBit) {
                     err = mOMX->allocateBufferWithBackup(
                             mNode, portIndex, mem, &info.mBufferID);
@@ -912,14 +917,14 @@
         err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexInput, OMX_TRUE);
 
         if (err != OK) {
-            ALOGE("[%s] storeMetaDataInBuffers failed w/ err %d",
-                  mComponentName.c_str(), err);
+              ALOGE("[%s] storeMetaDataInBuffers (input) failed w/ err %d",
+                    mComponentName.c_str(), err);
 
-            return err;
-        }
-    }
+              return err;
+          }
+      }
 
-    int32_t prependSPSPPS;
+    int32_t prependSPSPPS = 0;
     if (encoder
             && msg->findInt32("prepend-sps-pps-to-idr-frames", &prependSPSPPS)
             && prependSPSPPS != 0) {
@@ -946,7 +951,27 @@
         }
     }
 
-    if (!strncasecmp(mime, "video/", 6)) {
+    // Only enable metadata mode on encoder output if encoder can prepend
+    // sps/pps to idr frames, since in metadata mode the bitstream is in an
+    // opaque handle, to which we don't have access.
+    int32_t video = !strncasecmp(mime, "video/", 6);
+    if (encoder && video) {
+        OMX_BOOL enable = (OMX_BOOL) (prependSPSPPS
+            && msg->findInt32("store-metadata-in-buffers-output", &storeMeta)
+            && storeMeta != 0);
+
+        err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexOutput, enable);
+
+        if (err != OK) {
+            ALOGE("[%s] storeMetaDataInBuffers (output) failed w/ err %d",
+                mComponentName.c_str(), err);
+            mUseMetadataOnEncoderOutput = 0;
+        } else {
+            mUseMetadataOnEncoderOutput = enable;
+        }
+    }
+
+    if (video) {
         if (encoder) {
             err = setupVideoEncoder(mime, msg);
         } else {
@@ -2321,10 +2346,15 @@
                                 &params, sizeof(params)),
                              (status_t)OK);
 
+                    CHECK_GT(params.nChannels, 0);
                     CHECK(params.nChannels == 1 || params.bInterleaved);
                     CHECK_EQ(params.nBitPerSample, 16u);
-                    CHECK_EQ((int)params.eNumData, (int)OMX_NumericalDataSigned);
-                    CHECK_EQ((int)params.ePCMMode, (int)OMX_AUDIO_PCMModeLinear);
+
+                    CHECK_EQ((int)params.eNumData,
+                             (int)OMX_NumericalDataSigned);
+
+                    CHECK_EQ((int)params.ePCMMode,
+                             (int)OMX_AUDIO_PCMModeLinear);
 
                     notify->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW);
                     notify->setInt32("channel-count", params.nChannels);
@@ -2334,11 +2364,14 @@
                         if (mSkipCutBuffer != NULL) {
                             size_t prevbufsize = mSkipCutBuffer->size();
                             if (prevbufsize != 0) {
-                                ALOGW("Replacing SkipCutBuffer holding %d bytes", prevbufsize);
+                                ALOGW("Replacing SkipCutBuffer holding %d "
+                                      "bytes",
+                                      prevbufsize);
                             }
                         }
-                        mSkipCutBuffer = new SkipCutBuffer(mEncoderDelay * frameSize,
-                                                           mEncoderPadding * frameSize);
+                        mSkipCutBuffer = new SkipCutBuffer(
+                                mEncoderDelay * frameSize,
+                                mEncoderPadding * frameSize);
                     }
 
                     if (mChannelMaskPresent) {
@@ -3062,7 +3095,15 @@
                 mCodec->sendFormatChange(reply);
             }
 
-            info->mData->setRange(rangeOffset, rangeLength);
+            if (mCodec->mUseMetadataOnEncoderOutput) {
+                native_handle_t* handle =
+                        *(native_handle_t**)(info->mData->data() + 4);
+                info->mData->meta()->setPointer("handle", handle);
+                info->mData->meta()->setInt32("rangeOffset", rangeOffset);
+                info->mData->meta()->setInt32("rangeLength", rangeLength);
+            } else {
+                info->mData->setRange(rangeOffset, rangeLength);
+            }
 #if 0
             if (mCodec->mNativeWindow == NULL) {
                 if (IsIDR(info->mData)) {
@@ -3220,6 +3261,7 @@
     mCodec->mOMX.clear();
     mCodec->mQuirks = 0;
     mCodec->mFlags = 0;
+    mCodec->mUseMetadataOnEncoderOutput = 0;
     mCodec->mComponentName.clear();
 }
 
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index acc3abf..9544dbc 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -69,7 +69,6 @@
 LOCAL_SHARED_LIBRARIES := \
         libbinder \
         libcamera_client \
-        libcrypto \
         libcutils \
         libdl \
         libdrmframework \
@@ -97,7 +96,6 @@
         libvpx \
         libwebm \
         libstagefright_mpeg2ts \
-        libstagefright_httplive \
         libstagefright_id3 \
         libFLAC \
 
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index 4208019..92efae8 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -36,8 +36,7 @@
         const sp<MediaPlayerBase::AudioSink> &audioSink,
         bool allowDeepBuffering,
         AwesomePlayer *observer)
-    : mAudioTrack(NULL),
-      mInputBuffer(NULL),
+    : mInputBuffer(NULL),
       mSampleRate(0),
       mLatencyUs(0),
       mFrameSize(0),
@@ -166,8 +165,7 @@
                 0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
 
         if ((err = mAudioTrack->initCheck()) != OK) {
-            delete mAudioTrack;
-            mAudioTrack = NULL;
+            mAudioTrack.clear();
 
             if (mFirstBuffer != NULL) {
                 mFirstBuffer->release();
@@ -235,8 +233,7 @@
     } else {
         mAudioTrack->stop();
 
-        delete mAudioTrack;
-        mAudioTrack = NULL;
+        mAudioTrack.clear();
     }
 
     // Make sure to release any buffer we hold onto so that the
@@ -297,7 +294,7 @@
 status_t AudioPlayer::setPlaybackRatePermille(int32_t ratePermille) {
     if (mAudioSink.get() != NULL) {
         return mAudioSink->setPlaybackRatePermille(ratePermille);
-    } else if (mAudioTrack != NULL){
+    } else if (mAudioTrack != 0){
         return mAudioTrack->setSampleRate(ratePermille * mSampleRate / 1000);
     } else {
         return NO_INIT;
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 3cf4d5c..bdd842f 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -49,8 +49,7 @@
 
 AudioSource::AudioSource(
         audio_source_t inputSource, uint32_t sampleRate, uint32_t channelCount)
-    : mRecord(NULL),
-      mStarted(false),
+    : mStarted(false),
       mSampleRate(sampleRate),
       mPrevSampleTimeUs(0),
       mNumFramesReceived(0),
@@ -91,9 +90,6 @@
     if (mStarted) {
         reset();
     }
-
-    delete mRecord;
-    mRecord = NULL;
 }
 
 status_t AudioSource::initCheck() const {
@@ -122,8 +118,7 @@
     if (err == OK) {
         mStarted = true;
     } else {
-        delete mRecord;
-        mRecord = NULL;
+        mRecord.clear();
     }
 
 
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index bd28118..6c197e2 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -597,7 +597,7 @@
 
 bool AwesomePlayer::getBitrate(int64_t *bitrate) {
     off64_t size;
-    if (mDurationUs >= 0 && mCachedSource != NULL
+    if (mDurationUs > 0 && mCachedSource != NULL
             && mCachedSource->getSize(&size) == OK) {
         *bitrate = size * 8000000ll / mDurationUs;  // in bits/sec
         return true;
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index 145869e..42a9c7a 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -341,6 +341,7 @@
       mDataSource(source),
       mInitCheck(NO_INIT),
       mHasVideo(false),
+      mHeaderTimescale(0),
       mFirstTrack(NULL),
       mLastTrack(NULL),
       mFileMetaData(new MetaData),
@@ -817,6 +818,7 @@
         case FOURCC('i', 'l', 's', 't'):
         case FOURCC('s', 'i', 'n', 'f'):
         case FOURCC('s', 'c', 'h', 'i'):
+        case FOURCC('e', 'd', 't', 's'):
         {
             if (chunk_type == FOURCC('s', 't', 'b', 'l')) {
                 ALOGV("sampleTable chunk is %d bytes long.", (size_t)chunk_size);
@@ -904,6 +906,68 @@
             break;
         }
 
+        case FOURCC('e', 'l', 's', 't'):
+        {
+            // See 14496-12 8.6.6
+            uint8_t version;
+            if (mDataSource->readAt(data_offset, &version, 1) < 1) {
+                return ERROR_IO;
+            }
+
+            uint32_t entry_count;
+            if (!mDataSource->getUInt32(data_offset + 4, &entry_count)) {
+                return ERROR_IO;
+            }
+
+            if (entry_count != 1) {
+                // we only support a single entry at the moment, for gapless playback
+                ALOGW("ignoring edit list with %d entries", entry_count);
+            } else if (mHeaderTimescale == 0) {
+                ALOGW("ignoring edit list because timescale is 0");
+            } else {
+                off64_t entriesoffset = data_offset + 8;
+                uint64_t segment_duration;
+                int64_t media_time;
+
+                if (version == 1) {
+                    if (!mDataSource->getUInt64(entriesoffset, &segment_duration) ||
+                            !mDataSource->getUInt64(entriesoffset + 8, (uint64_t*)&media_time)) {
+                        return ERROR_IO;
+                    }
+                } else if (version == 0) {
+                    uint32_t sd;
+                    int32_t mt;
+                    if (!mDataSource->getUInt32(entriesoffset, &sd) ||
+                            !mDataSource->getUInt32(entriesoffset + 4, (uint32_t*)&mt)) {
+                        return ERROR_IO;
+                    }
+                    segment_duration = sd;
+                    media_time = mt;
+                } else {
+                    return ERROR_IO;
+                }
+
+                uint64_t halfscale = mHeaderTimescale / 2;
+                segment_duration = (segment_duration * 1000000 + halfscale)/ mHeaderTimescale;
+                media_time = (media_time * 1000000 + halfscale) / mHeaderTimescale;
+
+                int64_t duration;
+                int32_t samplerate;
+                if (mLastTrack->meta->findInt64(kKeyDuration, &duration) &&
+                        mLastTrack->meta->findInt32(kKeySampleRate, &samplerate)) {
+
+                    int64_t delay = (media_time  * samplerate + 500000) / 1000000;
+                    mLastTrack->meta->setInt32(kKeyEncoderDelay, delay);
+
+                    int64_t paddingus = duration - (segment_duration + media_time);
+                    int64_t paddingsamples = (paddingus * samplerate + 500000) / 1000000;
+                    mLastTrack->meta->setInt32(kKeyEncoderPadding, paddingsamples);
+                }
+            }
+            *offset += chunk_size;
+            break;
+        }
+
         case FOURCC('f', 'r', 'm', 'a'):
         {
             uint32_t original_fourcc;
@@ -1564,24 +1628,26 @@
 
         case FOURCC('m', 'v', 'h', 'd'):
         {
-            if (chunk_data_size < 12) {
+            if (chunk_data_size < 24) {
                 return ERROR_MALFORMED;
             }
 
-            uint8_t header[12];
+            uint8_t header[24];
             if (mDataSource->readAt(
                         data_offset, header, sizeof(header))
                     < (ssize_t)sizeof(header)) {
                 return ERROR_IO;
             }
 
-            int64_t creationTime;
+            uint64_t creationTime;
             if (header[0] == 1) {
                 creationTime = U64_AT(&header[4]);
+                mHeaderTimescale = U32_AT(&header[20]);
             } else if (header[0] != 0) {
                 return ERROR_MALFORMED;
             } else {
                 creationTime = U32_AT(&header[4]);
+                mHeaderTimescale = U32_AT(&header[12]);
             }
 
             String8 s;
diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp
index 409038a..71b6569 100644
--- a/media/libstagefright/SurfaceMediaSource.cpp
+++ b/media/libstagefright/SurfaceMediaSource.cpp
@@ -305,8 +305,9 @@
 
             // First time seeing the buffer?  Added it to the SMS slot
             if (item.mGraphicBuffer != NULL) {
-                mBufferSlot[item.mBuf] = item.mGraphicBuffer;
+                mSlots[item.mBuf].mGraphicBuffer = item.mGraphicBuffer;
             }
+            mSlots[item.mBuf].mFrameNumber = item.mFrameNumber;
 
             // check for the timing of this buffer
             if (mNumFramesReceived == 0 && !mUseAbsoluteTimestamps) {
@@ -315,7 +316,8 @@
                 if (mStartTimeNs > 0) {
                     if (item.mTimestamp < mStartTimeNs) {
                         // This frame predates start of record, discard
-                        mBufferQueue->releaseBuffer(item.mBuf, EGL_NO_DISPLAY,
+                        mBufferQueue->releaseBuffer(
+                                item.mBuf, item.mFrameNumber, EGL_NO_DISPLAY,
                                 EGL_NO_SYNC_KHR, Fence::NO_FENCE);
                         continue;
                     }
@@ -345,17 +347,18 @@
 
     // First time seeing the buffer?  Added it to the SMS slot
     if (item.mGraphicBuffer != NULL) {
-        mBufferSlot[mCurrentSlot] = item.mGraphicBuffer;
+        mSlots[item.mBuf].mGraphicBuffer = item.mGraphicBuffer;
     }
+    mSlots[item.mBuf].mFrameNumber = item.mFrameNumber;
 
-    mCurrentBuffers.push_back(mBufferSlot[mCurrentSlot]);
+    mCurrentBuffers.push_back(mSlots[mCurrentSlot].mGraphicBuffer);
     int64_t prevTimeStamp = mCurrentTimestamp;
     mCurrentTimestamp = item.mTimestamp;
 
     mNumFramesEncoded++;
     // Pass the data to the MediaBuffer. Pass in only the metadata
 
-    passMetadataBuffer(buffer, mBufferSlot[mCurrentSlot]->handle);
+    passMetadataBuffer(buffer, mSlots[mCurrentSlot].mGraphicBuffer->handle);
 
     (*buffer)->setObserver(this);
     (*buffer)->add_ref();
@@ -405,15 +408,16 @@
     }
 
     for (int id = 0; id < BufferQueue::NUM_BUFFER_SLOTS; id++) {
-        if (mBufferSlot[id] == NULL) {
+        if (mSlots[id].mGraphicBuffer == NULL) {
             continue;
         }
 
-        if (bufferHandle == mBufferSlot[id]->handle) {
+        if (bufferHandle == mSlots[id].mGraphicBuffer->handle) {
             ALOGV("Slot %d returned, matches handle = %p", id,
-                    mBufferSlot[id]->handle);
+                    mSlots[id].mGraphicBuffer->handle);
 
-            mBufferQueue->releaseBuffer(id, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR,
+            mBufferQueue->releaseBuffer(id, mSlots[id].mFrameNumber,
+                                        EGL_NO_DISPLAY, EGL_NO_SYNC_KHR,
                     Fence::NO_FENCE);
 
             buffer->setObserver(0);
@@ -469,7 +473,7 @@
     mFrameAvailableCondition.signal();
 
     for (int i = 0; i < BufferQueue::NUM_BUFFER_SLOTS; i++) {
-       mBufferSlot[i] = 0;
+       mSlots[i].mGraphicBuffer = 0;
     }
 }
 
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index cf50dc9..1b20cbb 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -604,6 +604,9 @@
     // To make the codec behave the same before and after a reset, we need to invalidate the
     // streaminfo struct. This does that:
     mStreamInfo->sampleRate = 0;
+
+    mSignalledError = false;
+    mOutputPortSettingsChange = NONE;
 }
 
 void SoftAAC2::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
diff --git a/media/libstagefright/codecs/aacenc/SampleCode/Android.mk b/media/libstagefright/codecs/aacenc/SampleCode/Android.mk
index 01016e7..d06dcf6 100644
--- a/media/libstagefright/codecs/aacenc/SampleCode/Android.mk
+++ b/media/libstagefright/codecs/aacenc/SampleCode/Android.mk
@@ -5,7 +5,7 @@
     AAC_E_SAMPLES.c \
     ../../common/cmnMemory.c
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE := AACEncTest
 
diff --git a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
index 4d4212f..3320688 100644
--- a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
@@ -457,6 +457,11 @@
     }
 }
 
+void SoftAMR::onReset() {
+    mSignalledError = false;
+    mOutputPortSettingsChange = NONE;
+}
+
 }  // namespace android
 
 android::SoftOMXComponent *createSoftOMXComponent(
diff --git a/media/libstagefright/codecs/amrnb/dec/SoftAMR.h b/media/libstagefright/codecs/amrnb/dec/SoftAMR.h
index 9a596e5..758d6ac 100644
--- a/media/libstagefright/codecs/amrnb/dec/SoftAMR.h
+++ b/media/libstagefright/codecs/amrnb/dec/SoftAMR.h
@@ -40,6 +40,7 @@
     virtual void onQueueFilled(OMX_U32 portIndex);
     virtual void onPortFlushCompleted(OMX_U32 portIndex);
     virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
+    virtual void onReset();
 
 private:
     enum {
diff --git a/media/libstagefright/codecs/amrwbenc/SampleCode/Android.mk b/media/libstagefright/codecs/amrwbenc/SampleCode/Android.mk
index db34d08..c203f77 100644
--- a/media/libstagefright/codecs/amrwbenc/SampleCode/Android.mk
+++ b/media/libstagefright/codecs/amrwbenc/SampleCode/Android.mk
@@ -5,7 +5,7 @@
     AMRWB_E_SAMPLE.c \
     ../../common/cmnMemory.c
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE := AMRWBEncTest
 
 LOCAL_ARM_MODE := arm
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
index 020cc0a..fb2a430 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
@@ -48,42 +48,32 @@
     { OMX_VIDEO_H263ProfileISWV2,    OMX_VIDEO_H263Level45 },
 };
 
-template<class T>
-static void InitOMXParams(T *params) {
-    params->nSize = sizeof(T);
-    params->nVersion.s.nVersionMajor = 1;
-    params->nVersion.s.nVersionMinor = 0;
-    params->nVersion.s.nRevision = 0;
-    params->nVersion.s.nStep = 0;
-}
-
 SoftMPEG4::SoftMPEG4(
         const char *name,
+        const char *componentRole,
+        OMX_VIDEO_CODINGTYPE codingType,
+        const CodecProfileLevel *profileLevels,
+        size_t numProfileLevels,
         const OMX_CALLBACKTYPE *callbacks,
         OMX_PTR appData,
         OMX_COMPONENTTYPE **component)
-    : SimpleSoftOMXComponent(name, callbacks, appData, component),
-      mMode(MODE_MPEG4),
+    : SoftVideoDecoderOMXComponent(
+            name, componentRole, codingType, profileLevels, numProfileLevels,
+            352 /* width */, 288 /* height */, callbacks, appData, component),
+      mMode(codingType == OMX_VIDEO_CodingH263 ? MODE_H263 : MODE_MPEG4),
       mHandle(new tagvideoDecControls),
       mInputBufferCount(0),
-      mWidth(352),
-      mHeight(288),
-      mCropLeft(0),
-      mCropTop(0),
-      mCropRight(mWidth - 1),
-      mCropBottom(mHeight - 1),
       mSignalledError(false),
       mInitialized(false),
       mFramesConfigured(false),
       mNumSamplesOutput(0),
-      mOutputPortSettingsChange(NONE) {
-    if (!strcmp(name, "OMX.google.h263.decoder")) {
-        mMode = MODE_H263;
-    } else {
-        CHECK(!strcmp(name, "OMX.google.mpeg4.decoder"));
-    }
-
-    initPorts();
+      mPvTime(0) {
+    initPorts(
+            kNumInputBuffers,
+            8192 /* inputBufferSize */,
+            kNumOutputBuffers,
+            (mMode == MODE_MPEG4)
+            ? MEDIA_MIMETYPE_VIDEO_MPEG4 : MEDIA_MIMETYPE_VIDEO_H263);
     CHECK_EQ(initDecoder(), (status_t)OK);
 }
 
@@ -96,219 +86,11 @@
     mHandle = NULL;
 }
 
-void SoftMPEG4::initPorts() {
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-
-    def.nPortIndex = 0;
-    def.eDir = OMX_DirInput;
-    def.nBufferCountMin = kNumInputBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
-    def.nBufferSize = 8192;
-    def.bEnabled = OMX_TRUE;
-    def.bPopulated = OMX_FALSE;
-    def.eDomain = OMX_PortDomainVideo;
-    def.bBuffersContiguous = OMX_FALSE;
-    def.nBufferAlignment = 1;
-
-    def.format.video.cMIMEType =
-        (mMode == MODE_MPEG4)
-            ? const_cast<char *>(MEDIA_MIMETYPE_VIDEO_MPEG4)
-            : const_cast<char *>(MEDIA_MIMETYPE_VIDEO_H263);
-
-    def.format.video.pNativeRender = NULL;
-    def.format.video.nFrameWidth = mWidth;
-    def.format.video.nFrameHeight = mHeight;
-    def.format.video.nStride = def.format.video.nFrameWidth;
-    def.format.video.nSliceHeight = def.format.video.nFrameHeight;
-    def.format.video.nBitrate = 0;
-    def.format.video.xFramerate = 0;
-    def.format.video.bFlagErrorConcealment = OMX_FALSE;
-
-    def.format.video.eCompressionFormat =
-        mMode == MODE_MPEG4 ? OMX_VIDEO_CodingMPEG4 : OMX_VIDEO_CodingH263;
-
-    def.format.video.eColorFormat = OMX_COLOR_FormatUnused;
-    def.format.video.pNativeWindow = NULL;
-
-    addPort(def);
-
-    def.nPortIndex = 1;
-    def.eDir = OMX_DirOutput;
-    def.nBufferCountMin = kNumOutputBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
-    def.bEnabled = OMX_TRUE;
-    def.bPopulated = OMX_FALSE;
-    def.eDomain = OMX_PortDomainVideo;
-    def.bBuffersContiguous = OMX_FALSE;
-    def.nBufferAlignment = 2;
-
-    def.format.video.cMIMEType = const_cast<char *>(MEDIA_MIMETYPE_VIDEO_RAW);
-    def.format.video.pNativeRender = NULL;
-    def.format.video.nFrameWidth = mWidth;
-    def.format.video.nFrameHeight = mHeight;
-    def.format.video.nStride = def.format.video.nFrameWidth;
-    def.format.video.nSliceHeight = def.format.video.nFrameHeight;
-    def.format.video.nBitrate = 0;
-    def.format.video.xFramerate = 0;
-    def.format.video.bFlagErrorConcealment = OMX_FALSE;
-    def.format.video.eCompressionFormat = OMX_VIDEO_CodingUnused;
-    def.format.video.eColorFormat = OMX_COLOR_FormatYUV420Planar;
-    def.format.video.pNativeWindow = NULL;
-
-    def.nBufferSize =
-        (def.format.video.nFrameWidth * def.format.video.nFrameHeight * 3) / 2;
-
-    addPort(def);
-}
-
 status_t SoftMPEG4::initDecoder() {
     memset(mHandle, 0, sizeof(tagvideoDecControls));
     return OK;
 }
 
-OMX_ERRORTYPE SoftMPEG4::internalGetParameter(
-        OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexParamVideoPortFormat:
-        {
-            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
-                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
-            if (formatParams->nPortIndex > 1) {
-                return OMX_ErrorUndefined;
-            }
-
-            if (formatParams->nIndex != 0) {
-                return OMX_ErrorNoMore;
-            }
-
-            if (formatParams->nPortIndex == 0) {
-                formatParams->eCompressionFormat =
-                    (mMode == MODE_MPEG4)
-                        ? OMX_VIDEO_CodingMPEG4 : OMX_VIDEO_CodingH263;
-
-                formatParams->eColorFormat = OMX_COLOR_FormatUnused;
-                formatParams->xFramerate = 0;
-            } else {
-                CHECK_EQ(formatParams->nPortIndex, 1u);
-
-                formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused;
-                formatParams->eColorFormat = OMX_COLOR_FormatYUV420Planar;
-                formatParams->xFramerate = 0;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        case OMX_IndexParamVideoProfileLevelQuerySupported:
-        {
-            OMX_VIDEO_PARAM_PROFILELEVELTYPE *profileLevel =
-                    (OMX_VIDEO_PARAM_PROFILELEVELTYPE *) params;
-
-            if (profileLevel->nPortIndex != 0) {  // Input port only
-                ALOGE("Invalid port index: %ld", profileLevel->nPortIndex);
-                return OMX_ErrorUnsupportedIndex;
-            }
-
-            size_t index = profileLevel->nProfileIndex;
-            if (mMode == MODE_H263) {
-                size_t nProfileLevels =
-                    sizeof(kH263ProfileLevels) / sizeof(kH263ProfileLevels[0]);
-                if (index >= nProfileLevels) {
-                    return OMX_ErrorNoMore;
-                }
-
-                profileLevel->eProfile = kH263ProfileLevels[index].mProfile;
-                profileLevel->eLevel = kH263ProfileLevels[index].mLevel;
-            } else {
-                size_t nProfileLevels =
-                    sizeof(kM4VProfileLevels) / sizeof(kM4VProfileLevels[0]);
-                if (index >= nProfileLevels) {
-                    return OMX_ErrorNoMore;
-                }
-
-                profileLevel->eProfile = kM4VProfileLevels[index].mProfile;
-                profileLevel->eLevel = kM4VProfileLevels[index].mLevel;
-            }
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return SimpleSoftOMXComponent::internalGetParameter(index, params);
-    }
-}
-
-OMX_ERRORTYPE SoftMPEG4::internalSetParameter(
-        OMX_INDEXTYPE index, const OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexParamStandardComponentRole:
-        {
-            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
-                (const OMX_PARAM_COMPONENTROLETYPE *)params;
-
-            if (mMode == MODE_MPEG4) {
-                if (strncmp((const char *)roleParams->cRole,
-                            "video_decoder.mpeg4",
-                            OMX_MAX_STRINGNAME_SIZE - 1)) {
-                    return OMX_ErrorUndefined;
-                }
-            } else {
-                if (strncmp((const char *)roleParams->cRole,
-                            "video_decoder.h263",
-                            OMX_MAX_STRINGNAME_SIZE - 1)) {
-                    return OMX_ErrorUndefined;
-                }
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        case OMX_IndexParamVideoPortFormat:
-        {
-            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
-                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
-            if (formatParams->nPortIndex > 1) {
-                return OMX_ErrorUndefined;
-            }
-
-            if (formatParams->nIndex != 0) {
-                return OMX_ErrorNoMore;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return SimpleSoftOMXComponent::internalSetParameter(index, params);
-    }
-}
-
-OMX_ERRORTYPE SoftMPEG4::getConfig(
-        OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexConfigCommonOutputCrop:
-        {
-            OMX_CONFIG_RECTTYPE *rectParams = (OMX_CONFIG_RECTTYPE *)params;
-
-            if (rectParams->nPortIndex != 1) {
-                return OMX_ErrorUndefined;
-            }
-
-            rectParams->nLeft = mCropLeft;
-            rectParams->nTop = mCropTop;
-            rectParams->nWidth = mCropRight - mCropLeft + 1;
-            rectParams->nHeight = mCropBottom - mCropTop + 1;
-
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return OMX_ErrorUnsupportedIndex;
-    }
-}
-
 void SoftMPEG4::onQueueFilled(OMX_U32 portIndex) {
     if (mSignalledError || mOutputPortSettingsChange != NONE) {
         return;
@@ -415,9 +197,14 @@
 
         uint32_t useExtTimestamp = (inHeader->nOffset == 0);
 
-        // decoder deals in ms, OMX in us.
-        uint32_t timestamp =
-            useExtTimestamp ? (inHeader->nTimeStamp + 500) / 1000 : 0xFFFFFFFF;
+        // decoder deals in ms (int32_t), OMX in us (int64_t)
+        // so use fake timestamp instead
+        uint32_t timestamp = 0xFFFFFFFF;
+        if (useExtTimestamp) {
+            mPvToOmxTimeMap.add(mPvTime, inHeader->nTimeStamp);
+            timestamp = mPvTime;
+            mPvTime++;
+        }
 
         int32_t bufferSize = inHeader->nFilledLen;
         int32_t tmp = bufferSize;
@@ -441,7 +228,8 @@
         }
 
         // decoder deals in ms, OMX in us.
-        outHeader->nTimeStamp = timestamp * 1000;
+        outHeader->nTimeStamp = mPvToOmxTimeMap.valueFor(timestamp);
+        mPvToOmxTimeMap.removeItem(timestamp);
 
         inHeader->nOffset += bufferSize;
         inHeader->nFilledLen = 0;
@@ -482,11 +270,11 @@
 }
 
 bool SoftMPEG4::portSettingsChanged() {
-    int32_t disp_width, disp_height;
-    PVGetVideoDimensions(mHandle, &disp_width, &disp_height);
+    uint32_t disp_width, disp_height;
+    PVGetVideoDimensions(mHandle, (int32 *)&disp_width, (int32 *)&disp_height);
 
-    int32_t buf_width, buf_height;
-    PVGetBufferDimensions(mHandle, &buf_width, &buf_height);
+    uint32_t buf_width, buf_height;
+    PVGetBufferDimensions(mHandle, (int32 *)&buf_width, (int32 *)&buf_height);
 
     CHECK_LE(disp_width, buf_width);
     CHECK_LE(disp_height, buf_height);
@@ -494,12 +282,12 @@
     ALOGV("disp_width = %d, disp_height = %d, buf_width = %d, buf_height = %d",
             disp_width, disp_height, buf_width, buf_height);
 
-    if (mCropRight != disp_width - 1
-            || mCropBottom != disp_height - 1) {
+    if (mCropWidth != disp_width
+            || mCropHeight != disp_height) {
         mCropLeft = 0;
         mCropTop = 0;
-        mCropRight = disp_width - 1;
-        mCropBottom = disp_height - 1;
+        mCropWidth = disp_width;
+        mCropHeight = disp_height;
 
         notify(OMX_EventPortSettingsChanged,
                1,
@@ -545,45 +333,22 @@
     }
 }
 
-void SoftMPEG4::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
-    if (portIndex != 1) {
-        return;
-    }
-
-    switch (mOutputPortSettingsChange) {
-        case NONE:
-            break;
-
-        case AWAITING_DISABLED:
-        {
-            CHECK(!enabled);
-            mOutputPortSettingsChange = AWAITING_ENABLED;
-            break;
-        }
-
-        default:
-        {
-            CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED);
-            CHECK(enabled);
-            mOutputPortSettingsChange = NONE;
-            break;
-        }
+void SoftMPEG4::onReset() {
+    SoftVideoDecoderOMXComponent::onReset();
+    mPvToOmxTimeMap.clear();
+    mSignalledError = false;
+    mFramesConfigured = false;
+    if (mInitialized) {
+        PVCleanUpVideoDecoder(mHandle);
+        mInitialized = false;
     }
 }
 
 void SoftMPEG4::updatePortDefinitions() {
-    OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(0)->mDef;
-    def->format.video.nFrameWidth = mWidth;
-    def->format.video.nFrameHeight = mHeight;
-    def->format.video.nStride = def->format.video.nFrameWidth;
-    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
+    SoftVideoDecoderOMXComponent::updatePortDefinitions();
 
-    def = &editPortInfo(1)->mDef;
-    def->format.video.nFrameWidth = mWidth;
-    def->format.video.nFrameHeight = mHeight;
-    def->format.video.nStride = def->format.video.nFrameWidth;
-    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
-
+    /* We have to align our width and height - this should affect stride! */
+    OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kOutputPortIndex)->mDef;
     def->nBufferSize =
         (((def->format.video.nFrameWidth + 15) & -16)
             * ((def->format.video.nFrameHeight + 15) & -16) * 3) / 2;
@@ -594,6 +359,19 @@
 android::SoftOMXComponent *createSoftOMXComponent(
         const char *name, const OMX_CALLBACKTYPE *callbacks,
         OMX_PTR appData, OMX_COMPONENTTYPE **component) {
-    return new android::SoftMPEG4(name, callbacks, appData, component);
+    using namespace android;
+    if (!strcmp(name, "OMX.google.h263.decoder")) {
+        return new android::SoftMPEG4(
+                name, "video_decoder.h263", OMX_VIDEO_CodingH263,
+                kH263ProfileLevels, ARRAY_SIZE(kH263ProfileLevels),
+                callbacks, appData, component);
+    } else if (!strcmp(name, "OMX.google.mpeg4.decoder")) {
+        return new android::SoftMPEG4(
+                name, "video_decoder.mpeg4", OMX_VIDEO_CodingMPEG4,
+                kM4VProfileLevels, ARRAY_SIZE(kM4VProfileLevels),
+                callbacks, appData, component);
+    } else {
+        CHECK(!"Unknown component");
+    }
 }
 
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h
index dff08a7..de14aaf 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.h
@@ -18,14 +18,18 @@
 
 #define SOFT_MPEG4_H_
 
-#include "SimpleSoftOMXComponent.h"
+#include "SoftVideoDecoderOMXComponent.h"
 
 struct tagvideoDecControls;
 
 namespace android {
 
-struct SoftMPEG4 : public SimpleSoftOMXComponent {
+struct SoftMPEG4 : public SoftVideoDecoderOMXComponent {
     SoftMPEG4(const char *name,
+            const char *componentRole,
+            OMX_VIDEO_CODINGTYPE codingType,
+            const CodecProfileLevel *profileLevels,
+            size_t numProfileLevels,
             const OMX_CALLBACKTYPE *callbacks,
             OMX_PTR appData,
             OMX_COMPONENTTYPE **component);
@@ -33,17 +37,9 @@
 protected:
     virtual ~SoftMPEG4();
 
-    virtual OMX_ERRORTYPE internalGetParameter(
-            OMX_INDEXTYPE index, OMX_PTR params);
-
-    virtual OMX_ERRORTYPE internalSetParameter(
-            OMX_INDEXTYPE index, const OMX_PTR params);
-
-    virtual OMX_ERRORTYPE getConfig(OMX_INDEXTYPE index, OMX_PTR params);
-
     virtual void onQueueFilled(OMX_U32 portIndex);
     virtual void onPortFlushCompleted(OMX_U32 portIndex);
-    virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
+    virtual void onReset();
 
 private:
     enum {
@@ -54,32 +50,23 @@
     enum {
         MODE_MPEG4,
         MODE_H263,
-
     } mMode;
 
     tagvideoDecControls *mHandle;
 
     size_t mInputBufferCount;
 
-    int32_t mWidth, mHeight;
-    int32_t mCropLeft, mCropTop, mCropRight, mCropBottom;
-
     bool mSignalledError;
     bool mInitialized;
     bool mFramesConfigured;
 
     int32_t mNumSamplesOutput;
+    int32_t mPvTime;
+    KeyedVector<int32_t, OMX_TICKS> mPvToOmxTimeMap;
 
-    enum {
-        NONE,
-        AWAITING_DISABLED,
-        AWAITING_ENABLED
-    } mOutputPortSettingsChange;
-
-    void initPorts();
     status_t initDecoder();
 
-    void updatePortDefinitions();
+    virtual void updatePortDefinitions();
     bool portSettingsChanged();
 
     DISALLOW_EVIL_CONSTRUCTORS(SoftMPEG4);
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index 9f25536..7c382fb 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -361,6 +361,8 @@
 void SoftMP3::onReset() {
     pvmp3_InitDecoder(mConfig, mDecoderBuf);
     mIsFirst = true;
+    mSignalledError = false;
+    mOutputPortSettingsChange = NONE;
 }
 
 }  // namespace android
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index a400b4c..43d0263 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -29,26 +29,19 @@
 
 namespace android {
 
-template<class T>
-static void InitOMXParams(T *params) {
-    params->nSize = sizeof(T);
-    params->nVersion.s.nVersionMajor = 1;
-    params->nVersion.s.nVersionMinor = 0;
-    params->nVersion.s.nRevision = 0;
-    params->nVersion.s.nStep = 0;
-}
-
 SoftVPX::SoftVPX(
         const char *name,
         const OMX_CALLBACKTYPE *callbacks,
         OMX_PTR appData,
         OMX_COMPONENTTYPE **component)
-    : SimpleSoftOMXComponent(name, callbacks, appData, component),
-      mCtx(NULL),
-      mWidth(320),
-      mHeight(240),
-      mOutputPortSettingsChange(NONE) {
-    initPorts();
+    : SoftVideoDecoderOMXComponent(
+            name, "video_decoder.vpx", OMX_VIDEO_CodingVPX,
+            NULL /* profileLevels */, 0 /* numProfileLevels */,
+            320 /* width */, 240 /* height */, callbacks, appData, component),
+      mCtx(NULL) {
+    initPorts(kNumBuffers, 768 * 1024 /* inputBufferSize */,
+            kNumBuffers, MEDIA_MIMETYPE_VIDEO_VPX);
+
     CHECK_EQ(initDecoder(), (status_t)OK);
 }
 
@@ -58,65 +51,6 @@
     mCtx = NULL;
 }
 
-void SoftVPX::initPorts() {
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-
-    def.nPortIndex = 0;
-    def.eDir = OMX_DirInput;
-    def.nBufferCountMin = kNumBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
-    def.nBufferSize = 768 * 1024;
-    def.bEnabled = OMX_TRUE;
-    def.bPopulated = OMX_FALSE;
-    def.eDomain = OMX_PortDomainVideo;
-    def.bBuffersContiguous = OMX_FALSE;
-    def.nBufferAlignment = 1;
-
-    def.format.video.cMIMEType = const_cast<char *>(MEDIA_MIMETYPE_VIDEO_VPX);
-    def.format.video.pNativeRender = NULL;
-    def.format.video.nFrameWidth = mWidth;
-    def.format.video.nFrameHeight = mHeight;
-    def.format.video.nStride = def.format.video.nFrameWidth;
-    def.format.video.nSliceHeight = def.format.video.nFrameHeight;
-    def.format.video.nBitrate = 0;
-    def.format.video.xFramerate = 0;
-    def.format.video.bFlagErrorConcealment = OMX_FALSE;
-    def.format.video.eCompressionFormat = OMX_VIDEO_CodingVPX;
-    def.format.video.eColorFormat = OMX_COLOR_FormatUnused;
-    def.format.video.pNativeWindow = NULL;
-
-    addPort(def);
-
-    def.nPortIndex = 1;
-    def.eDir = OMX_DirOutput;
-    def.nBufferCountMin = kNumBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
-    def.bEnabled = OMX_TRUE;
-    def.bPopulated = OMX_FALSE;
-    def.eDomain = OMX_PortDomainVideo;
-    def.bBuffersContiguous = OMX_FALSE;
-    def.nBufferAlignment = 2;
-
-    def.format.video.cMIMEType = const_cast<char *>(MEDIA_MIMETYPE_VIDEO_RAW);
-    def.format.video.pNativeRender = NULL;
-    def.format.video.nFrameWidth = mWidth;
-    def.format.video.nFrameHeight = mHeight;
-    def.format.video.nStride = def.format.video.nFrameWidth;
-    def.format.video.nSliceHeight = def.format.video.nFrameHeight;
-    def.format.video.nBitrate = 0;
-    def.format.video.xFramerate = 0;
-    def.format.video.bFlagErrorConcealment = OMX_FALSE;
-    def.format.video.eCompressionFormat = OMX_VIDEO_CodingUnused;
-    def.format.video.eColorFormat = OMX_COLOR_FormatYUV420Planar;
-    def.format.video.pNativeWindow = NULL;
-
-    def.nBufferSize =
-        (def.format.video.nFrameWidth * def.format.video.nFrameHeight * 3) / 2;
-
-    addPort(def);
-}
-
 static int GetCPUCoreCount() {
     int cpuCoreCount = 1;
 #if defined(_SC_NPROCESSORS_ONLN)
@@ -145,80 +79,6 @@
     return OK;
 }
 
-OMX_ERRORTYPE SoftVPX::internalGetParameter(
-        OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexParamVideoPortFormat:
-        {
-            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
-                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
-            if (formatParams->nPortIndex > 1) {
-                return OMX_ErrorUndefined;
-            }
-
-            if (formatParams->nIndex != 0) {
-                return OMX_ErrorNoMore;
-            }
-
-            if (formatParams->nPortIndex == 0) {
-                formatParams->eCompressionFormat = OMX_VIDEO_CodingVPX;
-                formatParams->eColorFormat = OMX_COLOR_FormatUnused;
-                formatParams->xFramerate = 0;
-            } else {
-                CHECK_EQ(formatParams->nPortIndex, 1u);
-
-                formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused;
-                formatParams->eColorFormat = OMX_COLOR_FormatYUV420Planar;
-                formatParams->xFramerate = 0;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return SimpleSoftOMXComponent::internalGetParameter(index, params);
-    }
-}
-
-OMX_ERRORTYPE SoftVPX::internalSetParameter(
-        OMX_INDEXTYPE index, const OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexParamStandardComponentRole:
-        {
-            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
-                (const OMX_PARAM_COMPONENTROLETYPE *)params;
-
-            if (strncmp((const char *)roleParams->cRole,
-                        "video_decoder.vpx",
-                        OMX_MAX_STRINGNAME_SIZE - 1)) {
-                return OMX_ErrorUndefined;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        case OMX_IndexParamVideoPortFormat:
-        {
-            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
-                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
-            if (formatParams->nPortIndex > 1) {
-                return OMX_ErrorUndefined;
-            }
-
-            if (formatParams->nIndex != 0) {
-                return OMX_ErrorNoMore;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return SimpleSoftOMXComponent::internalSetParameter(index, params);
-    }
-}
-
 void SoftVPX::onQueueFilled(OMX_U32 portIndex) {
     if (mOutputPortSettingsChange != NONE) {
         return;
@@ -226,6 +86,7 @@
 
     List<BufferInfo *> &inQueue = getPortQueue(0);
     List<BufferInfo *> &outQueue = getPortQueue(1);
+    bool EOSseen = false;
 
     while (!inQueue.empty() && !outQueue.empty()) {
         BufferInfo *inInfo = *inQueue.begin();
@@ -235,17 +96,20 @@
         OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
 
         if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
-            inQueue.erase(inQueue.begin());
-            inInfo->mOwnedByUs = false;
-            notifyEmptyBufferDone(inHeader);
+            EOSseen = true;
+            if (inHeader->nFilledLen == 0) {
+                inQueue.erase(inQueue.begin());
+                inInfo->mOwnedByUs = false;
+                notifyEmptyBufferDone(inHeader);
 
-            outHeader->nFilledLen = 0;
-            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+                outHeader->nFilledLen = 0;
+                outHeader->nFlags = OMX_BUFFERFLAG_EOS;
 
-            outQueue.erase(outQueue.begin());
-            outInfo->mOwnedByUs = false;
-            notifyFillBufferDone(outHeader);
-            return;
+                outQueue.erase(outQueue.begin());
+                outInfo->mOwnedByUs = false;
+                notifyFillBufferDone(outHeader);
+                return;
+            }
         }
 
         if (vpx_codec_decode(
@@ -266,8 +130,8 @@
         if (img != NULL) {
             CHECK_EQ(img->fmt, IMG_FMT_I420);
 
-            int32_t width = img->d_w;
-            int32_t height = img->d_h;
+            uint32_t width = img->d_w;
+            uint32_t height = img->d_h;
 
             if (width != mWidth || height != mHeight) {
                 mWidth = width;
@@ -282,7 +146,7 @@
 
             outHeader->nOffset = 0;
             outHeader->nFilledLen = (width * height * 3) / 2;
-            outHeader->nFlags = 0;
+            outHeader->nFlags = EOSseen ? OMX_BUFFERFLAG_EOS : 0;
             outHeader->nTimeStamp = inHeader->nTimeStamp;
 
             const uint8_t *srcLine = (const uint8_t *)img->planes[PLANE_Y];
@@ -325,53 +189,6 @@
     }
 }
 
-void SoftVPX::onPortFlushCompleted(OMX_U32 portIndex) {
-}
-
-void SoftVPX::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
-    if (portIndex != 1) {
-        return;
-    }
-
-    switch (mOutputPortSettingsChange) {
-        case NONE:
-            break;
-
-        case AWAITING_DISABLED:
-        {
-            CHECK(!enabled);
-            mOutputPortSettingsChange = AWAITING_ENABLED;
-            break;
-        }
-
-        default:
-        {
-            CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED);
-            CHECK(enabled);
-            mOutputPortSettingsChange = NONE;
-            break;
-        }
-    }
-}
-
-void SoftVPX::updatePortDefinitions() {
-    OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(0)->mDef;
-    def->format.video.nFrameWidth = mWidth;
-    def->format.video.nFrameHeight = mHeight;
-    def->format.video.nStride = def->format.video.nFrameWidth;
-    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
-
-    def = &editPortInfo(1)->mDef;
-    def->format.video.nFrameWidth = mWidth;
-    def->format.video.nFrameHeight = mHeight;
-    def->format.video.nStride = def->format.video.nFrameWidth;
-    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
-
-    def->nBufferSize =
-        (def->format.video.nFrameWidth
-            * def->format.video.nFrameHeight * 3) / 2;
-}
-
 }  // namespace android
 
 android::SoftOMXComponent *createSoftOMXComponent(
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.h b/media/libstagefright/codecs/on2/dec/SoftVPX.h
index 3e814a2..626307b 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.h
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.h
@@ -18,11 +18,11 @@
 
 #define SOFT_VPX_H_
 
-#include "SimpleSoftOMXComponent.h"
+#include "SoftVideoDecoderOMXComponent.h"
 
 namespace android {
 
-struct SoftVPX : public SimpleSoftOMXComponent {
+struct SoftVPX : public SoftVideoDecoderOMXComponent {
     SoftVPX(const char *name,
             const OMX_CALLBACKTYPE *callbacks,
             OMX_PTR appData,
@@ -31,15 +31,7 @@
 protected:
     virtual ~SoftVPX();
 
-    virtual OMX_ERRORTYPE internalGetParameter(
-            OMX_INDEXTYPE index, OMX_PTR params);
-
-    virtual OMX_ERRORTYPE internalSetParameter(
-            OMX_INDEXTYPE index, const OMX_PTR params);
-
     virtual void onQueueFilled(OMX_U32 portIndex);
-    virtual void onPortFlushCompleted(OMX_U32 portIndex);
-    virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
 
 private:
     enum {
@@ -48,20 +40,8 @@
 
     void *mCtx;
 
-    int32_t mWidth;
-    int32_t mHeight;
-
-    enum {
-        NONE,
-        AWAITING_DISABLED,
-        AWAITING_ENABLED
-    } mOutputPortSettingsChange;
-
-    void initPorts();
     status_t initDecoder();
 
-    void updatePortDefinitions();
-
     DISALLOW_EVIL_CONSTRUCTORS(SoftVPX);
 };
 
diff --git a/media/libstagefright/codecs/on2/h264dec/Android.mk b/media/libstagefright/codecs/on2/h264dec/Android.mk
index 2539f98..655b2ab 100644
--- a/media/libstagefright/codecs/on2/h264dec/Android.mk
+++ b/media/libstagefright/codecs/on2/h264dec/Android.mk
@@ -119,7 +119,7 @@
 
 LOCAL_SHARED_LIBRARIES := libstagefright_soft_h264dec
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE := decoder
 
diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
index 6e36651..7ddb13c 100644
--- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
+++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.cpp
@@ -47,38 +47,28 @@
     { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel51 },
 };
 
-template<class T>
-static void InitOMXParams(T *params) {
-    params->nSize = sizeof(T);
-    params->nVersion.s.nVersionMajor = 1;
-    params->nVersion.s.nVersionMinor = 0;
-    params->nVersion.s.nRevision = 0;
-    params->nVersion.s.nStep = 0;
-}
-
 SoftAVC::SoftAVC(
         const char *name,
         const OMX_CALLBACKTYPE *callbacks,
         OMX_PTR appData,
         OMX_COMPONENTTYPE **component)
-    : SimpleSoftOMXComponent(name, callbacks, appData, component),
+    : SoftVideoDecoderOMXComponent(
+            name, "video_decoder.avc", OMX_VIDEO_CodingAVC,
+            kProfileLevels, ARRAY_SIZE(kProfileLevels),
+            320 /* width */, 240 /* height */, callbacks, appData, component),
       mHandle(NULL),
       mInputBufferCount(0),
-      mWidth(320),
-      mHeight(240),
       mPictureSize(mWidth * mHeight * 3 / 2),
-      mCropLeft(0),
-      mCropTop(0),
-      mCropWidth(mWidth),
-      mCropHeight(mHeight),
       mFirstPicture(NULL),
       mFirstPictureId(-1),
       mPicId(0),
       mHeadersDecoded(false),
       mEOSStatus(INPUT_DATA_AVAILABLE),
-      mOutputPortSettingsChange(NONE),
       mSignalledError(false) {
-    initPorts();
+    initPorts(
+            kNumInputBuffers, 8192 /* inputBufferSize */,
+            kNumOutputBuffers, MEDIA_MIMETYPE_VIDEO_AVC);
+
     CHECK_EQ(initDecoder(), (status_t)OK);
 }
 
@@ -100,65 +90,6 @@
     delete[] mFirstPicture;
 }
 
-void SoftAVC::initPorts() {
-    OMX_PARAM_PORTDEFINITIONTYPE def;
-    InitOMXParams(&def);
-
-    def.nPortIndex = kInputPortIndex;
-    def.eDir = OMX_DirInput;
-    def.nBufferCountMin = kNumInputBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
-    def.nBufferSize = 8192;
-    def.bEnabled = OMX_TRUE;
-    def.bPopulated = OMX_FALSE;
-    def.eDomain = OMX_PortDomainVideo;
-    def.bBuffersContiguous = OMX_FALSE;
-    def.nBufferAlignment = 1;
-
-    def.format.video.cMIMEType = const_cast<char *>(MEDIA_MIMETYPE_VIDEO_AVC);
-    def.format.video.pNativeRender = NULL;
-    def.format.video.nFrameWidth = mWidth;
-    def.format.video.nFrameHeight = mHeight;
-    def.format.video.nStride = def.format.video.nFrameWidth;
-    def.format.video.nSliceHeight = def.format.video.nFrameHeight;
-    def.format.video.nBitrate = 0;
-    def.format.video.xFramerate = 0;
-    def.format.video.bFlagErrorConcealment = OMX_FALSE;
-    def.format.video.eCompressionFormat = OMX_VIDEO_CodingAVC;
-    def.format.video.eColorFormat = OMX_COLOR_FormatUnused;
-    def.format.video.pNativeWindow = NULL;
-
-    addPort(def);
-
-    def.nPortIndex = kOutputPortIndex;
-    def.eDir = OMX_DirOutput;
-    def.nBufferCountMin = kNumOutputBuffers;
-    def.nBufferCountActual = def.nBufferCountMin;
-    def.bEnabled = OMX_TRUE;
-    def.bPopulated = OMX_FALSE;
-    def.eDomain = OMX_PortDomainVideo;
-    def.bBuffersContiguous = OMX_FALSE;
-    def.nBufferAlignment = 2;
-
-    def.format.video.cMIMEType = const_cast<char *>(MEDIA_MIMETYPE_VIDEO_RAW);
-    def.format.video.pNativeRender = NULL;
-    def.format.video.nFrameWidth = mWidth;
-    def.format.video.nFrameHeight = mHeight;
-    def.format.video.nStride = def.format.video.nFrameWidth;
-    def.format.video.nSliceHeight = def.format.video.nFrameHeight;
-    def.format.video.nBitrate = 0;
-    def.format.video.xFramerate = 0;
-    def.format.video.bFlagErrorConcealment = OMX_FALSE;
-    def.format.video.eCompressionFormat = OMX_VIDEO_CodingUnused;
-    def.format.video.eColorFormat = OMX_COLOR_FormatYUV420Planar;
-    def.format.video.pNativeWindow = NULL;
-
-    def.nBufferSize =
-        (def.format.video.nFrameWidth * def.format.video.nFrameHeight * 3) / 2;
-
-    addPort(def);
-}
-
 status_t SoftAVC::initDecoder() {
     // Force decoder to output buffers in display order.
     if (H264SwDecInit(&mHandle, 0) == H264SWDEC_OK) {
@@ -167,126 +98,6 @@
     return UNKNOWN_ERROR;
 }
 
-OMX_ERRORTYPE SoftAVC::internalGetParameter(
-        OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexParamVideoPortFormat:
-        {
-            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
-                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
-            if (formatParams->nPortIndex > kOutputPortIndex) {
-                return OMX_ErrorUndefined;
-            }
-
-            if (formatParams->nIndex != 0) {
-                return OMX_ErrorNoMore;
-            }
-
-            if (formatParams->nPortIndex == kInputPortIndex) {
-                formatParams->eCompressionFormat = OMX_VIDEO_CodingAVC;
-                formatParams->eColorFormat = OMX_COLOR_FormatUnused;
-                formatParams->xFramerate = 0;
-            } else {
-                CHECK(formatParams->nPortIndex == kOutputPortIndex);
-
-                formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused;
-                formatParams->eColorFormat = OMX_COLOR_FormatYUV420Planar;
-                formatParams->xFramerate = 0;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        case OMX_IndexParamVideoProfileLevelQuerySupported:
-        {
-            OMX_VIDEO_PARAM_PROFILELEVELTYPE *profileLevel =
-                    (OMX_VIDEO_PARAM_PROFILELEVELTYPE *) params;
-
-            if (profileLevel->nPortIndex != kInputPortIndex) {
-                ALOGE("Invalid port index: %ld", profileLevel->nPortIndex);
-                return OMX_ErrorUnsupportedIndex;
-            }
-
-            size_t index = profileLevel->nProfileIndex;
-            size_t nProfileLevels =
-                    sizeof(kProfileLevels) / sizeof(kProfileLevels[0]);
-            if (index >= nProfileLevels) {
-                return OMX_ErrorNoMore;
-            }
-
-            profileLevel->eProfile = kProfileLevels[index].mProfile;
-            profileLevel->eLevel = kProfileLevels[index].mLevel;
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return SimpleSoftOMXComponent::internalGetParameter(index, params);
-    }
-}
-
-OMX_ERRORTYPE SoftAVC::internalSetParameter(
-        OMX_INDEXTYPE index, const OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexParamStandardComponentRole:
-        {
-            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
-                (const OMX_PARAM_COMPONENTROLETYPE *)params;
-
-            if (strncmp((const char *)roleParams->cRole,
-                        "video_decoder.avc",
-                        OMX_MAX_STRINGNAME_SIZE - 1)) {
-                return OMX_ErrorUndefined;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        case OMX_IndexParamVideoPortFormat:
-        {
-            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
-                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
-
-            if (formatParams->nPortIndex > kOutputPortIndex) {
-                return OMX_ErrorUndefined;
-            }
-
-            if (formatParams->nIndex != 0) {
-                return OMX_ErrorNoMore;
-            }
-
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return SimpleSoftOMXComponent::internalSetParameter(index, params);
-    }
-}
-
-OMX_ERRORTYPE SoftAVC::getConfig(
-        OMX_INDEXTYPE index, OMX_PTR params) {
-    switch (index) {
-        case OMX_IndexConfigCommonOutputCrop:
-        {
-            OMX_CONFIG_RECTTYPE *rectParams = (OMX_CONFIG_RECTTYPE *)params;
-
-            if (rectParams->nPortIndex != 1) {
-                return OMX_ErrorUndefined;
-            }
-
-            rectParams->nLeft = mCropLeft;
-            rectParams->nTop = mCropTop;
-            rectParams->nWidth = mCropWidth;
-            rectParams->nHeight = mCropHeight;
-
-            return OMX_ErrorNone;
-        }
-
-        default:
-            return OMX_ErrorUnsupportedIndex;
-    }
-}
-
 void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
     if (mSignalledError || mOutputPortSettingsChange != NONE) {
         return;
@@ -298,13 +109,21 @@
 
     List<BufferInfo *> &inQueue = getPortQueue(kInputPortIndex);
     List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
+
+    if (mHeadersDecoded) {
+        // Dequeue any already decoded output frames to free up space
+        // in the output queue.
+
+        drainAllOutputBuffers(false /* eos */);
+    }
+
     H264SwDecRet ret = H264SWDEC_PIC_RDY;
     bool portSettingsChanged = false;
     while ((mEOSStatus != INPUT_DATA_AVAILABLE || !inQueue.empty())
             && outQueue.size() == kNumOutputBuffers) {
 
         if (mEOSStatus == INPUT_EOS_SEEN) {
-            drainAllOutputBuffers();
+            drainAllOutputBuffers(true /* eos */);
             return;
         }
 
@@ -392,15 +211,7 @@
             mFirstPictureId = -1;
         }
 
-        while (!outQueue.empty() &&
-                mHeadersDecoded &&
-                H264SwDecNextPicture(mHandle, &decodedPicture, 0)
-                    == H264SWDEC_PIC_RDY) {
-
-            int32_t picId = decodedPicture.picId;
-            uint8_t *data = (uint8_t *) decodedPicture.pOutputPicture;
-            drainOneOutputBuffer(picId, data);
-        }
+        drainAllOutputBuffers(false /* eos */);
     }
 }
 
@@ -409,8 +220,6 @@
         mWidth  = info->picWidth;
         mHeight = info->picHeight;
         mPictureSize = mWidth * mHeight * 3 / 2;
-        mCropWidth = mWidth;
-        mCropHeight = mHeight;
         updatePortDefinitions();
         notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
         mOutputPortSettingsChange = AWAITING_DISABLED;
@@ -463,43 +272,38 @@
     notifyFillBufferDone(outHeader);
 }
 
-bool SoftAVC::drainAllOutputBuffers() {
+void SoftAVC::drainAllOutputBuffers(bool eos) {
     List<BufferInfo *> &outQueue = getPortQueue(kOutputPortIndex);
     H264SwDecPicture decodedPicture;
 
+    if (mHeadersDecoded) {
+        while (!outQueue.empty()
+                && H264SWDEC_PIC_RDY == H264SwDecNextPicture(
+                    mHandle, &decodedPicture, eos /* flush */)) {
+            int32_t picId = decodedPicture.picId;
+            uint8_t *data = (uint8_t *) decodedPicture.pOutputPicture;
+            drainOneOutputBuffer(picId, data);
+        }
+    }
+
+    if (!eos) {
+        return;
+    }
+
     while (!outQueue.empty()) {
         BufferInfo *outInfo = *outQueue.begin();
         outQueue.erase(outQueue.begin());
         OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
-        if (mHeadersDecoded &&
-            H264SWDEC_PIC_RDY ==
-                H264SwDecNextPicture(mHandle, &decodedPicture, 1 /* flush */)) {
 
-            int32_t picId = decodedPicture.picId;
-            CHECK(mPicToHeaderMap.indexOfKey(picId) >= 0);
-
-            memcpy(outHeader->pBuffer + outHeader->nOffset,
-                decodedPicture.pOutputPicture,
-                mPictureSize);
-
-            OMX_BUFFERHEADERTYPE *header = mPicToHeaderMap.valueFor(picId);
-            outHeader->nTimeStamp = header->nTimeStamp;
-            outHeader->nFlags = header->nFlags;
-            outHeader->nFilledLen = mPictureSize;
-            mPicToHeaderMap.removeItem(picId);
-            delete header;
-        } else {
-            outHeader->nTimeStamp = 0;
-            outHeader->nFilledLen = 0;
-            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
-            mEOSStatus = OUTPUT_FRAMES_FLUSHED;
-        }
+        outHeader->nTimeStamp = 0;
+        outHeader->nFilledLen = 0;
+        outHeader->nFlags = OMX_BUFFERFLAG_EOS;
 
         outInfo->mOwnedByUs = false;
         notifyFillBufferDone(outHeader);
-    }
 
-    return true;
+        mEOSStatus = OUTPUT_FRAMES_FLUSHED;
+    }
 }
 
 void SoftAVC::onPortFlushCompleted(OMX_U32 portIndex) {
@@ -508,44 +312,9 @@
     }
 }
 
-void SoftAVC::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
-    switch (mOutputPortSettingsChange) {
-        case NONE:
-            break;
-
-        case AWAITING_DISABLED:
-        {
-            CHECK(!enabled);
-            mOutputPortSettingsChange = AWAITING_ENABLED;
-            break;
-        }
-
-        default:
-        {
-            CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED);
-            CHECK(enabled);
-            mOutputPortSettingsChange = NONE;
-            break;
-        }
-    }
-}
-
-void SoftAVC::updatePortDefinitions() {
-    OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(0)->mDef;
-    def->format.video.nFrameWidth = mWidth;
-    def->format.video.nFrameHeight = mHeight;
-    def->format.video.nStride = def->format.video.nFrameWidth;
-    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
-
-    def = &editPortInfo(1)->mDef;
-    def->format.video.nFrameWidth = mWidth;
-    def->format.video.nFrameHeight = mHeight;
-    def->format.video.nStride = def->format.video.nFrameWidth;
-    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
-
-    def->nBufferSize =
-        (def->format.video.nFrameWidth
-            * def->format.video.nFrameHeight * 3) / 2;
+void SoftAVC::onReset() {
+    SoftVideoDecoderOMXComponent::onReset();
+    mSignalledError = false;
 }
 
 }  // namespace android
diff --git a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
index 879b014..ee69926 100644
--- a/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
+++ b/media/libstagefright/codecs/on2/h264dec/SoftAVC.h
@@ -18,7 +18,7 @@
 
 #define SOFT_AVC_H_
 
-#include "SimpleSoftOMXComponent.h"
+#include "SoftVideoDecoderOMXComponent.h"
 #include <utils/KeyedVector.h>
 
 #include "H264SwDecApi.h"
@@ -26,7 +26,7 @@
 
 namespace android {
 
-struct SoftAVC : public SimpleSoftOMXComponent {
+struct SoftAVC : public SoftVideoDecoderOMXComponent {
     SoftAVC(const char *name,
             const OMX_CALLBACKTYPE *callbacks,
             OMX_PTR appData,
@@ -35,22 +35,12 @@
 protected:
     virtual ~SoftAVC();
 
-    virtual OMX_ERRORTYPE internalGetParameter(
-            OMX_INDEXTYPE index, OMX_PTR params);
-
-    virtual OMX_ERRORTYPE internalSetParameter(
-            OMX_INDEXTYPE index, const OMX_PTR params);
-
-    virtual OMX_ERRORTYPE getConfig(OMX_INDEXTYPE index, OMX_PTR params);
-
     virtual void onQueueFilled(OMX_U32 portIndex);
     virtual void onPortFlushCompleted(OMX_U32 portIndex);
-    virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
+    virtual void onReset();
 
 private:
     enum {
-        kInputPortIndex   = 0,
-        kOutputPortIndex  = 1,
         kNumInputBuffers  = 8,
         kNumOutputBuffers = 2,
     };
@@ -65,9 +55,7 @@
 
     size_t mInputBufferCount;
 
-    uint32_t mWidth, mHeight, mPictureSize;
-    uint32_t mCropLeft, mCropTop;
-    uint32_t mCropWidth, mCropHeight;
+    uint32_t mPictureSize;
 
     uint8_t *mFirstPicture;
     int32_t mFirstPictureId;
@@ -81,19 +69,10 @@
 
     EOSStatus mEOSStatus;
 
-    enum OutputPortSettingChange {
-        NONE,
-        AWAITING_DISABLED,
-        AWAITING_ENABLED
-    };
-    OutputPortSettingChange mOutputPortSettingsChange;
-
     bool mSignalledError;
 
-    void initPorts();
     status_t initDecoder();
-    void updatePortDefinitions();
-    bool drainAllOutputBuffers();
+    void drainAllOutputBuffers(bool eos);
     void drainOneOutputBuffer(int32_t picId, uint8_t *data);
     void saveFirstOutputBuffer(int32_t pidId, uint8_t *data);
     bool handleCropRectEvent(const CropParams* crop);
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
index 4115324..51bb958 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
@@ -424,6 +424,8 @@
         delete mVi;
         mVi = NULL;
     }
+
+    mOutputPortSettingsChange = NONE;
 }
 
 void SoftVorbis::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
diff --git a/media/libstagefright/foundation/AHierarchicalStateMachine.cpp b/media/libstagefright/foundation/AHierarchicalStateMachine.cpp
index 40c5a3c..f7a00d8 100644
--- a/media/libstagefright/foundation/AHierarchicalStateMachine.cpp
+++ b/media/libstagefright/foundation/AHierarchicalStateMachine.cpp
@@ -14,6 +14,10 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AHierarchicalStateMachine"
+#include <utils/Log.h>
+
 #include <media/stagefright/foundation/AHierarchicalStateMachine.h>
 
 #include <media/stagefright/foundation/ADebug.h>
diff --git a/media/libstagefright/httplive/Android.mk b/media/libstagefright/httplive/Android.mk
index a3fa7a3..85bd492 100644
--- a/media/libstagefright/httplive/Android.mk
+++ b/media/libstagefright/httplive/Android.mk
@@ -6,16 +6,25 @@
         LiveDataSource.cpp      \
         LiveSession.cpp         \
         M3UParser.cpp           \
+        PlaylistFetcher.cpp     \
 
 LOCAL_C_INCLUDES:= \
 	$(TOP)/frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax \
 	$(TOP)/external/openssl/include
 
+LOCAL_SHARED_LIBRARIES := \
+        libcrypto \
+        libcutils \
+        libmedia \
+        libstagefright \
+        libstagefright_foundation \
+        libutils \
+
 LOCAL_MODULE:= libstagefright_httplive
 
 ifeq ($(TARGET_ARCH),arm)
     LOCAL_CFLAGS += -Wno-psabi
 endif
 
-include $(BUILD_STATIC_LIBRARY)
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index 505bdb3..e91c60b 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -18,12 +18,13 @@
 #define LOG_TAG "LiveSession"
 #include <utils/Log.h>
 
-#include "include/LiveSession.h"
+#include "LiveSession.h"
 
-#include "LiveDataSource.h"
+#include "M3UParser.h"
+#include "PlaylistFetcher.h"
 
-#include "include/M3UParser.h"
 #include "include/HTTPBase.h"
+#include "mpeg2ts/AnotherPacketSource.h"
 
 #include <cutils/properties.h>
 #include <media/stagefright/foundation/hexdump.h>
@@ -33,6 +34,8 @@
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/FileSource.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
 
 #include <ctype.h>
 #include <openssl/aes.h>
@@ -47,37 +50,107 @@
       mUIDValid(uidValid),
       mUID(uid),
       mInPreparationPhase(true),
-      mDataSource(new LiveDataSource),
       mHTTPDataSource(
               HTTPBase::Create(
                   (mFlags & kFlagIncognito)
                     ? HTTPBase::kFlagIncognito
                     : 0)),
       mPrevBandwidthIndex(-1),
-      mLastPlaylistFetchTimeUs(-1),
-      mSeqNumber(-1),
-      mSeekTimeUs(-1),
-      mNumRetries(0),
-      mStartOfPlayback(true),
-      mDurationUs(-1),
-      mDurationFixed(false),
-      mSeekDone(false),
-      mDisconnectPending(false),
-      mMonitorQueueGeneration(0),
-      mRefreshState(INITIAL_MINIMUM_RELOAD_DELAY) {
+      mStreamMask(0),
+      mCheckBandwidthGeneration(0),
+      mLastDequeuedTimeUs(0ll),
+      mReconfigurationInProgress(false),
+      mDisconnectReplyID(0) {
     if (mUIDValid) {
         mHTTPDataSource->setUID(mUID);
     }
+
+    mPacketSources.add(
+            STREAMTYPE_AUDIO, new AnotherPacketSource(NULL /* meta */));
+
+    mPacketSources.add(
+            STREAMTYPE_VIDEO, new AnotherPacketSource(NULL /* meta */));
+
+    mPacketSources.add(
+            STREAMTYPE_SUBTITLES, new AnotherPacketSource(NULL /* meta */));
 }
 
 LiveSession::~LiveSession() {
 }
 
-sp<DataSource> LiveSession::getDataSource() {
-    return mDataSource;
+status_t LiveSession::dequeueAccessUnit(
+        StreamType stream, sp<ABuffer> *accessUnit) {
+    if (!(mStreamMask & stream)) {
+        return UNKNOWN_ERROR;
+    }
+
+    sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(stream);
+
+    status_t finalResult;
+    if (!packetSource->hasBufferAvailable(&finalResult)) {
+        return finalResult == OK ? -EAGAIN : finalResult;
+    }
+
+    status_t err = packetSource->dequeueAccessUnit(accessUnit);
+
+    const char *streamStr;
+    switch (stream) {
+        case STREAMTYPE_AUDIO:
+            streamStr = "audio";
+            break;
+        case STREAMTYPE_VIDEO:
+            streamStr = "video";
+            break;
+        case STREAMTYPE_SUBTITLES:
+            streamStr = "subs";
+            break;
+        default:
+            TRESPASS();
+    }
+
+    if (err == INFO_DISCONTINUITY) {
+        int32_t type;
+        CHECK((*accessUnit)->meta()->findInt32("discontinuity", &type));
+
+        sp<AMessage> extra;
+        if (!(*accessUnit)->meta()->findMessage("extra", &extra)) {
+            extra.clear();
+        }
+
+        ALOGI("[%s] read discontinuity of type %d, extra = %s",
+              streamStr,
+              type,
+              extra == NULL ? "NULL" : extra->debugString().c_str());
+    } else if (err == OK) {
+        int64_t timeUs;
+        CHECK((*accessUnit)->meta()->findInt64("timeUs",  &timeUs));
+        ALOGV("[%s] read buffer at time %lld us", streamStr, timeUs);
+
+        mLastDequeuedTimeUs = timeUs;
+    } else {
+        ALOGI("[%s] encountered error %d", streamStr, err);
+    }
+
+    return err;
 }
 
-void LiveSession::connect(
+status_t LiveSession::getStreamFormat(StreamType stream, sp<AMessage> *format) {
+    if (!(mStreamMask & stream)) {
+        return UNKNOWN_ERROR;
+    }
+
+    sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(stream);
+
+    sp<MetaData> meta = packetSource->getFormat();
+
+    if (meta == NULL) {
+        return -EAGAIN;
+    }
+
+    return convertMetaDataToMessage(meta, format);
+}
+
+void LiveSession::connectAsync(
         const char *url, const KeyedVector<String8, String8> *headers) {
     sp<AMessage> msg = new AMessage(kWhatConnect, id());
     msg->setString("url", url);
@@ -91,55 +164,184 @@
     msg->post();
 }
 
-void LiveSession::disconnect() {
-    Mutex::Autolock autoLock(mLock);
-    mDisconnectPending = true;
+status_t LiveSession::disconnect() {
+    sp<AMessage> msg = new AMessage(kWhatDisconnect, id());
 
-    mHTTPDataSource->disconnect();
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
 
-    (new AMessage(kWhatDisconnect, id()))->post();
+    return err;
 }
 
-void LiveSession::seekTo(int64_t timeUs) {
-    Mutex::Autolock autoLock(mLock);
-    mSeekDone = false;
-
+status_t LiveSession::seekTo(int64_t timeUs) {
     sp<AMessage> msg = new AMessage(kWhatSeek, id());
     msg->setInt64("timeUs", timeUs);
-    msg->post();
 
-    while (!mSeekDone) {
-        mCondition.wait(mLock);
-    }
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+
+    return err;
 }
 
 void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
         case kWhatConnect:
+        {
             onConnect(msg);
             break;
+        }
 
         case kWhatDisconnect:
-            onDisconnect();
-            break;
-
-        case kWhatMonitorQueue:
         {
-            int32_t generation;
-            CHECK(msg->findInt32("generation", &generation));
+            CHECK(msg->senderAwaitsResponse(&mDisconnectReplyID));
 
-            if (generation != mMonitorQueueGeneration) {
-                // Stale event
+            if (mReconfigurationInProgress) {
                 break;
             }
 
-            onMonitorQueue();
+            finishDisconnect();
             break;
         }
 
         case kWhatSeek:
-            onSeek(msg);
+        {
+            uint32_t replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            status_t err = onSeek(msg);
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+
+            response->postReply(replyID);
             break;
+        }
+
+        case kWhatFetcherNotify:
+        {
+            int32_t what;
+            CHECK(msg->findInt32("what", &what));
+
+            switch (what) {
+                case PlaylistFetcher::kWhatStarted:
+                    break;
+                case PlaylistFetcher::kWhatPaused:
+                case PlaylistFetcher::kWhatStopped:
+                {
+                    if (what == PlaylistFetcher::kWhatStopped) {
+                        AString uri;
+                        CHECK(msg->findString("uri", &uri));
+                        mFetcherInfos.removeItem(uri);
+                    }
+
+                    if (mContinuation != NULL) {
+                        CHECK_GT(mContinuationCounter, 0);
+                        if (--mContinuationCounter == 0) {
+                            mContinuation->post();
+                        }
+                    }
+                    break;
+                }
+
+                case PlaylistFetcher::kWhatDurationUpdate:
+                {
+                    AString uri;
+                    CHECK(msg->findString("uri", &uri));
+
+                    int64_t durationUs;
+                    CHECK(msg->findInt64("durationUs", &durationUs));
+
+                    FetcherInfo *info = &mFetcherInfos.editValueFor(uri);
+                    info->mDurationUs = durationUs;
+                    break;
+                }
+
+                case PlaylistFetcher::kWhatError:
+                {
+                    status_t err;
+                    CHECK(msg->findInt32("err", &err));
+
+                    ALOGE("XXX Received error %d from PlaylistFetcher.", err);
+
+                    if (mInPreparationPhase) {
+                        postPrepared(err);
+                    }
+
+                    mPacketSources.valueFor(STREAMTYPE_AUDIO)->signalEOS(err);
+
+                    mPacketSources.valueFor(STREAMTYPE_VIDEO)->signalEOS(err);
+
+                    mPacketSources.valueFor(
+                            STREAMTYPE_SUBTITLES)->signalEOS(err);
+
+                    sp<AMessage> notify = mNotify->dup();
+                    notify->setInt32("what", kWhatError);
+                    notify->setInt32("err", err);
+                    notify->post();
+                    break;
+                }
+
+                case PlaylistFetcher::kWhatTemporarilyDoneFetching:
+                {
+                    AString uri;
+                    CHECK(msg->findString("uri", &uri));
+
+                    FetcherInfo *info = &mFetcherInfos.editValueFor(uri);
+                    info->mIsPrepared = true;
+
+                    if (mInPreparationPhase) {
+                        bool allFetchersPrepared = true;
+                        for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+                            if (!mFetcherInfos.valueAt(i).mIsPrepared) {
+                                allFetchersPrepared = false;
+                                break;
+                            }
+                        }
+
+                        if (allFetchersPrepared) {
+                            postPrepared(OK);
+                        }
+                    }
+                    break;
+                }
+
+                default:
+                    TRESPASS();
+            }
+
+            break;
+        }
+
+        case kWhatCheckBandwidth:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mCheckBandwidthGeneration) {
+                break;
+            }
+
+            onCheckBandwidth();
+            break;
+        }
+
+        case kWhatChangeConfiguration2:
+        {
+            onChangeConfiguration2(msg);
+            break;
+        }
+
+        case kWhatChangeConfiguration3:
+        {
+            onChangeConfiguration3(msg);
+            break;
+        }
+
+        case kWhatFinishDisconnect2:
+        {
+            onFinishDisconnect2();
+            break;
+        }
 
         default:
             TRESPASS();
@@ -172,48 +374,127 @@
         headers = NULL;
     }
 
+#if 1
     ALOGI("onConnect <URL suppressed>");
+#else
+    ALOGI("onConnect %s", url.c_str());
+#endif
 
     mMasterURL = url;
 
     bool dummy;
-    sp<M3UParser> playlist = fetchPlaylist(url.c_str(), &dummy);
+    mPlaylist = fetchPlaylist(url.c_str(), NULL /* curPlaylistHash */, &dummy);
 
-    if (playlist == NULL) {
+    if (mPlaylist == NULL) {
         ALOGE("unable to fetch master playlist '%s'.", url.c_str());
 
-        signalEOS(ERROR_IO);
+        postPrepared(ERROR_IO);
         return;
     }
 
-    if (playlist->isVariantPlaylist()) {
-        for (size_t i = 0; i < playlist->size(); ++i) {
+    // We trust the content provider to make a reasonable choice of preferred
+    // initial bandwidth by listing it first in the variant playlist.
+    // At startup we really don't have a good estimate on the available
+    // network bandwidth since we haven't tranferred any data yet. Once
+    // we have we can make a better informed choice.
+    size_t initialBandwidth = 0;
+    size_t initialBandwidthIndex = 0;
+
+    if (mPlaylist->isVariantPlaylist()) {
+        for (size_t i = 0; i < mPlaylist->size(); ++i) {
             BandwidthItem item;
 
+            item.mPlaylistIndex = i;
+
             sp<AMessage> meta;
-            playlist->itemAt(i, &item.mURI, &meta);
+            AString uri;
+            mPlaylist->itemAt(i, &uri, &meta);
 
             unsigned long bandwidth;
             CHECK(meta->findInt32("bandwidth", (int32_t *)&item.mBandwidth));
 
+            if (initialBandwidth == 0) {
+                initialBandwidth = item.mBandwidth;
+            }
+
             mBandwidthItems.push(item);
         }
 
         CHECK_GT(mBandwidthItems.size(), 0u);
 
         mBandwidthItems.sort(SortByBandwidth);
+
+        for (size_t i = 0; i < mBandwidthItems.size(); ++i) {
+            if (mBandwidthItems.itemAt(i).mBandwidth == initialBandwidth) {
+                initialBandwidthIndex = i;
+                break;
+            }
+        }
+    } else {
+        // dummy item.
+        BandwidthItem item;
+        item.mPlaylistIndex = 0;
+        item.mBandwidth = 0;
+        mBandwidthItems.push(item);
     }
 
-    postMonitorQueue();
+    changeConfiguration(0ll /* timeUs */, initialBandwidthIndex);
 }
 
-void LiveSession::onDisconnect() {
-    ALOGI("onDisconnect");
+void LiveSession::finishDisconnect() {
+    // No reconfiguration is currently pending, make sure none will trigger
+    // during disconnection either.
+    cancelCheckBandwidthEvent();
 
-    signalEOS(ERROR_END_OF_STREAM);
+    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+        mFetcherInfos.valueAt(i).mFetcher->stopAsync();
+    }
 
-    Mutex::Autolock autoLock(mLock);
-    mDisconnectPending = false;
+    sp<AMessage> msg = new AMessage(kWhatFinishDisconnect2, id());
+
+    mContinuationCounter = mFetcherInfos.size();
+    mContinuation = msg;
+
+    if (mContinuationCounter == 0) {
+        msg->post();
+    }
+}
+
+void LiveSession::onFinishDisconnect2() {
+    mContinuation.clear();
+
+    mPacketSources.valueFor(STREAMTYPE_AUDIO)->signalEOS(ERROR_END_OF_STREAM);
+    mPacketSources.valueFor(STREAMTYPE_VIDEO)->signalEOS(ERROR_END_OF_STREAM);
+
+    mPacketSources.valueFor(
+            STREAMTYPE_SUBTITLES)->signalEOS(ERROR_END_OF_STREAM);
+
+    sp<AMessage> response = new AMessage;
+    response->setInt32("err", OK);
+
+    response->postReply(mDisconnectReplyID);
+    mDisconnectReplyID = 0;
+}
+
+sp<PlaylistFetcher> LiveSession::addFetcher(const char *uri) {
+    ssize_t index = mFetcherInfos.indexOfKey(uri);
+
+    if (index >= 0) {
+        return NULL;
+    }
+
+    sp<AMessage> notify = new AMessage(kWhatFetcherNotify, id());
+    notify->setString("uri", uri);
+
+    FetcherInfo info;
+    info.mFetcher = new PlaylistFetcher(notify, this, uri);
+    info.mDurationUs = -1ll;
+    info.mIsPrepared = false;
+    looper()->registerHandler(info.mFetcher);
+
+    mFetcherInfos.add(uri, info);
+
+    return info.mFetcher;
 }
 
 status_t LiveSession::fetchFile(
@@ -229,14 +510,6 @@
             && strncasecmp(url, "https://", 8)) {
         return ERROR_UNSUPPORTED;
     } else {
-        {
-            Mutex::Autolock autoLock(mLock);
-
-            if (mDisconnectPending) {
-                return ERROR_IO;
-            }
-        }
-
         KeyedVector<String8, String8> headers = mExtraHeaders;
         if (range_offset > 0 || range_length >= 0) {
             headers.add(
@@ -315,7 +588,8 @@
     return OK;
 }
 
-sp<M3UParser> LiveSession::fetchPlaylist(const char *url, bool *unchanged) {
+sp<M3UParser> LiveSession::fetchPlaylist(
+        const char *url, uint8_t *curPlaylistHash, bool *unchanged) {
     ALOGV("fetchPlaylist '%s'", url);
 
     *unchanged = false;
@@ -339,13 +613,8 @@
 
     MD5_Final(hash, &m);
 
-    if (mPlaylist != NULL && !memcmp(hash, mPlaylistHash, 16)) {
+    if (curPlaylistHash != NULL && !memcmp(hash, curPlaylistHash, 16)) {
         // playlist unchanged
-
-        if (mRefreshState != THIRD_UNCHANGED_RELOAD_ATTEMPT) {
-            mRefreshState = (RefreshState)(mRefreshState + 1);
-        }
-
         *unchanged = true;
 
         ALOGV("Playlist unchanged, refresh state is now %d",
@@ -354,9 +623,9 @@
         return NULL;
     }
 
-    memcpy(mPlaylistHash, hash, sizeof(hash));
-
-    mRefreshState = INITIAL_MINIMUM_RELOAD_DELAY;
+    if (curPlaylistHash != NULL) {
+        memcpy(curPlaylistHash, hash, sizeof(hash));
+    }
 #endif
 
     sp<M3UParser> playlist =
@@ -371,37 +640,6 @@
     return playlist;
 }
 
-int64_t LiveSession::getSegmentStartTimeUs(int32_t seqNumber) const {
-    CHECK(mPlaylist != NULL);
-
-    int32_t firstSeqNumberInPlaylist;
-    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
-                "media-sequence", &firstSeqNumberInPlaylist)) {
-        firstSeqNumberInPlaylist = 0;
-    }
-
-    int32_t lastSeqNumberInPlaylist =
-        firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
-
-    CHECK_GE(seqNumber, firstSeqNumberInPlaylist);
-    CHECK_LE(seqNumber, lastSeqNumberInPlaylist);
-
-    int64_t segmentStartUs = 0ll;
-    for (int32_t index = 0;
-            index < seqNumber - firstSeqNumberInPlaylist; ++index) {
-        sp<AMessage> itemMeta;
-        CHECK(mPlaylist->itemAt(
-                    index, NULL /* uri */, &itemMeta));
-
-        int64_t itemDurationUs;
-        CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
-
-        segmentStartUs += itemDurationUs;
-    }
-
-    return segmentStartUs;
-}
-
 static double uniformRand() {
     return (double)rand() / RAND_MAX;
 }
@@ -412,36 +650,50 @@
     }
 
 #if 1
-    int32_t bandwidthBps;
-    if (mHTTPDataSource != NULL
-            && mHTTPDataSource->estimateBandwidth(&bandwidthBps)) {
-        ALOGV("bandwidth estimated at %.2f kbps", bandwidthBps / 1024.0f);
-    } else {
-        ALOGV("no bandwidth estimate.");
-        return 0;  // Pick the lowest bandwidth stream by default.
-    }
-
     char value[PROPERTY_VALUE_MAX];
-    if (property_get("media.httplive.max-bw", value, NULL)) {
+    ssize_t index = -1;
+    if (property_get("media.httplive.bw-index", value, NULL)) {
         char *end;
-        long maxBw = strtoul(value, &end, 10);
-        if (end > value && *end == '\0') {
-            if (maxBw > 0 && bandwidthBps > maxBw) {
-                ALOGV("bandwidth capped to %ld bps", maxBw);
-                bandwidthBps = maxBw;
-            }
+        index = strtol(value, &end, 10);
+        CHECK(end > value && *end == '\0');
+
+        if (index >= 0 && (size_t)index >= mBandwidthItems.size()) {
+            index = mBandwidthItems.size() - 1;
         }
     }
 
-    // Consider only 80% of the available bandwidth usable.
-    bandwidthBps = (bandwidthBps * 8) / 10;
+    if (index < 0) {
+        int32_t bandwidthBps;
+        if (mHTTPDataSource != NULL
+                && mHTTPDataSource->estimateBandwidth(&bandwidthBps)) {
+            ALOGV("bandwidth estimated at %.2f kbps", bandwidthBps / 1024.0f);
+        } else {
+            ALOGV("no bandwidth estimate.");
+            return 0;  // Pick the lowest bandwidth stream by default.
+        }
 
-    // Pick the highest bandwidth stream below or equal to estimated bandwidth.
+        char value[PROPERTY_VALUE_MAX];
+        if (property_get("media.httplive.max-bw", value, NULL)) {
+            char *end;
+            long maxBw = strtoul(value, &end, 10);
+            if (end > value && *end == '\0') {
+                if (maxBw > 0 && bandwidthBps > maxBw) {
+                    ALOGV("bandwidth capped to %ld bps", maxBw);
+                    bandwidthBps = maxBw;
+                }
+            }
+        }
 
-    size_t index = mBandwidthItems.size() - 1;
-    while (index > 0 && mBandwidthItems.itemAt(index).mBandwidth
-                            > (size_t)bandwidthBps) {
-        --index;
+        // Consider only 80% of the available bandwidth usable.
+        bandwidthBps = (bandwidthBps * 8) / 10;
+
+        // Pick the highest bandwidth stream below or equal to estimated bandwidth.
+
+        index = mBandwidthItems.size() - 1;
+        while (index > 0 && mBandwidthItems.itemAt(index).mBandwidth
+                                > (size_t)bandwidthBps) {
+            --index;
+        }
     }
 #elif 0
     // Change bandwidth at random()
@@ -452,6 +704,8 @@
     // to lowest)
     const size_t kMinIndex = 0;
 
+    static ssize_t mPrevBandwidthIndex = -1;
+
     size_t index;
     if (mPrevBandwidthIndex < 0) {
         index = kMinIndex;
@@ -463,6 +717,7 @@
             index = kMinIndex;
         }
     }
+    mPrevBandwidthIndex = index;
 #elif 0
     // Pick the highest bandwidth stream below or equal to 1.2 Mbit/sec
 
@@ -470,559 +725,51 @@
     while (index > 0 && mBandwidthItems.itemAt(index).mBandwidth > 1200000) {
         --index;
     }
+#elif 1
+    char value[PROPERTY_VALUE_MAX];
+    size_t index;
+    if (property_get("media.httplive.bw-index", value, NULL)) {
+        char *end;
+        index = strtoul(value, &end, 10);
+        CHECK(end > value && *end == '\0');
+
+        if (index >= mBandwidthItems.size()) {
+            index = mBandwidthItems.size() - 1;
+        }
+    } else {
+        index = 0;
+    }
 #else
     size_t index = mBandwidthItems.size() - 1;  // Highest bandwidth stream
 #endif
 
+    CHECK_GE(index, 0);
+
     return index;
 }
 
-bool LiveSession::timeToRefreshPlaylist(int64_t nowUs) const {
-    if (mPlaylist == NULL) {
-        CHECK_EQ((int)mRefreshState, (int)INITIAL_MINIMUM_RELOAD_DELAY);
-        return true;
+status_t LiveSession::onSeek(const sp<AMessage> &msg) {
+    int64_t timeUs;
+    CHECK(msg->findInt64("timeUs", &timeUs));
+
+    if (!mReconfigurationInProgress) {
+        changeConfiguration(timeUs, getBandwidthIndex());
     }
 
-    int32_t targetDurationSecs;
-    CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
-
-    int64_t targetDurationUs = targetDurationSecs * 1000000ll;
-
-    int64_t minPlaylistAgeUs;
-
-    switch (mRefreshState) {
-        case INITIAL_MINIMUM_RELOAD_DELAY:
-        {
-            size_t n = mPlaylist->size();
-            if (n > 0) {
-                sp<AMessage> itemMeta;
-                CHECK(mPlaylist->itemAt(n - 1, NULL /* uri */, &itemMeta));
-
-                int64_t itemDurationUs;
-                CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
-
-                minPlaylistAgeUs = itemDurationUs;
-                break;
-            }
-
-            // fall through
-        }
-
-        case FIRST_UNCHANGED_RELOAD_ATTEMPT:
-        {
-            minPlaylistAgeUs = targetDurationUs / 2;
-            break;
-        }
-
-        case SECOND_UNCHANGED_RELOAD_ATTEMPT:
-        {
-            minPlaylistAgeUs = (targetDurationUs * 3) / 2;
-            break;
-        }
-
-        case THIRD_UNCHANGED_RELOAD_ATTEMPT:
-        {
-            minPlaylistAgeUs = targetDurationUs * 3;
-            break;
-        }
-
-        default:
-            TRESPASS();
-            break;
-    }
-
-    return mLastPlaylistFetchTimeUs + minPlaylistAgeUs <= nowUs;
-}
-
-void LiveSession::onDownloadNext() {
-    size_t bandwidthIndex = getBandwidthIndex();
-
-rinse_repeat:
-    int64_t nowUs = ALooper::GetNowUs();
-
-    if (mLastPlaylistFetchTimeUs < 0
-            || (ssize_t)bandwidthIndex != mPrevBandwidthIndex
-            || (!mPlaylist->isComplete() && timeToRefreshPlaylist(nowUs))) {
-        AString url;
-        if (mBandwidthItems.size() > 0) {
-            url = mBandwidthItems.editItemAt(bandwidthIndex).mURI;
-        } else {
-            url = mMasterURL;
-        }
-
-        if ((ssize_t)bandwidthIndex != mPrevBandwidthIndex) {
-            // If we switch bandwidths, do not pay any heed to whether
-            // playlists changed since the last time...
-            mPlaylist.clear();
-        }
-
-        bool unchanged;
-        sp<M3UParser> playlist = fetchPlaylist(url.c_str(), &unchanged);
-        if (playlist == NULL) {
-            if (unchanged) {
-                // We succeeded in fetching the playlist, but it was
-                // unchanged from the last time we tried.
-            } else {
-                ALOGE("failed to load playlist at url '%s'", url.c_str());
-                signalEOS(ERROR_IO);
-
-                return;
-            }
-        } else {
-            mPlaylist = playlist;
-        }
-
-        if (!mDurationFixed) {
-            Mutex::Autolock autoLock(mLock);
-
-            if (!mPlaylist->isComplete() && !mPlaylist->isEvent()) {
-                mDurationUs = -1;
-                mDurationFixed = true;
-            } else {
-                mDurationUs = 0;
-                for (size_t i = 0; i < mPlaylist->size(); ++i) {
-                    sp<AMessage> itemMeta;
-                    CHECK(mPlaylist->itemAt(
-                                i, NULL /* uri */, &itemMeta));
-
-                    int64_t itemDurationUs;
-                    CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
-
-                    mDurationUs += itemDurationUs;
-                }
-
-                mDurationFixed = mPlaylist->isComplete();
-            }
-        }
-
-        mLastPlaylistFetchTimeUs = ALooper::GetNowUs();
-    }
-
-    int32_t firstSeqNumberInPlaylist;
-    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
-                "media-sequence", &firstSeqNumberInPlaylist)) {
-        firstSeqNumberInPlaylist = 0;
-    }
-
-    bool seekDiscontinuity = false;
-    bool explicitDiscontinuity = false;
-    bool bandwidthChanged = false;
-
-    if (mSeekTimeUs >= 0) {
-        if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
-            size_t index = 0;
-            int64_t segmentStartUs = 0;
-            while (index < mPlaylist->size()) {
-                sp<AMessage> itemMeta;
-                CHECK(mPlaylist->itemAt(
-                            index, NULL /* uri */, &itemMeta));
-
-                int64_t itemDurationUs;
-                CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
-
-                if (mSeekTimeUs < segmentStartUs + itemDurationUs) {
-                    break;
-                }
-
-                segmentStartUs += itemDurationUs;
-                ++index;
-            }
-
-            if (index < mPlaylist->size()) {
-                int32_t newSeqNumber = firstSeqNumberInPlaylist + index;
-
-                ALOGI("seeking to seq no %d", newSeqNumber);
-
-                mSeqNumber = newSeqNumber;
-
-                mDataSource->reset();
-
-                // reseting the data source will have had the
-                // side effect of discarding any previously queued
-                // bandwidth change discontinuity.
-                // Therefore we'll need to treat these seek
-                // discontinuities as involving a bandwidth change
-                // even if they aren't directly.
-                seekDiscontinuity = true;
-                bandwidthChanged = true;
-            }
-        }
-
-        mSeekTimeUs = -1;
-
-        Mutex::Autolock autoLock(mLock);
-        mSeekDone = true;
-        mCondition.broadcast();
-    }
-
-    const int32_t lastSeqNumberInPlaylist =
-        firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
-
-    if (mSeqNumber < 0) {
-        if (mPlaylist->isComplete()) {
-            mSeqNumber = firstSeqNumberInPlaylist;
-        } else {
-            // If this is a live session, start 3 segments from the end.
-            mSeqNumber = lastSeqNumberInPlaylist - 3;
-            if (mSeqNumber < firstSeqNumberInPlaylist) {
-                mSeqNumber = firstSeqNumberInPlaylist;
-            }
-        }
-    }
-
-    if (mSeqNumber < firstSeqNumberInPlaylist
-            || mSeqNumber > lastSeqNumberInPlaylist) {
-        if (mPrevBandwidthIndex != (ssize_t)bandwidthIndex) {
-            // Go back to the previous bandwidth.
-
-            ALOGI("new bandwidth does not have the sequence number "
-                 "we're looking for, switching back to previous bandwidth");
-
-            mLastPlaylistFetchTimeUs = -1;
-            bandwidthIndex = mPrevBandwidthIndex;
-            goto rinse_repeat;
-        }
-
-        if (!mPlaylist->isComplete() && mNumRetries < kMaxNumRetries) {
-            ++mNumRetries;
-
-            if (mSeqNumber > lastSeqNumberInPlaylist) {
-                mLastPlaylistFetchTimeUs = -1;
-                postMonitorQueue(3000000ll);
-                return;
-            }
-
-            // we've missed the boat, let's start from the lowest sequence
-            // number available and signal a discontinuity.
-
-            ALOGI("We've missed the boat, restarting playback.");
-            mSeqNumber = lastSeqNumberInPlaylist;
-            explicitDiscontinuity = true;
-
-            // fall through
-        } else {
-            ALOGE("Cannot find sequence number %d in playlist "
-                 "(contains %d - %d)",
-                 mSeqNumber, firstSeqNumberInPlaylist,
-                 firstSeqNumberInPlaylist + mPlaylist->size() - 1);
-
-            signalEOS(ERROR_END_OF_STREAM);
-            return;
-        }
-    }
-
-    mNumRetries = 0;
-
-    AString uri;
-    sp<AMessage> itemMeta;
-    CHECK(mPlaylist->itemAt(
-                mSeqNumber - firstSeqNumberInPlaylist,
-                &uri,
-                &itemMeta));
-
-    int32_t val;
-    if (itemMeta->findInt32("discontinuity", &val) && val != 0) {
-        explicitDiscontinuity = true;
-    }
-
-    int64_t range_offset, range_length;
-    if (!itemMeta->findInt64("range-offset", &range_offset)
-            || !itemMeta->findInt64("range-length", &range_length)) {
-        range_offset = 0;
-        range_length = -1;
-    }
-
-    ALOGV("fetching segment %d from (%d .. %d)",
-          mSeqNumber, firstSeqNumberInPlaylist, lastSeqNumberInPlaylist);
-
-    sp<ABuffer> buffer;
-    status_t err = fetchFile(uri.c_str(), &buffer, range_offset, range_length);
-    if (err != OK) {
-        ALOGE("failed to fetch .ts segment at url '%s'", uri.c_str());
-        signalEOS(err);
-        return;
-    }
-
-    CHECK(buffer != NULL);
-
-    err = decryptBuffer(mSeqNumber - firstSeqNumberInPlaylist, buffer);
-
-    if (err != OK) {
-        ALOGE("decryptBuffer failed w/ error %d", err);
-
-        signalEOS(err);
-        return;
-    }
-
-    if (buffer->size() == 0 || buffer->data()[0] != 0x47) {
-        // Not a transport stream???
-
-        ALOGE("This doesn't look like a transport stream...");
-
-        mBandwidthItems.removeAt(bandwidthIndex);
-
-        if (mBandwidthItems.isEmpty()) {
-            signalEOS(ERROR_UNSUPPORTED);
-            return;
-        }
-
-        ALOGI("Retrying with a different bandwidth stream.");
-
-        mLastPlaylistFetchTimeUs = -1;
-        bandwidthIndex = getBandwidthIndex();
-        mPrevBandwidthIndex = bandwidthIndex;
-        mSeqNumber = -1;
-
-        goto rinse_repeat;
-    }
-
-    if ((size_t)mPrevBandwidthIndex != bandwidthIndex) {
-        bandwidthChanged = true;
-    }
-
-    if (mPrevBandwidthIndex < 0) {
-        // Don't signal a bandwidth change at the very beginning of
-        // playback.
-        bandwidthChanged = false;
-    }
-
-    if (mStartOfPlayback) {
-        seekDiscontinuity = true;
-        mStartOfPlayback = false;
-    }
-
-    if (seekDiscontinuity || explicitDiscontinuity || bandwidthChanged) {
-        // Signal discontinuity.
-
-        ALOGI("queueing discontinuity (seek=%d, explicit=%d, bandwidthChanged=%d)",
-             seekDiscontinuity, explicitDiscontinuity, bandwidthChanged);
-
-        sp<ABuffer> tmp = new ABuffer(188);
-        memset(tmp->data(), 0, tmp->size());
-
-        // signal a 'hard' discontinuity for explicit or bandwidthChanged.
-        uint8_t type = (explicitDiscontinuity || bandwidthChanged) ? 1 : 0;
-
-        if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
-            // If this was a live event this made no sense since
-            // we don't have access to all the segment before the current
-            // one.
-            int64_t segmentStartTimeUs = getSegmentStartTimeUs(mSeqNumber);
-            memcpy(tmp->data() + 2, &segmentStartTimeUs, sizeof(segmentStartTimeUs));
-
-            type |= 2;
-        }
-
-        tmp->data()[1] = type;
-
-        mDataSource->queueBuffer(tmp);
-    }
-
-    mDataSource->queueBuffer(buffer);
-
-    mPrevBandwidthIndex = bandwidthIndex;
-    ++mSeqNumber;
-
-    postMonitorQueue();
-}
-
-void LiveSession::signalEOS(status_t err) {
-    if (mInPreparationPhase && mNotify != NULL) {
-        sp<AMessage> notify = mNotify->dup();
-
-        notify->setInt32(
-                "what",
-                err == ERROR_END_OF_STREAM
-                    ? kWhatPrepared : kWhatPreparationFailed);
-
-        if (err != ERROR_END_OF_STREAM) {
-            notify->setInt32("err", err);
-        }
-
-        notify->post();
-
-        mInPreparationPhase = false;
-    }
-
-    mDataSource->queueEOS(err);
-}
-
-void LiveSession::onMonitorQueue() {
-    if (mSeekTimeUs >= 0
-            || mDataSource->countQueuedBuffers() < kMaxNumQueuedFragments) {
-        onDownloadNext();
-    } else {
-        if (mInPreparationPhase) {
-            if (mNotify != NULL) {
-                sp<AMessage> notify = mNotify->dup();
-                notify->setInt32("what", kWhatPrepared);
-                notify->post();
-            }
-
-            mInPreparationPhase = false;
-        }
-
-        postMonitorQueue(1000000ll);
-    }
-}
-
-status_t LiveSession::decryptBuffer(
-        size_t playlistIndex, const sp<ABuffer> &buffer) {
-    sp<AMessage> itemMeta;
-    bool found = false;
-    AString method;
-
-    for (ssize_t i = playlistIndex; i >= 0; --i) {
-        AString uri;
-        CHECK(mPlaylist->itemAt(i, &uri, &itemMeta));
-
-        if (itemMeta->findString("cipher-method", &method)) {
-            found = true;
-            break;
-        }
-    }
-
-    if (!found) {
-        method = "NONE";
-    }
-
-    if (method == "NONE") {
-        return OK;
-    } else if (!(method == "AES-128")) {
-        ALOGE("Unsupported cipher method '%s'", method.c_str());
-        return ERROR_UNSUPPORTED;
-    }
-
-    AString keyURI;
-    if (!itemMeta->findString("cipher-uri", &keyURI)) {
-        ALOGE("Missing key uri");
-        return ERROR_MALFORMED;
-    }
-
-    ssize_t index = mAESKeyForURI.indexOfKey(keyURI);
-
-    sp<ABuffer> key;
-    if (index >= 0) {
-        key = mAESKeyForURI.valueAt(index);
-    } else {
-        key = new ABuffer(16);
-
-        sp<HTTPBase> keySource =
-              HTTPBase::Create(
-                  (mFlags & kFlagIncognito)
-                    ? HTTPBase::kFlagIncognito
-                    : 0);
-
-        if (mUIDValid) {
-            keySource->setUID(mUID);
-        }
-
-        status_t err =
-            keySource->connect(
-                    keyURI.c_str(),
-                    mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders);
-
-        if (err == OK) {
-            size_t offset = 0;
-            while (offset < 16) {
-                ssize_t n = keySource->readAt(
-                        offset, key->data() + offset, 16 - offset);
-                if (n <= 0) {
-                    err = ERROR_IO;
-                    break;
-                }
-
-                offset += n;
-            }
-        }
-
-        if (err != OK) {
-            ALOGE("failed to fetch cipher key from '%s'.", keyURI.c_str());
-            return ERROR_IO;
-        }
-
-        mAESKeyForURI.add(keyURI, key);
-    }
-
-    AES_KEY aes_key;
-    if (AES_set_decrypt_key(key->data(), 128, &aes_key) != 0) {
-        ALOGE("failed to set AES decryption key.");
-        return UNKNOWN_ERROR;
-    }
-
-    unsigned char aes_ivec[16];
-
-    AString iv;
-    if (itemMeta->findString("cipher-iv", &iv)) {
-        if ((!iv.startsWith("0x") && !iv.startsWith("0X"))
-                || iv.size() != 16 * 2 + 2) {
-            ALOGE("malformed cipher IV '%s'.", iv.c_str());
-            return ERROR_MALFORMED;
-        }
-
-        memset(aes_ivec, 0, sizeof(aes_ivec));
-        for (size_t i = 0; i < 16; ++i) {
-            char c1 = tolower(iv.c_str()[2 + 2 * i]);
-            char c2 = tolower(iv.c_str()[3 + 2 * i]);
-            if (!isxdigit(c1) || !isxdigit(c2)) {
-                ALOGE("malformed cipher IV '%s'.", iv.c_str());
-                return ERROR_MALFORMED;
-            }
-            uint8_t nibble1 = isdigit(c1) ? c1 - '0' : c1 - 'a' + 10;
-            uint8_t nibble2 = isdigit(c2) ? c2 - '0' : c2 - 'a' + 10;
-
-            aes_ivec[i] = nibble1 << 4 | nibble2;
-        }
-    } else {
-        memset(aes_ivec, 0, sizeof(aes_ivec));
-        aes_ivec[15] = mSeqNumber & 0xff;
-        aes_ivec[14] = (mSeqNumber >> 8) & 0xff;
-        aes_ivec[13] = (mSeqNumber >> 16) & 0xff;
-        aes_ivec[12] = (mSeqNumber >> 24) & 0xff;
-    }
-
-    AES_cbc_encrypt(
-            buffer->data(), buffer->data(), buffer->size(),
-            &aes_key, aes_ivec, AES_DECRYPT);
-
-    // hexdump(buffer->data(), buffer->size());
-
-    size_t n = buffer->size();
-    CHECK_GT(n, 0u);
-
-    size_t pad = buffer->data()[n - 1];
-
-    CHECK_GT(pad, 0u);
-    CHECK_LE(pad, 16u);
-    CHECK_GE((size_t)n, pad);
-    for (size_t i = 0; i < pad; ++i) {
-        CHECK_EQ((unsigned)buffer->data()[n - 1 - i], pad);
-    }
-
-    n -= pad;
-
-    buffer->setRange(buffer->offset(), n);
-
     return OK;
 }
 
-void LiveSession::postMonitorQueue(int64_t delayUs) {
-    sp<AMessage> msg = new AMessage(kWhatMonitorQueue, id());
-    msg->setInt32("generation", ++mMonitorQueueGeneration);
-    msg->post(delayUs);
-}
-
-void LiveSession::onSeek(const sp<AMessage> &msg) {
-    int64_t timeUs;
-    CHECK(msg->findInt64("timeUs", &timeUs));
-
-    mSeekTimeUs = timeUs;
-    postMonitorQueue();
-}
-
 status_t LiveSession::getDuration(int64_t *durationUs) const {
-    Mutex::Autolock autoLock(mLock);
-    *durationUs = mDurationUs;
+    int64_t maxDurationUs = 0ll;
+    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+        int64_t fetcherDurationUs = mFetcherInfos.valueAt(i).mDurationUs;
+
+        if (fetcherDurationUs >= 0ll && fetcherDurationUs > maxDurationUs) {
+            maxDurationUs = fetcherDurationUs;
+        }
+    }
+
+    *durationUs = maxDurationUs;
 
     return OK;
 }
@@ -1033,7 +780,326 @@
 }
 
 bool LiveSession::hasDynamicDuration() const {
-    return !mDurationFixed;
+    return false;
+}
+
+void LiveSession::changeConfiguration(int64_t timeUs, size_t bandwidthIndex) {
+    CHECK(!mReconfigurationInProgress);
+    mReconfigurationInProgress = true;
+
+    mPrevBandwidthIndex = bandwidthIndex;
+
+    ALOGV("changeConfiguration => timeUs:%lld us, bwIndex:%d",
+          timeUs, bandwidthIndex);
+
+    mPlaylist->pickRandomMediaItems();
+
+    CHECK_LT(bandwidthIndex, mBandwidthItems.size());
+    const BandwidthItem &item = mBandwidthItems.itemAt(bandwidthIndex);
+
+    uint32_t streamMask = 0;
+
+    AString audioURI;
+    if (mPlaylist->getAudioURI(item.mPlaylistIndex, &audioURI)) {
+        streamMask |= STREAMTYPE_AUDIO;
+    }
+
+    AString videoURI;
+    if (mPlaylist->getVideoURI(item.mPlaylistIndex, &videoURI)) {
+        streamMask |= STREAMTYPE_VIDEO;
+    }
+
+    AString subtitleURI;
+    if (mPlaylist->getSubtitleURI(item.mPlaylistIndex, &subtitleURI)) {
+        streamMask |= STREAMTYPE_SUBTITLES;
+    }
+
+    // Step 1, stop and discard fetchers that are no longer needed.
+    // Pause those that we'll reuse.
+    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+        const AString &uri = mFetcherInfos.keyAt(i);
+
+        bool discardFetcher = true;
+
+        // If we're seeking all current fetchers are discarded.
+        if (timeUs < 0ll) {
+            if (((streamMask & STREAMTYPE_AUDIO) && uri == audioURI)
+                    || ((streamMask & STREAMTYPE_VIDEO) && uri == videoURI)
+                    || ((streamMask & STREAMTYPE_SUBTITLES) && uri == subtitleURI)) {
+                discardFetcher = false;
+            }
+        }
+
+        if (discardFetcher) {
+            mFetcherInfos.valueAt(i).mFetcher->stopAsync();
+        } else {
+            mFetcherInfos.valueAt(i).mFetcher->pauseAsync();
+        }
+    }
+
+    sp<AMessage> msg = new AMessage(kWhatChangeConfiguration2, id());
+    msg->setInt32("streamMask", streamMask);
+    msg->setInt64("timeUs", timeUs);
+    if (streamMask & STREAMTYPE_AUDIO) {
+        msg->setString("audioURI", audioURI.c_str());
+    }
+    if (streamMask & STREAMTYPE_VIDEO) {
+        msg->setString("videoURI", videoURI.c_str());
+    }
+    if (streamMask & STREAMTYPE_SUBTITLES) {
+        msg->setString("subtitleURI", subtitleURI.c_str());
+    }
+
+    // Every time a fetcher acknowledges the stopAsync or pauseAsync request
+    // we'll decrement mContinuationCounter, once it reaches zero, i.e. all
+    // fetchers have completed their asynchronous operation, we'll post
+    // mContinuation, which then is handled below in onChangeConfiguration2.
+    mContinuationCounter = mFetcherInfos.size();
+    mContinuation = msg;
+
+    if (mContinuationCounter == 0) {
+        msg->post();
+    }
+}
+
+void LiveSession::onChangeConfiguration2(const sp<AMessage> &msg) {
+    mContinuation.clear();
+
+    // All fetchers are either suspended or have been removed now.
+
+    uint32_t streamMask;
+    CHECK(msg->findInt32("streamMask", (int32_t *)&streamMask));
+
+    AString audioURI, videoURI, subtitleURI;
+    if (streamMask & STREAMTYPE_AUDIO) {
+        CHECK(msg->findString("audioURI", &audioURI));
+        ALOGV("audioURI = '%s'", audioURI.c_str());
+    }
+    if (streamMask & STREAMTYPE_VIDEO) {
+        CHECK(msg->findString("videoURI", &videoURI));
+        ALOGV("videoURI = '%s'", videoURI.c_str());
+    }
+    if (streamMask & STREAMTYPE_SUBTITLES) {
+        CHECK(msg->findString("subtitleURI", &subtitleURI));
+        ALOGV("subtitleURI = '%s'", subtitleURI.c_str());
+    }
+
+    // Determine which decoders to shut down on the player side,
+    // a decoder has to be shut down if either
+    // 1) its streamtype was active before but no longer is.
+    // or
+    // 2) its streamtype was already active and still is but the URI
+    //    has changed.
+    uint32_t changedMask = 0;
+    if (((mStreamMask & streamMask & STREAMTYPE_AUDIO)
+                && !(audioURI == mAudioURI))
+        || (mStreamMask & ~streamMask & STREAMTYPE_AUDIO)) {
+        changedMask |= STREAMTYPE_AUDIO;
+    }
+    if (((mStreamMask & streamMask & STREAMTYPE_VIDEO)
+                && !(videoURI == mVideoURI))
+        || (mStreamMask & ~streamMask & STREAMTYPE_VIDEO)) {
+        changedMask |= STREAMTYPE_VIDEO;
+    }
+
+    if (changedMask == 0) {
+        // If nothing changed as far as the audio/video decoders
+        // are concerned we can proceed.
+        onChangeConfiguration3(msg);
+        return;
+    }
+
+    // Something changed, inform the player which will shutdown the
+    // corresponding decoders and will post the reply once that's done.
+    // Handling the reply will continue executing below in
+    // onChangeConfiguration3.
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatStreamsChanged);
+    notify->setInt32("changedMask", changedMask);
+
+    msg->setWhat(kWhatChangeConfiguration3);
+    msg->setTarget(id());
+
+    notify->setMessage("reply", msg);
+    notify->post();
+}
+
+void LiveSession::onChangeConfiguration3(const sp<AMessage> &msg) {
+    // All remaining fetchers are still suspended, the player has shutdown
+    // any decoders that needed it.
+
+    uint32_t streamMask;
+    CHECK(msg->findInt32("streamMask", (int32_t *)&streamMask));
+
+    AString audioURI, videoURI, subtitleURI;
+    if (streamMask & STREAMTYPE_AUDIO) {
+        CHECK(msg->findString("audioURI", &audioURI));
+    }
+    if (streamMask & STREAMTYPE_VIDEO) {
+        CHECK(msg->findString("videoURI", &videoURI));
+    }
+    if (streamMask & STREAMTYPE_SUBTITLES) {
+        CHECK(msg->findString("subtitleURI", &subtitleURI));
+    }
+
+    int64_t timeUs;
+    CHECK(msg->findInt64("timeUs", &timeUs));
+
+    if (timeUs < 0ll) {
+        timeUs = mLastDequeuedTimeUs;
+    }
+
+    mStreamMask = streamMask;
+    mAudioURI = audioURI;
+    mVideoURI = videoURI;
+    mSubtitleURI = subtitleURI;
+
+    // Resume all existing fetchers and assign them packet sources.
+    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+        const AString &uri = mFetcherInfos.keyAt(i);
+
+        uint32_t resumeMask = 0;
+
+        sp<AnotherPacketSource> audioSource;
+        if ((streamMask & STREAMTYPE_AUDIO) && uri == audioURI) {
+            audioSource = mPacketSources.valueFor(STREAMTYPE_AUDIO);
+            resumeMask |= STREAMTYPE_AUDIO;
+        }
+
+        sp<AnotherPacketSource> videoSource;
+        if ((streamMask & STREAMTYPE_VIDEO) && uri == videoURI) {
+            videoSource = mPacketSources.valueFor(STREAMTYPE_VIDEO);
+            resumeMask |= STREAMTYPE_VIDEO;
+        }
+
+        sp<AnotherPacketSource> subtitleSource;
+        if ((streamMask & STREAMTYPE_SUBTITLES) && uri == subtitleURI) {
+            subtitleSource = mPacketSources.valueFor(STREAMTYPE_SUBTITLES);
+            resumeMask |= STREAMTYPE_SUBTITLES;
+        }
+
+        CHECK_NE(resumeMask, 0u);
+
+        ALOGV("resuming fetchers for mask 0x%08x", resumeMask);
+
+        streamMask &= ~resumeMask;
+
+        mFetcherInfos.valueAt(i).mFetcher->startAsync(
+                audioSource, videoSource, subtitleSource);
+    }
+
+    // streamMask now only contains the types that need a new fetcher created.
+
+    if (streamMask != 0) {
+        ALOGV("creating new fetchers for mask 0x%08x", streamMask);
+    }
+
+    while (streamMask != 0) {
+        StreamType streamType = (StreamType)(streamMask & ~(streamMask - 1));
+
+        AString uri;
+        switch (streamType) {
+            case STREAMTYPE_AUDIO:
+                uri = audioURI;
+                break;
+            case STREAMTYPE_VIDEO:
+                uri = videoURI;
+                break;
+            case STREAMTYPE_SUBTITLES:
+                uri = subtitleURI;
+                break;
+            default:
+                TRESPASS();
+        }
+
+        sp<PlaylistFetcher> fetcher = addFetcher(uri.c_str());
+        CHECK(fetcher != NULL);
+
+        sp<AnotherPacketSource> audioSource;
+        if ((streamMask & STREAMTYPE_AUDIO) && uri == audioURI) {
+            audioSource = mPacketSources.valueFor(STREAMTYPE_AUDIO);
+            audioSource->clear();
+
+            streamMask &= ~STREAMTYPE_AUDIO;
+        }
+
+        sp<AnotherPacketSource> videoSource;
+        if ((streamMask & STREAMTYPE_VIDEO) && uri == videoURI) {
+            videoSource = mPacketSources.valueFor(STREAMTYPE_VIDEO);
+            videoSource->clear();
+
+            streamMask &= ~STREAMTYPE_VIDEO;
+        }
+
+        sp<AnotherPacketSource> subtitleSource;
+        if ((streamMask & STREAMTYPE_SUBTITLES) && uri == subtitleURI) {
+            subtitleSource = mPacketSources.valueFor(STREAMTYPE_SUBTITLES);
+            subtitleSource->clear();
+
+            streamMask &= ~STREAMTYPE_SUBTITLES;
+        }
+
+        fetcher->startAsync(audioSource, videoSource, subtitleSource, timeUs);
+    }
+
+    // All fetchers have now been started, the configuration change
+    // has completed.
+
+    scheduleCheckBandwidthEvent();
+
+    ALOGV("XXX configuration change completed.");
+
+    mReconfigurationInProgress = false;
+
+    if (mDisconnectReplyID != 0) {
+        finishDisconnect();
+    }
+}
+
+void LiveSession::scheduleCheckBandwidthEvent() {
+    sp<AMessage> msg = new AMessage(kWhatCheckBandwidth, id());
+    msg->setInt32("generation", mCheckBandwidthGeneration);
+    msg->post(10000000ll);
+}
+
+void LiveSession::cancelCheckBandwidthEvent() {
+    ++mCheckBandwidthGeneration;
+}
+
+void LiveSession::onCheckBandwidth() {
+    if (mReconfigurationInProgress) {
+        scheduleCheckBandwidthEvent();
+        return;
+    }
+
+    size_t bandwidthIndex = getBandwidthIndex();
+    if (mPrevBandwidthIndex < 0
+            || bandwidthIndex != (size_t)mPrevBandwidthIndex) {
+        changeConfiguration(-1ll /* timeUs */, bandwidthIndex);
+    }
+
+    // Handling the kWhatCheckBandwidth event here does _not_ automatically
+    // schedule another one on return, only an explicit call to
+    // scheduleCheckBandwidthEvent will do that.
+    // This ensures that only one configuration change is ongoing at any
+    // one time, once that completes it'll schedule another check bandwidth
+    // event.
+}
+
+void LiveSession::postPrepared(status_t err) {
+    CHECK(mInPreparationPhase);
+
+    sp<AMessage> notify = mNotify->dup();
+    if (err == OK || err == ERROR_END_OF_STREAM) {
+        notify->setInt32("what", kWhatPrepared);
+    } else {
+        notify->setInt32("what", kWhatPreparationFailed);
+        notify->setInt32("err", err);
+    }
+
+    notify->post();
+
+    mInPreparationPhase = false;
 }
 
 }  // namespace android
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
new file mode 100644
index 0000000..b134725
--- /dev/null
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIVE_SESSION_H_
+
+#define LIVE_SESSION_H_
+
+#include <media/stagefright/foundation/AHandler.h>
+
+#include <utils/String8.h>
+
+namespace android {
+
+struct ABuffer;
+struct AnotherPacketSource;
+struct DataSource;
+struct HTTPBase;
+struct LiveDataSource;
+struct M3UParser;
+struct PlaylistFetcher;
+
+struct LiveSession : public AHandler {
+    enum Flags {
+        // Don't log any URLs.
+        kFlagIncognito = 1,
+    };
+    LiveSession(
+            const sp<AMessage> &notify,
+            uint32_t flags = 0, bool uidValid = false, uid_t uid = 0);
+
+    enum StreamType {
+        STREAMTYPE_AUDIO        = 1,
+        STREAMTYPE_VIDEO        = 2,
+        STREAMTYPE_SUBTITLES    = 4,
+    };
+    status_t dequeueAccessUnit(StreamType stream, sp<ABuffer> *accessUnit);
+
+    status_t getStreamFormat(StreamType stream, sp<AMessage> *format);
+
+    void connectAsync(
+            const char *url,
+            const KeyedVector<String8, String8> *headers = NULL);
+
+    status_t disconnect();
+
+    // Blocks until seek is complete.
+    status_t seekTo(int64_t timeUs);
+
+    status_t getDuration(int64_t *durationUs) const;
+
+    bool isSeekable() const;
+    bool hasDynamicDuration() const;
+
+    enum {
+        kWhatStreamsChanged,
+        kWhatError,
+        kWhatPrepared,
+        kWhatPreparationFailed,
+    };
+
+protected:
+    virtual ~LiveSession();
+
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    friend struct PlaylistFetcher;
+
+    enum {
+        kWhatConnect                    = 'conn',
+        kWhatDisconnect                 = 'disc',
+        kWhatSeek                       = 'seek',
+        kWhatFetcherNotify              = 'notf',
+        kWhatCheckBandwidth             = 'bndw',
+        kWhatChangeConfiguration2       = 'chC2',
+        kWhatChangeConfiguration3       = 'chC3',
+        kWhatFinishDisconnect2          = 'fin2',
+    };
+
+    struct BandwidthItem {
+        size_t mPlaylistIndex;
+        unsigned long mBandwidth;
+    };
+
+    struct FetcherInfo {
+        sp<PlaylistFetcher> mFetcher;
+        int64_t mDurationUs;
+        bool mIsPrepared;
+    };
+
+    sp<AMessage> mNotify;
+    uint32_t mFlags;
+    bool mUIDValid;
+    uid_t mUID;
+
+    bool mInPreparationPhase;
+
+    sp<HTTPBase> mHTTPDataSource;
+    KeyedVector<String8, String8> mExtraHeaders;
+
+    AString mMasterURL;
+
+    Vector<BandwidthItem> mBandwidthItems;
+    ssize_t mPrevBandwidthIndex;
+
+    sp<M3UParser> mPlaylist;
+
+    KeyedVector<AString, FetcherInfo> mFetcherInfos;
+    AString mAudioURI, mVideoURI, mSubtitleURI;
+    uint32_t mStreamMask;
+
+    KeyedVector<StreamType, sp<AnotherPacketSource> > mPacketSources;
+
+    int32_t mCheckBandwidthGeneration;
+
+    size_t mContinuationCounter;
+    sp<AMessage> mContinuation;
+
+    int64_t mLastDequeuedTimeUs;
+
+    bool mReconfigurationInProgress;
+    uint32_t mDisconnectReplyID;
+
+    sp<PlaylistFetcher> addFetcher(const char *uri);
+
+    void onConnect(const sp<AMessage> &msg);
+    status_t onSeek(const sp<AMessage> &msg);
+    void onFinishDisconnect2();
+
+    status_t fetchFile(
+            const char *url, sp<ABuffer> *out,
+            int64_t range_offset = 0, int64_t range_length = -1);
+
+    sp<M3UParser> fetchPlaylist(
+            const char *url, uint8_t *curPlaylistHash, bool *unchanged);
+
+    size_t getBandwidthIndex();
+
+    static int SortByBandwidth(const BandwidthItem *, const BandwidthItem *);
+
+    void changeConfiguration(int64_t timeUs, size_t bandwidthIndex);
+    void onChangeConfiguration2(const sp<AMessage> &msg);
+    void onChangeConfiguration3(const sp<AMessage> &msg);
+
+    void scheduleCheckBandwidthEvent();
+    void cancelCheckBandwidthEvent();
+
+    void onCheckBandwidth();
+
+    void finishDisconnect();
+
+    void postPrepared(status_t err);
+
+    DISALLOW_EVIL_CONSTRUCTORS(LiveSession);
+};
+
+}  // namespace android
+
+#endif  // LIVE_SESSION_H_
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 68bbca2..be66252 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -18,14 +18,153 @@
 #define LOG_TAG "M3UParser"
 #include <utils/Log.h>
 
-#include "include/M3UParser.h"
+#include "M3UParser.h"
 
+#include <cutils/properties.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaErrors.h>
 
 namespace android {
 
+struct M3UParser::MediaGroup : public RefBase {
+    enum Type {
+        TYPE_AUDIO,
+        TYPE_VIDEO,
+        TYPE_SUBS,
+    };
+
+    enum FlagBits {
+        FLAG_AUTOSELECT         = 1,
+        FLAG_DEFAULT            = 2,
+        FLAG_FORCED             = 4,
+        FLAG_HAS_LANGUAGE       = 8,
+        FLAG_HAS_URI            = 16,
+    };
+
+    MediaGroup(Type type);
+
+    Type type() const;
+
+    status_t addMedia(
+            const char *name,
+            const char *uri,
+            const char *language,
+            uint32_t flags);
+
+    bool getActiveURI(AString *uri) const;
+
+    void pickRandomMediaItems();
+
+protected:
+    virtual ~MediaGroup();
+
+private:
+    struct Media {
+        AString mName;
+        AString mURI;
+        AString mLanguage;
+        uint32_t mFlags;
+    };
+
+    Type mType;
+    Vector<Media> mMediaItems;
+
+    ssize_t mSelectedIndex;
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaGroup);
+};
+
+M3UParser::MediaGroup::MediaGroup(Type type)
+    : mType(type),
+      mSelectedIndex(-1) {
+}
+
+M3UParser::MediaGroup::~MediaGroup() {
+}
+
+M3UParser::MediaGroup::Type M3UParser::MediaGroup::type() const {
+    return mType;
+}
+
+status_t M3UParser::MediaGroup::addMedia(
+        const char *name,
+        const char *uri,
+        const char *language,
+        uint32_t flags) {
+    mMediaItems.push();
+    Media &item = mMediaItems.editItemAt(mMediaItems.size() - 1);
+
+    item.mName = name;
+
+    if (uri) {
+        item.mURI = uri;
+    }
+
+    if (language) {
+        item.mLanguage = language;
+    }
+
+    item.mFlags = flags;
+
+    return OK;
+}
+
+void M3UParser::MediaGroup::pickRandomMediaItems() {
+#if 1
+    switch (mType) {
+        case TYPE_AUDIO:
+        {
+            char value[PROPERTY_VALUE_MAX];
+            if (property_get("media.httplive.audio-index", value, NULL)) {
+                char *end;
+                mSelectedIndex = strtoul(value, &end, 10);
+                CHECK(end > value && *end == '\0');
+
+                if (mSelectedIndex >= mMediaItems.size()) {
+                    mSelectedIndex = mMediaItems.size() - 1;
+                }
+            } else {
+                mSelectedIndex = 0;
+            }
+            break;
+        }
+
+        case TYPE_VIDEO:
+        {
+            mSelectedIndex = 0;
+            break;
+        }
+
+        case TYPE_SUBS:
+        {
+            mSelectedIndex = -1;
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+#else
+    mSelectedIndex = (rand() * mMediaItems.size()) / RAND_MAX;
+#endif
+}
+
+bool M3UParser::MediaGroup::getActiveURI(AString *uri) const {
+    for (size_t i = 0; i < mMediaItems.size(); ++i) {
+        if (mSelectedIndex >= 0 && i == (size_t)mSelectedIndex) {
+            const Media &item = mMediaItems.itemAt(i);
+
+            *uri = item.mURI;
+            return true;
+        }
+    }
+
+    return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
 M3UParser::M3UParser(
         const char *baseURI, const void *data, size_t size)
     : mInitCheck(NO_INIT),
@@ -92,6 +231,58 @@
     return true;
 }
 
+void M3UParser::pickRandomMediaItems() {
+    for (size_t i = 0; i < mMediaGroups.size(); ++i) {
+        mMediaGroups.valueAt(i)->pickRandomMediaItems();
+    }
+}
+
+bool M3UParser::getTypeURI(size_t index, const char *key, AString *uri) const {
+    if (!mIsVariantPlaylist) {
+        *uri = mBaseURI;
+
+        // Assume media without any more specific attribute contains
+        // audio and video, but no subtitles.
+        return !strcmp("audio", key) || !strcmp("video", key);
+    }
+
+    CHECK_LT(index, mItems.size());
+
+    sp<AMessage> meta = mItems.itemAt(index).mMeta;
+
+    AString groupID;
+    if (!meta->findString(key, &groupID)) {
+        *uri = mItems.itemAt(index).mURI;
+
+        // Assume media without any more specific attribute contains
+        // audio and video, but no subtitles.
+        return !strcmp("audio", key) || !strcmp("video", key);
+    }
+
+    sp<MediaGroup> group = mMediaGroups.valueFor(groupID);
+    if (!group->getActiveURI(uri)) {
+        return false;
+    }
+
+    if ((*uri).empty()) {
+        *uri = mItems.itemAt(index).mURI;
+    }
+
+    return true;
+}
+
+bool M3UParser::getAudioURI(size_t index, AString *uri) const {
+    return getTypeURI(index, "audio", uri);
+}
+
+bool M3UParser::getVideoURI(size_t index, AString *uri) const {
+    return getTypeURI(index, "video", uri);
+}
+
+bool M3UParser::getSubtitleURI(size_t index, AString *uri) const {
+    return getTypeURI(index, "subtitles", uri);
+}
+
 static bool MakeURL(const char *baseURL, const char *url, AString *out) {
     out->clear();
 
@@ -241,6 +432,8 @@
 
                     segmentRangeOffset = offset + length;
                 }
+            } else if (line.startsWith("#EXT-X-MEDIA")) {
+                err = parseMedia(line);
             }
 
             if (err != OK) {
@@ -322,9 +515,31 @@
     return OK;
 }
 
-// static
+// Find the next occurrence of the character "what" at or after "offset",
+// but ignore occurrences between quotation marks.
+// Return the index of the occurrence or -1 if not found.
+static ssize_t FindNextUnquoted(
+        const AString &line, char what, size_t offset) {
+    CHECK_NE((int)what, (int)'"');
+
+    bool quoted = false;
+    while (offset < line.size()) {
+        char c = line.c_str()[offset];
+
+        if (c == '"') {
+            quoted = !quoted;
+        } else if (c == what && !quoted) {
+            return offset;
+        }
+
+        ++offset;
+    }
+
+    return -1;
+}
+
 status_t M3UParser::parseStreamInf(
-        const AString &line, sp<AMessage> *meta) {
+        const AString &line, sp<AMessage> *meta) const {
     ssize_t colonPos = line.find(":");
 
     if (colonPos < 0) {
@@ -334,7 +549,7 @@
     size_t offset = colonPos + 1;
 
     while (offset < line.size()) {
-        ssize_t end = line.find(",", offset);
+        ssize_t end = FindNextUnquoted(line, ',', offset);
         if (end < 0) {
             end = line.size();
         }
@@ -371,35 +586,37 @@
                 *meta = new AMessage;
             }
             (*meta)->setInt32("bandwidth", x);
+        } else if (!strcasecmp("audio", key.c_str())
+                || !strcasecmp("video", key.c_str())
+                || !strcasecmp("subtitles", key.c_str())) {
+            if (val.size() < 2
+                    || val.c_str()[0] != '"'
+                    || val.c_str()[val.size() - 1] != '"') {
+                ALOGE("Expected quoted string for %s attribute, "
+                      "got '%s' instead.",
+                      key.c_str(), val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            AString groupID(val, 1, val.size() - 2);
+            ssize_t groupIndex = mMediaGroups.indexOfKey(groupID);
+
+            if (groupIndex < 0) {
+                ALOGE("Undefined media group '%s' referenced in stream info.",
+                      groupID.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            key.tolower();
+            (*meta)->setString(key.c_str(), groupID.c_str());
         }
     }
 
     return OK;
 }
 
-// Find the next occurence of the character "what" at or after "offset",
-// but ignore occurences between quotation marks.
-// Return the index of the occurrence or -1 if not found.
-static ssize_t FindNextUnquoted(
-        const AString &line, char what, size_t offset) {
-    CHECK_NE((int)what, (int)'"');
-
-    bool quoted = false;
-    while (offset < line.size()) {
-        char c = line.c_str()[offset];
-
-        if (c == '"') {
-            quoted = !quoted;
-        } else if (c == what && !quoted) {
-            return offset;
-        }
-
-        ++offset;
-    }
-
-    return -1;
-}
-
 // static
 status_t M3UParser::parseCipherInfo(
         const AString &line, sp<AMessage> *meta, const AString &baseURI) {
@@ -515,6 +732,234 @@
     return OK;
 }
 
+status_t M3UParser::parseMedia(const AString &line) {
+    ssize_t colonPos = line.find(":");
+
+    if (colonPos < 0) {
+        return ERROR_MALFORMED;
+    }
+
+    bool haveGroupType = false;
+    MediaGroup::Type groupType = MediaGroup::TYPE_AUDIO;
+
+    bool haveGroupID = false;
+    AString groupID;
+
+    bool haveGroupLanguage = false;
+    AString groupLanguage;
+
+    bool haveGroupName = false;
+    AString groupName;
+
+    bool haveGroupAutoselect = false;
+    bool groupAutoselect = false;
+
+    bool haveGroupDefault = false;
+    bool groupDefault = false;
+
+    bool haveGroupForced = false;
+    bool groupForced = false;
+
+    bool haveGroupURI = false;
+    AString groupURI;
+
+    size_t offset = colonPos + 1;
+
+    while (offset < line.size()) {
+        ssize_t end = FindNextUnquoted(line, ',', offset);
+        if (end < 0) {
+            end = line.size();
+        }
+
+        AString attr(line, offset, end - offset);
+        attr.trim();
+
+        offset = end + 1;
+
+        ssize_t equalPos = attr.find("=");
+        if (equalPos < 0) {
+            continue;
+        }
+
+        AString key(attr, 0, equalPos);
+        key.trim();
+
+        AString val(attr, equalPos + 1, attr.size() - equalPos - 1);
+        val.trim();
+
+        ALOGV("key=%s value=%s", key.c_str(), val.c_str());
+
+        if (!strcasecmp("type", key.c_str())) {
+            if (!strcasecmp("subtitles", val.c_str())) {
+                groupType = MediaGroup::TYPE_SUBS;
+            } else if (!strcasecmp("audio", val.c_str())) {
+                groupType = MediaGroup::TYPE_AUDIO;
+            } else if (!strcasecmp("video", val.c_str())) {
+                groupType = MediaGroup::TYPE_VIDEO;
+            } else {
+                ALOGE("Invalid media group type '%s'", val.c_str());
+                return ERROR_MALFORMED;
+            }
+
+            haveGroupType = true;
+        } else if (!strcasecmp("group-id", key.c_str())) {
+            if (val.size() < 2
+                    || val.c_str()[0] != '"'
+                    || val.c_str()[val.size() - 1] != '"') {
+                ALOGE("Expected quoted string for GROUP-ID, got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            groupID.setTo(val, 1, val.size() - 2);
+            haveGroupID = true;
+        } else if (!strcasecmp("language", key.c_str())) {
+            if (val.size() < 2
+                    || val.c_str()[0] != '"'
+                    || val.c_str()[val.size() - 1] != '"') {
+                ALOGE("Expected quoted string for LANGUAGE, got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            groupLanguage.setTo(val, 1, val.size() - 2);
+            haveGroupLanguage = true;
+        } else if (!strcasecmp("name", key.c_str())) {
+            if (val.size() < 2
+                    || val.c_str()[0] != '"'
+                    || val.c_str()[val.size() - 1] != '"') {
+                ALOGE("Expected quoted string for NAME, got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            groupName.setTo(val, 1, val.size() - 2);
+            haveGroupName = true;
+        } else if (!strcasecmp("autoselect", key.c_str())) {
+            groupAutoselect = false;
+            if (!strcasecmp("YES", val.c_str())) {
+                groupAutoselect = true;
+            } else if (!strcasecmp("NO", val.c_str())) {
+                groupAutoselect = false;
+            } else {
+                ALOGE("Expected YES or NO for AUTOSELECT attribute, "
+                      "got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            haveGroupAutoselect = true;
+        } else if (!strcasecmp("default", key.c_str())) {
+            groupDefault = false;
+            if (!strcasecmp("YES", val.c_str())) {
+                groupDefault = true;
+            } else if (!strcasecmp("NO", val.c_str())) {
+                groupDefault = false;
+            } else {
+                ALOGE("Expected YES or NO for DEFAULT attribute, "
+                      "got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            haveGroupDefault = true;
+        } else if (!strcasecmp("forced", key.c_str())) {
+            groupForced = false;
+            if (!strcasecmp("YES", val.c_str())) {
+                groupForced = true;
+            } else if (!strcasecmp("NO", val.c_str())) {
+                groupForced = false;
+            } else {
+                ALOGE("Expected YES or NO for FORCED attribute, "
+                      "got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            haveGroupForced = true;
+        } else if (!strcasecmp("uri", key.c_str())) {
+            if (val.size() < 2
+                    || val.c_str()[0] != '"'
+                    || val.c_str()[val.size() - 1] != '"') {
+                ALOGE("Expected quoted string for URI, got '%s' instead.",
+                      val.c_str());
+
+                return ERROR_MALFORMED;
+            }
+
+            AString tmp(val, 1, val.size() - 2);
+
+            if (!MakeURL(mBaseURI.c_str(), tmp.c_str(), &groupURI)) {
+                ALOGI("Failed to make absolute URI from '%s'.", tmp.c_str());
+            }
+
+            haveGroupURI = true;
+        }
+    }
+
+    if (!haveGroupType || !haveGroupID || !haveGroupName) {
+        ALOGE("Incomplete EXT-X-MEDIA element.");
+        return ERROR_MALFORMED;
+    }
+
+    uint32_t flags = 0;
+    if (haveGroupAutoselect && groupAutoselect) {
+        flags |= MediaGroup::FLAG_AUTOSELECT;
+    }
+    if (haveGroupDefault && groupDefault) {
+        flags |= MediaGroup::FLAG_DEFAULT;
+    }
+    if (haveGroupForced) {
+        if (groupType != MediaGroup::TYPE_SUBS) {
+            ALOGE("The FORCED attribute MUST not be present on anything "
+                  "but SUBS media.");
+
+            return ERROR_MALFORMED;
+        }
+
+        if (groupForced) {
+            flags |= MediaGroup::FLAG_FORCED;
+        }
+    }
+    if (haveGroupLanguage) {
+        flags |= MediaGroup::FLAG_HAS_LANGUAGE;
+    }
+    if (haveGroupURI) {
+        flags |= MediaGroup::FLAG_HAS_URI;
+    }
+
+    ssize_t groupIndex = mMediaGroups.indexOfKey(groupID);
+    sp<MediaGroup> group;
+
+    if (groupIndex < 0) {
+        group = new MediaGroup(groupType);
+        mMediaGroups.add(groupID, group);
+    } else {
+        group = mMediaGroups.valueAt(groupIndex);
+
+        if (group->type() != groupType) {
+            ALOGE("Attempt to put media item under group of different type "
+                  "(groupType = %d, item type = %d",
+                  group->type(),
+                  groupType);
+
+            return ERROR_MALFORMED;
+        }
+    }
+
+    return group->addMedia(
+            groupName.c_str(),
+            haveGroupURI ? groupURI.c_str() : NULL,
+            haveGroupLanguage ? groupLanguage.c_str() : NULL,
+            flags);
+}
+
 // static
 status_t M3UParser::ParseInt32(const char *s, int32_t *x) {
     char *end;
diff --git a/media/libstagefright/include/M3UParser.h b/media/libstagefright/httplive/M3UParser.h
similarity index 80%
rename from media/libstagefright/include/M3UParser.h
rename to media/libstagefright/httplive/M3UParser.h
index 2d2f50f..abea286 100644
--- a/media/libstagefright/include/M3UParser.h
+++ b/media/libstagefright/httplive/M3UParser.h
@@ -40,10 +40,18 @@
     size_t size();
     bool itemAt(size_t index, AString *uri, sp<AMessage> *meta = NULL);
 
+    void pickRandomMediaItems();
+
+    bool getAudioURI(size_t index, AString *uri) const;
+    bool getVideoURI(size_t index, AString *uri) const;
+    bool getSubtitleURI(size_t index, AString *uri) const;
+
 protected:
     virtual ~M3UParser();
 
 private:
+    struct MediaGroup;
+
     struct Item {
         AString mURI;
         sp<AMessage> mMeta;
@@ -60,6 +68,9 @@
     sp<AMessage> mMeta;
     Vector<Item> mItems;
 
+    // Media groups keyed by group ID.
+    KeyedVector<AString, sp<MediaGroup> > mMediaGroups;
+
     status_t parse(const void *data, size_t size);
 
     static status_t parseMetaData(
@@ -68,8 +79,8 @@
     static status_t parseMetaDataDuration(
             const AString &line, sp<AMessage> *meta, const char *key);
 
-    static status_t parseStreamInf(
-            const AString &line, sp<AMessage> *meta);
+    status_t parseStreamInf(
+            const AString &line, sp<AMessage> *meta) const;
 
     static status_t parseCipherInfo(
             const AString &line, sp<AMessage> *meta, const AString &baseURI);
@@ -78,6 +89,10 @@
             const AString &line, uint64_t curOffset,
             uint64_t *length, uint64_t *offset);
 
+    status_t parseMedia(const AString &line);
+
+    bool getTypeURI(size_t index, const char *key, AString *uri) const;
+
     static status_t ParseInt32(const char *s, int32_t *x);
     static status_t ParseDouble(const char *s, double *x);
 
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
new file mode 100644
index 0000000..8ae70b7
--- /dev/null
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -0,0 +1,969 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "PlaylistFetcher"
+#include <utils/Log.h>
+
+#include "PlaylistFetcher.h"
+
+#include "LiveDataSource.h"
+#include "LiveSession.h"
+#include "M3UParser.h"
+
+#include "include/avc_utils.h"
+#include "include/HTTPBase.h"
+#include "include/ID3.h"
+#include "mpeg2ts/AnotherPacketSource.h"
+
+#include <media/IStreamSource.h>
+#include <media/stagefright/foundation/ABitReader.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/FileSource.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+
+#include <ctype.h>
+#include <openssl/aes.h>
+#include <openssl/md5.h>
+
+namespace android {
+
+// static
+const int64_t PlaylistFetcher::kMinBufferedDurationUs = 10000000ll;
+
+PlaylistFetcher::PlaylistFetcher(
+        const sp<AMessage> &notify,
+        const sp<LiveSession> &session,
+        const char *uri)
+    : mNotify(notify),
+      mSession(session),
+      mURI(uri),
+      mStreamTypeMask(0),
+      mStartTimeUs(-1ll),
+      mLastPlaylistFetchTimeUs(-1ll),
+      mSeqNumber(-1),
+      mNumRetries(0),
+      mStartup(true),
+      mNextPTSTimeUs(-1ll),
+      mMonitorQueueGeneration(0),
+      mRefreshState(INITIAL_MINIMUM_RELOAD_DELAY),
+      mFirstPTSValid(false),
+      mAbsoluteTimeAnchorUs(0ll) {
+    memset(mPlaylistHash, 0, sizeof(mPlaylistHash));
+}
+
+PlaylistFetcher::~PlaylistFetcher() {
+}
+
+int64_t PlaylistFetcher::getSegmentStartTimeUs(int32_t seqNumber) const {
+    CHECK(mPlaylist != NULL);
+
+    int32_t firstSeqNumberInPlaylist;
+    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
+                "media-sequence", &firstSeqNumberInPlaylist)) {
+        firstSeqNumberInPlaylist = 0;
+    }
+
+    int32_t lastSeqNumberInPlaylist =
+        firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
+
+    CHECK_GE(seqNumber, firstSeqNumberInPlaylist);
+    CHECK_LE(seqNumber, lastSeqNumberInPlaylist);
+
+    int64_t segmentStartUs = 0ll;
+    for (int32_t index = 0;
+            index < seqNumber - firstSeqNumberInPlaylist; ++index) {
+        sp<AMessage> itemMeta;
+        CHECK(mPlaylist->itemAt(
+                    index, NULL /* uri */, &itemMeta));
+
+        int64_t itemDurationUs;
+        CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
+
+        segmentStartUs += itemDurationUs;
+    }
+
+    return segmentStartUs;
+}
+
+bool PlaylistFetcher::timeToRefreshPlaylist(int64_t nowUs) const {
+    if (mPlaylist == NULL) {
+        CHECK_EQ((int)mRefreshState, (int)INITIAL_MINIMUM_RELOAD_DELAY);
+        return true;
+    }
+
+    int32_t targetDurationSecs;
+    CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
+
+    int64_t targetDurationUs = targetDurationSecs * 1000000ll;
+
+    int64_t minPlaylistAgeUs;
+
+    switch (mRefreshState) {
+        case INITIAL_MINIMUM_RELOAD_DELAY:
+        {
+            size_t n = mPlaylist->size();
+            if (n > 0) {
+                sp<AMessage> itemMeta;
+                CHECK(mPlaylist->itemAt(n - 1, NULL /* uri */, &itemMeta));
+
+                int64_t itemDurationUs;
+                CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
+
+                minPlaylistAgeUs = itemDurationUs;
+                break;
+            }
+
+            // fall through
+        }
+
+        case FIRST_UNCHANGED_RELOAD_ATTEMPT:
+        {
+            minPlaylistAgeUs = targetDurationUs / 2;
+            break;
+        }
+
+        case SECOND_UNCHANGED_RELOAD_ATTEMPT:
+        {
+            minPlaylistAgeUs = (targetDurationUs * 3) / 2;
+            break;
+        }
+
+        case THIRD_UNCHANGED_RELOAD_ATTEMPT:
+        {
+            minPlaylistAgeUs = targetDurationUs * 3;
+            break;
+        }
+
+        default:
+            TRESPASS();
+            break;
+    }
+
+    return mLastPlaylistFetchTimeUs + minPlaylistAgeUs <= nowUs;
+}
+
+status_t PlaylistFetcher::decryptBuffer(
+        size_t playlistIndex, const sp<ABuffer> &buffer) {
+    sp<AMessage> itemMeta;
+    bool found = false;
+    AString method;
+
+    for (ssize_t i = playlistIndex; i >= 0; --i) {
+        AString uri;
+        CHECK(mPlaylist->itemAt(i, &uri, &itemMeta));
+
+        if (itemMeta->findString("cipher-method", &method)) {
+            found = true;
+            break;
+        }
+    }
+
+    if (!found) {
+        method = "NONE";
+    }
+
+    if (method == "NONE") {
+        return OK;
+    } else if (!(method == "AES-128")) {
+        ALOGE("Unsupported cipher method '%s'", method.c_str());
+        return ERROR_UNSUPPORTED;
+    }
+
+    AString keyURI;
+    if (!itemMeta->findString("cipher-uri", &keyURI)) {
+        ALOGE("Missing key uri");
+        return ERROR_MALFORMED;
+    }
+
+    ssize_t index = mAESKeyForURI.indexOfKey(keyURI);
+
+    sp<ABuffer> key;
+    if (index >= 0) {
+        key = mAESKeyForURI.valueAt(index);
+    } else {
+        status_t err = mSession->fetchFile(keyURI.c_str(), &key);
+
+        if (err != OK) {
+            ALOGE("failed to fetch cipher key from '%s'.", keyURI.c_str());
+            return ERROR_IO;
+        } else if (key->size() != 16) {
+            ALOGE("key file '%s' wasn't 16 bytes in size.", keyURI.c_str());
+            return ERROR_MALFORMED;
+        }
+
+        mAESKeyForURI.add(keyURI, key);
+    }
+
+    AES_KEY aes_key;
+    if (AES_set_decrypt_key(key->data(), 128, &aes_key) != 0) {
+        ALOGE("failed to set AES decryption key.");
+        return UNKNOWN_ERROR;
+    }
+
+    unsigned char aes_ivec[16];
+
+    AString iv;
+    if (itemMeta->findString("cipher-iv", &iv)) {
+        if ((!iv.startsWith("0x") && !iv.startsWith("0X"))
+                || iv.size() != 16 * 2 + 2) {
+            ALOGE("malformed cipher IV '%s'.", iv.c_str());
+            return ERROR_MALFORMED;
+        }
+
+        memset(aes_ivec, 0, sizeof(aes_ivec));
+        for (size_t i = 0; i < 16; ++i) {
+            char c1 = tolower(iv.c_str()[2 + 2 * i]);
+            char c2 = tolower(iv.c_str()[3 + 2 * i]);
+            if (!isxdigit(c1) || !isxdigit(c2)) {
+                ALOGE("malformed cipher IV '%s'.", iv.c_str());
+                return ERROR_MALFORMED;
+            }
+            uint8_t nibble1 = isdigit(c1) ? c1 - '0' : c1 - 'a' + 10;
+            uint8_t nibble2 = isdigit(c2) ? c2 - '0' : c2 - 'a' + 10;
+
+            aes_ivec[i] = nibble1 << 4 | nibble2;
+        }
+    } else {
+        memset(aes_ivec, 0, sizeof(aes_ivec));
+        aes_ivec[15] = mSeqNumber & 0xff;
+        aes_ivec[14] = (mSeqNumber >> 8) & 0xff;
+        aes_ivec[13] = (mSeqNumber >> 16) & 0xff;
+        aes_ivec[12] = (mSeqNumber >> 24) & 0xff;
+    }
+
+    AES_cbc_encrypt(
+            buffer->data(), buffer->data(), buffer->size(),
+            &aes_key, aes_ivec, AES_DECRYPT);
+
+    // hexdump(buffer->data(), buffer->size());
+
+    size_t n = buffer->size();
+    CHECK_GT(n, 0u);
+
+    size_t pad = buffer->data()[n - 1];
+
+    CHECK_GT(pad, 0u);
+    CHECK_LE(pad, 16u);
+    CHECK_GE((size_t)n, pad);
+    for (size_t i = 0; i < pad; ++i) {
+        CHECK_EQ((unsigned)buffer->data()[n - 1 - i], pad);
+    }
+
+    n -= pad;
+
+    buffer->setRange(buffer->offset(), n);
+
+    return OK;
+}
+
+void PlaylistFetcher::postMonitorQueue(int64_t delayUs) {
+    sp<AMessage> msg = new AMessage(kWhatMonitorQueue, id());
+    msg->setInt32("generation", mMonitorQueueGeneration);
+    msg->post(delayUs);
+}
+
+void PlaylistFetcher::cancelMonitorQueue() {
+    ++mMonitorQueueGeneration;
+}
+
+void PlaylistFetcher::startAsync(
+        const sp<AnotherPacketSource> &audioSource,
+        const sp<AnotherPacketSource> &videoSource,
+        const sp<AnotherPacketSource> &subtitleSource,
+        int64_t startTimeUs) {
+    sp<AMessage> msg = new AMessage(kWhatStart, id());
+
+    uint32_t streamTypeMask = 0ul;
+
+    if (audioSource != NULL) {
+        msg->setPointer("audioSource", audioSource.get());
+        streamTypeMask |= LiveSession::STREAMTYPE_AUDIO;
+    }
+
+    if (videoSource != NULL) {
+        msg->setPointer("videoSource", videoSource.get());
+        streamTypeMask |= LiveSession::STREAMTYPE_VIDEO;
+    }
+
+    if (subtitleSource != NULL) {
+        msg->setPointer("subtitleSource", subtitleSource.get());
+        streamTypeMask |= LiveSession::STREAMTYPE_SUBTITLES;
+    }
+
+    msg->setInt32("streamTypeMask", streamTypeMask);
+    msg->setInt64("startTimeUs", startTimeUs);
+    msg->post();
+}
+
+void PlaylistFetcher::pauseAsync() {
+    (new AMessage(kWhatPause, id()))->post();
+}
+
+void PlaylistFetcher::stopAsync() {
+    (new AMessage(kWhatStop, id()))->post();
+}
+
+void PlaylistFetcher::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatStart:
+        {
+            status_t err = onStart(msg);
+
+            sp<AMessage> notify = mNotify->dup();
+            notify->setInt32("what", kWhatStarted);
+            notify->setInt32("err", err);
+            notify->post();
+            break;
+        }
+
+        case kWhatPause:
+        {
+            onPause();
+
+            sp<AMessage> notify = mNotify->dup();
+            notify->setInt32("what", kWhatPaused);
+            notify->post();
+            break;
+        }
+
+        case kWhatStop:
+        {
+            onStop();
+
+            sp<AMessage> notify = mNotify->dup();
+            notify->setInt32("what", kWhatStopped);
+            notify->post();
+            break;
+        }
+
+        case kWhatMonitorQueue:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mMonitorQueueGeneration) {
+                // Stale event
+                break;
+            }
+
+            onMonitorQueue();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+status_t PlaylistFetcher::onStart(const sp<AMessage> &msg) {
+    mPacketSources.clear();
+
+    uint32_t streamTypeMask;
+    CHECK(msg->findInt32("streamTypeMask", (int32_t *)&streamTypeMask));
+
+    int64_t startTimeUs;
+    CHECK(msg->findInt64("startTimeUs", &startTimeUs));
+
+    if (streamTypeMask & LiveSession::STREAMTYPE_AUDIO) {
+        void *ptr;
+        CHECK(msg->findPointer("audioSource", &ptr));
+
+        mPacketSources.add(
+                LiveSession::STREAMTYPE_AUDIO,
+                static_cast<AnotherPacketSource *>(ptr));
+    }
+
+    if (streamTypeMask & LiveSession::STREAMTYPE_VIDEO) {
+        void *ptr;
+        CHECK(msg->findPointer("videoSource", &ptr));
+
+        mPacketSources.add(
+                LiveSession::STREAMTYPE_VIDEO,
+                static_cast<AnotherPacketSource *>(ptr));
+    }
+
+    if (streamTypeMask & LiveSession::STREAMTYPE_SUBTITLES) {
+        void *ptr;
+        CHECK(msg->findPointer("subtitleSource", &ptr));
+
+        mPacketSources.add(
+                LiveSession::STREAMTYPE_SUBTITLES,
+                static_cast<AnotherPacketSource *>(ptr));
+    }
+
+    mStreamTypeMask = streamTypeMask;
+    mStartTimeUs = startTimeUs;
+
+    if (mStartTimeUs >= 0ll) {
+        mSeqNumber = -1;
+        mStartup = true;
+    }
+
+    postMonitorQueue();
+
+    return OK;
+}
+
+void PlaylistFetcher::onPause() {
+    cancelMonitorQueue();
+
+    mPacketSources.clear();
+    mStreamTypeMask = 0;
+}
+
+void PlaylistFetcher::onStop() {
+    cancelMonitorQueue();
+
+    for (size_t i = 0; i < mPacketSources.size(); ++i) {
+        mPacketSources.valueAt(i)->clear();
+    }
+
+    mPacketSources.clear();
+    mStreamTypeMask = 0;
+}
+
+void PlaylistFetcher::notifyError(status_t err) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+void PlaylistFetcher::queueDiscontinuity(
+        ATSParser::DiscontinuityType type, const sp<AMessage> &extra) {
+    for (size_t i = 0; i < mPacketSources.size(); ++i) {
+        mPacketSources.valueAt(i)->queueDiscontinuity(type, extra);
+    }
+}
+
+void PlaylistFetcher::onMonitorQueue() {
+    bool downloadMore = false;
+
+    status_t finalResult;
+    if (mStreamTypeMask == LiveSession::STREAMTYPE_SUBTITLES) {
+        sp<AnotherPacketSource> packetSource =
+            mPacketSources.valueFor(LiveSession::STREAMTYPE_SUBTITLES);
+
+        downloadMore = packetSource->hasBufferAvailable(&finalResult);
+    } else {
+        bool first = true;
+        int64_t minBufferedDurationUs = 0ll;
+
+        for (size_t i = 0; i < mPacketSources.size(); ++i) {
+            if ((mStreamTypeMask & mPacketSources.keyAt(i)) == 0) {
+                continue;
+            }
+
+            int64_t bufferedDurationUs =
+                mPacketSources.valueAt(i)->getBufferedDurationUs(&finalResult);
+
+            if (first || bufferedDurationUs < minBufferedDurationUs) {
+                minBufferedDurationUs = bufferedDurationUs;
+                first = false;
+            }
+        }
+
+        downloadMore =
+            !first && (minBufferedDurationUs < kMinBufferedDurationUs);
+    }
+
+    if (finalResult == OK && downloadMore) {
+        onDownloadNext();
+    } else {
+        // Nothing to do yet, try again in a second.
+
+        sp<AMessage> msg = mNotify->dup();
+        msg->setInt32("what", kWhatTemporarilyDoneFetching);
+        msg->post();
+
+        postMonitorQueue(1000000ll);
+    }
+}
+
+void PlaylistFetcher::onDownloadNext() {
+    int64_t nowUs = ALooper::GetNowUs();
+
+    if (mLastPlaylistFetchTimeUs < 0ll
+            || (!mPlaylist->isComplete() && timeToRefreshPlaylist(nowUs))) {
+        bool unchanged;
+        sp<M3UParser> playlist = mSession->fetchPlaylist(
+                mURI.c_str(), mPlaylistHash, &unchanged);
+
+        if (playlist == NULL) {
+            if (unchanged) {
+                // We succeeded in fetching the playlist, but it was
+                // unchanged from the last time we tried.
+
+                if (mRefreshState != THIRD_UNCHANGED_RELOAD_ATTEMPT) {
+                    mRefreshState = (RefreshState)(mRefreshState + 1);
+                }
+            } else {
+                ALOGE("failed to load playlist at url '%s'", mURI.c_str());
+                notifyError(ERROR_IO);
+                return;
+            }
+        } else {
+            mRefreshState = INITIAL_MINIMUM_RELOAD_DELAY;
+            mPlaylist = playlist;
+
+            if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
+                updateDuration();
+            }
+        }
+
+        mLastPlaylistFetchTimeUs = ALooper::GetNowUs();
+    }
+
+    int32_t firstSeqNumberInPlaylist;
+    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
+                "media-sequence", &firstSeqNumberInPlaylist)) {
+        firstSeqNumberInPlaylist = 0;
+    }
+
+    bool seekDiscontinuity = false;
+    bool explicitDiscontinuity = false;
+
+    const int32_t lastSeqNumberInPlaylist =
+        firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
+
+    if (mSeqNumber < 0) {
+        CHECK_GE(mStartTimeUs, 0ll);
+
+        if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
+            mSeqNumber = getSeqNumberForTime(mStartTimeUs);
+        } else {
+            // If this is a live session, start 3 segments from the end.
+            mSeqNumber = lastSeqNumberInPlaylist - 3;
+            if (mSeqNumber < firstSeqNumberInPlaylist) {
+                mSeqNumber = firstSeqNumberInPlaylist;
+            }
+        }
+
+        mStartTimeUs = -1ll;
+    }
+
+    if (mSeqNumber < firstSeqNumberInPlaylist
+            || mSeqNumber > lastSeqNumberInPlaylist) {
+        if (!mPlaylist->isComplete() && mNumRetries < kMaxNumRetries) {
+            ++mNumRetries;
+
+            if (mSeqNumber > lastSeqNumberInPlaylist) {
+                mLastPlaylistFetchTimeUs = -1;
+                postMonitorQueue(3000000ll);
+                return;
+            }
+
+            // we've missed the boat, let's start from the lowest sequence
+            // number available and signal a discontinuity.
+
+            ALOGI("We've missed the boat, restarting playback.");
+            mSeqNumber = lastSeqNumberInPlaylist;
+            explicitDiscontinuity = true;
+
+            // fall through
+        } else {
+            ALOGE("Cannot find sequence number %d in playlist "
+                 "(contains %d - %d)",
+                 mSeqNumber, firstSeqNumberInPlaylist,
+                 firstSeqNumberInPlaylist + mPlaylist->size() - 1);
+
+            notifyError(ERROR_END_OF_STREAM);
+            return;
+        }
+    }
+
+    mNumRetries = 0;
+
+    AString uri;
+    sp<AMessage> itemMeta;
+    CHECK(mPlaylist->itemAt(
+                mSeqNumber - firstSeqNumberInPlaylist,
+                &uri,
+                &itemMeta));
+
+    int32_t val;
+    if (itemMeta->findInt32("discontinuity", &val) && val != 0) {
+        explicitDiscontinuity = true;
+    }
+
+    int64_t range_offset, range_length;
+    if (!itemMeta->findInt64("range-offset", &range_offset)
+            || !itemMeta->findInt64("range-length", &range_length)) {
+        range_offset = 0;
+        range_length = -1;
+    }
+
+    ALOGV("fetching segment %d from (%d .. %d)",
+          mSeqNumber, firstSeqNumberInPlaylist, lastSeqNumberInPlaylist);
+
+    ALOGV("fetching '%s'", uri.c_str());
+
+    sp<ABuffer> buffer;
+    status_t err = mSession->fetchFile(
+            uri.c_str(), &buffer, range_offset, range_length);
+
+    if (err != OK) {
+        ALOGE("failed to fetch .ts segment at url '%s'", uri.c_str());
+        notifyError(err);
+        return;
+    }
+
+    CHECK(buffer != NULL);
+
+    err = decryptBuffer(mSeqNumber - firstSeqNumberInPlaylist, buffer);
+
+    if (err != OK) {
+        ALOGE("decryptBuffer failed w/ error %d", err);
+
+        notifyError(err);
+        return;
+    }
+
+    if (mStartup || seekDiscontinuity || explicitDiscontinuity) {
+        // Signal discontinuity.
+
+        if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
+            // If this was a live event this made no sense since
+            // we don't have access to all the segment before the current
+            // one.
+            mNextPTSTimeUs = getSegmentStartTimeUs(mSeqNumber);
+        }
+
+        if (seekDiscontinuity || explicitDiscontinuity) {
+            ALOGI("queueing discontinuity (seek=%d, explicit=%d)",
+                 seekDiscontinuity, explicitDiscontinuity);
+
+            queueDiscontinuity(
+                    explicitDiscontinuity
+                        ? ATSParser::DISCONTINUITY_FORMATCHANGE
+                        : ATSParser::DISCONTINUITY_SEEK,
+                    NULL /* extra */);
+        }
+    }
+
+    err = extractAndQueueAccessUnits(buffer);
+
+    if (err != OK) {
+        notifyError(err);
+        return;
+    }
+
+    ++mSeqNumber;
+
+    postMonitorQueue();
+
+    mStartup = false;
+}
+
+int32_t PlaylistFetcher::getSeqNumberForTime(int64_t timeUs) const {
+    int32_t firstSeqNumberInPlaylist;
+    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
+                "media-sequence", &firstSeqNumberInPlaylist)) {
+        firstSeqNumberInPlaylist = 0;
+    }
+
+    size_t index = 0;
+    int64_t segmentStartUs = 0;
+    while (index < mPlaylist->size()) {
+        sp<AMessage> itemMeta;
+        CHECK(mPlaylist->itemAt(
+                    index, NULL /* uri */, &itemMeta));
+
+        int64_t itemDurationUs;
+        CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
+
+        if (timeUs < segmentStartUs + itemDurationUs) {
+            break;
+        }
+
+        segmentStartUs += itemDurationUs;
+        ++index;
+    }
+
+    if (index >= mPlaylist->size()) {
+        index = mPlaylist->size() - 1;
+    }
+
+    return firstSeqNumberInPlaylist + index;
+}
+
+status_t PlaylistFetcher::extractAndQueueAccessUnits(
+        const sp<ABuffer> &buffer) {
+    if (buffer->size() > 0 && buffer->data()[0] == 0x47) {
+        // Let's assume this is an MPEG2 transport stream.
+
+        if ((buffer->size() % 188) != 0) {
+            ALOGE("MPEG2 transport stream is not an even multiple of 188 "
+                  "bytes in length.");
+            return ERROR_MALFORMED;
+        }
+
+        if (mTSParser == NULL) {
+            mTSParser = new ATSParser;
+        }
+
+        if (mNextPTSTimeUs >= 0ll) {
+            sp<AMessage> extra = new AMessage;
+            extra->setInt64(IStreamListener::kKeyMediaTimeUs, mNextPTSTimeUs);
+
+            mTSParser->signalDiscontinuity(
+                    ATSParser::DISCONTINUITY_SEEK, extra);
+
+            mNextPTSTimeUs = -1ll;
+        }
+
+        size_t offset = 0;
+        while (offset < buffer->size()) {
+            status_t err = mTSParser->feedTSPacket(buffer->data() + offset, 188);
+
+            if (err != OK) {
+                return err;
+            }
+
+            offset += 188;
+        }
+
+        for (size_t i = mPacketSources.size(); i-- > 0;) {
+            sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
+
+            ATSParser::SourceType type;
+            switch (mPacketSources.keyAt(i)) {
+                case LiveSession::STREAMTYPE_VIDEO:
+                    type = ATSParser::VIDEO;
+                    break;
+
+                case LiveSession::STREAMTYPE_AUDIO:
+                    type = ATSParser::AUDIO;
+                    break;
+
+                case LiveSession::STREAMTYPE_SUBTITLES:
+                {
+                    ALOGE("MPEG2 Transport streams do not contain subtitles.");
+                    return ERROR_MALFORMED;
+                    break;
+                }
+
+                default:
+                    TRESPASS();
+            }
+
+            sp<AnotherPacketSource> source =
+                static_cast<AnotherPacketSource *>(
+                        mTSParser->getSource(type).get());
+
+            if (source == NULL) {
+                ALOGW("MPEG2 Transport stream does not contain %s data.",
+                      type == ATSParser::VIDEO ? "video" : "audio");
+
+                mStreamTypeMask &= ~mPacketSources.keyAt(i);
+                mPacketSources.removeItemsAt(i);
+                continue;
+            }
+
+            sp<ABuffer> accessUnit;
+            status_t finalResult;
+            while (source->hasBufferAvailable(&finalResult)
+                    && source->dequeueAccessUnit(&accessUnit) == OK) {
+                // Note that we do NOT dequeue any discontinuities.
+
+                packetSource->queueAccessUnit(accessUnit);
+            }
+
+            if (packetSource->getFormat() == NULL) {
+                packetSource->setFormat(source->getFormat());
+            }
+        }
+
+        return OK;
+    } else if (buffer->size() >= 7 && !memcmp("WEBVTT\n", buffer->data(), 7)) {
+        if (mStreamTypeMask != LiveSession::STREAMTYPE_SUBTITLES) {
+            ALOGE("This stream only contains subtitles.");
+            return ERROR_MALFORMED;
+        }
+
+        const sp<AnotherPacketSource> packetSource =
+            mPacketSources.valueFor(LiveSession::STREAMTYPE_SUBTITLES);
+
+        buffer->meta()->setInt64("timeUs", 0ll);
+
+        packetSource->queueAccessUnit(buffer);
+        return OK;
+    }
+
+    if (mNextPTSTimeUs >= 0ll) {
+        mFirstPTSValid = false;
+        mAbsoluteTimeAnchorUs = mNextPTSTimeUs;
+        mNextPTSTimeUs = -1ll;
+    }
+
+    // This better be an ISO 13818-7 (AAC) or ISO 13818-1 (MPEG) audio
+    // stream prefixed by an ID3 tag.
+
+    bool firstID3Tag = true;
+    uint64_t PTS = 0;
+
+    for (;;) {
+        // Make sure to skip all ID3 tags preceding the audio data.
+        // At least one must be present to provide the PTS timestamp.
+
+        ID3 id3(buffer->data(), buffer->size(), true /* ignoreV1 */);
+        if (!id3.isValid()) {
+            if (firstID3Tag) {
+                ALOGE("Unable to parse ID3 tag.");
+                return ERROR_MALFORMED;
+            } else {
+                break;
+            }
+        }
+
+        if (firstID3Tag) {
+            bool found = false;
+
+            ID3::Iterator it(id3, "PRIV");
+            while (!it.done()) {
+                size_t length;
+                const uint8_t *data = it.getData(&length);
+
+                static const char *kMatchName =
+                    "com.apple.streaming.transportStreamTimestamp";
+                static const size_t kMatchNameLen = strlen(kMatchName);
+
+                if (length == kMatchNameLen + 1 + 8
+                        && !strncmp((const char *)data, kMatchName, kMatchNameLen)) {
+                    found = true;
+                    PTS = U64_AT(&data[kMatchNameLen + 1]);
+                }
+
+                it.next();
+            }
+
+            if (!found) {
+                ALOGE("Unable to extract transportStreamTimestamp from ID3 tag.");
+                return ERROR_MALFORMED;
+            }
+        }
+
+        // skip the ID3 tag
+        buffer->setRange(
+                buffer->offset() + id3.rawSize(), buffer->size() - id3.rawSize());
+
+        firstID3Tag = false;
+    }
+
+    if (!mFirstPTSValid) {
+        mFirstPTSValid = true;
+        mFirstPTS = PTS;
+    }
+    PTS -= mFirstPTS;
+
+    int64_t timeUs = (PTS * 100ll) / 9ll + mAbsoluteTimeAnchorUs;
+
+    if (mStreamTypeMask != LiveSession::STREAMTYPE_AUDIO) {
+        ALOGW("This stream only contains audio data!");
+
+        mStreamTypeMask &= LiveSession::STREAMTYPE_AUDIO;
+
+        if (mStreamTypeMask == 0) {
+            return OK;
+        }
+    }
+
+    sp<AnotherPacketSource> packetSource =
+        mPacketSources.valueFor(LiveSession::STREAMTYPE_AUDIO);
+
+    if (packetSource->getFormat() == NULL && buffer->size() >= 7) {
+        ABitReader bits(buffer->data(), buffer->size());
+
+        // adts_fixed_header
+
+        CHECK_EQ(bits.getBits(12), 0xfffu);
+        bits.skipBits(3);  // ID, layer
+        bool protection_absent = bits.getBits(1) != 0;
+
+        unsigned profile = bits.getBits(2);
+        CHECK_NE(profile, 3u);
+        unsigned sampling_freq_index = bits.getBits(4);
+        bits.getBits(1);  // private_bit
+        unsigned channel_configuration = bits.getBits(3);
+        CHECK_NE(channel_configuration, 0u);
+        bits.skipBits(2);  // original_copy, home
+
+        sp<MetaData> meta = MakeAACCodecSpecificData(
+                profile, sampling_freq_index, channel_configuration);
+
+        meta->setInt32(kKeyIsADTS, true);
+
+        packetSource->setFormat(meta);
+    }
+
+    int64_t numSamples = 0ll;
+    int32_t sampleRate;
+    CHECK(packetSource->getFormat()->findInt32(kKeySampleRate, &sampleRate));
+
+    size_t offset = 0;
+    while (offset < buffer->size()) {
+        const uint8_t *adtsHeader = buffer->data() + offset;
+        CHECK_LT(offset + 5, buffer->size());
+
+        unsigned aac_frame_length =
+            ((adtsHeader[3] & 3) << 11)
+            | (adtsHeader[4] << 3)
+            | (adtsHeader[5] >> 5);
+
+        CHECK_LE(offset + aac_frame_length, buffer->size());
+
+        sp<ABuffer> unit = new ABuffer(aac_frame_length);
+        memcpy(unit->data(), adtsHeader, aac_frame_length);
+
+        int64_t unitTimeUs = timeUs + numSamples * 1000000ll / sampleRate;
+        unit->meta()->setInt64("timeUs", unitTimeUs);
+
+        // Each AAC frame encodes 1024 samples.
+        numSamples += 1024;
+
+        packetSource->queueAccessUnit(unit);
+
+        offset += aac_frame_length;
+    }
+
+    return OK;
+}
+
+void PlaylistFetcher::updateDuration() {
+    int64_t durationUs = 0ll;
+    for (size_t index = 0; index < mPlaylist->size(); ++index) {
+        sp<AMessage> itemMeta;
+        CHECK(mPlaylist->itemAt(
+                    index, NULL /* uri */, &itemMeta));
+
+        int64_t itemDurationUs;
+        CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
+
+        durationUs += itemDurationUs;
+    }
+
+    sp<AMessage> msg = mNotify->dup();
+    msg->setInt32("what", kWhatDurationUpdate);
+    msg->setInt64("durationUs", durationUs);
+    msg->post();
+}
+
+}  // namespace android
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
new file mode 100644
index 0000000..5a2b901
--- /dev/null
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PLAYLIST_FETCHER_H_
+
+#define PLAYLIST_FETCHER_H_
+
+#include <media/stagefright/foundation/AHandler.h>
+
+#include "mpeg2ts/ATSParser.h"
+#include "LiveSession.h"
+
+namespace android {
+
+struct ABuffer;
+struct AnotherPacketSource;
+struct DataSource;
+struct HTTPBase;
+struct LiveDataSource;
+struct M3UParser;
+struct String8;
+
+struct PlaylistFetcher : public AHandler {
+    enum {
+        kWhatStarted,
+        kWhatPaused,
+        kWhatStopped,
+        kWhatError,
+        kWhatDurationUpdate,
+        kWhatTemporarilyDoneFetching,
+        kWhatPrepared,
+        kWhatPreparationFailed,
+    };
+
+    PlaylistFetcher(
+            const sp<AMessage> &notify,
+            const sp<LiveSession> &session,
+            const char *uri);
+
+    sp<DataSource> getDataSource();
+
+    void startAsync(
+            const sp<AnotherPacketSource> &audioSource,
+            const sp<AnotherPacketSource> &videoSource,
+            const sp<AnotherPacketSource> &subtitleSource,
+            int64_t startTimeUs = -1ll);
+
+    void pauseAsync();
+
+    void stopAsync();
+
+protected:
+    virtual ~PlaylistFetcher();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kMaxNumRetries         = 5,
+    };
+
+    enum {
+        kWhatStart          = 'strt',
+        kWhatPause          = 'paus',
+        kWhatStop           = 'stop',
+        kWhatMonitorQueue   = 'moni',
+    };
+
+    static const int64_t kMinBufferedDurationUs;
+
+    sp<AMessage> mNotify;
+    sp<LiveSession> mSession;
+    AString mURI;
+
+    uint32_t mStreamTypeMask;
+    int64_t mStartTimeUs;
+
+    KeyedVector<LiveSession::StreamType, sp<AnotherPacketSource> >
+        mPacketSources;
+
+    KeyedVector<AString, sp<ABuffer> > mAESKeyForURI;
+
+    int64_t mLastPlaylistFetchTimeUs;
+    sp<M3UParser> mPlaylist;
+    int32_t mSeqNumber;
+    int32_t mNumRetries;
+    bool mStartup;
+    int64_t mNextPTSTimeUs;
+
+    int32_t mMonitorQueueGeneration;
+
+    enum RefreshState {
+        INITIAL_MINIMUM_RELOAD_DELAY,
+        FIRST_UNCHANGED_RELOAD_ATTEMPT,
+        SECOND_UNCHANGED_RELOAD_ATTEMPT,
+        THIRD_UNCHANGED_RELOAD_ATTEMPT
+    };
+    RefreshState mRefreshState;
+
+    uint8_t mPlaylistHash[16];
+
+    sp<ATSParser> mTSParser;
+
+    bool mFirstPTSValid;
+    uint64_t mFirstPTS;
+    int64_t mAbsoluteTimeAnchorUs;
+
+    status_t decryptBuffer(
+            size_t playlistIndex, const sp<ABuffer> &buffer);
+
+    void postMonitorQueue(int64_t delayUs = 0);
+    void cancelMonitorQueue();
+
+    bool timeToRefreshPlaylist(int64_t nowUs) const;
+
+    // Returns the media time in us of the segment specified by seqNumber.
+    // This is computed by summing the durations of all segments before it.
+    int64_t getSegmentStartTimeUs(int32_t seqNumber) const;
+
+    status_t onStart(const sp<AMessage> &msg);
+    void onPause();
+    void onStop();
+    void onMonitorQueue();
+    void onDownloadNext();
+
+    status_t extractAndQueueAccessUnits(const sp<ABuffer> &buffer);
+
+    void notifyError(status_t err);
+
+    void queueDiscontinuity(
+            ATSParser::DiscontinuityType type, const sp<AMessage> &extra);
+
+    int32_t getSeqNumberForTime(int64_t timeUs) const;
+
+    void updateDuration();
+
+    DISALLOW_EVIL_CONSTRUCTORS(PlaylistFetcher);
+};
+
+}  // namespace android
+
+#endif  // PLAYLIST_FETCHER_H_
+
diff --git a/media/libstagefright/id3/Android.mk b/media/libstagefright/id3/Android.mk
index 80a1a3a..bf6f7bb 100644
--- a/media/libstagefright/id3/Android.mk
+++ b/media/libstagefright/id3/Android.mk
@@ -21,7 +21,7 @@
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_id3
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE := testid3
 
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index 22c2f5a..34d671a 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -30,12 +30,55 @@
 
 static const size_t kMaxMetadataSize = 3 * 1024 * 1024;
 
+struct MemorySource : public DataSource {
+    MemorySource(const uint8_t *data, size_t size)
+        : mData(data),
+          mSize(size) {
+    }
+
+    virtual status_t initCheck() const {
+        return OK;
+    }
+
+    virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
+        off64_t available = (offset >= mSize) ? 0ll : mSize - offset;
+
+        size_t copy = (available > size) ? size : available;
+        memcpy(data, mData + offset, copy);
+
+        return copy;
+    }
+
+private:
+    const uint8_t *mData;
+    size_t mSize;
+
+    DISALLOW_EVIL_CONSTRUCTORS(MemorySource);
+};
+
 ID3::ID3(const sp<DataSource> &source, bool ignoreV1)
     : mIsValid(false),
       mData(NULL),
       mSize(0),
       mFirstFrameOffset(0),
-      mVersion(ID3_UNKNOWN) {
+      mVersion(ID3_UNKNOWN),
+      mRawSize(0) {
+    mIsValid = parseV2(source);
+
+    if (!mIsValid && !ignoreV1) {
+        mIsValid = parseV1(source);
+    }
+}
+
+ID3::ID3(const uint8_t *data, size_t size, bool ignoreV1)
+    : mIsValid(false),
+      mData(NULL),
+      mSize(0),
+      mFirstFrameOffset(0),
+      mVersion(ID3_UNKNOWN),
+      mRawSize(0) {
+    sp<MemorySource> source = new MemorySource(data, size);
+
     mIsValid = parseV2(source);
 
     if (!mIsValid && !ignoreV1) {
@@ -140,6 +183,7 @@
     }
 
     mSize = size;
+    mRawSize = mSize + sizeof(header);
 
     if (source->readAt(sizeof(header), mData, mSize) != (ssize_t)mSize) {
         free(mData);
@@ -313,17 +357,22 @@
         }
 
         if (flags & 2) {
-            // Unsynchronization added.
+            // This file has "unsynchronization", so we have to replace occurrences
+            // of 0xff 0x00 with just 0xff in order to get the real data.
 
+            size_t readOffset = offset + 11;
+            size_t writeOffset = offset + 11;
             for (size_t i = 0; i + 1 < dataSize; ++i) {
-                if (mData[offset + 10 + i] == 0xff
-                        && mData[offset + 11 + i] == 0x00) {
-                    memmove(&mData[offset + 11 + i], &mData[offset + 12 + i],
-                            mSize - offset - 12 - i);
+                if (mData[readOffset - 1] == 0xff
+                        && mData[readOffset] == 0x00) {
+                    ++readOffset;
                     --mSize;
                     --dataSize;
                 }
+                mData[writeOffset++] = mData[readOffset++];
             }
+            // move the remaining data following this frame
+            memmove(&mData[writeOffset], &mData[readOffset], oldSize - readOffset);
 
             flags &= ~2;
         }
@@ -505,7 +554,7 @@
         int32_t i = n - 4;
         while(--i >= 0 && *++frameData != 0) ;
         int skipped = (frameData - mFrameData);
-        if (skipped >= n) {
+        if (skipped >= (int)n) {
             return;
         }
         n -= skipped;
diff --git a/media/libstagefright/include/ID3.h b/media/libstagefright/include/ID3.h
index 3028f56..cca83ab 100644
--- a/media/libstagefright/include/ID3.h
+++ b/media/libstagefright/include/ID3.h
@@ -36,6 +36,7 @@
     };
 
     ID3(const sp<DataSource> &source, bool ignoreV1 = false);
+    ID3(const uint8_t *data, size_t size, bool ignoreV1 = false);
     ~ID3();
 
     bool isValid() const;
@@ -71,6 +72,8 @@
         Iterator &operator=(const Iterator &);
     };
 
+    size_t rawSize() const { return mRawSize; }
+
 private:
     bool mIsValid;
     uint8_t *mData;
@@ -78,6 +81,10 @@
     size_t mFirstFrameOffset;
     Version mVersion;
 
+    // size of the ID3 tag including header before any unsynchronization.
+    // only valid for IDV2+
+    size_t mRawSize;
+
     bool parseV1(const sp<DataSource> &source);
     bool parseV2(const sp<DataSource> &source);
     void removeUnsynchronization();
diff --git a/media/libstagefright/include/LiveSession.h b/media/libstagefright/include/LiveSession.h
deleted file mode 100644
index db44a33..0000000
--- a/media/libstagefright/include/LiveSession.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright (C) 2010 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIVE_SESSION_H_
-
-#define LIVE_SESSION_H_
-
-#include <media/stagefright/foundation/AHandler.h>
-
-#include <utils/String8.h>
-
-namespace android {
-
-struct ABuffer;
-struct DataSource;
-struct LiveDataSource;
-struct M3UParser;
-struct HTTPBase;
-
-struct LiveSession : public AHandler {
-    enum Flags {
-        // Don't log any URLs.
-        kFlagIncognito = 1,
-    };
-    LiveSession(
-            const sp<AMessage> &notify,
-            uint32_t flags = 0, bool uidValid = false, uid_t uid = 0);
-
-    sp<DataSource> getDataSource();
-
-    void connect(
-            const char *url,
-            const KeyedVector<String8, String8> *headers = NULL);
-
-    void disconnect();
-
-    // Blocks until seek is complete.
-    void seekTo(int64_t timeUs);
-
-    status_t getDuration(int64_t *durationUs) const;
-
-    bool isSeekable() const;
-    bool hasDynamicDuration() const;
-
-    // Posted notification's "what" field will carry one of the following:
-    enum {
-        kWhatPrepared,
-        kWhatPreparationFailed,
-    };
-
-protected:
-    virtual ~LiveSession();
-
-    virtual void onMessageReceived(const sp<AMessage> &msg);
-
-private:
-    enum {
-        kMaxNumQueuedFragments = 3,
-        kMaxNumRetries         = 5,
-    };
-
-    enum {
-        kWhatConnect        = 'conn',
-        kWhatDisconnect     = 'disc',
-        kWhatMonitorQueue   = 'moni',
-        kWhatSeek           = 'seek',
-    };
-
-    struct BandwidthItem {
-        AString mURI;
-        unsigned long mBandwidth;
-    };
-
-    sp<AMessage> mNotify;
-    uint32_t mFlags;
-    bool mUIDValid;
-    uid_t mUID;
-
-    bool mInPreparationPhase;
-
-    sp<LiveDataSource> mDataSource;
-
-    sp<HTTPBase> mHTTPDataSource;
-
-    AString mMasterURL;
-    KeyedVector<String8, String8> mExtraHeaders;
-
-    Vector<BandwidthItem> mBandwidthItems;
-
-    KeyedVector<AString, sp<ABuffer> > mAESKeyForURI;
-
-    ssize_t mPrevBandwidthIndex;
-    int64_t mLastPlaylistFetchTimeUs;
-    sp<M3UParser> mPlaylist;
-    int32_t mSeqNumber;
-    int64_t mSeekTimeUs;
-    int32_t mNumRetries;
-    bool mStartOfPlayback;
-
-    mutable Mutex mLock;
-    Condition mCondition;
-    int64_t mDurationUs;
-    bool mDurationFixed;  // Duration has been determined once and for all.
-    bool mSeekDone;
-    bool mDisconnectPending;
-
-    int32_t mMonitorQueueGeneration;
-
-    enum RefreshState {
-        INITIAL_MINIMUM_RELOAD_DELAY,
-        FIRST_UNCHANGED_RELOAD_ATTEMPT,
-        SECOND_UNCHANGED_RELOAD_ATTEMPT,
-        THIRD_UNCHANGED_RELOAD_ATTEMPT
-    };
-    RefreshState mRefreshState;
-
-    uint8_t mPlaylistHash[16];
-
-    void onConnect(const sp<AMessage> &msg);
-    void onDisconnect();
-    void onDownloadNext();
-    void onMonitorQueue();
-    void onSeek(const sp<AMessage> &msg);
-
-    status_t fetchFile(
-            const char *url, sp<ABuffer> *out,
-            int64_t range_offset = 0, int64_t range_length = -1);
-
-    sp<M3UParser> fetchPlaylist(const char *url, bool *unchanged);
-    size_t getBandwidthIndex();
-
-    status_t decryptBuffer(
-            size_t playlistIndex, const sp<ABuffer> &buffer);
-
-    void postMonitorQueue(int64_t delayUs = 0);
-
-    bool timeToRefreshPlaylist(int64_t nowUs) const;
-
-    static int SortByBandwidth(const BandwidthItem *, const BandwidthItem *);
-
-    // Returns the media time in us of the segment specified by seqNumber.
-    // This is computed by summing the durations of all segments before it.
-    int64_t getSegmentStartTimeUs(int32_t seqNumber) const;
-
-    void signalEOS(status_t err);
-
-    DISALLOW_EVIL_CONSTRUCTORS(LiveSession);
-};
-
-}  // namespace android
-
-#endif  // LIVE_SESSION_H_
diff --git a/media/libstagefright/include/MPEG2TSExtractor.h b/media/libstagefright/include/MPEG2TSExtractor.h
index fe74a42..c5e86a6 100644
--- a/media/libstagefright/include/MPEG2TSExtractor.h
+++ b/media/libstagefright/include/MPEG2TSExtractor.h
@@ -31,7 +31,6 @@
 struct DataSource;
 struct MPEG2TSSource;
 struct String8;
-struct LiveSession;
 
 struct MPEG2TSExtractor : public MediaExtractor {
     MPEG2TSExtractor(const sp<DataSource> &source);
@@ -44,16 +43,12 @@
 
     virtual uint32_t flags() const;
 
-    void setLiveSession(const sp<LiveSession> &liveSession);
-    void seekTo(int64_t seekTimeUs);
-
 private:
     friend struct MPEG2TSSource;
 
     mutable Mutex mLock;
 
     sp<DataSource> mDataSource;
-    sp<LiveSession> mLiveSession;
 
     sp<ATSParser> mParser;
 
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index 35eff96..bbec1c4 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -82,6 +82,7 @@
     sp<DataSource> mDataSource;
     status_t mInitCheck;
     bool mHasVideo;
+    uint32_t mHeaderTimescale;
 
     Track *mFirstTrack, *mLastTrack;
 
diff --git a/media/libstagefright/include/SoftVideoDecoderOMXComponent.h b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
new file mode 100644
index 0000000..d050fa6
--- /dev/null
+++ b/media/libstagefright/include/SoftVideoDecoderOMXComponent.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SOFT_VIDEO_DECODER_OMX_COMPONENT_H_
+
+#define SOFT_VIDEO_DECODER_OMX_COMPONENT_H_
+
+#include "SimpleSoftOMXComponent.h"
+
+#include <media/stagefright/foundation/AHandlerReflector.h>
+#include <media/IOMX.h>
+
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+#include <utils/Vector.h>
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof(*(a)))
+
+namespace android {
+
+struct SoftVideoDecoderOMXComponent : public SimpleSoftOMXComponent {
+    SoftVideoDecoderOMXComponent(
+            const char *name,
+            const char *componentRole,
+            OMX_VIDEO_CODINGTYPE codingType,
+            const CodecProfileLevel *profileLevels,
+            size_t numProfileLevels,
+            int32_t width,
+            int32_t height,
+            const OMX_CALLBACKTYPE *callbacks,
+            OMX_PTR appData,
+            OMX_COMPONENTTYPE **component);
+
+protected:
+    virtual void onPortEnableCompleted(OMX_U32 portIndex, bool enabled);
+    virtual void onReset();
+
+    virtual OMX_ERRORTYPE internalGetParameter(
+            OMX_INDEXTYPE index, OMX_PTR params);
+
+    virtual OMX_ERRORTYPE internalSetParameter(
+            OMX_INDEXTYPE index, const OMX_PTR params);
+
+    virtual OMX_ERRORTYPE getConfig(
+            OMX_INDEXTYPE index, OMX_PTR params);
+
+    void initPorts(OMX_U32 numInputBuffers,
+            OMX_U32 inputBufferSize,
+            OMX_U32 numOutputBuffers,
+            const char *mimeType);
+
+    virtual void updatePortDefinitions();
+
+    enum {
+        kInputPortIndex  = 0,
+        kOutputPortIndex = 1,
+        kMaxPortIndex = 1,
+    };
+
+    uint32_t mWidth, mHeight;
+    uint32_t mCropLeft, mCropTop, mCropWidth, mCropHeight;
+
+    enum {
+        NONE,
+        AWAITING_DISABLED,
+        AWAITING_ENABLED
+    } mOutputPortSettingsChange;
+
+private:
+    const char *mComponentRole;
+    OMX_VIDEO_CODINGTYPE mCodingType;
+    const CodecProfileLevel *mProfileLevels;
+    size_t mNumProfileLevels;
+
+    DISALLOW_EVIL_CONSTRUCTORS(SoftVideoDecoderOMXComponent);
+};
+
+}  // namespace android
+
+#endif  // SOFT_VIDEO_DECODER_OMX_COMPONENT_H_
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index 3de3a61..3153c8b 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -32,9 +32,22 @@
 
 AnotherPacketSource::AnotherPacketSource(const sp<MetaData> &meta)
     : mIsAudio(false),
-      mFormat(meta),
+      mFormat(NULL),
       mLastQueuedTimeUs(0),
       mEOSResult(OK) {
+    setFormat(meta);
+}
+
+void AnotherPacketSource::setFormat(const sp<MetaData> &meta) {
+    CHECK(mFormat == NULL);
+
+    mIsAudio = false;
+
+    if (meta == NULL) {
+        return;
+    }
+
+    mFormat = meta;
     const char *mime;
     CHECK(meta->findCString(kKeyMIMEType, &mime));
 
@@ -45,11 +58,6 @@
     }
 }
 
-void AnotherPacketSource::setFormat(const sp<MetaData> &meta) {
-    CHECK(mFormat == NULL);
-    mFormat = meta;
-}
-
 AnotherPacketSource::~AnotherPacketSource() {
 }
 
@@ -152,6 +160,15 @@
     mCondition.signal();
 }
 
+void AnotherPacketSource::clear() {
+    Mutex::Autolock autoLock(mLock);
+
+    mBuffers.clear();
+    mEOSResult = OK;
+
+    mFormat = NULL;
+}
+
 void AnotherPacketSource::queueDiscontinuity(
         ATSParser::DiscontinuityType type,
         const sp<AMessage> &extra) {
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
index 1db4068..e16cf78 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
@@ -41,6 +41,8 @@
     virtual status_t read(
             MediaBuffer **buffer, const ReadOptions *options = NULL);
 
+    void clear();
+
     bool hasBufferAvailable(status_t *finalResult);
 
     // Returns the difference between the last and the first queued
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index e1589b4..d449c34 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -19,7 +19,6 @@
 #include <utils/Log.h>
 
 #include "include/MPEG2TSExtractor.h"
-#include "include/LiveSession.h"
 #include "include/NuCachedSource2.h"
 
 #include <media/stagefright/foundation/ADebug.h>
@@ -79,15 +78,7 @@
 }
 
 sp<MetaData> MPEG2TSSource::getFormat() {
-    sp<MetaData> meta = mImpl->getFormat();
-
-    int64_t durationUs;
-    if (mExtractor->mLiveSession != NULL
-            && mExtractor->mLiveSession->getDuration(&durationUs) == OK) {
-        meta->setInt64(kKeyDuration, durationUs);
-    }
-
-    return meta;
+    return mImpl->getFormat();
 }
 
 status_t MPEG2TSSource::read(
@@ -97,7 +88,7 @@
     int64_t seekTimeUs;
     ReadOptions::SeekMode seekMode;
     if (mSeekable && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
-        mExtractor->seekTo(seekTimeUs);
+        return ERROR_UNSUPPORTED;
     }
 
     status_t finalResult;
@@ -216,32 +207,8 @@
     return mParser->feedTSPacket(packet, kTSPacketSize);
 }
 
-void MPEG2TSExtractor::setLiveSession(const sp<LiveSession> &liveSession) {
-    Mutex::Autolock autoLock(mLock);
-
-    mLiveSession = liveSession;
-}
-
-void MPEG2TSExtractor::seekTo(int64_t seekTimeUs) {
-    Mutex::Autolock autoLock(mLock);
-
-    if (mLiveSession == NULL) {
-        return;
-    }
-
-    mLiveSession->seekTo(seekTimeUs);
-}
-
 uint32_t MPEG2TSExtractor::flags() const {
-    Mutex::Autolock autoLock(mLock);
-
-    uint32_t flags = CAN_PAUSE;
-
-    if (mLiveSession != NULL && mLiveSession->isSeekable()) {
-        flags |= CAN_SEEK_FORWARD | CAN_SEEK_BACKWARD | CAN_SEEK;
-    }
-
-    return flags;
+    return CAN_PAUSE;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk
index a8b4939..cd912e7 100644
--- a/media/libstagefright/omx/Android.mk
+++ b/media/libstagefright/omx/Android.mk
@@ -9,6 +9,7 @@
         SimpleSoftOMXComponent.cpp    \
         SoftOMXComponent.cpp          \
         SoftOMXPlugin.cpp             \
+        SoftVideoDecoderOMXComponent.cpp \
 
 LOCAL_C_INCLUDES += \
         $(TOP)/frameworks/av/media/libstagefright \
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index ef27879..b3a8463 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -206,24 +206,15 @@
     // Find matching entry in our cached copy of the BufferQueue slots.
     // If we find a match, release that slot.  If we don't, the BufferQueue
     // has dropped that GraphicBuffer, and there's nothing for us to release.
-    //
-    // (We could store "id" in CodecBuffer and avoid the slot search.)
-    int id;
-    for (id = 0; id < BufferQueue::NUM_BUFFER_SLOTS; id++) {
-        if (mBufferSlot[id] == NULL) {
-            continue;
-        }
+    int id = codecBuffer.mBuf;
+    if (mBufferSlot[id] != NULL &&
+        mBufferSlot[id]->handle == codecBuffer.mGraphicBuffer->handle) {
+        ALOGV("cbi %d matches bq slot %d, handle=%p",
+                cbi, id, mBufferSlot[id]->handle);
 
-        if (mBufferSlot[id]->handle == codecBuffer.mGraphicBuffer->handle) {
-            ALOGV("cbi %d matches bq slot %d, handle=%p",
-                    cbi, id, mBufferSlot[id]->handle);
-
-            mBufferQueue->releaseBuffer(id, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR,
-                    Fence::NO_FENCE);
-            break;
-        }
-    }
-    if (id == BufferQueue::NUM_BUFFER_SLOTS) {
+        mBufferQueue->releaseBuffer(id, codecBuffer.mFrameNumber,
+                EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+    } else {
         ALOGV("codecBufferEmptied: no match for emptied buffer in cbi %d",
                 cbi);
     }
@@ -287,11 +278,11 @@
         mBufferSlot[item.mBuf] = item.mGraphicBuffer;
     }
 
-    err = submitBuffer_l(mBufferSlot[item.mBuf], item.mTimestamp / 1000, cbi);
+    err = submitBuffer_l(item, cbi);
     if (err != OK) {
         ALOGV("submitBuffer_l failed, releasing bq buf %d", item.mBuf);
-        mBufferQueue->releaseBuffer(item.mBuf, EGL_NO_DISPLAY,
-                EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+        mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
+                EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
     } else {
         ALOGV("buffer submitted (bq %d, cbi %d)", item.mBuf, cbi);
     }
@@ -326,11 +317,13 @@
     return OK;
 }
 
-status_t GraphicBufferSource::submitBuffer_l(sp<GraphicBuffer>& graphicBuffer,
-        int64_t timestampUsec, int cbi) {
+status_t GraphicBufferSource::submitBuffer_l(
+        const BufferQueue::BufferItem &item, int cbi) {
     ALOGV("submitBuffer_l cbi=%d", cbi);
     CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
-    codecBuffer.mGraphicBuffer = graphicBuffer;
+    codecBuffer.mGraphicBuffer = mBufferSlot[item.mBuf];
+    codecBuffer.mBuf = item.mBuf;
+    codecBuffer.mFrameNumber = item.mFrameNumber;
 
     OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader;
     CHECK(header->nAllocLen >= 4 + sizeof(buffer_handle_t));
@@ -342,7 +335,7 @@
 
     status_t err = mNodeInstance->emptyDirectBuffer(header, 0,
             4 + sizeof(buffer_handle_t), OMX_BUFFERFLAG_ENDOFFRAME,
-            timestampUsec);
+            item.mTimestamp / 1000);
     if (err != OK) {
         ALOGW("WARNING: emptyDirectBuffer failed: 0x%x", err);
         codecBuffer.mGraphicBuffer = NULL;
@@ -431,8 +424,8 @@
         BufferQueue::BufferItem item;
         status_t err = mBufferQueue->acquireBuffer(&item);
         if (err == OK) {
-            mBufferQueue->releaseBuffer(item.mBuf, EGL_NO_DISPLAY,
-                EGL_NO_SYNC_KHR, item.mFence);
+            mBufferQueue->releaseBuffer(item.mBuf, item.mFrameNumber,
+                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
         }
         return;
     }
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 562d342..8c6b470 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -104,6 +104,13 @@
     // (mGraphicBuffer == NULL) or in use by the codec.
     struct CodecBuffer {
         OMX_BUFFERHEADERTYPE* mHeader;
+
+        // buffer producer's frame-number for buffer
+        uint64_t mFrameNumber;
+
+        // buffer producer's buffer slot for buffer
+        int mBuf;
+
         sp<GraphicBuffer> mGraphicBuffer;
     };
 
@@ -130,8 +137,7 @@
 
     // Marks the mCodecBuffers entry as in-use, copies the GraphicBuffer
     // reference into the codec buffer, and submits the data to the codec.
-    status_t submitBuffer_l(sp<GraphicBuffer>& graphicBuffer,
-            int64_t timestampUsec, int cbi);
+    status_t submitBuffer_l(const BufferQueue::BufferItem &item, int cbi);
 
     // Submits an empty buffer, with the EOS flag set.   Returns without
     // doing anything if we don't have a codec buffer available.
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
new file mode 100644
index 0000000..08a3d42
--- /dev/null
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SoftVideoDecoderOMXComponent"
+#include <utils/Log.h>
+
+#include "include/SoftVideoDecoderOMXComponent.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaDefs.h>
+
+namespace android {
+
+template<class T>
+static void InitOMXParams(T *params) {
+    params->nSize = sizeof(T);
+    params->nVersion.s.nVersionMajor = 1;
+    params->nVersion.s.nVersionMinor = 0;
+    params->nVersion.s.nRevision = 0;
+    params->nVersion.s.nStep = 0;
+}
+
+SoftVideoDecoderOMXComponent::SoftVideoDecoderOMXComponent(
+        const char *name,
+        const char *componentRole,
+        OMX_VIDEO_CODINGTYPE codingType,
+        const CodecProfileLevel *profileLevels,
+        size_t numProfileLevels,
+        int32_t width,
+        int32_t height,
+        const OMX_CALLBACKTYPE *callbacks,
+        OMX_PTR appData,
+        OMX_COMPONENTTYPE **component)
+        : SimpleSoftOMXComponent(name, callbacks, appData, component),
+        mWidth(width),
+        mHeight(height),
+        mCropLeft(0),
+        mCropTop(0),
+        mCropWidth(width),
+        mCropHeight(height),
+        mOutputPortSettingsChange(NONE),
+        mComponentRole(componentRole),
+        mCodingType(codingType),
+        mProfileLevels(profileLevels),
+        mNumProfileLevels(numProfileLevels) {
+}
+
+void SoftVideoDecoderOMXComponent::initPorts(
+        OMX_U32 numInputBuffers,
+        OMX_U32 inputBufferSize,
+        OMX_U32 numOutputBuffers,
+        const char *mimeType) {
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+
+    def.nPortIndex = kInputPortIndex;
+    def.eDir = OMX_DirInput;
+    def.nBufferCountMin = numInputBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.nBufferSize = inputBufferSize;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainVideo;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 1;
+
+    def.format.video.cMIMEType = const_cast<char *>(mimeType);
+    def.format.video.pNativeRender = NULL;
+    /* size is initialized in updatePortDefinitions() */
+    def.format.video.nBitrate = 0;
+    def.format.video.xFramerate = 0;
+    def.format.video.bFlagErrorConcealment = OMX_FALSE;
+    def.format.video.eCompressionFormat = mCodingType;
+    def.format.video.eColorFormat = OMX_COLOR_FormatUnused;
+    def.format.video.pNativeWindow = NULL;
+
+    addPort(def);
+
+    def.nPortIndex = kOutputPortIndex;
+    def.eDir = OMX_DirOutput;
+    def.nBufferCountMin = numOutputBuffers;
+    def.nBufferCountActual = def.nBufferCountMin;
+    def.bEnabled = OMX_TRUE;
+    def.bPopulated = OMX_FALSE;
+    def.eDomain = OMX_PortDomainVideo;
+    def.bBuffersContiguous = OMX_FALSE;
+    def.nBufferAlignment = 2;
+
+    def.format.video.cMIMEType = const_cast<char *>("video/raw");
+    def.format.video.pNativeRender = NULL;
+    /* size is initialized in updatePortDefinitions() */
+    def.format.video.nBitrate = 0;
+    def.format.video.xFramerate = 0;
+    def.format.video.bFlagErrorConcealment = OMX_FALSE;
+    def.format.video.eCompressionFormat = OMX_VIDEO_CodingUnused;
+    def.format.video.eColorFormat = OMX_COLOR_FormatYUV420Planar;
+    def.format.video.pNativeWindow = NULL;
+
+    addPort(def);
+
+    updatePortDefinitions();
+}
+
+void SoftVideoDecoderOMXComponent::updatePortDefinitions() {
+    OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(kInputPortIndex)->mDef;
+    def->format.video.nFrameWidth = mWidth;
+    def->format.video.nFrameHeight = mHeight;
+    def->format.video.nStride = def->format.video.nFrameWidth;
+    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
+
+    def = &editPortInfo(kOutputPortIndex)->mDef;
+    def->format.video.nFrameWidth = mWidth;
+    def->format.video.nFrameHeight = mHeight;
+    def->format.video.nStride = def->format.video.nFrameWidth;
+    def->format.video.nSliceHeight = def->format.video.nFrameHeight;
+
+    def->nBufferSize =
+            (def->format.video.nFrameWidth *
+             def->format.video.nFrameHeight * 3) / 2;
+
+    mCropLeft = 0;
+    mCropTop = 0;
+    mCropWidth = mWidth;
+    mCropHeight = mHeight;
+}
+
+OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalGetParameter(
+        OMX_INDEXTYPE index, OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamVideoPortFormat:
+        {
+            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
+                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > kMaxPortIndex) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex != 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            if (formatParams->nPortIndex == kInputPortIndex) {
+                formatParams->eCompressionFormat = mCodingType;
+                formatParams->eColorFormat = OMX_COLOR_FormatUnused;
+                formatParams->xFramerate = 0;
+            } else {
+                CHECK_EQ(formatParams->nPortIndex, 1u);
+
+                formatParams->eCompressionFormat = OMX_VIDEO_CodingUnused;
+                formatParams->eColorFormat = OMX_COLOR_FormatYUV420Planar;
+                formatParams->xFramerate = 0;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamVideoProfileLevelQuerySupported:
+        {
+            OMX_VIDEO_PARAM_PROFILELEVELTYPE *profileLevel =
+                  (OMX_VIDEO_PARAM_PROFILELEVELTYPE *) params;
+
+            if (profileLevel->nPortIndex != kInputPortIndex) {
+                ALOGE("Invalid port index: %ld", profileLevel->nPortIndex);
+                return OMX_ErrorUnsupportedIndex;
+            }
+
+            if (index >= mNumProfileLevels) {
+                return OMX_ErrorNoMore;
+            }
+
+            profileLevel->eProfile = mProfileLevels[index].mProfile;
+            profileLevel->eLevel   = mProfileLevels[index].mLevel;
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalGetParameter(index, params);
+    }
+}
+
+OMX_ERRORTYPE SoftVideoDecoderOMXComponent::internalSetParameter(
+        OMX_INDEXTYPE index, const OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexParamStandardComponentRole:
+        {
+            const OMX_PARAM_COMPONENTROLETYPE *roleParams =
+                (const OMX_PARAM_COMPONENTROLETYPE *)params;
+
+            if (strncmp((const char *)roleParams->cRole,
+                        mComponentRole,
+                        OMX_MAX_STRINGNAME_SIZE - 1)) {
+                return OMX_ErrorUndefined;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        case OMX_IndexParamVideoPortFormat:
+        {
+            OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
+                (OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
+
+            if (formatParams->nPortIndex > kMaxPortIndex) {
+                return OMX_ErrorUndefined;
+            }
+
+            if (formatParams->nIndex != 0) {
+                return OMX_ErrorNoMore;
+            }
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return SimpleSoftOMXComponent::internalSetParameter(index, params);
+    }
+}
+
+OMX_ERRORTYPE SoftVideoDecoderOMXComponent::getConfig(
+        OMX_INDEXTYPE index, OMX_PTR params) {
+    switch (index) {
+        case OMX_IndexConfigCommonOutputCrop:
+        {
+            OMX_CONFIG_RECTTYPE *rectParams = (OMX_CONFIG_RECTTYPE *)params;
+
+            if (rectParams->nPortIndex != kOutputPortIndex) {
+                return OMX_ErrorUndefined;
+            }
+
+            rectParams->nLeft = mCropLeft;
+            rectParams->nTop = mCropTop;
+            rectParams->nWidth = mCropWidth;
+            rectParams->nHeight = mCropHeight;
+
+            return OMX_ErrorNone;
+        }
+
+        default:
+            return OMX_ErrorUnsupportedIndex;
+    }
+}
+
+void SoftVideoDecoderOMXComponent::onReset() {
+    mOutputPortSettingsChange = NONE;
+}
+
+void SoftVideoDecoderOMXComponent::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
+    if (portIndex != kOutputPortIndex) {
+        return;
+    }
+
+    switch (mOutputPortSettingsChange) {
+        case NONE:
+            break;
+
+        case AWAITING_DISABLED:
+        {
+            CHECK(!enabled);
+            mOutputPortSettingsChange = AWAITING_ENABLED;
+            break;
+        }
+
+        default:
+        {
+            CHECK_EQ((int)mOutputPortSettingsChange, (int)AWAITING_ENABLED);
+            CHECK(enabled);
+            mOutputPortSettingsChange = NONE;
+            break;
+        }
+    }
+}
+
+}  // namespace android
diff --git a/media/libstagefright/rtsp/Android.mk b/media/libstagefright/rtsp/Android.mk
index 9e2724d..e77c69c 100644
--- a/media/libstagefright/rtsp/Android.mk
+++ b/media/libstagefright/rtsp/Android.mk
@@ -51,7 +51,7 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_MODULE_TAGS := debug
+LOCAL_MODULE_TAGS := optional
 
 LOCAL_MODULE:= rtp_test
 
diff --git a/media/libstagefright/wifi-display/Android.mk b/media/libstagefright/wifi-display/Android.mk
index 061ae89..404b41e 100644
--- a/media/libstagefright/wifi-display/Android.mk
+++ b/media/libstagefright/wifi-display/Android.mk
@@ -4,10 +4,17 @@
 
 LOCAL_SRC_FILES:= \
         ANetworkSession.cpp             \
+        MediaReceiver.cpp               \
         MediaSender.cpp                 \
         Parameters.cpp                  \
         ParsedMessage.cpp               \
+        rtp/RTPAssembler.cpp            \
+        rtp/RTPReceiver.cpp             \
         rtp/RTPSender.cpp               \
+        sink/DirectRenderer.cpp         \
+        sink/WifiDisplaySink.cpp        \
+        SNTPClient.cpp                  \
+        TimeSyncer.cpp                  \
         source/Converter.cpp            \
         source/MediaPuller.cpp          \
         source/PlaybackSession.cpp      \
@@ -57,6 +64,67 @@
 
 LOCAL_MODULE:= wfd
 
-LOCAL_MODULE_TAGS := debug
+include $(BUILD_EXECUTABLE)
+
+################################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+        udptest.cpp                 \
+
+LOCAL_SHARED_LIBRARIES:= \
+        libbinder                       \
+        libgui                          \
+        libmedia                        \
+        libstagefright                  \
+        libstagefright_foundation       \
+        libstagefright_wfd              \
+        libutils                        \
+        liblog                          \
+
+LOCAL_MODULE:= udptest
+
+include $(BUILD_EXECUTABLE)
+
+################################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+        rtptest.cpp                 \
+
+LOCAL_SHARED_LIBRARIES:= \
+        libbinder                       \
+        libgui                          \
+        libmedia                        \
+        libstagefright                  \
+        libstagefright_foundation       \
+        libstagefright_wfd              \
+        libutils                        \
+        liblog                          \
+
+LOCAL_MODULE:= rtptest
+
+include $(BUILD_EXECUTABLE)
+
+################################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+        nettest.cpp                     \
+
+LOCAL_SHARED_LIBRARIES:= \
+        libbinder                       \
+        libgui                          \
+        libmedia                        \
+        libstagefright                  \
+        libstagefright_foundation       \
+        libstagefright_wfd              \
+        libutils                        \
+        liblog                          \
+
+LOCAL_MODULE:= nettest
 
 include $(BUILD_EXECUTABLE)
diff --git a/media/libstagefright/wifi-display/MediaReceiver.cpp b/media/libstagefright/wifi-display/MediaReceiver.cpp
new file mode 100644
index 0000000..364acb9
--- /dev/null
+++ b/media/libstagefright/wifi-display/MediaReceiver.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaReceiver"
+#include <utils/Log.h>
+
+#include "MediaReceiver.h"
+
+#include "ANetworkSession.h"
+#include "AnotherPacketSource.h"
+#include "rtp/RTPReceiver.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+MediaReceiver::MediaReceiver(
+        const sp<ANetworkSession> &netSession,
+        const sp<AMessage> &notify)
+    : mNetSession(netSession),
+      mNotify(notify),
+      mMode(MODE_UNDEFINED),
+      mGeneration(0),
+      mInitStatus(OK),
+      mInitDoneCount(0) {
+}
+
+MediaReceiver::~MediaReceiver() {
+}
+
+ssize_t MediaReceiver::addTrack(
+        RTPReceiver::TransportMode rtpMode,
+        RTPReceiver::TransportMode rtcpMode,
+        int32_t *localRTPPort) {
+    if (mMode != MODE_UNDEFINED) {
+        return INVALID_OPERATION;
+    }
+
+    size_t trackIndex = mTrackInfos.size();
+
+    TrackInfo info;
+
+    sp<AMessage> notify = new AMessage(kWhatReceiverNotify, id());
+    notify->setInt32("generation", mGeneration);
+    notify->setSize("trackIndex", trackIndex);
+
+    info.mReceiver = new RTPReceiver(mNetSession, notify);
+    looper()->registerHandler(info.mReceiver);
+
+    info.mReceiver->registerPacketType(
+            33, RTPReceiver::PACKETIZATION_TRANSPORT_STREAM);
+
+    info.mReceiver->registerPacketType(
+            96, RTPReceiver::PACKETIZATION_AAC);
+
+    info.mReceiver->registerPacketType(
+            97, RTPReceiver::PACKETIZATION_H264);
+
+    status_t err = info.mReceiver->initAsync(
+            rtpMode,
+            rtcpMode,
+            localRTPPort);
+
+    if (err != OK) {
+        looper()->unregisterHandler(info.mReceiver->id());
+        info.mReceiver.clear();
+
+        return err;
+    }
+
+    mTrackInfos.push_back(info);
+
+    return trackIndex;
+}
+
+status_t MediaReceiver::connectTrack(
+        size_t trackIndex,
+        const char *remoteHost,
+        int32_t remoteRTPPort,
+        int32_t remoteRTCPPort) {
+    if (trackIndex >= mTrackInfos.size()) {
+        return -ERANGE;
+    }
+
+    TrackInfo *info = &mTrackInfos.editItemAt(trackIndex);
+    return info->mReceiver->connect(remoteHost, remoteRTPPort, remoteRTCPPort);
+}
+
+status_t MediaReceiver::initAsync(Mode mode) {
+    if ((mode == MODE_TRANSPORT_STREAM || mode == MODE_TRANSPORT_STREAM_RAW)
+            && mTrackInfos.size() > 1) {
+        return INVALID_OPERATION;
+    }
+
+    sp<AMessage> msg = new AMessage(kWhatInit, id());
+    msg->setInt32("mode", mode);
+    msg->post();
+
+    return OK;
+}
+
+void MediaReceiver::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatInit:
+        {
+            int32_t mode;
+            CHECK(msg->findInt32("mode", &mode));
+
+            CHECK_EQ(mMode, MODE_UNDEFINED);
+            mMode = (Mode)mode;
+
+            if (mInitStatus != OK || mInitDoneCount == mTrackInfos.size()) {
+                notifyInitDone(mInitStatus);
+            }
+
+            mTSParser = new ATSParser(
+                    ATSParser::ALIGNED_VIDEO_DATA
+                        | ATSParser::TS_TIMESTAMPS_ARE_ABSOLUTE);
+
+            mFormatKnownMask = 0;
+            break;
+        }
+
+        case kWhatReceiverNotify:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+            if (generation != mGeneration) {
+                break;
+            }
+
+            onReceiverNotify(msg);
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void MediaReceiver::onReceiverNotify(const sp<AMessage> &msg) {
+    int32_t what;
+    CHECK(msg->findInt32("what", &what));
+
+    switch (what) {
+        case RTPReceiver::kWhatInitDone:
+        {
+            ++mInitDoneCount;
+
+            int32_t err;
+            CHECK(msg->findInt32("err", &err));
+
+            if (err != OK) {
+                mInitStatus = err;
+                ++mGeneration;
+            }
+
+            if (mMode != MODE_UNDEFINED) {
+                if (mInitStatus != OK || mInitDoneCount == mTrackInfos.size()) {
+                    notifyInitDone(mInitStatus);
+                }
+            }
+            break;
+        }
+
+        case RTPReceiver::kWhatError:
+        {
+            int32_t err;
+            CHECK(msg->findInt32("err", &err));
+
+            notifyError(err);
+            break;
+        }
+
+        case RTPReceiver::kWhatAccessUnit:
+        {
+            size_t trackIndex;
+            CHECK(msg->findSize("trackIndex", &trackIndex));
+
+            sp<ABuffer> accessUnit;
+            CHECK(msg->findBuffer("accessUnit", &accessUnit));
+
+            int32_t followsDiscontinuity;
+            if (!msg->findInt32(
+                        "followsDiscontinuity", &followsDiscontinuity)) {
+                followsDiscontinuity = 0;
+            }
+
+            if (mMode == MODE_TRANSPORT_STREAM) {
+                if (followsDiscontinuity) {
+                    mTSParser->signalDiscontinuity(
+                            ATSParser::DISCONTINUITY_TIME, NULL /* extra */);
+                }
+
+                for (size_t offset = 0;
+                        offset < accessUnit->size(); offset += 188) {
+                    status_t err = mTSParser->feedTSPacket(
+                             accessUnit->data() + offset, 188);
+
+                    if (err != OK) {
+                        notifyError(err);
+                        break;
+                    }
+                }
+
+                drainPackets(0 /* trackIndex */, ATSParser::VIDEO);
+                drainPackets(1 /* trackIndex */, ATSParser::AUDIO);
+            } else {
+                postAccessUnit(trackIndex, accessUnit, NULL);
+            }
+            break;
+        }
+
+        case RTPReceiver::kWhatPacketLost:
+        {
+            notifyPacketLost();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void MediaReceiver::drainPackets(
+        size_t trackIndex, ATSParser::SourceType type) {
+    sp<AnotherPacketSource> source =
+        static_cast<AnotherPacketSource *>(
+                mTSParser->getSource(type).get());
+
+    if (source == NULL) {
+        return;
+    }
+
+    sp<AMessage> format;
+    if (!(mFormatKnownMask & (1ul << trackIndex))) {
+        sp<MetaData> meta = source->getFormat();
+        CHECK(meta != NULL);
+
+        CHECK_EQ((status_t)OK, convertMetaDataToMessage(meta, &format));
+
+        mFormatKnownMask |= 1ul << trackIndex;
+    }
+
+    status_t finalResult;
+    while (source->hasBufferAvailable(&finalResult)) {
+        sp<ABuffer> accessUnit;
+        status_t err = source->dequeueAccessUnit(&accessUnit);
+        if (err == OK) {
+            postAccessUnit(trackIndex, accessUnit, format);
+            format.clear();
+        } else if (err != INFO_DISCONTINUITY) {
+            notifyError(err);
+        }
+    }
+
+    if (finalResult != OK) {
+        notifyError(finalResult);
+    }
+}
+
+void MediaReceiver::notifyInitDone(status_t err) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatInitDone);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+void MediaReceiver::notifyError(status_t err) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+void MediaReceiver::notifyPacketLost() {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatPacketLost);
+    notify->post();
+}
+
+void MediaReceiver::postAccessUnit(
+        size_t trackIndex,
+        const sp<ABuffer> &accessUnit,
+        const sp<AMessage> &format) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatAccessUnit);
+    notify->setSize("trackIndex", trackIndex);
+    notify->setBuffer("accessUnit", accessUnit);
+
+    if (format != NULL) {
+        notify->setMessage("format", format);
+    }
+
+    notify->post();
+}
+
+status_t MediaReceiver::informSender(
+        size_t trackIndex, const sp<AMessage> &params) {
+    if (trackIndex >= mTrackInfos.size()) {
+        return -ERANGE;
+    }
+
+    TrackInfo *info = &mTrackInfos.editItemAt(trackIndex);
+    return info->mReceiver->informSender(params);
+}
+
+}  // namespace android
+
+
diff --git a/media/libstagefright/wifi-display/MediaReceiver.h b/media/libstagefright/wifi-display/MediaReceiver.h
new file mode 100644
index 0000000..afbb407
--- /dev/null
+++ b/media/libstagefright/wifi-display/MediaReceiver.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/foundation/AHandler.h>
+
+#include "ATSParser.h"
+#include "rtp/RTPReceiver.h"
+
+namespace android {
+
+struct ABuffer;
+struct ANetworkSession;
+struct AMessage;
+struct ATSParser;
+
+// This class facilitates receiving of media data for one or more tracks
+// over RTP. Either a 1:1 track to RTP channel mapping is used or a single
+// RTP channel provides the data for a transport stream that is consequently
+// demuxed and its track's data provided to the observer.
+struct MediaReceiver : public AHandler {
+    enum {
+        kWhatInitDone,
+        kWhatError,
+        kWhatAccessUnit,
+        kWhatPacketLost,
+    };
+
+    MediaReceiver(
+            const sp<ANetworkSession> &netSession,
+            const sp<AMessage> &notify);
+
+    ssize_t addTrack(
+            RTPReceiver::TransportMode rtpMode,
+            RTPReceiver::TransportMode rtcpMode,
+            int32_t *localRTPPort);
+
+    status_t connectTrack(
+            size_t trackIndex,
+            const char *remoteHost,
+            int32_t remoteRTPPort,
+            int32_t remoteRTCPPort);
+
+    enum Mode {
+        MODE_UNDEFINED,
+        MODE_TRANSPORT_STREAM,
+        MODE_TRANSPORT_STREAM_RAW,
+        MODE_ELEMENTARY_STREAMS,
+    };
+    status_t initAsync(Mode mode);
+
+    status_t informSender(size_t trackIndex, const sp<AMessage> &params);
+
+protected:
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+    virtual ~MediaReceiver();
+
+private:
+    enum {
+        kWhatInit,
+        kWhatReceiverNotify,
+    };
+
+    struct TrackInfo {
+        sp<RTPReceiver> mReceiver;
+    };
+
+    sp<ANetworkSession> mNetSession;
+    sp<AMessage> mNotify;
+
+    Mode mMode;
+    int32_t mGeneration;
+
+    Vector<TrackInfo> mTrackInfos;
+
+    status_t mInitStatus;
+    size_t mInitDoneCount;
+
+    sp<ATSParser> mTSParser;
+    uint32_t mFormatKnownMask;
+
+    void onReceiverNotify(const sp<AMessage> &msg);
+
+    void drainPackets(size_t trackIndex, ATSParser::SourceType type);
+
+    void notifyInitDone(status_t err);
+    void notifyError(status_t err);
+    void notifyPacketLost();
+
+    void postAccessUnit(
+            size_t trackIndex,
+            const sp<ABuffer> &accessUnit,
+            const sp<AMessage> &format);
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaReceiver);
+};
+
+}  // namespace android
+
diff --git a/media/libstagefright/wifi-display/MediaSender.cpp b/media/libstagefright/wifi-display/MediaSender.cpp
index 8a3566f..a230cd8 100644
--- a/media/libstagefright/wifi-display/MediaSender.cpp
+++ b/media/libstagefright/wifi-display/MediaSender.cpp
@@ -27,9 +27,11 @@
 #include "include/avc_utils.h"
 
 #include <media/IHDCP.h>
+#include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <ui/GraphicBuffer.h>
 
 namespace android {
 
@@ -341,6 +343,22 @@
             break;
         }
 
+        case kWhatInformSender:
+        {
+            int64_t avgLatencyUs;
+            CHECK(msg->findInt64("avgLatencyUs", &avgLatencyUs));
+
+            int64_t maxLatencyUs;
+            CHECK(msg->findInt64("maxLatencyUs", &maxLatencyUs));
+
+            sp<AMessage> notify = mNotify->dup();
+            notify->setInt32("what", kWhatInformSender);
+            notify->setInt64("avgLatencyUs", avgLatencyUs);
+            notify->setInt64("maxLatencyUs", maxLatencyUs);
+            notify->post();
+            break;
+        }
+
         default:
             TRESPASS();
     }
@@ -392,11 +410,36 @@
                     info.mPacketizerTrackIndex, accessUnit);
         }
 
-        status_t err = mHDCP->encrypt(
-                accessUnit->data(), accessUnit->size(),
-                trackIndex  /* streamCTR */,
-                &inputCTR,
-                accessUnit->data());
+        status_t err;
+        native_handle_t* handle;
+        if (accessUnit->meta()->findPointer("handle", (void**)&handle)
+                && handle != NULL) {
+            int32_t rangeLength, rangeOffset;
+            sp<AMessage> notify;
+            CHECK(accessUnit->meta()->findInt32("rangeOffset", &rangeOffset));
+            CHECK(accessUnit->meta()->findInt32("rangeLength", &rangeLength));
+            CHECK(accessUnit->meta()->findMessage("notify", &notify)
+                    && notify != NULL);
+            CHECK_GE(accessUnit->size(), rangeLength);
+
+            sp<GraphicBuffer> grbuf(new GraphicBuffer(
+                    rangeOffset + rangeLength, 1, HAL_PIXEL_FORMAT_Y8,
+                    GRALLOC_USAGE_HW_VIDEO_ENCODER, rangeOffset + rangeLength,
+                    handle, false));
+
+            err = mHDCP->encryptNative(
+                    grbuf, rangeOffset, rangeLength,
+                    trackIndex  /* streamCTR */,
+                    &inputCTR,
+                    accessUnit->data());
+            notify->post();
+        } else {
+            err = mHDCP->encrypt(
+                    accessUnit->data(), accessUnit->size(),
+                    trackIndex  /* streamCTR */,
+                    &inputCTR,
+                    accessUnit->data());
+        }
 
         if (err != OK) {
             ALOGE("Failed to HDCP-encrypt media data (err %d)",
diff --git a/media/libstagefright/wifi-display/MediaSender.h b/media/libstagefright/wifi-display/MediaSender.h
index 64722c5..04538ea 100644
--- a/media/libstagefright/wifi-display/MediaSender.h
+++ b/media/libstagefright/wifi-display/MediaSender.h
@@ -43,6 +43,7 @@
         kWhatInitDone,
         kWhatError,
         kWhatNetworkStall,
+        kWhatInformSender,
     };
 
     MediaSender(
diff --git a/media/libstagefright/wifi-display/SNTPClient.cpp b/media/libstagefright/wifi-display/SNTPClient.cpp
new file mode 100644
index 0000000..5c0af6a
--- /dev/null
+++ b/media/libstagefright/wifi-display/SNTPClient.cpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SNTPClient.h"
+
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/Utils.h>
+
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+namespace android {
+
+SNTPClient::SNTPClient() {
+}
+
+status_t SNTPClient::requestTime(const char *host) {
+    struct hostent *ent;
+    int64_t requestTimeNTP, requestTimeUs;
+    ssize_t n;
+    int64_t responseTimeUs, responseTimeNTP;
+    int64_t originateTimeNTP, receiveTimeNTP, transmitTimeNTP;
+    int64_t roundTripTimeNTP, clockOffsetNTP;
+
+    status_t err = UNKNOWN_ERROR;
+
+    int s = socket(AF_INET, SOCK_DGRAM, 0);
+
+    if (s < 0) {
+        err = -errno;
+
+        goto bail;
+    }
+
+    ent = gethostbyname(host);
+
+    if (ent == NULL) {
+        err = -ENOENT;
+        goto bail2;
+    }
+
+    struct sockaddr_in hostAddr;
+    memset(hostAddr.sin_zero, 0, sizeof(hostAddr.sin_zero));
+    hostAddr.sin_family = AF_INET;
+    hostAddr.sin_port = htons(kNTPPort);
+    hostAddr.sin_addr.s_addr = *(in_addr_t *)ent->h_addr;
+
+    uint8_t packet[kNTPPacketSize];
+    memset(packet, 0, sizeof(packet));
+
+    packet[0] = kNTPModeClient | (kNTPVersion << 3);
+
+    requestTimeNTP = getNowNTP();
+    requestTimeUs = ALooper::GetNowUs();
+    writeTimeStamp(&packet[kNTPTransmitTimeOffset], requestTimeNTP);
+
+    n = sendto(
+            s, packet, sizeof(packet), 0,
+            (const struct sockaddr *)&hostAddr, sizeof(hostAddr));
+
+    if (n < 0) {
+        err = -errno;
+        goto bail2;
+    }
+
+    memset(packet, 0, sizeof(packet));
+
+    do {
+        n = recv(s, packet, sizeof(packet), 0);
+    } while (n < 0 && errno == EINTR);
+
+    if (n < 0) {
+        err = -errno;
+        goto bail2;
+    }
+
+    responseTimeUs = ALooper::GetNowUs();
+
+    responseTimeNTP = requestTimeNTP + makeNTP(responseTimeUs - requestTimeUs);
+
+    originateTimeNTP = readTimeStamp(&packet[kNTPOriginateTimeOffset]);
+    receiveTimeNTP = readTimeStamp(&packet[kNTPReceiveTimeOffset]);
+    transmitTimeNTP = readTimeStamp(&packet[kNTPTransmitTimeOffset]);
+
+    roundTripTimeNTP =
+        makeNTP(responseTimeUs - requestTimeUs)
+            - (transmitTimeNTP - receiveTimeNTP);
+
+    clockOffsetNTP =
+        ((receiveTimeNTP - originateTimeNTP)
+            + (transmitTimeNTP - responseTimeNTP)) / 2;
+
+    mTimeReferenceNTP = responseTimeNTP + clockOffsetNTP;
+    mTimeReferenceUs = responseTimeUs;
+    mRoundTripTimeNTP = roundTripTimeNTP;
+
+    err = OK;
+
+bail2:
+    close(s);
+    s = -1;
+
+bail:
+    return err;
+}
+
+int64_t SNTPClient::adjustTimeUs(int64_t timeUs) const {
+    uint64_t nowNTP =
+        mTimeReferenceNTP + makeNTP(timeUs - mTimeReferenceUs);
+
+    int64_t nowUs =
+        (nowNTP >> 32) * 1000000ll
+        + ((nowNTP & 0xffffffff) * 1000000ll) / (1ll << 32);
+
+    nowUs -= ((70ll * 365 + 17) * 24) * 60 * 60 * 1000000ll;
+
+    return nowUs;
+}
+
+// static
+void SNTPClient::writeTimeStamp(uint8_t *dst, uint64_t ntpTime) {
+    *dst++ = (ntpTime >> 56) & 0xff;
+    *dst++ = (ntpTime >> 48) & 0xff;
+    *dst++ = (ntpTime >> 40) & 0xff;
+    *dst++ = (ntpTime >> 32) & 0xff;
+    *dst++ = (ntpTime >> 24) & 0xff;
+    *dst++ = (ntpTime >> 16) & 0xff;
+    *dst++ = (ntpTime >> 8) & 0xff;
+    *dst++ = ntpTime & 0xff;
+}
+
+// static
+uint64_t SNTPClient::readTimeStamp(const uint8_t *dst) {
+    return U64_AT(dst);
+}
+
+// static
+uint64_t SNTPClient::getNowNTP() {
+    struct timeval tv;
+    gettimeofday(&tv, NULL /* time zone */);
+
+    uint64_t nowUs = tv.tv_sec * 1000000ll + tv.tv_usec;
+
+    nowUs += ((70ll * 365 + 17) * 24) * 60 * 60 * 1000000ll;
+
+    return makeNTP(nowUs);
+}
+
+// static
+uint64_t SNTPClient::makeNTP(uint64_t deltaUs) {
+    uint64_t hi = deltaUs / 1000000ll;
+    uint64_t lo = ((1ll << 32) * (deltaUs % 1000000ll)) / 1000000ll;
+
+    return (hi << 32) | lo;
+}
+
+}  // namespace android
+
diff --git a/media/libstagefright/wifi-display/SNTPClient.h b/media/libstagefright/wifi-display/SNTPClient.h
new file mode 100644
index 0000000..967d1fc
--- /dev/null
+++ b/media/libstagefright/wifi-display/SNTPClient.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SNTP_CLIENT_H_
+
+#define SNTP_CLIENT_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+// Implementation of the SNTP (Simple Network Time Protocol)
+struct SNTPClient {
+    SNTPClient();
+
+    status_t requestTime(const char *host);
+
+    // given a time obtained from ALooper::GetNowUs()
+    // return the number of us elapsed since Jan 1 1970 00:00:00 (UTC).
+    int64_t adjustTimeUs(int64_t timeUs) const;
+
+private:
+    enum {
+        kNTPPort = 123,
+        kNTPPacketSize = 48,
+        kNTPModeClient = 3,
+        kNTPVersion = 3,
+        kNTPTransmitTimeOffset = 40,
+        kNTPOriginateTimeOffset = 24,
+        kNTPReceiveTimeOffset = 32,
+    };
+
+    uint64_t mTimeReferenceNTP;
+    int64_t mTimeReferenceUs;
+    int64_t mRoundTripTimeNTP;
+
+    static void writeTimeStamp(uint8_t *dst, uint64_t ntpTime);
+    static uint64_t readTimeStamp(const uint8_t *dst);
+
+    static uint64_t getNowNTP();
+    static uint64_t makeNTP(uint64_t deltaUs);
+
+    DISALLOW_EVIL_CONSTRUCTORS(SNTPClient);
+};
+
+}  // namespace android
+
+#endif  // SNTP_CLIENT_H_
diff --git a/media/libstagefright/wifi-display/TimeSyncer.cpp b/media/libstagefright/wifi-display/TimeSyncer.cpp
new file mode 100644
index 0000000..cb429bc
--- /dev/null
+++ b/media/libstagefright/wifi-display/TimeSyncer.cpp
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "TimeSyncer"
+#include <utils/Log.h>
+
+#include "TimeSyncer.h"
+
+#include "ANetworkSession.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+TimeSyncer::TimeSyncer(
+        const sp<ANetworkSession> &netSession, const sp<AMessage> &notify)
+    : mNetSession(netSession),
+      mNotify(notify),
+      mIsServer(false),
+      mConnected(false),
+      mUDPSession(0),
+      mSeqNo(0),
+      mTotalTimeUs(0.0),
+      mPendingT1(0ll),
+      mTimeoutGeneration(0) {
+}
+
+TimeSyncer::~TimeSyncer() {
+}
+
+void TimeSyncer::startServer(unsigned localPort) {
+    sp<AMessage> msg = new AMessage(kWhatStartServer, id());
+    msg->setInt32("localPort", localPort);
+    msg->post();
+}
+
+void TimeSyncer::startClient(const char *remoteHost, unsigned remotePort) {
+    sp<AMessage> msg = new AMessage(kWhatStartClient, id());
+    msg->setString("remoteHost", remoteHost);
+    msg->setInt32("remotePort", remotePort);
+    msg->post();
+}
+
+void TimeSyncer::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatStartClient:
+        {
+            AString remoteHost;
+            CHECK(msg->findString("remoteHost", &remoteHost));
+
+            int32_t remotePort;
+            CHECK(msg->findInt32("remotePort", &remotePort));
+
+            sp<AMessage> notify = new AMessage(kWhatUDPNotify, id());
+
+            CHECK_EQ((status_t)OK,
+                     mNetSession->createUDPSession(
+                         0 /* localPort */,
+                         remoteHost.c_str(),
+                         remotePort,
+                         notify,
+                         &mUDPSession));
+
+            postSendPacket();
+            break;
+        }
+
+        case kWhatStartServer:
+        {
+            mIsServer = true;
+
+            int32_t localPort;
+            CHECK(msg->findInt32("localPort", &localPort));
+
+            sp<AMessage> notify = new AMessage(kWhatUDPNotify, id());
+
+            CHECK_EQ((status_t)OK,
+                     mNetSession->createUDPSession(
+                         localPort, notify, &mUDPSession));
+
+            break;
+        }
+
+        case kWhatSendPacket:
+        {
+            if (mHistory.size() == 0) {
+                ALOGI("starting batch");
+            }
+
+            TimeInfo ti;
+            memset(&ti, 0, sizeof(ti));
+
+            ti.mT1 = ALooper::GetNowUs();
+
+            CHECK_EQ((status_t)OK,
+                     mNetSession->sendRequest(
+                         mUDPSession, &ti, sizeof(ti)));
+
+            mPendingT1 = ti.mT1;
+            postTimeout();
+            break;
+        }
+
+        case kWhatTimedOut:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mTimeoutGeneration) {
+                break;
+            }
+
+            ALOGI("timed out, sending another request");
+            postSendPacket();
+            break;
+        }
+
+        case kWhatUDPNotify:
+        {
+            int32_t reason;
+            CHECK(msg->findInt32("reason", &reason));
+
+            switch (reason) {
+                case ANetworkSession::kWhatError:
+                {
+                    int32_t sessionID;
+                    CHECK(msg->findInt32("sessionID", &sessionID));
+
+                    int32_t err;
+                    CHECK(msg->findInt32("err", &err));
+
+                    AString detail;
+                    CHECK(msg->findString("detail", &detail));
+
+                    ALOGE("An error occurred in session %d (%d, '%s/%s').",
+                          sessionID,
+                          err,
+                          detail.c_str(),
+                          strerror(-err));
+
+                    mNetSession->destroySession(sessionID);
+
+                    cancelTimeout();
+
+                    notifyError(err);
+                    break;
+                }
+
+                case ANetworkSession::kWhatDatagram:
+                {
+                    int32_t sessionID;
+                    CHECK(msg->findInt32("sessionID", &sessionID));
+
+                    sp<ABuffer> packet;
+                    CHECK(msg->findBuffer("data", &packet));
+
+                    int64_t arrivalTimeUs;
+                    CHECK(packet->meta()->findInt64(
+                                "arrivalTimeUs", &arrivalTimeUs));
+
+                    CHECK_EQ(packet->size(), sizeof(TimeInfo));
+
+                    TimeInfo *ti = (TimeInfo *)packet->data();
+
+                    if (mIsServer) {
+                        if (!mConnected) {
+                            AString fromAddr;
+                            CHECK(msg->findString("fromAddr", &fromAddr));
+
+                            int32_t fromPort;
+                            CHECK(msg->findInt32("fromPort", &fromPort));
+
+                            CHECK_EQ((status_t)OK,
+                                     mNetSession->connectUDPSession(
+                                         mUDPSession, fromAddr.c_str(), fromPort));
+
+                            mConnected = true;
+                        }
+
+                        ti->mT2 = arrivalTimeUs;
+                        ti->mT3 = ALooper::GetNowUs();
+
+                        CHECK_EQ((status_t)OK,
+                                 mNetSession->sendRequest(
+                                     mUDPSession, ti, sizeof(*ti)));
+                    } else {
+                        if (ti->mT1 != mPendingT1) {
+                            break;
+                        }
+
+                        cancelTimeout();
+                        mPendingT1 = 0;
+
+                        ti->mT4 = arrivalTimeUs;
+
+                        // One way delay for a packet to travel from client
+                        // to server or back (assumed to be the same either way).
+                        int64_t delay =
+                            (ti->mT2 - ti->mT1 + ti->mT4 - ti->mT3) / 2;
+
+                        // Offset between the client clock (T1, T4) and the
+                        // server clock (T2, T3) timestamps.
+                        int64_t offset =
+                            (ti->mT2 - ti->mT1 - ti->mT4 + ti->mT3) / 2;
+
+                        mHistory.push_back(*ti);
+
+                        ALOGV("delay = %lld us,\toffset %lld us",
+                               delay,
+                               offset);
+
+                        if (mHistory.size() < kNumPacketsPerBatch) {
+                            postSendPacket(1000000ll / 30);
+                        } else {
+                            notifyOffset();
+
+                            ALOGI("batch done");
+
+                            mHistory.clear();
+                            postSendPacket(kBatchDelayUs);
+                        }
+                    }
+                    break;
+                }
+
+                default:
+                    TRESPASS();
+            }
+
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void TimeSyncer::postSendPacket(int64_t delayUs) {
+    (new AMessage(kWhatSendPacket, id()))->post(delayUs);
+}
+
+void TimeSyncer::postTimeout() {
+    sp<AMessage> msg = new AMessage(kWhatTimedOut, id());
+    msg->setInt32("generation", mTimeoutGeneration);
+    msg->post(kTimeoutDelayUs);
+}
+
+void TimeSyncer::cancelTimeout() {
+    ++mTimeoutGeneration;
+}
+
+void TimeSyncer::notifyError(status_t err) {
+    if (mNotify == NULL) {
+        looper()->stop();
+        return;
+    }
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+// static
+int TimeSyncer::CompareRountripTime(const TimeInfo *ti1, const TimeInfo *ti2) {
+    int64_t rt1 = ti1->mT4 - ti1->mT1;
+    int64_t rt2 = ti2->mT4 - ti2->mT1;
+
+    if (rt1 < rt2) {
+        return -1;
+    } else if (rt1 > rt2) {
+        return 1;
+    }
+
+    return 0;
+}
+
+void TimeSyncer::notifyOffset() {
+    mHistory.sort(CompareRountripTime);
+
+    int64_t sum = 0ll;
+    size_t count = 0;
+
+    // Only consider the third of the packets that had the best
+    // (smallest) roundtrip times.
+    for (size_t i = 0; i < mHistory.size() / 3; ++i) {
+        const TimeInfo *ti = &mHistory[i];
+
+#if 0
+        // One way delay for a packet to travel from client
+        // to server or back (assumed to be the same either way).
+        int64_t delay =
+            (ti->mT2 - ti->mT1 + ti->mT4 - ti->mT3) / 2;
+#endif
+
+        // Offset between the client clock (T1, T4) and the
+        // server clock (T2, T3) timestamps.
+        int64_t offset =
+            (ti->mT2 - ti->mT1 - ti->mT4 + ti->mT3) / 2;
+
+        ALOGV("(%d) RT: %lld us, offset: %lld us",
+              i, ti->mT4 - ti->mT1, offset);
+
+        sum += offset;
+        ++count;
+    }
+
+    if (mNotify == NULL) {
+        ALOGI("avg. offset is %lld", sum / count);
+        return;
+    }
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatTimeOffset);
+    notify->setInt64("offset", sum / count);
+    notify->post();
+}
+
+}  // namespace android
diff --git a/media/libstagefright/wifi-display/TimeSyncer.h b/media/libstagefright/wifi-display/TimeSyncer.h
new file mode 100644
index 0000000..4e7571f
--- /dev/null
+++ b/media/libstagefright/wifi-display/TimeSyncer.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TIME_SYNCER_H_
+
+#define TIME_SYNCER_H_
+
+#include <media/stagefright/foundation/AHandler.h>
+
+namespace android {
+
+struct ANetworkSession;
+
+/*
+   TimeSyncer allows us to synchronize time between a client and a server.
+   The client sends a UDP packet containing its send-time to the server,
+   the server sends that packet back to the client amended with information
+   about when it was received as well as the time the reply was sent back.
+   Finally the client receives the reply and has now enough information to
+   compute the clock offset between client and server assuming that packet
+   exchange is symmetric, i.e. time for a packet client->server and
+   server->client is roughly equal.
+   This exchange is repeated a number of times and the average offset computed
+   over the 30% of packets that had the lowest roundtrip times.
+   The offset is determined every 10 secs to account for slight differences in
+   clock frequency.
+*/
+struct TimeSyncer : public AHandler {
+    enum {
+        kWhatError,
+        kWhatTimeOffset,
+    };
+    TimeSyncer(
+            const sp<ANetworkSession> &netSession,
+            const sp<AMessage> &notify);
+
+    void startServer(unsigned localPort);
+    void startClient(const char *remoteHost, unsigned remotePort);
+
+protected:
+    virtual ~TimeSyncer();
+
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatStartServer,
+        kWhatStartClient,
+        kWhatUDPNotify,
+        kWhatSendPacket,
+        kWhatTimedOut,
+    };
+
+    struct TimeInfo {
+        int64_t mT1;  // client timestamp at send
+        int64_t mT2;  // server timestamp at receive
+        int64_t mT3;  // server timestamp at send
+        int64_t mT4;  // client timestamp at receive
+    };
+
+    enum {
+        kNumPacketsPerBatch = 30,
+    };
+    static const int64_t kTimeoutDelayUs = 500000ll;
+    static const int64_t kBatchDelayUs = 60000000ll;  // every minute
+
+    sp<ANetworkSession> mNetSession;
+    sp<AMessage> mNotify;
+
+    bool mIsServer;
+    bool mConnected;
+    int32_t mUDPSession;
+    uint32_t mSeqNo;
+    double mTotalTimeUs;
+
+    Vector<TimeInfo> mHistory;
+
+    int64_t mPendingT1;
+    int32_t mTimeoutGeneration;
+
+    void postSendPacket(int64_t delayUs = 0ll);
+
+    void postTimeout();
+    void cancelTimeout();
+
+    void notifyError(status_t err);
+    void notifyOffset();
+
+    static int CompareRountripTime(const TimeInfo *ti1, const TimeInfo *ti2);
+
+    DISALLOW_EVIL_CONSTRUCTORS(TimeSyncer);
+};
+
+}  // namespace android
+
+#endif  // TIME_SYNCER_H_
diff --git a/media/libstagefright/wifi-display/VideoFormats.cpp b/media/libstagefright/wifi-display/VideoFormats.cpp
index 458b163..04e02c1 100644
--- a/media/libstagefright/wifi-display/VideoFormats.cpp
+++ b/media/libstagefright/wifi-display/VideoFormats.cpp
@@ -24,7 +24,8 @@
 
 namespace android {
 
-VideoFormats::config_t VideoFormats::mConfigs[][32] = {
+// static
+const VideoFormats::config_t VideoFormats::mResolutionTable[][32] = {
     {
         // CEA Resolutions
         { 640, 480, 60, false, 0, 0},
@@ -133,6 +134,8 @@
 };
 
 VideoFormats::VideoFormats() {
+    memcpy(mConfigs, mResolutionTable, sizeof(mConfigs));
+
     for (size_t i = 0; i < kNumResolutionTypes; ++i) {
         mResolutionEnabled[i] = 0;
     }
@@ -175,6 +178,29 @@
     }
 }
 
+void VideoFormats::enableResolutionUpto(
+        ResolutionType type, size_t index,
+        ProfileType profile, LevelType level) {
+    size_t width, height, fps, score;
+    bool interlaced;
+    if (!GetConfiguration(type, index, &width, &height,
+            &fps, &interlaced)) {
+        ALOGE("Maximum resolution not found!");
+        return;
+    }
+    score = width * height * fps * (!interlaced + 1);
+    for (size_t i = 0; i < kNumResolutionTypes; ++i) {
+        for (size_t j = 0; j < 32; j++) {
+            if (GetConfiguration((ResolutionType)i, j,
+                    &width, &height, &fps, &interlaced)
+                    && score >= width * height * fps * (!interlaced + 1)) {
+                setResolutionEnabled((ResolutionType)i, j);
+                setProfileLevel((ResolutionType)i, j, profile, level);
+            }
+        }
+    }
+}
+
 void VideoFormats::setResolutionEnabled(
         ResolutionType type, size_t index, bool enabled) {
     CHECK_LT(type, kNumResolutionTypes);
@@ -182,11 +208,56 @@
 
     if (enabled) {
         mResolutionEnabled[type] |= (1ul << index);
+        mConfigs[type][index].profile = (1ul << PROFILE_CBP);
+        mConfigs[type][index].level = (1ul << LEVEL_31);
     } else {
         mResolutionEnabled[type] &= ~(1ul << index);
+        mConfigs[type][index].profile = 0;
+        mConfigs[type][index].level = 0;
     }
 }
 
+void VideoFormats::setProfileLevel(
+        ResolutionType type, size_t index,
+        ProfileType profile, LevelType level) {
+    CHECK_LT(type, kNumResolutionTypes);
+    CHECK(GetConfiguration(type, index, NULL, NULL, NULL, NULL));
+
+    mConfigs[type][index].profile = (1ul << profile);
+    mConfigs[type][index].level = (1ul << level);
+}
+
+void VideoFormats::getProfileLevel(
+        ResolutionType type, size_t index,
+        ProfileType *profile, LevelType *level) const{
+    CHECK_LT(type, kNumResolutionTypes);
+    CHECK(GetConfiguration(type, index, NULL, NULL, NULL, NULL));
+
+    int i, bestProfile = -1, bestLevel = -1;
+
+    for (i = 0; i < kNumProfileTypes; ++i) {
+        if (mConfigs[type][index].profile & (1ul << i)) {
+            bestProfile = i;
+        }
+    }
+
+    for (i = 0; i < kNumLevelTypes; ++i) {
+        if (mConfigs[type][index].level & (1ul << i)) {
+            bestLevel = i;
+        }
+    }
+
+    if (bestProfile == -1 || bestLevel == -1) {
+        ALOGE("Profile or level not set for resolution type %d, index %d",
+              type, index);
+        bestProfile = PROFILE_CBP;
+        bestLevel = LEVEL_31;
+    }
+
+    *profile = (ProfileType) bestProfile;
+    *level = (LevelType) bestLevel;
+}
+
 bool VideoFormats::isResolutionEnabled(
         ResolutionType type, size_t index) const {
     CHECK_LT(type, kNumResolutionTypes);
@@ -207,7 +278,7 @@
         return false;
     }
 
-    const config_t *config = &mConfigs[type][index];
+    const config_t *config = &mResolutionTable[type][index];
 
     if (config->width == 0) {
         return false;
@@ -251,9 +322,12 @@
             if (res[i] & (1ul << j)){
                 mResolutionEnabled[i] |= (1ul << j);
                 if (profile > mConfigs[i][j].profile) {
+                    // prefer higher profile (even if level is lower)
                     mConfigs[i][j].profile = profile;
-                    if (level > mConfigs[i][j].level)
-                        mConfigs[i][j].level = level;
+                    mConfigs[i][j].level = level;
+                } else if (profile == mConfigs[i][j].profile &&
+                           level > mConfigs[i][j].level) {
+                    mConfigs[i][j].level = level;
                 }
             }
         }
@@ -262,9 +336,51 @@
     return true;
 }
 
+// static
+bool VideoFormats::GetProfileLevel(
+        ProfileType profile, LevelType level, unsigned *profileIdc,
+        unsigned *levelIdc, unsigned *constraintSet) {
+    CHECK_LT(profile, kNumProfileTypes);
+    CHECK_LT(level, kNumLevelTypes);
+
+    static const unsigned kProfileIDC[kNumProfileTypes] = {
+        66,     // PROFILE_CBP
+        100,    // PROFILE_CHP
+    };
+
+    static const unsigned kLevelIDC[kNumLevelTypes] = {
+        31,     // LEVEL_31
+        32,     // LEVEL_32
+        40,     // LEVEL_40
+        41,     // LEVEL_41
+        42,     // LEVEL_42
+    };
+
+    static const unsigned kConstraintSet[kNumProfileTypes] = {
+        0xc0,   // PROFILE_CBP
+        0x0c,   // PROFILE_CHP
+    };
+
+    if (profileIdc) {
+        *profileIdc = kProfileIDC[profile];
+    }
+
+    if (levelIdc) {
+        *levelIdc = kLevelIDC[level];
+    }
+
+    if (constraintSet) {
+        *constraintSet = kConstraintSet[profile];
+    }
+
+    return true;
+}
+
 bool VideoFormats::parseFormatSpec(const char *spec) {
     CHECK_EQ(kNumResolutionTypes, 3);
 
+    disableAll();
+
     unsigned native, dummy;
     unsigned res[3];
     size_t size = strlen(spec);
@@ -320,8 +436,10 @@
     //   max-vres (none or 2 byte)
 
     return StringPrintf(
-            "%02x 00 02 02 %08x %08x %08x 00 0000 0000 00 none none",
+            "%02x 00 %02x %02x %08x %08x %08x 00 0000 0000 00 none none",
             forM4Message ? 0x00 : ((mNativeIndex << 3) | mNativeType),
+            mConfigs[mNativeType][mNativeIndex].profile,
+            mConfigs[mNativeType][mNativeIndex].level,
             mResolutionEnabled[0],
             mResolutionEnabled[1],
             mResolutionEnabled[2]);
@@ -332,7 +450,9 @@
         const VideoFormats &sinkSupported,
         const VideoFormats &sourceSupported,
         ResolutionType *chosenType,
-        size_t *chosenIndex) {
+        size_t *chosenIndex,
+        ProfileType *chosenProfile,
+        LevelType *chosenLevel) {
 #if 0
     // Support for the native format is a great idea, the spec includes
     // these features, but nobody supports it and the tests don't validate it.
@@ -412,6 +532,18 @@
     *chosenType = (ResolutionType)bestType;
     *chosenIndex = bestIndex;
 
+    // Pick the best profile/level supported by both sink and source.
+    ProfileType srcProfile, sinkProfile;
+    LevelType srcLevel, sinkLevel;
+    sourceSupported.getProfileLevel(
+                        (ResolutionType)bestType, bestIndex,
+                        &srcProfile, &srcLevel);
+    sinkSupported.getProfileLevel(
+                        (ResolutionType)bestType, bestIndex,
+                        &sinkProfile, &sinkLevel);
+    *chosenProfile = srcProfile < sinkProfile ? srcProfile : sinkProfile;
+    *chosenLevel = srcLevel < sinkLevel ? srcLevel : sinkLevel;
+
     return true;
 }
 
diff --git a/media/libstagefright/wifi-display/VideoFormats.h b/media/libstagefright/wifi-display/VideoFormats.h
index 01de246..fd38fd1 100644
--- a/media/libstagefright/wifi-display/VideoFormats.h
+++ b/media/libstagefright/wifi-display/VideoFormats.h
@@ -69,17 +69,33 @@
 
     void disableAll();
     void enableAll();
+    void enableResolutionUpto(
+            ResolutionType type, size_t index,
+            ProfileType profile, LevelType level);
 
     void setResolutionEnabled(
             ResolutionType type, size_t index, bool enabled = true);
 
     bool isResolutionEnabled(ResolutionType type, size_t index) const;
 
+    void setProfileLevel(
+            ResolutionType type, size_t index,
+            ProfileType profile, LevelType level);
+
+    void getProfileLevel(
+            ResolutionType type, size_t index,
+            ProfileType *profile, LevelType *level) const;
+
     static bool GetConfiguration(
             ResolutionType type, size_t index,
             size_t *width, size_t *height, size_t *framesPerSecond,
             bool *interlaced);
 
+    static bool GetProfileLevel(
+            ProfileType profile, LevelType level,
+            unsigned *profileIdc, unsigned *levelIdc,
+            unsigned *constraintSet);
+
     bool parseFormatSpec(const char *spec);
     AString getFormatSpec(bool forM4Message = false) const;
 
@@ -87,7 +103,9 @@
             const VideoFormats &sinkSupported,
             const VideoFormats &sourceSupported,
             ResolutionType *chosenType,
-            size_t *chosenIndex);
+            size_t *chosenIndex,
+            ProfileType *chosenProfile,
+            LevelType *chosenLevel);
 
 private:
     bool parseH264Codec(const char *spec);
@@ -95,7 +113,8 @@
     size_t mNativeIndex;
 
     uint32_t mResolutionEnabled[kNumResolutionTypes];
-    static config_t mConfigs[kNumResolutionTypes][32];
+    static const config_t mResolutionTable[kNumResolutionTypes][32];
+    config_t mConfigs[kNumResolutionTypes][32];
 
     DISALLOW_EVIL_CONSTRUCTORS(VideoFormats);
 };
diff --git a/media/libstagefright/wifi-display/nettest.cpp b/media/libstagefright/wifi-display/nettest.cpp
new file mode 100644
index 0000000..0779bf5
--- /dev/null
+++ b/media/libstagefright/wifi-display/nettest.cpp
@@ -0,0 +1,400 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "nettest"
+#include <utils/Log.h>
+
+#include "ANetworkSession.h"
+#include "TimeSyncer.h"
+
+#include <binder/ProcessState.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/NuMediaExtractor.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+struct TestHandler : public AHandler {
+    TestHandler(const sp<ANetworkSession> &netSession);
+
+    void listen(int32_t port);
+    void connect(const char *host, int32_t port);
+
+protected:
+    virtual ~TestHandler();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kTimeSyncerPort = 8123,
+    };
+
+    enum {
+        kWhatListen,
+        kWhatConnect,
+        kWhatTimeSyncerNotify,
+        kWhatNetNotify,
+        kWhatSendMore,
+        kWhatStop,
+    };
+
+    sp<ANetworkSession> mNetSession;
+    sp<TimeSyncer> mTimeSyncer;
+
+    int32_t mServerSessionID;
+    int32_t mSessionID;
+
+    int64_t mTimeOffsetUs;
+    bool mTimeOffsetValid;
+
+    int32_t mCounter;
+
+    int64_t mMaxDelayMs;
+
+    void dumpDelay(int32_t counter, int64_t delayMs);
+
+    DISALLOW_EVIL_CONSTRUCTORS(TestHandler);
+};
+
+TestHandler::TestHandler(const sp<ANetworkSession> &netSession)
+    : mNetSession(netSession),
+      mServerSessionID(0),
+      mSessionID(0),
+      mTimeOffsetUs(-1ll),
+      mTimeOffsetValid(false),
+      mCounter(0),
+      mMaxDelayMs(-1ll) {
+}
+
+TestHandler::~TestHandler() {
+}
+
+void TestHandler::listen(int32_t port) {
+    sp<AMessage> msg = new AMessage(kWhatListen, id());
+    msg->setInt32("port", port);
+    msg->post();
+}
+
+void TestHandler::connect(const char *host, int32_t port) {
+    sp<AMessage> msg = new AMessage(kWhatConnect, id());
+    msg->setString("host", host);
+    msg->setInt32("port", port);
+    msg->post();
+}
+
+void TestHandler::dumpDelay(int32_t counter, int64_t delayMs) {
+    static const int64_t kMinDelayMs = 0;
+    static const int64_t kMaxDelayMs = 300;
+
+    const char *kPattern = "########################################";
+    size_t kPatternSize = strlen(kPattern);
+
+    int n = (kPatternSize * (delayMs - kMinDelayMs))
+                / (kMaxDelayMs - kMinDelayMs);
+
+    if (n < 0) {
+        n = 0;
+    } else if ((size_t)n > kPatternSize) {
+        n = kPatternSize;
+    }
+
+    if (delayMs > mMaxDelayMs) {
+        mMaxDelayMs = delayMs;
+    }
+
+    ALOGI("[%d] (%4lld ms / %4lld ms) %s",
+          counter,
+          delayMs,
+          mMaxDelayMs,
+          kPattern + kPatternSize - n);
+}
+
+void TestHandler::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatListen:
+        {
+            sp<AMessage> notify = new AMessage(kWhatTimeSyncerNotify, id());
+            mTimeSyncer = new TimeSyncer(mNetSession, notify);
+            looper()->registerHandler(mTimeSyncer);
+
+            notify = new AMessage(kWhatNetNotify, id());
+
+            int32_t port;
+            CHECK(msg->findInt32("port", &port));
+
+            struct in_addr ifaceAddr;
+            ifaceAddr.s_addr = INADDR_ANY;
+
+            CHECK_EQ((status_t)OK,
+                     mNetSession->createTCPDatagramSession(
+                         ifaceAddr,
+                         port,
+                         notify,
+                         &mServerSessionID));
+            break;
+        }
+
+        case kWhatConnect:
+        {
+            sp<AMessage> notify = new AMessage(kWhatTimeSyncerNotify, id());
+            mTimeSyncer = new TimeSyncer(mNetSession, notify);
+            looper()->registerHandler(mTimeSyncer);
+            mTimeSyncer->startServer(kTimeSyncerPort);
+
+            AString host;
+            CHECK(msg->findString("host", &host));
+
+            int32_t port;
+            CHECK(msg->findInt32("port", &port));
+
+            notify = new AMessage(kWhatNetNotify, id());
+
+            CHECK_EQ((status_t)OK,
+                     mNetSession->createTCPDatagramSession(
+                         0 /* localPort */,
+                         host.c_str(),
+                         port,
+                         notify,
+                         &mSessionID));
+            break;
+        }
+
+        case kWhatNetNotify:
+        {
+            int32_t reason;
+            CHECK(msg->findInt32("reason", &reason));
+
+            switch (reason) {
+                case ANetworkSession::kWhatConnected:
+                {
+                    ALOGI("kWhatConnected");
+
+                    (new AMessage(kWhatSendMore, id()))->post();
+                    break;
+                }
+
+                case ANetworkSession::kWhatClientConnected:
+                {
+                    ALOGI("kWhatClientConnected");
+
+                    CHECK_EQ(mSessionID, 0);
+                    CHECK(msg->findInt32("sessionID", &mSessionID));
+
+                    AString clientIP;
+                    CHECK(msg->findString("client-ip", &clientIP));
+
+                    mTimeSyncer->startClient(clientIP.c_str(), kTimeSyncerPort);
+                    break;
+                }
+
+                case ANetworkSession::kWhatDatagram:
+                {
+                    sp<ABuffer> packet;
+                    CHECK(msg->findBuffer("data", &packet));
+
+                    CHECK_EQ(packet->size(), 12u);
+
+                    int32_t counter = U32_AT(packet->data());
+                    int64_t timeUs = U64_AT(packet->data() + 4);
+
+                    if (mTimeOffsetValid) {
+                        timeUs -= mTimeOffsetUs;
+                        int64_t nowUs = ALooper::GetNowUs();
+                        int64_t delayMs = (nowUs - timeUs) / 1000ll;
+
+                        dumpDelay(counter, delayMs);
+                    } else {
+                        ALOGI("received %d", counter);
+                    }
+                    break;
+                }
+
+                case ANetworkSession::kWhatError:
+                {
+                    ALOGE("kWhatError");
+                    break;
+                }
+
+                default:
+                    TRESPASS();
+            }
+            break;
+        }
+
+        case kWhatTimeSyncerNotify:
+        {
+            CHECK(msg->findInt64("offset", &mTimeOffsetUs));
+            mTimeOffsetValid = true;
+            break;
+        }
+
+        case kWhatSendMore:
+        {
+            uint8_t buffer[4 + 8];
+            buffer[0] = mCounter >> 24;
+            buffer[1] = (mCounter >> 16) & 0xff;
+            buffer[2] = (mCounter >> 8) & 0xff;
+            buffer[3] = mCounter & 0xff;
+
+            int64_t nowUs = ALooper::GetNowUs();
+
+            buffer[4] = nowUs >> 56;
+            buffer[5] = (nowUs >> 48) & 0xff;
+            buffer[6] = (nowUs >> 40) & 0xff;
+            buffer[7] = (nowUs >> 32) & 0xff;
+            buffer[8] = (nowUs >> 24) & 0xff;
+            buffer[9] = (nowUs >> 16) & 0xff;
+            buffer[10] = (nowUs >> 8) & 0xff;
+            buffer[11] = nowUs & 0xff;
+
+            ++mCounter;
+
+            CHECK_EQ((status_t)OK,
+                     mNetSession->sendRequest(
+                         mSessionID,
+                         buffer,
+                         sizeof(buffer),
+                         true /* timeValid */,
+                         nowUs));
+
+            msg->post(100000ll);
+            break;
+        }
+
+        case kWhatStop:
+        {
+            if (mSessionID != 0) {
+                mNetSession->destroySession(mSessionID);
+                mSessionID = 0;
+            }
+
+            if (mServerSessionID != 0) {
+                mNetSession->destroySession(mServerSessionID);
+                mServerSessionID = 0;
+            }
+
+            looper()->stop();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+}  // namespace android
+
+static void usage(const char *me) {
+    fprintf(stderr,
+            "usage: %s -c host:port\tconnect to remote host\n"
+            "               -l port   \tlisten\n",
+            me);
+}
+
+int main(int argc, char **argv) {
+    using namespace android;
+
+    // srand(time(NULL));
+
+    ProcessState::self()->startThreadPool();
+
+    DataSource::RegisterDefaultSniffers();
+
+    int32_t connectToPort = -1;
+    AString connectToHost;
+
+    int32_t listenOnPort = -1;
+
+    int res;
+    while ((res = getopt(argc, argv, "hc:l:")) >= 0) {
+        switch (res) {
+            case 'c':
+            {
+                const char *colonPos = strrchr(optarg, ':');
+
+                if (colonPos == NULL) {
+                    usage(argv[0]);
+                    exit(1);
+                }
+
+                connectToHost.setTo(optarg, colonPos - optarg);
+
+                char *end;
+                connectToPort = strtol(colonPos + 1, &end, 10);
+
+                if (*end != '\0' || end == colonPos + 1
+                        || connectToPort < 0 || connectToPort > 65535) {
+                    fprintf(stderr, "Illegal port specified.\n");
+                    exit(1);
+                }
+                break;
+            }
+
+            case 'l':
+            {
+                char *end;
+                listenOnPort = strtol(optarg, &end, 10);
+
+                if (*end != '\0' || end == optarg
+                        || listenOnPort < 0 || listenOnPort > 65535) {
+                    fprintf(stderr, "Illegal port specified.\n");
+                    exit(1);
+                }
+                break;
+            }
+
+            case '?':
+            case 'h':
+                usage(argv[0]);
+                exit(1);
+        }
+    }
+
+    if ((listenOnPort < 0 && connectToPort < 0)
+            || (listenOnPort >= 0 && connectToPort >= 0)) {
+        fprintf(stderr,
+                "You need to select either client or server mode.\n");
+        exit(1);
+    }
+
+    sp<ANetworkSession> netSession = new ANetworkSession;
+    netSession->start();
+
+    sp<ALooper> looper = new ALooper;
+
+    sp<TestHandler> handler = new TestHandler(netSession);
+    looper->registerHandler(handler);
+
+    if (listenOnPort >= 0) {
+        handler->listen(listenOnPort);
+    }
+
+    if (connectToPort >= 0) {
+        handler->connect(connectToHost.c_str(), connectToPort);
+    }
+
+    looper->start(true /* runOnCallingThread */);
+
+    return 0;
+}
diff --git a/media/libstagefright/wifi-display/rtp/RTPAssembler.cpp b/media/libstagefright/wifi-display/rtp/RTPAssembler.cpp
new file mode 100644
index 0000000..7a96081
--- /dev/null
+++ b/media/libstagefright/wifi-display/rtp/RTPAssembler.cpp
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "RTPAssembler"
+#include <utils/Log.h>
+
+#include "RTPAssembler.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+
+namespace android {
+
+RTPReceiver::Assembler::Assembler(const sp<AMessage> &notify)
+    : mNotify(notify) {
+}
+
+void RTPReceiver::Assembler::postAccessUnit(
+        const sp<ABuffer> &accessUnit, bool followsDiscontinuity) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", RTPReceiver::kWhatAccessUnit);
+    notify->setBuffer("accessUnit", accessUnit);
+    notify->setInt32("followsDiscontinuity", followsDiscontinuity);
+    notify->post();
+}
+////////////////////////////////////////////////////////////////////////////////
+
+RTPReceiver::TSAssembler::TSAssembler(const sp<AMessage> &notify)
+    : Assembler(notify),
+      mSawDiscontinuity(false) {
+}
+
+void RTPReceiver::TSAssembler::signalDiscontinuity() {
+    mSawDiscontinuity = true;
+}
+
+status_t RTPReceiver::TSAssembler::processPacket(const sp<ABuffer> &packet) {
+    int32_t rtpTime;
+    CHECK(packet->meta()->findInt32("rtp-time", &rtpTime));
+
+    packet->meta()->setInt64("timeUs", (rtpTime * 100ll) / 9);
+
+    postAccessUnit(packet, mSawDiscontinuity);
+
+    if (mSawDiscontinuity) {
+        mSawDiscontinuity = false;
+    }
+
+    return OK;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+RTPReceiver::H264Assembler::H264Assembler(const sp<AMessage> &notify)
+    : Assembler(notify),
+      mState(0),
+      mIndicator(0),
+      mNALType(0),
+      mAccessUnitRTPTime(0) {
+}
+
+void RTPReceiver::H264Assembler::signalDiscontinuity() {
+    reset();
+}
+
+status_t RTPReceiver::H264Assembler::processPacket(const sp<ABuffer> &packet) {
+    status_t err = internalProcessPacket(packet);
+
+    if (err != OK) {
+        reset();
+    }
+
+    return err;
+}
+
+status_t RTPReceiver::H264Assembler::internalProcessPacket(
+        const sp<ABuffer> &packet) {
+    const uint8_t *data = packet->data();
+    size_t size = packet->size();
+
+    switch (mState) {
+        case 0:
+        {
+            if (size < 1 || (data[0] & 0x80)) {
+                ALOGV("Malformed H264 RTP packet (empty or F-bit set)");
+                return ERROR_MALFORMED;
+            }
+
+            unsigned nalType = data[0] & 0x1f;
+            if (nalType >= 1 && nalType <= 23) {
+                addSingleNALUnit(packet);
+                ALOGV("added single NAL packet");
+            } else if (nalType == 28) {
+                // FU-A
+                unsigned indicator = data[0];
+                CHECK((indicator & 0x1f) == 28);
+
+                if (size < 2) {
+                    ALOGV("Malformed H264 FU-A packet (single byte)");
+                    return ERROR_MALFORMED;
+                }
+
+                if (!(data[1] & 0x80)) {
+                    ALOGV("Malformed H264 FU-A packet (no start bit)");
+                    return ERROR_MALFORMED;
+                }
+
+                mIndicator = data[0];
+                mNALType = data[1] & 0x1f;
+                uint32_t nri = (data[0] >> 5) & 3;
+
+                clearAccumulator();
+
+                uint8_t byte = mNALType | (nri << 5);
+                appendToAccumulator(&byte, 1);
+                appendToAccumulator(data + 2, size - 2);
+
+                int32_t rtpTime;
+                CHECK(packet->meta()->findInt32("rtp-time", &rtpTime));
+                mAccumulator->meta()->setInt32("rtp-time", rtpTime);
+
+                if (data[1] & 0x40) {
+                    // Huh? End bit also set on the first buffer.
+                    addSingleNALUnit(mAccumulator);
+                    clearAccumulator();
+
+                    ALOGV("added FU-A");
+                    break;
+                }
+
+                mState = 1;
+            } else if (nalType == 24) {
+                // STAP-A
+
+                status_t err = addSingleTimeAggregationPacket(packet);
+                if (err != OK) {
+                    return err;
+                }
+            } else {
+                ALOGV("Malformed H264 packet (unknown type %d)", nalType);
+                return ERROR_UNSUPPORTED;
+            }
+            break;
+        }
+
+        case 1:
+        {
+            if (size < 2
+                    || data[0] != mIndicator
+                    || (data[1] & 0x1f) != mNALType
+                    || (data[1] & 0x80)) {
+                ALOGV("Malformed H264 FU-A packet (indicator, "
+                      "type or start bit mismatch)");
+
+                return ERROR_MALFORMED;
+            }
+
+            appendToAccumulator(data + 2, size - 2);
+
+            if (data[1] & 0x40) {
+                addSingleNALUnit(mAccumulator);
+
+                clearAccumulator();
+                mState = 0;
+
+                ALOGV("added FU-A");
+            }
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+
+    int32_t marker;
+    CHECK(packet->meta()->findInt32("M", &marker));
+
+    if (marker) {
+        flushAccessUnit();
+    }
+
+    return OK;
+}
+
+void RTPReceiver::H264Assembler::reset() {
+    mNALUnits.clear();
+
+    clearAccumulator();
+    mState = 0;
+}
+
+void RTPReceiver::H264Assembler::clearAccumulator() {
+    if (mAccumulator != NULL) {
+        // XXX Too expensive.
+        mAccumulator.clear();
+    }
+}
+
+void RTPReceiver::H264Assembler::appendToAccumulator(
+        const void *data, size_t size) {
+    if (mAccumulator == NULL) {
+        mAccumulator = new ABuffer(size);
+        memcpy(mAccumulator->data(), data, size);
+        return;
+    }
+
+    if (mAccumulator->size() + size > mAccumulator->capacity()) {
+        sp<ABuffer> buf = new ABuffer(mAccumulator->size() + size);
+        memcpy(buf->data(), mAccumulator->data(), mAccumulator->size());
+        buf->setRange(0, mAccumulator->size());
+
+        int32_t rtpTime;
+        if (mAccumulator->meta()->findInt32("rtp-time", &rtpTime)) {
+            buf->meta()->setInt32("rtp-time", rtpTime);
+        }
+
+        mAccumulator = buf;
+    }
+
+    memcpy(mAccumulator->data() + mAccumulator->size(), data, size);
+    mAccumulator->setRange(0, mAccumulator->size() + size);
+}
+
+void RTPReceiver::H264Assembler::addSingleNALUnit(const sp<ABuffer> &packet) {
+    if (mNALUnits.empty()) {
+        int32_t rtpTime;
+        CHECK(packet->meta()->findInt32("rtp-time", &rtpTime));
+
+        mAccessUnitRTPTime = rtpTime;
+    }
+
+    mNALUnits.push_back(packet);
+}
+
+void RTPReceiver::H264Assembler::flushAccessUnit() {
+    if (mNALUnits.empty()) {
+        return;
+    }
+
+    size_t totalSize = 0;
+    for (List<sp<ABuffer> >::iterator it = mNALUnits.begin();
+            it != mNALUnits.end(); ++it) {
+        totalSize += 4 + (*it)->size();
+    }
+
+    sp<ABuffer> accessUnit = new ABuffer(totalSize);
+    size_t offset = 0;
+    for (List<sp<ABuffer> >::iterator it = mNALUnits.begin();
+            it != mNALUnits.end(); ++it) {
+        const sp<ABuffer> nalUnit = *it;
+
+        memcpy(accessUnit->data() + offset, "\x00\x00\x00\x01", 4);
+
+        memcpy(accessUnit->data() + offset + 4,
+               nalUnit->data(),
+               nalUnit->size());
+
+        offset += 4 + nalUnit->size();
+    }
+
+    mNALUnits.clear();
+
+    accessUnit->meta()->setInt64("timeUs", mAccessUnitRTPTime * 100ll / 9ll);
+    postAccessUnit(accessUnit, false /* followsDiscontinuity */);
+}
+
+status_t RTPReceiver::H264Assembler::addSingleTimeAggregationPacket(
+        const sp<ABuffer> &packet) {
+    const uint8_t *data = packet->data();
+    size_t size = packet->size();
+
+    if (size < 3) {
+        ALOGV("Malformed H264 STAP-A packet (too small)");
+        return ERROR_MALFORMED;
+    }
+
+    int32_t rtpTime;
+    CHECK(packet->meta()->findInt32("rtp-time", &rtpTime));
+
+    ++data;
+    --size;
+    while (size >= 2) {
+        size_t nalSize = (data[0] << 8) | data[1];
+
+        if (size < nalSize + 2) {
+            ALOGV("Malformed H264 STAP-A packet (incomplete NAL unit)");
+            return ERROR_MALFORMED;
+        }
+
+        sp<ABuffer> unit = new ABuffer(nalSize);
+        memcpy(unit->data(), &data[2], nalSize);
+
+        unit->meta()->setInt32("rtp-time", rtpTime);
+
+        addSingleNALUnit(unit);
+
+        data += 2 + nalSize;
+        size -= 2 + nalSize;
+    }
+
+    if (size != 0) {
+        ALOGV("Unexpected padding at end of STAP-A packet.");
+    }
+
+    ALOGV("added STAP-A");
+
+    return OK;
+}
+
+}  // namespace android
+
diff --git a/media/libstagefright/wifi-display/rtp/RTPAssembler.h b/media/libstagefright/wifi-display/rtp/RTPAssembler.h
new file mode 100644
index 0000000..e456d32
--- /dev/null
+++ b/media/libstagefright/wifi-display/rtp/RTPAssembler.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RTP_ASSEMBLER_H_
+
+#define RTP_ASSEMBLER_H_
+
+#include "RTPReceiver.h"
+
+namespace android {
+
+// A helper class to reassemble the payload of RTP packets into access
+// units depending on the packetization scheme.
+struct RTPReceiver::Assembler : public RefBase {
+    Assembler(const sp<AMessage> &notify);
+
+    virtual void signalDiscontinuity() = 0;
+    virtual status_t processPacket(const sp<ABuffer> &packet) = 0;
+
+protected:
+    virtual ~Assembler() {}
+
+    void postAccessUnit(
+            const sp<ABuffer> &accessUnit, bool followsDiscontinuity);
+
+private:
+    sp<AMessage> mNotify;
+
+    DISALLOW_EVIL_CONSTRUCTORS(Assembler);
+};
+
+struct RTPReceiver::TSAssembler : public RTPReceiver::Assembler {
+    TSAssembler(const sp<AMessage> &notify);
+
+    virtual void signalDiscontinuity();
+    virtual status_t processPacket(const sp<ABuffer> &packet);
+
+private:
+    bool mSawDiscontinuity;
+
+    DISALLOW_EVIL_CONSTRUCTORS(TSAssembler);
+};
+
+struct RTPReceiver::H264Assembler : public RTPReceiver::Assembler {
+    H264Assembler(const sp<AMessage> &notify);
+
+    virtual void signalDiscontinuity();
+    virtual status_t processPacket(const sp<ABuffer> &packet);
+
+private:
+    int32_t mState;
+
+    uint8_t mIndicator;
+    uint8_t mNALType;
+
+    sp<ABuffer> mAccumulator;
+
+    List<sp<ABuffer> > mNALUnits;
+    int32_t mAccessUnitRTPTime;
+
+    status_t internalProcessPacket(const sp<ABuffer> &packet);
+
+    void addSingleNALUnit(const sp<ABuffer> &packet);
+    status_t addSingleTimeAggregationPacket(const sp<ABuffer> &packet);
+
+    void flushAccessUnit();
+
+    void clearAccumulator();
+    void appendToAccumulator(const void *data, size_t size);
+
+    void reset();
+
+    DISALLOW_EVIL_CONSTRUCTORS(H264Assembler);
+};
+
+}  // namespace android
+
+#endif  // RTP_ASSEMBLER_H_
+
diff --git a/media/libstagefright/wifi-display/rtp/RTPReceiver.cpp b/media/libstagefright/wifi-display/rtp/RTPReceiver.cpp
new file mode 100644
index 0000000..2d22e79
--- /dev/null
+++ b/media/libstagefright/wifi-display/rtp/RTPReceiver.cpp
@@ -0,0 +1,1153 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "RTPReceiver"
+#include <utils/Log.h>
+
+#include "RTPAssembler.h"
+#include "RTPReceiver.h"
+
+#include "ANetworkSession.h"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/Utils.h>
+
+#define TRACK_PACKET_LOSS       0
+
+namespace android {
+
+////////////////////////////////////////////////////////////////////////////////
+
+struct RTPReceiver::Source : public AHandler {
+    Source(RTPReceiver *receiver, uint32_t ssrc);
+
+    void onPacketReceived(uint16_t seq, const sp<ABuffer> &buffer);
+
+    void addReportBlock(uint32_t ssrc, const sp<ABuffer> &buf);
+
+protected:
+    virtual ~Source();
+
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatRetransmit,
+        kWhatDeclareLost,
+    };
+
+    static const uint32_t kMinSequential = 2;
+    static const uint32_t kMaxDropout = 3000;
+    static const uint32_t kMaxMisorder = 100;
+    static const uint32_t kRTPSeqMod = 1u << 16;
+    static const int64_t kReportIntervalUs = 10000000ll;
+
+    RTPReceiver *mReceiver;
+    uint32_t mSSRC;
+    bool mFirst;
+    uint16_t mMaxSeq;
+    uint32_t mCycles;
+    uint32_t mBaseSeq;
+    uint32_t mReceived;
+    uint32_t mExpectedPrior;
+    uint32_t mReceivedPrior;
+
+    int64_t mFirstArrivalTimeUs;
+    int64_t mFirstRTPTimeUs;
+
+    // Ordered by extended seq number.
+    List<sp<ABuffer> > mPackets;
+
+    enum StatusBits {
+        STATUS_DECLARED_LOST            = 1,
+        STATUS_REQUESTED_RETRANSMISSION = 2,
+        STATUS_ARRIVED_LATE             = 4,
+    };
+#if TRACK_PACKET_LOSS
+    KeyedVector<int32_t, uint32_t> mLostPackets;
+#endif
+
+    void modifyPacketStatus(int32_t extSeqNo, uint32_t mask);
+
+    int32_t mAwaitingExtSeqNo;
+    bool mRequestedRetransmission;
+
+    int32_t mActivePacketType;
+    sp<Assembler> mActiveAssembler;
+
+    int64_t mNextReportTimeUs;
+
+    int32_t mNumDeclaredLost;
+    int32_t mNumDeclaredLostPrior;
+
+    int32_t mRetransmitGeneration;
+    int32_t mDeclareLostGeneration;
+    bool mDeclareLostTimerPending;
+
+    void queuePacket(const sp<ABuffer> &packet);
+    void dequeueMore();
+
+    sp<ABuffer> getNextPacket();
+    void resync();
+
+    void postRetransmitTimer(int64_t delayUs);
+    void postDeclareLostTimer(int64_t delayUs);
+    void cancelTimers();
+
+    DISALLOW_EVIL_CONSTRUCTORS(Source);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+RTPReceiver::Source::Source(RTPReceiver *receiver, uint32_t ssrc)
+    : mReceiver(receiver),
+      mSSRC(ssrc),
+      mFirst(true),
+      mMaxSeq(0),
+      mCycles(0),
+      mBaseSeq(0),
+      mReceived(0),
+      mExpectedPrior(0),
+      mReceivedPrior(0),
+      mFirstArrivalTimeUs(-1ll),
+      mFirstRTPTimeUs(-1ll),
+      mAwaitingExtSeqNo(-1),
+      mRequestedRetransmission(false),
+      mActivePacketType(-1),
+      mNextReportTimeUs(-1ll),
+      mNumDeclaredLost(0),
+      mNumDeclaredLostPrior(0),
+      mRetransmitGeneration(0),
+      mDeclareLostGeneration(0),
+      mDeclareLostTimerPending(false) {
+}
+
+RTPReceiver::Source::~Source() {
+}
+
+void RTPReceiver::Source::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatRetransmit:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mRetransmitGeneration) {
+                break;
+            }
+
+            mRequestedRetransmission = true;
+            mReceiver->requestRetransmission(mSSRC, mAwaitingExtSeqNo);
+
+            modifyPacketStatus(
+                    mAwaitingExtSeqNo, STATUS_REQUESTED_RETRANSMISSION);
+            break;
+        }
+
+        case kWhatDeclareLost:
+        {
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+
+            if (generation != mDeclareLostGeneration) {
+                break;
+            }
+
+            cancelTimers();
+
+            ALOGV("Lost packet extSeqNo %d %s",
+                  mAwaitingExtSeqNo,
+                  mRequestedRetransmission ? "*" : "");
+
+            mRequestedRetransmission = false;
+            if (mActiveAssembler != NULL) {
+                mActiveAssembler->signalDiscontinuity();
+            }
+
+            modifyPacketStatus(mAwaitingExtSeqNo, STATUS_DECLARED_LOST);
+
+            // resync();
+            ++mAwaitingExtSeqNo;
+            ++mNumDeclaredLost;
+
+            mReceiver->notifyPacketLost();
+
+            dequeueMore();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void RTPReceiver::Source::onPacketReceived(
+        uint16_t seq, const sp<ABuffer> &buffer) {
+    if (mFirst) {
+        buffer->setInt32Data(mCycles | seq);
+        queuePacket(buffer);
+
+        mFirst = false;
+        mBaseSeq = seq;
+        mMaxSeq = seq;
+        ++mReceived;
+        return;
+    }
+
+    uint16_t udelta = seq - mMaxSeq;
+
+    if (udelta < kMaxDropout) {
+        // In order, with permissible gap.
+
+        if (seq < mMaxSeq) {
+            // Sequence number wrapped - count another 64K cycle
+            mCycles += kRTPSeqMod;
+        }
+
+        mMaxSeq = seq;
+
+        ++mReceived;
+    } else if (udelta <= kRTPSeqMod - kMaxMisorder) {
+        // The sequence number made a very large jump
+        return;
+    } else {
+        // Duplicate or reordered packet.
+    }
+
+    buffer->setInt32Data(mCycles | seq);
+    queuePacket(buffer);
+}
+
+void RTPReceiver::Source::queuePacket(const sp<ABuffer> &packet) {
+    int32_t newExtendedSeqNo = packet->int32Data();
+
+    if (mFirstArrivalTimeUs < 0ll) {
+        mFirstArrivalTimeUs = ALooper::GetNowUs();
+
+        uint32_t rtpTime;
+        CHECK(packet->meta()->findInt32("rtp-time", (int32_t *)&rtpTime));
+
+        mFirstRTPTimeUs = (rtpTime * 100ll) / 9ll;
+    }
+
+    if (mAwaitingExtSeqNo >= 0 && newExtendedSeqNo < mAwaitingExtSeqNo) {
+        // We're no longer interested in these. They're old.
+        ALOGV("dropping stale extSeqNo %d", newExtendedSeqNo);
+
+        modifyPacketStatus(newExtendedSeqNo, STATUS_ARRIVED_LATE);
+        return;
+    }
+
+    if (mPackets.empty()) {
+        mPackets.push_back(packet);
+        dequeueMore();
+        return;
+    }
+
+    List<sp<ABuffer> >::iterator firstIt = mPackets.begin();
+    List<sp<ABuffer> >::iterator it = --mPackets.end();
+    for (;;) {
+        int32_t extendedSeqNo = (*it)->int32Data();
+
+        if (extendedSeqNo == newExtendedSeqNo) {
+            // Duplicate packet.
+            return;
+        }
+
+        if (extendedSeqNo < newExtendedSeqNo) {
+            // Insert new packet after the one at "it".
+            mPackets.insert(++it, packet);
+            break;
+        }
+
+        if (it == firstIt) {
+            // Insert new packet before the first existing one.
+            mPackets.insert(it, packet);
+            break;
+        }
+
+        --it;
+    }
+
+    dequeueMore();
+}
+
+void RTPReceiver::Source::dequeueMore() {
+    int64_t nowUs = ALooper::GetNowUs();
+    if (mNextReportTimeUs < 0ll || nowUs >= mNextReportTimeUs) {
+        if (mNextReportTimeUs >= 0ll) {
+            uint32_t expected = (mMaxSeq | mCycles) - mBaseSeq + 1;
+
+            uint32_t expectedInterval = expected - mExpectedPrior;
+            mExpectedPrior = expected;
+
+            uint32_t receivedInterval = mReceived - mReceivedPrior;
+            mReceivedPrior = mReceived;
+
+            int64_t lostInterval =
+                (int64_t)expectedInterval - (int64_t)receivedInterval;
+
+            int32_t declaredLostInterval =
+                mNumDeclaredLost - mNumDeclaredLostPrior;
+
+            mNumDeclaredLostPrior = mNumDeclaredLost;
+
+            if (declaredLostInterval > 0) {
+                ALOGI("lost %lld packets (%.2f %%), declared %d lost\n",
+                      lostInterval,
+                      100.0f * lostInterval / expectedInterval,
+                      declaredLostInterval);
+            }
+        }
+
+        mNextReportTimeUs = nowUs + kReportIntervalUs;
+
+#if TRACK_PACKET_LOSS
+        for (size_t i = 0; i < mLostPackets.size(); ++i) {
+            int32_t key = mLostPackets.keyAt(i);
+            uint32_t value = mLostPackets.valueAt(i);
+
+            AString status;
+            if (value & STATUS_REQUESTED_RETRANSMISSION) {
+                status.append("retrans ");
+            }
+            if (value & STATUS_ARRIVED_LATE) {
+                status.append("arrived-late ");
+            }
+            ALOGI("Packet %d declared lost %s", key, status.c_str());
+        }
+#endif
+    }
+
+    sp<ABuffer> packet;
+    while ((packet = getNextPacket()) != NULL) {
+        if (mDeclareLostTimerPending) {
+            cancelTimers();
+        }
+
+        CHECK_GE(mAwaitingExtSeqNo, 0);
+#if TRACK_PACKET_LOSS
+        mLostPackets.removeItem(mAwaitingExtSeqNo);
+#endif
+
+        int32_t packetType;
+        CHECK(packet->meta()->findInt32("PT", &packetType));
+
+        if (packetType != mActivePacketType) {
+            mActiveAssembler = mReceiver->makeAssembler(packetType);
+            mActivePacketType = packetType;
+        }
+
+        if (mActiveAssembler != NULL) {
+            status_t err = mActiveAssembler->processPacket(packet);
+            if (err != OK) {
+                ALOGV("assembler returned error %d", err);
+            }
+        }
+
+        ++mAwaitingExtSeqNo;
+    }
+
+    if (mDeclareLostTimerPending) {
+        return;
+    }
+
+    if (mPackets.empty()) {
+        return;
+    }
+
+    CHECK_GE(mAwaitingExtSeqNo, 0);
+
+    const sp<ABuffer> &firstPacket = *mPackets.begin();
+
+    uint32_t rtpTime;
+    CHECK(firstPacket->meta()->findInt32(
+                "rtp-time", (int32_t *)&rtpTime));
+
+
+    int64_t rtpUs = (rtpTime * 100ll) / 9ll;
+
+    int64_t maxArrivalTimeUs =
+        mFirstArrivalTimeUs + rtpUs - mFirstRTPTimeUs;
+
+    nowUs = ALooper::GetNowUs();
+
+    CHECK_LT(mAwaitingExtSeqNo, firstPacket->int32Data());
+
+    ALOGV("waiting for %d, comparing against %d, %lld us left",
+          mAwaitingExtSeqNo,
+          firstPacket->int32Data(),
+          maxArrivalTimeUs - nowUs);
+
+    postDeclareLostTimer(maxArrivalTimeUs + kPacketLostAfterUs);
+
+    if (kRequestRetransmissionAfterUs > 0ll) {
+        postRetransmitTimer(
+                maxArrivalTimeUs + kRequestRetransmissionAfterUs);
+    }
+}
+
+sp<ABuffer> RTPReceiver::Source::getNextPacket() {
+    if (mPackets.empty()) {
+        return NULL;
+    }
+
+    int32_t extSeqNo = (*mPackets.begin())->int32Data();
+
+    if (mAwaitingExtSeqNo < 0) {
+        mAwaitingExtSeqNo = extSeqNo;
+    } else if (extSeqNo != mAwaitingExtSeqNo) {
+        return NULL;
+    }
+
+    sp<ABuffer> packet = *mPackets.begin();
+    mPackets.erase(mPackets.begin());
+
+    return packet;
+}
+
+void RTPReceiver::Source::resync() {
+    mAwaitingExtSeqNo = -1;
+}
+
+void RTPReceiver::Source::addReportBlock(
+        uint32_t ssrc, const sp<ABuffer> &buf) {
+    uint32_t extMaxSeq = mMaxSeq | mCycles;
+    uint32_t expected = extMaxSeq - mBaseSeq + 1;
+
+    int64_t lost = (int64_t)expected - (int64_t)mReceived;
+    if (lost > 0x7fffff) {
+        lost = 0x7fffff;
+    } else if (lost < -0x800000) {
+        lost = -0x800000;
+    }
+
+    uint32_t expectedInterval = expected - mExpectedPrior;
+    mExpectedPrior = expected;
+
+    uint32_t receivedInterval = mReceived - mReceivedPrior;
+    mReceivedPrior = mReceived;
+
+    int64_t lostInterval = expectedInterval - receivedInterval;
+
+    uint8_t fractionLost;
+    if (expectedInterval == 0 || lostInterval <= 0) {
+        fractionLost = 0;
+    } else {
+        fractionLost = (lostInterval << 8) / expectedInterval;
+    }
+
+    uint8_t *ptr = buf->data() + buf->size();
+
+    ptr[0] = ssrc >> 24;
+    ptr[1] = (ssrc >> 16) & 0xff;
+    ptr[2] = (ssrc >> 8) & 0xff;
+    ptr[3] = ssrc & 0xff;
+
+    ptr[4] = fractionLost;
+
+    ptr[5] = (lost >> 16) & 0xff;
+    ptr[6] = (lost >> 8) & 0xff;
+    ptr[7] = lost & 0xff;
+
+    ptr[8] = extMaxSeq >> 24;
+    ptr[9] = (extMaxSeq >> 16) & 0xff;
+    ptr[10] = (extMaxSeq >> 8) & 0xff;
+    ptr[11] = extMaxSeq & 0xff;
+
+    // XXX TODO:
+
+    ptr[12] = 0x00;  // interarrival jitter
+    ptr[13] = 0x00;
+    ptr[14] = 0x00;
+    ptr[15] = 0x00;
+
+    ptr[16] = 0x00;  // last SR
+    ptr[17] = 0x00;
+    ptr[18] = 0x00;
+    ptr[19] = 0x00;
+
+    ptr[20] = 0x00;  // delay since last SR
+    ptr[21] = 0x00;
+    ptr[22] = 0x00;
+    ptr[23] = 0x00;
+
+    buf->setRange(buf->offset(), buf->size() + 24);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+RTPReceiver::RTPReceiver(
+        const sp<ANetworkSession> &netSession,
+        const sp<AMessage> &notify,
+        uint32_t flags)
+    : mNetSession(netSession),
+      mNotify(notify),
+      mFlags(flags),
+      mRTPMode(TRANSPORT_UNDEFINED),
+      mRTCPMode(TRANSPORT_UNDEFINED),
+      mRTPSessionID(0),
+      mRTCPSessionID(0),
+      mRTPConnected(false),
+      mRTCPConnected(false),
+      mRTPClientSessionID(0),
+      mRTCPClientSessionID(0) {
+}
+
+RTPReceiver::~RTPReceiver() {
+    if (mRTCPClientSessionID != 0) {
+        mNetSession->destroySession(mRTCPClientSessionID);
+        mRTCPClientSessionID = 0;
+    }
+
+    if (mRTPClientSessionID != 0) {
+        mNetSession->destroySession(mRTPClientSessionID);
+        mRTPClientSessionID = 0;
+    }
+
+    if (mRTCPSessionID != 0) {
+        mNetSession->destroySession(mRTCPSessionID);
+        mRTCPSessionID = 0;
+    }
+
+    if (mRTPSessionID != 0) {
+        mNetSession->destroySession(mRTPSessionID);
+        mRTPSessionID = 0;
+    }
+}
+
+status_t RTPReceiver::initAsync(
+        TransportMode rtpMode,
+        TransportMode rtcpMode,
+        int32_t *outLocalRTPPort) {
+    if (mRTPMode != TRANSPORT_UNDEFINED
+            || rtpMode == TRANSPORT_UNDEFINED
+            || rtpMode == TRANSPORT_NONE
+            || rtcpMode == TRANSPORT_UNDEFINED) {
+        return INVALID_OPERATION;
+    }
+
+    CHECK_NE(rtpMode, TRANSPORT_TCP_INTERLEAVED);
+    CHECK_NE(rtcpMode, TRANSPORT_TCP_INTERLEAVED);
+
+    sp<AMessage> rtpNotify = new AMessage(kWhatRTPNotify, id());
+
+    sp<AMessage> rtcpNotify;
+    if (rtcpMode != TRANSPORT_NONE) {
+        rtcpNotify = new AMessage(kWhatRTCPNotify, id());
+    }
+
+    CHECK_EQ(mRTPSessionID, 0);
+    CHECK_EQ(mRTCPSessionID, 0);
+
+    int32_t localRTPPort;
+
+    struct in_addr ifaceAddr;
+    ifaceAddr.s_addr = INADDR_ANY;
+
+    for (;;) {
+        localRTPPort = PickRandomRTPPort();
+
+        status_t err;
+        if (rtpMode == TRANSPORT_UDP) {
+            err = mNetSession->createUDPSession(
+                    localRTPPort,
+                    rtpNotify,
+                    &mRTPSessionID);
+        } else {
+            CHECK_EQ(rtpMode, TRANSPORT_TCP);
+            err = mNetSession->createTCPDatagramSession(
+                    ifaceAddr,
+                    localRTPPort,
+                    rtpNotify,
+                    &mRTPSessionID);
+        }
+
+        if (err != OK) {
+            continue;
+        }
+
+        if (rtcpMode == TRANSPORT_NONE) {
+            break;
+        } else if (rtcpMode == TRANSPORT_UDP) {
+            err = mNetSession->createUDPSession(
+                    localRTPPort + 1,
+                    rtcpNotify,
+                    &mRTCPSessionID);
+        } else {
+            CHECK_EQ(rtcpMode, TRANSPORT_TCP);
+            err = mNetSession->createTCPDatagramSession(
+                    ifaceAddr,
+                    localRTPPort + 1,
+                    rtcpNotify,
+                    &mRTCPSessionID);
+        }
+
+        if (err == OK) {
+            break;
+        }
+
+        mNetSession->destroySession(mRTPSessionID);
+        mRTPSessionID = 0;
+    }
+
+    mRTPMode = rtpMode;
+    mRTCPMode = rtcpMode;
+    *outLocalRTPPort = localRTPPort;
+
+    return OK;
+}
+
+status_t RTPReceiver::connect(
+        const char *remoteHost, int32_t remoteRTPPort, int32_t remoteRTCPPort) {
+    status_t err;
+
+    if (mRTPMode == TRANSPORT_UDP) {
+        CHECK(!mRTPConnected);
+
+        err = mNetSession->connectUDPSession(
+                mRTPSessionID, remoteHost, remoteRTPPort);
+
+        if (err != OK) {
+            notifyInitDone(err);
+            return err;
+        }
+
+        ALOGI("connectUDPSession RTP successful.");
+
+        mRTPConnected = true;
+    }
+
+    if (mRTCPMode == TRANSPORT_UDP) {
+        CHECK(!mRTCPConnected);
+
+        err = mNetSession->connectUDPSession(
+                mRTCPSessionID, remoteHost, remoteRTCPPort);
+
+        if (err != OK) {
+            notifyInitDone(err);
+            return err;
+        }
+
+        scheduleSendRR();
+
+        ALOGI("connectUDPSession RTCP successful.");
+
+        mRTCPConnected = true;
+    }
+
+    if (mRTPConnected
+            && (mRTCPConnected || mRTCPMode == TRANSPORT_NONE)) {
+        notifyInitDone(OK);
+    }
+
+    return OK;
+}
+
+status_t RTPReceiver::informSender(const sp<AMessage> &params) {
+    if (!mRTCPConnected) {
+        return INVALID_OPERATION;
+    }
+
+    int64_t avgLatencyUs;
+    CHECK(params->findInt64("avgLatencyUs", &avgLatencyUs));
+
+    int64_t maxLatencyUs;
+    CHECK(params->findInt64("maxLatencyUs", &maxLatencyUs));
+
+    sp<ABuffer> buf = new ABuffer(28);
+
+    uint8_t *ptr = buf->data();
+    ptr[0] = 0x80 | 0;
+    ptr[1] = 204;  // APP
+    ptr[2] = 0;
+
+    CHECK((buf->size() % 4) == 0u);
+    ptr[3] = (buf->size() / 4) - 1;
+
+    ptr[4] = kSourceID >> 24;  // SSRC
+    ptr[5] = (kSourceID >> 16) & 0xff;
+    ptr[6] = (kSourceID >> 8) & 0xff;
+    ptr[7] = kSourceID & 0xff;
+    ptr[8] = 'l';
+    ptr[9] = 'a';
+    ptr[10] = 't';
+    ptr[11] = 'e';
+
+    ptr[12] = avgLatencyUs >> 56;
+    ptr[13] = (avgLatencyUs >> 48) & 0xff;
+    ptr[14] = (avgLatencyUs >> 40) & 0xff;
+    ptr[15] = (avgLatencyUs >> 32) & 0xff;
+    ptr[16] = (avgLatencyUs >> 24) & 0xff;
+    ptr[17] = (avgLatencyUs >> 16) & 0xff;
+    ptr[18] = (avgLatencyUs >> 8) & 0xff;
+    ptr[19] = avgLatencyUs & 0xff;
+
+    ptr[20] = maxLatencyUs >> 56;
+    ptr[21] = (maxLatencyUs >> 48) & 0xff;
+    ptr[22] = (maxLatencyUs >> 40) & 0xff;
+    ptr[23] = (maxLatencyUs >> 32) & 0xff;
+    ptr[24] = (maxLatencyUs >> 24) & 0xff;
+    ptr[25] = (maxLatencyUs >> 16) & 0xff;
+    ptr[26] = (maxLatencyUs >> 8) & 0xff;
+    ptr[27] = maxLatencyUs & 0xff;
+
+    mNetSession->sendRequest(mRTCPSessionID, buf->data(), buf->size());
+
+    return OK;
+}
+
+void RTPReceiver::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatRTPNotify:
+        case kWhatRTCPNotify:
+            onNetNotify(msg->what() == kWhatRTPNotify, msg);
+            break;
+
+        case kWhatSendRR:
+        {
+            onSendRR();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void RTPReceiver::onNetNotify(bool isRTP, const sp<AMessage> &msg) {
+    int32_t reason;
+    CHECK(msg->findInt32("reason", &reason));
+
+    switch (reason) {
+        case ANetworkSession::kWhatError:
+        {
+            int32_t sessionID;
+            CHECK(msg->findInt32("sessionID", &sessionID));
+
+            int32_t err;
+            CHECK(msg->findInt32("err", &err));
+
+            int32_t errorOccuredDuringSend;
+            CHECK(msg->findInt32("send", &errorOccuredDuringSend));
+
+            AString detail;
+            CHECK(msg->findString("detail", &detail));
+
+            ALOGE("An error occurred during %s in session %d "
+                  "(%d, '%s' (%s)).",
+                  errorOccuredDuringSend ? "send" : "receive",
+                  sessionID,
+                  err,
+                  detail.c_str(),
+                  strerror(-err));
+
+            mNetSession->destroySession(sessionID);
+
+            if (sessionID == mRTPSessionID) {
+                mRTPSessionID = 0;
+            } else if (sessionID == mRTCPSessionID) {
+                mRTCPSessionID = 0;
+            } else if (sessionID == mRTPClientSessionID) {
+                mRTPClientSessionID = 0;
+            } else if (sessionID == mRTCPClientSessionID) {
+                mRTCPClientSessionID = 0;
+            }
+
+            if (!mRTPConnected
+                    || (mRTCPMode != TRANSPORT_NONE && !mRTCPConnected)) {
+                notifyInitDone(err);
+                break;
+            }
+
+            notifyError(err);
+            break;
+        }
+
+        case ANetworkSession::kWhatDatagram:
+        {
+            sp<ABuffer> data;
+            CHECK(msg->findBuffer("data", &data));
+
+            if (isRTP) {
+                if (mFlags & FLAG_AUTO_CONNECT) {
+                    AString fromAddr;
+                    CHECK(msg->findString("fromAddr", &fromAddr));
+
+                    int32_t fromPort;
+                    CHECK(msg->findInt32("fromPort", &fromPort));
+
+                    CHECK_EQ((status_t)OK,
+                             connect(
+                                 fromAddr.c_str(), fromPort, fromPort + 1));
+
+                    mFlags &= ~FLAG_AUTO_CONNECT;
+                }
+
+                onRTPData(data);
+            } else {
+                onRTCPData(data);
+            }
+            break;
+        }
+
+        case ANetworkSession::kWhatClientConnected:
+        {
+            int32_t sessionID;
+            CHECK(msg->findInt32("sessionID", &sessionID));
+
+            if (isRTP) {
+                CHECK_EQ(mRTPMode, TRANSPORT_TCP);
+
+                if (mRTPClientSessionID != 0) {
+                    // We only allow a single client connection.
+                    mNetSession->destroySession(sessionID);
+                    sessionID = 0;
+                    break;
+                }
+
+                mRTPClientSessionID = sessionID;
+                mRTPConnected = true;
+            } else {
+                CHECK_EQ(mRTCPMode, TRANSPORT_TCP);
+
+                if (mRTCPClientSessionID != 0) {
+                    // We only allow a single client connection.
+                    mNetSession->destroySession(sessionID);
+                    sessionID = 0;
+                    break;
+                }
+
+                mRTCPClientSessionID = sessionID;
+                mRTCPConnected = true;
+            }
+
+            if (mRTPConnected
+                    && (mRTCPConnected || mRTCPMode == TRANSPORT_NONE)) {
+                notifyInitDone(OK);
+            }
+            break;
+        }
+    }
+}
+
+void RTPReceiver::notifyInitDone(status_t err) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatInitDone);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+void RTPReceiver::notifyError(status_t err) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+void RTPReceiver::notifyPacketLost() {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatPacketLost);
+    notify->post();
+}
+
+status_t RTPReceiver::onRTPData(const sp<ABuffer> &buffer) {
+    size_t size = buffer->size();
+    if (size < 12) {
+        // Too short to be a valid RTP header.
+        return ERROR_MALFORMED;
+    }
+
+    const uint8_t *data = buffer->data();
+
+    if ((data[0] >> 6) != 2) {
+        // Unsupported version.
+        return ERROR_UNSUPPORTED;
+    }
+
+    if (data[0] & 0x20) {
+        // Padding present.
+
+        size_t paddingLength = data[size - 1];
+
+        if (paddingLength + 12 > size) {
+            // If we removed this much padding we'd end up with something
+            // that's too short to be a valid RTP header.
+            return ERROR_MALFORMED;
+        }
+
+        size -= paddingLength;
+    }
+
+    int numCSRCs = data[0] & 0x0f;
+
+    size_t payloadOffset = 12 + 4 * numCSRCs;
+
+    if (size < payloadOffset) {
+        // Not enough data to fit the basic header and all the CSRC entries.
+        return ERROR_MALFORMED;
+    }
+
+    if (data[0] & 0x10) {
+        // Header eXtension present.
+
+        if (size < payloadOffset + 4) {
+            // Not enough data to fit the basic header, all CSRC entries
+            // and the first 4 bytes of the extension header.
+
+            return ERROR_MALFORMED;
+        }
+
+        const uint8_t *extensionData = &data[payloadOffset];
+
+        size_t extensionLength =
+            4 * (extensionData[2] << 8 | extensionData[3]);
+
+        if (size < payloadOffset + 4 + extensionLength) {
+            return ERROR_MALFORMED;
+        }
+
+        payloadOffset += 4 + extensionLength;
+    }
+
+    uint32_t srcId = U32_AT(&data[8]);
+    uint32_t rtpTime = U32_AT(&data[4]);
+    uint16_t seqNo = U16_AT(&data[2]);
+
+    sp<AMessage> meta = buffer->meta();
+    meta->setInt32("ssrc", srcId);
+    meta->setInt32("rtp-time", rtpTime);
+    meta->setInt32("PT", data[1] & 0x7f);
+    meta->setInt32("M", data[1] >> 7);
+
+    buffer->setRange(payloadOffset, size - payloadOffset);
+
+    ssize_t index = mSources.indexOfKey(srcId);
+    sp<Source> source;
+    if (index < 0) {
+        source = new Source(this, srcId);
+        looper()->registerHandler(source);
+
+        mSources.add(srcId, source);
+    } else {
+        source = mSources.valueAt(index);
+    }
+
+    source->onPacketReceived(seqNo, buffer);
+
+    return OK;
+}
+
+status_t RTPReceiver::onRTCPData(const sp<ABuffer> &data) {
+    ALOGI("onRTCPData");
+    return OK;
+}
+
+void RTPReceiver::addSDES(const sp<ABuffer> &buffer) {
+    uint8_t *data = buffer->data() + buffer->size();
+    data[0] = 0x80 | 1;
+    data[1] = 202;  // SDES
+    data[4] = kSourceID >> 24;  // SSRC
+    data[5] = (kSourceID >> 16) & 0xff;
+    data[6] = (kSourceID >> 8) & 0xff;
+    data[7] = kSourceID & 0xff;
+
+    size_t offset = 8;
+
+    data[offset++] = 1;  // CNAME
+
+    AString cname = "stagefright@somewhere";
+    data[offset++] = cname.size();
+
+    memcpy(&data[offset], cname.c_str(), cname.size());
+    offset += cname.size();
+
+    data[offset++] = 6;  // TOOL
+
+    AString tool = "stagefright/1.0";
+    data[offset++] = tool.size();
+
+    memcpy(&data[offset], tool.c_str(), tool.size());
+    offset += tool.size();
+
+    data[offset++] = 0;
+
+    if ((offset % 4) > 0) {
+        size_t count = 4 - (offset % 4);
+        switch (count) {
+            case 3:
+                data[offset++] = 0;
+            case 2:
+                data[offset++] = 0;
+            case 1:
+                data[offset++] = 0;
+        }
+    }
+
+    size_t numWords = (offset / 4) - 1;
+    data[2] = numWords >> 8;
+    data[3] = numWords & 0xff;
+
+    buffer->setRange(buffer->offset(), buffer->size() + offset);
+}
+
+void RTPReceiver::scheduleSendRR() {
+    (new AMessage(kWhatSendRR, id()))->post(5000000ll);
+}
+
+void RTPReceiver::onSendRR() {
+    sp<ABuffer> buf = new ABuffer(kMaxUDPPacketSize);
+    buf->setRange(0, 0);
+
+    uint8_t *ptr = buf->data();
+    ptr[0] = 0x80 | 0;
+    ptr[1] = 201;  // RR
+    ptr[2] = 0;
+    ptr[3] = 1;
+    ptr[4] = kSourceID >> 24;  // SSRC
+    ptr[5] = (kSourceID >> 16) & 0xff;
+    ptr[6] = (kSourceID >> 8) & 0xff;
+    ptr[7] = kSourceID & 0xff;
+
+    buf->setRange(0, 8);
+
+    size_t numReportBlocks = 0;
+    for (size_t i = 0; i < mSources.size(); ++i) {
+        uint32_t ssrc = mSources.keyAt(i);
+        sp<Source> source = mSources.valueAt(i);
+
+        if (numReportBlocks >= 31 || buf->size() + 24 > buf->capacity()) {
+            // Cannot fit another report block.
+            break;
+        }
+
+        source->addReportBlock(ssrc, buf);
+        ++numReportBlocks;
+    }
+
+    ptr[0] |= numReportBlocks;  // 5 bit
+
+    size_t sizeInWordsMinus1 = 1 + 6 * numReportBlocks;
+    ptr[2] = sizeInWordsMinus1 >> 8;
+    ptr[3] = sizeInWordsMinus1 & 0xff;
+
+    buf->setRange(0, (sizeInWordsMinus1 + 1) * 4);
+
+    addSDES(buf);
+
+    mNetSession->sendRequest(mRTCPSessionID, buf->data(), buf->size());
+
+    scheduleSendRR();
+}
+
+status_t RTPReceiver::registerPacketType(
+        uint8_t packetType, PacketizationMode mode) {
+    mPacketTypes.add(packetType, mode);
+
+    return OK;
+}
+
+sp<RTPReceiver::Assembler> RTPReceiver::makeAssembler(uint8_t packetType) {
+    ssize_t index = mPacketTypes.indexOfKey(packetType);
+    if (index < 0) {
+        return NULL;
+    }
+
+    PacketizationMode mode = mPacketTypes.valueAt(index);
+
+    switch (mode) {
+        case PACKETIZATION_NONE:
+        case PACKETIZATION_TRANSPORT_STREAM:
+            return new TSAssembler(mNotify);
+
+        case PACKETIZATION_H264:
+            return new H264Assembler(mNotify);
+
+        default:
+            return NULL;
+    }
+}
+
+void RTPReceiver::requestRetransmission(uint32_t senderSSRC, int32_t extSeqNo) {
+    int32_t blp = 0;
+
+    sp<ABuffer> buf = new ABuffer(16);
+    buf->setRange(0, 0);
+
+    uint8_t *ptr = buf->data();
+    ptr[0] = 0x80 | 1;  // generic NACK
+    ptr[1] = 205;  // TSFB
+    ptr[2] = 0;
+    ptr[3] = 3;
+    ptr[4] = (kSourceID >> 24) & 0xff;
+    ptr[5] = (kSourceID >> 16) & 0xff;
+    ptr[6] = (kSourceID >> 8) & 0xff;
+    ptr[7] = (kSourceID & 0xff);
+    ptr[8] = (senderSSRC >> 24) & 0xff;
+    ptr[9] = (senderSSRC >> 16) & 0xff;
+    ptr[10] = (senderSSRC >> 8) & 0xff;
+    ptr[11] = (senderSSRC & 0xff);
+    ptr[12] = (extSeqNo >> 8) & 0xff;
+    ptr[13] = (extSeqNo & 0xff);
+    ptr[14] = (blp >> 8) & 0xff;
+    ptr[15] = (blp & 0xff);
+
+    buf->setRange(0, 16);
+
+    mNetSession->sendRequest(mRTCPSessionID, buf->data(), buf->size());
+}
+
+void RTPReceiver::Source::modifyPacketStatus(int32_t extSeqNo, uint32_t mask) {
+#if TRACK_PACKET_LOSS
+    ssize_t index = mLostPackets.indexOfKey(extSeqNo);
+    if (index < 0) {
+        mLostPackets.add(extSeqNo, mask);
+    } else {
+        mLostPackets.editValueAt(index) |= mask;
+    }
+#endif
+}
+
+void RTPReceiver::Source::postRetransmitTimer(int64_t timeUs) {
+    int64_t delayUs = timeUs - ALooper::GetNowUs();
+    sp<AMessage> msg = new AMessage(kWhatRetransmit, id());
+    msg->setInt32("generation", mRetransmitGeneration);
+    msg->post(delayUs);
+}
+
+void RTPReceiver::Source::postDeclareLostTimer(int64_t timeUs) {
+    CHECK(!mDeclareLostTimerPending);
+    mDeclareLostTimerPending = true;
+
+    int64_t delayUs = timeUs - ALooper::GetNowUs();
+    sp<AMessage> msg = new AMessage(kWhatDeclareLost, id());
+    msg->setInt32("generation", mDeclareLostGeneration);
+    msg->post(delayUs);
+}
+
+void RTPReceiver::Source::cancelTimers() {
+    ++mRetransmitGeneration;
+    ++mDeclareLostGeneration;
+    mDeclareLostTimerPending = false;
+}
+
+}  // namespace android
+
diff --git a/media/libstagefright/wifi-display/rtp/RTPReceiver.h b/media/libstagefright/wifi-display/rtp/RTPReceiver.h
new file mode 100644
index 0000000..240ab2e
--- /dev/null
+++ b/media/libstagefright/wifi-display/rtp/RTPReceiver.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RTP_RECEIVER_H_
+
+#define RTP_RECEIVER_H_
+
+#include "RTPBase.h"
+
+#include <media/stagefright/foundation/AHandler.h>
+
+namespace android {
+
+struct ABuffer;
+struct ANetworkSession;
+
+// An object of this class facilitates receiving of media data on an RTP
+// channel. The channel is established over a UDP or TCP connection depending
+// on which "TransportMode" was chosen. In addition different RTP packetization
+// schemes are supported such as "Transport Stream Packets over RTP",
+// or "AVC/H.264 encapsulation as specified in RFC 3984 (non-interleaved mode)"
+struct RTPReceiver : public RTPBase, public AHandler {
+    enum {
+        kWhatInitDone,
+        kWhatError,
+        kWhatAccessUnit,
+        kWhatPacketLost,
+    };
+
+    enum Flags {
+        FLAG_AUTO_CONNECT = 1,
+    };
+    RTPReceiver(
+            const sp<ANetworkSession> &netSession,
+            const sp<AMessage> &notify,
+            uint32_t flags = 0);
+
+    status_t registerPacketType(
+            uint8_t packetType, PacketizationMode mode);
+
+    status_t initAsync(
+            TransportMode rtpMode,
+            TransportMode rtcpMode,
+            int32_t *outLocalRTPPort);
+
+    status_t connect(
+            const char *remoteHost,
+            int32_t remoteRTPPort,
+            int32_t remoteRTCPPort);
+
+    status_t informSender(const sp<AMessage> &params);
+
+protected:
+    virtual ~RTPReceiver();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatRTPNotify,
+        kWhatRTCPNotify,
+        kWhatSendRR,
+    };
+
+    enum {
+        kSourceID                       = 0xdeadbeef,
+        kPacketLostAfterUs              = 100000,
+        kRequestRetransmissionAfterUs   = -1,
+    };
+
+    struct Assembler;
+    struct H264Assembler;
+    struct Source;
+    struct TSAssembler;
+
+    sp<ANetworkSession> mNetSession;
+    sp<AMessage> mNotify;
+    uint32_t mFlags;
+    TransportMode mRTPMode;
+    TransportMode mRTCPMode;
+    int32_t mRTPSessionID;
+    int32_t mRTCPSessionID;
+    bool mRTPConnected;
+    bool mRTCPConnected;
+
+    int32_t mRTPClientSessionID;  // in TRANSPORT_TCP mode.
+    int32_t mRTCPClientSessionID;  // in TRANSPORT_TCP mode.
+
+    KeyedVector<uint8_t, PacketizationMode> mPacketTypes;
+    KeyedVector<uint32_t, sp<Source> > mSources;
+
+    void onNetNotify(bool isRTP, const sp<AMessage> &msg);
+    status_t onRTPData(const sp<ABuffer> &data);
+    status_t onRTCPData(const sp<ABuffer> &data);
+    void onSendRR();
+
+    void scheduleSendRR();
+    void addSDES(const sp<ABuffer> &buffer);
+
+    void notifyInitDone(status_t err);
+    void notifyError(status_t err);
+    void notifyPacketLost();
+
+    sp<Assembler> makeAssembler(uint8_t packetType);
+
+    void requestRetransmission(uint32_t senderSSRC, int32_t extSeqNo);
+
+    DISALLOW_EVIL_CONSTRUCTORS(RTPReceiver);
+};
+
+}  // namespace android
+
+#endif  // RTP_RECEIVER_H_
diff --git a/media/libstagefright/wifi-display/rtp/RTPSender.cpp b/media/libstagefright/wifi-display/rtp/RTPSender.cpp
index 095fd97..6bbe650 100644
--- a/media/libstagefright/wifi-display/rtp/RTPSender.cpp
+++ b/media/libstagefright/wifi-display/rtp/RTPSender.cpp
@@ -767,6 +767,17 @@
 }
 
 status_t RTPSender::parseAPP(const uint8_t *data, size_t size) {
+    if (size >= 28 && !memcmp("late", &data[8], 4)) {
+        int64_t avgLatencyUs = (int64_t)U64_AT(&data[12]);
+        int64_t maxLatencyUs = (int64_t)U64_AT(&data[20]);
+
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatInformSender);
+        notify->setInt64("avgLatencyUs", avgLatencyUs);
+        notify->setInt64("maxLatencyUs", maxLatencyUs);
+        notify->post();
+    }
+
     return OK;
 }
 
diff --git a/media/libstagefright/wifi-display/rtp/RTPSender.h b/media/libstagefright/wifi-display/rtp/RTPSender.h
index 7dc138a..fefcab7 100644
--- a/media/libstagefright/wifi-display/rtp/RTPSender.h
+++ b/media/libstagefright/wifi-display/rtp/RTPSender.h
@@ -37,6 +37,7 @@
         kWhatInitDone,
         kWhatError,
         kWhatNetworkStall,
+        kWhatInformSender,
     };
     RTPSender(
             const sp<ANetworkSession> &netSession,
diff --git a/media/libstagefright/wifi-display/rtptest.cpp b/media/libstagefright/wifi-display/rtptest.cpp
new file mode 100644
index 0000000..764a38b
--- /dev/null
+++ b/media/libstagefright/wifi-display/rtptest.cpp
@@ -0,0 +1,565 @@
+/*
+ * Copyright 2013, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NEBUG 0
+#define LOG_TAG "rtptest"
+#include <utils/Log.h>
+
+#include "ANetworkSession.h"
+#include "rtp/RTPSender.h"
+#include "rtp/RTPReceiver.h"
+#include "TimeSyncer.h"
+
+#include <binder/ProcessState.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/NuMediaExtractor.h>
+#include <media/stagefright/Utils.h>
+
+#define MEDIA_FILENAME "/sdcard/Frame Counter HD 30FPS_1080p.mp4"
+
+namespace android {
+
+struct PacketSource : public RefBase {
+    PacketSource() {}
+
+    virtual sp<ABuffer> getNextAccessUnit() = 0;
+
+protected:
+    virtual ~PacketSource() {}
+
+private:
+    DISALLOW_EVIL_CONSTRUCTORS(PacketSource);
+};
+
+struct MediaPacketSource : public PacketSource {
+    MediaPacketSource()
+        : mMaxSampleSize(1024 * 1024) {
+        mExtractor = new NuMediaExtractor;
+        CHECK_EQ((status_t)OK,
+                 mExtractor->setDataSource(MEDIA_FILENAME));
+
+        bool haveVideo = false;
+        for (size_t i = 0; i < mExtractor->countTracks(); ++i) {
+            sp<AMessage> format;
+            CHECK_EQ((status_t)OK, mExtractor->getTrackFormat(i, &format));
+
+            AString mime;
+            CHECK(format->findString("mime", &mime));
+
+            if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime.c_str())) {
+                mExtractor->selectTrack(i);
+                haveVideo = true;
+                break;
+            }
+        }
+
+        CHECK(haveVideo);
+    }
+
+    virtual sp<ABuffer> getNextAccessUnit() {
+        int64_t timeUs;
+        status_t err = mExtractor->getSampleTime(&timeUs);
+
+        if (err != OK) {
+            return NULL;
+        }
+
+        sp<ABuffer> accessUnit = new ABuffer(mMaxSampleSize);
+        CHECK_EQ((status_t)OK, mExtractor->readSampleData(accessUnit));
+
+        accessUnit->meta()->setInt64("timeUs", timeUs);
+
+        CHECK_EQ((status_t)OK, mExtractor->advance());
+
+        return accessUnit;
+    }
+
+protected:
+    virtual ~MediaPacketSource() {
+    }
+
+private:
+    sp<NuMediaExtractor> mExtractor;
+    size_t mMaxSampleSize;
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaPacketSource);
+};
+
+struct SimplePacketSource : public PacketSource {
+    SimplePacketSource()
+        : mCounter(0) {
+    }
+
+    virtual sp<ABuffer> getNextAccessUnit() {
+        sp<ABuffer> buffer = new ABuffer(4);
+        uint8_t *dst = buffer->data();
+        dst[0] = mCounter >> 24;
+        dst[1] = (mCounter >> 16) & 0xff;
+        dst[2] = (mCounter >> 8) & 0xff;
+        dst[3] = mCounter & 0xff;
+
+        buffer->meta()->setInt64("timeUs", mCounter * 1000000ll / kFrameRate);
+
+        ++mCounter;
+
+        return buffer;
+    }
+
+protected:
+    virtual ~SimplePacketSource() {
+    }
+
+private:
+    enum {
+        kFrameRate = 30
+    };
+
+    uint32_t mCounter;
+
+    DISALLOW_EVIL_CONSTRUCTORS(SimplePacketSource);
+};
+
+struct TestHandler : public AHandler {
+    TestHandler(const sp<ANetworkSession> &netSession);
+
+    void listen();
+    void connect(const char *host, int32_t port);
+
+protected:
+    virtual ~TestHandler();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatListen,
+        kWhatConnect,
+        kWhatReceiverNotify,
+        kWhatSenderNotify,
+        kWhatSendMore,
+        kWhatStop,
+        kWhatTimeSyncerNotify,
+    };
+
+#if 1
+    static const RTPBase::TransportMode kRTPMode = RTPBase::TRANSPORT_UDP;
+    static const RTPBase::TransportMode kRTCPMode = RTPBase::TRANSPORT_UDP;
+#else
+    static const RTPBase::TransportMode kRTPMode = RTPBase::TRANSPORT_TCP;
+    static const RTPBase::TransportMode kRTCPMode = RTPBase::TRANSPORT_NONE;
+#endif
+
+#if 1
+    static const RTPBase::PacketizationMode kPacketizationMode
+        = RTPBase::PACKETIZATION_H264;
+#else
+    static const RTPBase::PacketizationMode kPacketizationMode
+        = RTPBase::PACKETIZATION_NONE;
+#endif
+
+    sp<ANetworkSession> mNetSession;
+    sp<PacketSource> mSource;
+    sp<RTPSender> mSender;
+    sp<RTPReceiver> mReceiver;
+
+    sp<TimeSyncer> mTimeSyncer;
+    bool mTimeSyncerStarted;
+
+    int64_t mFirstTimeRealUs;
+    int64_t mFirstTimeMediaUs;
+
+    int64_t mTimeOffsetUs;
+    bool mTimeOffsetValid;
+
+    status_t readMore();
+
+    DISALLOW_EVIL_CONSTRUCTORS(TestHandler);
+};
+
+TestHandler::TestHandler(const sp<ANetworkSession> &netSession)
+    : mNetSession(netSession),
+      mTimeSyncerStarted(false),
+      mFirstTimeRealUs(-1ll),
+      mFirstTimeMediaUs(-1ll),
+      mTimeOffsetUs(-1ll),
+      mTimeOffsetValid(false) {
+}
+
+TestHandler::~TestHandler() {
+}
+
+void TestHandler::listen() {
+    sp<AMessage> msg = new AMessage(kWhatListen, id());
+    msg->post();
+}
+
+void TestHandler::connect(const char *host, int32_t port) {
+    sp<AMessage> msg = new AMessage(kWhatConnect, id());
+    msg->setString("host", host);
+    msg->setInt32("port", port);
+    msg->post();
+}
+
+static void dumpDelay(int64_t delayMs) {
+    static const int64_t kMinDelayMs = 0;
+    static const int64_t kMaxDelayMs = 300;
+
+    const char *kPattern = "########################################";
+    size_t kPatternSize = strlen(kPattern);
+
+    int n = (kPatternSize * (delayMs - kMinDelayMs))
+                / (kMaxDelayMs - kMinDelayMs);
+
+    if (n < 0) {
+        n = 0;
+    } else if ((size_t)n > kPatternSize) {
+        n = kPatternSize;
+    }
+
+    ALOGI("(%4lld ms) %s\n",
+          delayMs,
+          kPattern + kPatternSize - n);
+}
+
+void TestHandler::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatListen:
+        {
+            sp<AMessage> notify = new AMessage(kWhatTimeSyncerNotify, id());
+            mTimeSyncer = new TimeSyncer(mNetSession, notify);
+            looper()->registerHandler(mTimeSyncer);
+
+            notify = new AMessage(kWhatReceiverNotify, id());
+            mReceiver = new RTPReceiver(
+                    mNetSession, notify, RTPReceiver::FLAG_AUTO_CONNECT);
+            looper()->registerHandler(mReceiver);
+
+            CHECK_EQ((status_t)OK,
+                     mReceiver->registerPacketType(33, kPacketizationMode));
+
+            int32_t receiverRTPPort;
+            CHECK_EQ((status_t)OK,
+                     mReceiver->initAsync(
+                         kRTPMode,
+                         kRTCPMode,
+                         &receiverRTPPort));
+
+            printf("picked receiverRTPPort %d\n", receiverRTPPort);
+
+#if 0
+            CHECK_EQ((status_t)OK,
+                     mReceiver->connect(
+                         "127.0.0.1", senderRTPPort, senderRTPPort + 1));
+#endif
+            break;
+        }
+
+        case kWhatConnect:
+        {
+            AString host;
+            CHECK(msg->findString("host", &host));
+
+            sp<AMessage> notify = new AMessage(kWhatTimeSyncerNotify, id());
+            mTimeSyncer = new TimeSyncer(mNetSession, notify);
+            looper()->registerHandler(mTimeSyncer);
+            mTimeSyncer->startServer(8123);
+
+            int32_t receiverRTPPort;
+            CHECK(msg->findInt32("port", &receiverRTPPort));
+
+#if 1
+            mSource = new MediaPacketSource;
+#else
+            mSource = new SimplePacketSource;
+#endif
+
+            notify = new AMessage(kWhatSenderNotify, id());
+            mSender = new RTPSender(mNetSession, notify);
+
+            looper()->registerHandler(mSender);
+
+            int32_t senderRTPPort;
+            CHECK_EQ((status_t)OK,
+                     mSender->initAsync(
+                         host.c_str(),
+                         receiverRTPPort,
+                         kRTPMode,
+                         kRTCPMode == RTPBase::TRANSPORT_NONE
+                            ? -1 : receiverRTPPort + 1,
+                         kRTCPMode,
+                         &senderRTPPort));
+
+            printf("picked senderRTPPort %d\n", senderRTPPort);
+            break;
+        }
+
+        case kWhatSenderNotify:
+        {
+            ALOGI("kWhatSenderNotify");
+
+            int32_t what;
+            CHECK(msg->findInt32("what", &what));
+
+            switch (what) {
+                case RTPSender::kWhatInitDone:
+                {
+                    int32_t err;
+                    CHECK(msg->findInt32("err", &err));
+
+                    ALOGI("RTPSender::initAsync completed w/ err %d", err);
+
+                    if (err == OK) {
+                        err = readMore();
+
+                        if (err != OK) {
+                            (new AMessage(kWhatStop, id()))->post();
+                        }
+                    }
+                    break;
+                }
+
+                case RTPSender::kWhatError:
+                    break;
+            }
+            break;
+        }
+
+        case kWhatReceiverNotify:
+        {
+            ALOGV("kWhatReceiverNotify");
+
+            int32_t what;
+            CHECK(msg->findInt32("what", &what));
+
+            switch (what) {
+                case RTPReceiver::kWhatInitDone:
+                {
+                    int32_t err;
+                    CHECK(msg->findInt32("err", &err));
+
+                    ALOGI("RTPReceiver::initAsync completed w/ err %d", err);
+                    break;
+                }
+
+                case RTPReceiver::kWhatError:
+                    break;
+
+                case RTPReceiver::kWhatAccessUnit:
+                {
+#if 0
+                    if (!mTimeSyncerStarted) {
+                        mTimeSyncer->startClient("172.18.41.216", 8123);
+                        mTimeSyncerStarted = true;
+                    }
+
+                    sp<ABuffer> accessUnit;
+                    CHECK(msg->findBuffer("accessUnit", &accessUnit));
+
+                    int64_t timeUs;
+                    CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+                    if (mTimeOffsetValid) {
+                        timeUs -= mTimeOffsetUs;
+                        int64_t nowUs = ALooper::GetNowUs();
+                        int64_t delayMs = (nowUs - timeUs) / 1000ll;
+
+                        dumpDelay(delayMs);
+                    }
+#endif
+                    break;
+                }
+
+                case RTPReceiver::kWhatPacketLost:
+                    ALOGV("kWhatPacketLost");
+                    break;
+
+                default:
+                    TRESPASS();
+            }
+            break;
+        }
+
+        case kWhatSendMore:
+        {
+            sp<ABuffer> accessUnit;
+            CHECK(msg->findBuffer("accessUnit", &accessUnit));
+
+            CHECK_EQ((status_t)OK,
+                     mSender->queueBuffer(
+                         accessUnit,
+                         33,
+                         kPacketizationMode));
+
+            status_t err = readMore();
+
+            if (err != OK) {
+                (new AMessage(kWhatStop, id()))->post();
+            }
+            break;
+        }
+
+        case kWhatStop:
+        {
+            if (mReceiver != NULL) {
+                looper()->unregisterHandler(mReceiver->id());
+                mReceiver.clear();
+            }
+
+            if (mSender != NULL) {
+                looper()->unregisterHandler(mSender->id());
+                mSender.clear();
+            }
+
+            mSource.clear();
+
+            looper()->stop();
+            break;
+        }
+
+        case kWhatTimeSyncerNotify:
+        {
+            CHECK(msg->findInt64("offset", &mTimeOffsetUs));
+            mTimeOffsetValid = true;
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+status_t TestHandler::readMore() {
+    sp<ABuffer> accessUnit = mSource->getNextAccessUnit();
+
+    if (accessUnit == NULL) {
+        return ERROR_END_OF_STREAM;
+    }
+
+    int64_t timeUs;
+    CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
+    int64_t nowUs = ALooper::GetNowUs();
+    int64_t whenUs;
+
+    if (mFirstTimeRealUs < 0ll) {
+        mFirstTimeRealUs = whenUs = nowUs;
+        mFirstTimeMediaUs = timeUs;
+    } else {
+        whenUs = mFirstTimeRealUs + timeUs - mFirstTimeMediaUs;
+    }
+
+    accessUnit->meta()->setInt64("timeUs", whenUs);
+
+    sp<AMessage> msg = new AMessage(kWhatSendMore, id());
+    msg->setBuffer("accessUnit", accessUnit);
+    msg->post(whenUs - nowUs);
+
+    return OK;
+}
+
+}  // namespace android
+
+static void usage(const char *me) {
+    fprintf(stderr,
+            "usage: %s -c host:port\tconnect to remote host\n"
+            "               -l       \tlisten\n",
+            me);
+}
+
+int main(int argc, char **argv) {
+    using namespace android;
+
+    // srand(time(NULL));
+
+    ProcessState::self()->startThreadPool();
+
+    DataSource::RegisterDefaultSniffers();
+
+    bool listen = false;
+    int32_t connectToPort = -1;
+    AString connectToHost;
+
+    int res;
+    while ((res = getopt(argc, argv, "hc:l")) >= 0) {
+        switch (res) {
+            case 'c':
+            {
+                const char *colonPos = strrchr(optarg, ':');
+
+                if (colonPos == NULL) {
+                    usage(argv[0]);
+                    exit(1);
+                }
+
+                connectToHost.setTo(optarg, colonPos - optarg);
+
+                char *end;
+                connectToPort = strtol(colonPos + 1, &end, 10);
+
+                if (*end != '\0' || end == colonPos + 1
+                        || connectToPort < 1 || connectToPort > 65535) {
+                    fprintf(stderr, "Illegal port specified.\n");
+                    exit(1);
+                }
+                break;
+            }
+
+            case 'l':
+            {
+                listen = true;
+                break;
+            }
+
+            case '?':
+            case 'h':
+                usage(argv[0]);
+                exit(1);
+        }
+    }
+
+    if (!listen && connectToPort < 0) {
+        fprintf(stderr,
+                "You need to select either client or server mode.\n");
+        exit(1);
+    }
+
+    sp<ANetworkSession> netSession = new ANetworkSession;
+    netSession->start();
+
+    sp<ALooper> looper = new ALooper;
+
+    sp<TestHandler> handler = new TestHandler(netSession);
+    looper->registerHandler(handler);
+
+    if (listen) {
+        handler->listen();
+    }
+
+    if (connectToPort >= 0) {
+        handler->connect(connectToHost.c_str(), connectToPort);
+    }
+
+    looper->start(true /* runOnCallingThread */);
+
+    return 0;
+}
+
diff --git a/media/libstagefright/wifi-display/sink/DirectRenderer.cpp b/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
new file mode 100644
index 0000000..15f9c88
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/DirectRenderer.cpp
@@ -0,0 +1,625 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DirectRenderer"
+#include <utils/Log.h>
+
+#include "DirectRenderer.h"
+
+#include <gui/SurfaceComposerClient.h>
+#include <gui/Surface.h>
+#include <media/AudioTrack.h>
+#include <media/ICrypto.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/hexdump.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+/*
+   Drives the decoding process using a MediaCodec instance. Input buffers
+   queued by calls to "queueInputBuffer" are fed to the decoder as soon
+   as the decoder is ready for them, the client is notified about output
+   buffers as the decoder spits them out.
+*/
+struct DirectRenderer::DecoderContext : public AHandler {
+    enum {
+        kWhatOutputBufferReady,
+    };
+    DecoderContext(const sp<AMessage> &notify);
+
+    status_t init(
+            const sp<AMessage> &format,
+            const sp<IGraphicBufferProducer> &surfaceTex);
+
+    void queueInputBuffer(const sp<ABuffer> &accessUnit);
+
+    status_t renderOutputBufferAndRelease(size_t index);
+    status_t releaseOutputBuffer(size_t index);
+
+protected:
+    virtual ~DecoderContext();
+
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatDecoderNotify,
+    };
+
+    sp<AMessage> mNotify;
+    sp<ALooper> mDecoderLooper;
+    sp<MediaCodec> mDecoder;
+    Vector<sp<ABuffer> > mDecoderInputBuffers;
+    Vector<sp<ABuffer> > mDecoderOutputBuffers;
+    List<size_t> mDecoderInputBuffersAvailable;
+    bool mDecoderNotificationPending;
+
+    List<sp<ABuffer> > mAccessUnits;
+
+    void onDecoderNotify();
+    void scheduleDecoderNotification();
+    void queueDecoderInputBuffers();
+
+    void queueOutputBuffer(
+            size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
+
+    DISALLOW_EVIL_CONSTRUCTORS(DecoderContext);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+/*
+   A "push" audio renderer. The primary function of this renderer is to use
+   an AudioTrack in push mode and making sure not to block the event loop
+   be ensuring that calls to AudioTrack::write never block. This is done by
+   estimating an upper bound of data that can be written to the AudioTrack
+   buffer without delay.
+*/
+struct DirectRenderer::AudioRenderer : public AHandler {
+    AudioRenderer(const sp<DecoderContext> &decoderContext);
+
+    void queueInputBuffer(
+            size_t index, int64_t timeUs, const sp<ABuffer> &buffer);
+
+protected:
+    virtual ~AudioRenderer();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatPushAudio,
+    };
+
+    struct BufferInfo {
+        size_t mIndex;
+        int64_t mTimeUs;
+        sp<ABuffer> mBuffer;
+    };
+
+    sp<DecoderContext> mDecoderContext;
+    sp<AudioTrack> mAudioTrack;
+
+    List<BufferInfo> mInputBuffers;
+    bool mPushPending;
+
+    size_t mNumFramesWritten;
+
+    void schedulePushIfNecessary();
+    void onPushAudio();
+
+    ssize_t writeNonBlocking(const uint8_t *data, size_t size);
+
+    DISALLOW_EVIL_CONSTRUCTORS(AudioRenderer);
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+DirectRenderer::DecoderContext::DecoderContext(const sp<AMessage> &notify)
+    : mNotify(notify),
+      mDecoderNotificationPending(false) {
+}
+
+DirectRenderer::DecoderContext::~DecoderContext() {
+    if (mDecoder != NULL) {
+        mDecoder->release();
+        mDecoder.clear();
+
+        mDecoderLooper->stop();
+        mDecoderLooper.clear();
+    }
+}
+
+status_t DirectRenderer::DecoderContext::init(
+        const sp<AMessage> &format,
+        const sp<IGraphicBufferProducer> &surfaceTex) {
+    CHECK(mDecoder == NULL);
+
+    AString mime;
+    CHECK(format->findString("mime", &mime));
+
+    mDecoderLooper = new ALooper;
+    mDecoderLooper->setName("video codec looper");
+
+    mDecoderLooper->start(
+            false /* runOnCallingThread */,
+            false /* canCallJava */,
+            PRIORITY_DEFAULT);
+
+    mDecoder = MediaCodec::CreateByType(
+            mDecoderLooper, mime.c_str(), false /* encoder */);
+
+    CHECK(mDecoder != NULL);
+
+    status_t err = mDecoder->configure(
+            format,
+            surfaceTex == NULL
+                ? NULL : new Surface(surfaceTex),
+            NULL /* crypto */,
+            0 /* flags */);
+    CHECK_EQ(err, (status_t)OK);
+
+    err = mDecoder->start();
+    CHECK_EQ(err, (status_t)OK);
+
+    err = mDecoder->getInputBuffers(
+            &mDecoderInputBuffers);
+    CHECK_EQ(err, (status_t)OK);
+
+    err = mDecoder->getOutputBuffers(
+            &mDecoderOutputBuffers);
+    CHECK_EQ(err, (status_t)OK);
+
+    scheduleDecoderNotification();
+
+    return OK;
+}
+
+void DirectRenderer::DecoderContext::queueInputBuffer(
+        const sp<ABuffer> &accessUnit) {
+    CHECK(mDecoder != NULL);
+
+    mAccessUnits.push_back(accessUnit);
+    queueDecoderInputBuffers();
+}
+
+status_t DirectRenderer::DecoderContext::renderOutputBufferAndRelease(
+        size_t index) {
+    return mDecoder->renderOutputBufferAndRelease(index);
+}
+
+status_t DirectRenderer::DecoderContext::releaseOutputBuffer(size_t index) {
+    return mDecoder->releaseOutputBuffer(index);
+}
+
+void DirectRenderer::DecoderContext::queueDecoderInputBuffers() {
+    if (mDecoder == NULL) {
+        return;
+    }
+
+    bool submittedMore = false;
+
+    while (!mAccessUnits.empty()
+            && !mDecoderInputBuffersAvailable.empty()) {
+        size_t index = *mDecoderInputBuffersAvailable.begin();
+
+        mDecoderInputBuffersAvailable.erase(
+                mDecoderInputBuffersAvailable.begin());
+
+        sp<ABuffer> srcBuffer = *mAccessUnits.begin();
+        mAccessUnits.erase(mAccessUnits.begin());
+
+        const sp<ABuffer> &dstBuffer =
+            mDecoderInputBuffers.itemAt(index);
+
+        memcpy(dstBuffer->data(), srcBuffer->data(), srcBuffer->size());
+
+        int64_t timeUs;
+        CHECK(srcBuffer->meta()->findInt64("timeUs", &timeUs));
+
+        status_t err = mDecoder->queueInputBuffer(
+                index,
+                0 /* offset */,
+                srcBuffer->size(),
+                timeUs,
+                0 /* flags */);
+        CHECK_EQ(err, (status_t)OK);
+
+        submittedMore = true;
+    }
+
+    if (submittedMore) {
+        scheduleDecoderNotification();
+    }
+}
+
+void DirectRenderer::DecoderContext::onMessageReceived(
+        const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatDecoderNotify:
+        {
+            onDecoderNotify();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void DirectRenderer::DecoderContext::onDecoderNotify() {
+    mDecoderNotificationPending = false;
+
+    for (;;) {
+        size_t index;
+        status_t err = mDecoder->dequeueInputBuffer(&index);
+
+        if (err == OK) {
+            mDecoderInputBuffersAvailable.push_back(index);
+        } else if (err == -EAGAIN) {
+            break;
+        } else {
+            TRESPASS();
+        }
+    }
+
+    queueDecoderInputBuffers();
+
+    for (;;) {
+        size_t index;
+        size_t offset;
+        size_t size;
+        int64_t timeUs;
+        uint32_t flags;
+        status_t err = mDecoder->dequeueOutputBuffer(
+                &index,
+                &offset,
+                &size,
+                &timeUs,
+                &flags);
+
+        if (err == OK) {
+            queueOutputBuffer(
+                    index, timeUs, mDecoderOutputBuffers.itemAt(index));
+        } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
+            err = mDecoder->getOutputBuffers(
+                    &mDecoderOutputBuffers);
+            CHECK_EQ(err, (status_t)OK);
+        } else if (err == INFO_FORMAT_CHANGED) {
+            // We don't care.
+        } else if (err == -EAGAIN) {
+            break;
+        } else {
+            TRESPASS();
+        }
+    }
+
+    scheduleDecoderNotification();
+}
+
+void DirectRenderer::DecoderContext::scheduleDecoderNotification() {
+    if (mDecoderNotificationPending) {
+        return;
+    }
+
+    sp<AMessage> notify =
+        new AMessage(kWhatDecoderNotify, id());
+
+    mDecoder->requestActivityNotification(notify);
+    mDecoderNotificationPending = true;
+}
+
+void DirectRenderer::DecoderContext::queueOutputBuffer(
+        size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+    sp<AMessage> msg = mNotify->dup();
+    msg->setInt32("what", kWhatOutputBufferReady);
+    msg->setSize("index", index);
+    msg->setInt64("timeUs", timeUs);
+    msg->setBuffer("buffer", buffer);
+    msg->post();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+DirectRenderer::AudioRenderer::AudioRenderer(
+        const sp<DecoderContext> &decoderContext)
+    : mDecoderContext(decoderContext),
+      mPushPending(false),
+      mNumFramesWritten(0) {
+    mAudioTrack = new AudioTrack(
+            AUDIO_STREAM_DEFAULT,
+            48000.0f,
+            AUDIO_FORMAT_PCM,
+            AUDIO_CHANNEL_OUT_STEREO,
+            (int)0 /* frameCount */);
+
+    CHECK_EQ((status_t)OK, mAudioTrack->initCheck());
+
+    mAudioTrack->start();
+}
+
+DirectRenderer::AudioRenderer::~AudioRenderer() {
+}
+
+void DirectRenderer::AudioRenderer::queueInputBuffer(
+        size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+    BufferInfo info;
+    info.mIndex = index;
+    info.mTimeUs = timeUs;
+    info.mBuffer = buffer;
+
+    mInputBuffers.push_back(info);
+    schedulePushIfNecessary();
+}
+
+void DirectRenderer::AudioRenderer::onMessageReceived(
+        const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatPushAudio:
+        {
+            onPushAudio();
+            break;
+        }
+
+        default:
+            break;
+    }
+}
+
+void DirectRenderer::AudioRenderer::schedulePushIfNecessary() {
+    if (mPushPending || mInputBuffers.empty()) {
+        return;
+    }
+
+    mPushPending = true;
+
+    uint32_t numFramesPlayed;
+    CHECK_EQ(mAudioTrack->getPosition(&numFramesPlayed),
+             (status_t)OK);
+
+    uint32_t numFramesPendingPlayout = mNumFramesWritten - numFramesPlayed;
+
+    // This is how long the audio sink will have data to
+    // play back.
+    const float msecsPerFrame = 1000.0f / mAudioTrack->getSampleRate();
+
+    int64_t delayUs =
+        msecsPerFrame * numFramesPendingPlayout * 1000ll;
+
+    // Let's give it more data after about half that time
+    // has elapsed.
+    (new AMessage(kWhatPushAudio, id()))->post(delayUs / 2);
+}
+
+void DirectRenderer::AudioRenderer::onPushAudio() {
+    mPushPending = false;
+
+    while (!mInputBuffers.empty()) {
+        const BufferInfo &info = *mInputBuffers.begin();
+
+        ssize_t n = writeNonBlocking(
+                info.mBuffer->data(), info.mBuffer->size());
+
+        if (n < (ssize_t)info.mBuffer->size()) {
+            CHECK_GE(n, 0);
+
+            info.mBuffer->setRange(
+                    info.mBuffer->offset() + n, info.mBuffer->size() - n);
+            break;
+        }
+
+        mDecoderContext->releaseOutputBuffer(info.mIndex);
+
+        mInputBuffers.erase(mInputBuffers.begin());
+    }
+
+    schedulePushIfNecessary();
+}
+
+ssize_t DirectRenderer::AudioRenderer::writeNonBlocking(
+        const uint8_t *data, size_t size) {
+    uint32_t numFramesPlayed;
+    status_t err = mAudioTrack->getPosition(&numFramesPlayed);
+    if (err != OK) {
+        return err;
+    }
+
+    ssize_t numFramesAvailableToWrite =
+        mAudioTrack->frameCount() - (mNumFramesWritten - numFramesPlayed);
+
+    size_t numBytesAvailableToWrite =
+        numFramesAvailableToWrite * mAudioTrack->frameSize();
+
+    if (size > numBytesAvailableToWrite) {
+        size = numBytesAvailableToWrite;
+    }
+
+    CHECK_EQ(mAudioTrack->write(data, size), (ssize_t)size);
+
+    size_t numFramesWritten = size / mAudioTrack->frameSize();
+    mNumFramesWritten += numFramesWritten;
+
+    return size;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+DirectRenderer::DirectRenderer(
+        const sp<IGraphicBufferProducer> &bufferProducer)
+    : mSurfaceTex(bufferProducer),
+      mVideoRenderPending(false),
+      mNumFramesLate(0),
+      mNumFrames(0) {
+}
+
+DirectRenderer::~DirectRenderer() {
+}
+
+void DirectRenderer::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatDecoderNotify:
+        {
+            onDecoderNotify(msg);
+            break;
+        }
+
+        case kWhatRenderVideo:
+        {
+            onRenderVideo();
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void DirectRenderer::setFormat(size_t trackIndex, const sp<AMessage> &format) {
+    CHECK_LT(trackIndex, 2u);
+
+    CHECK(mDecoderContext[trackIndex] == NULL);
+
+    sp<AMessage> notify = new AMessage(kWhatDecoderNotify, id());
+    notify->setSize("trackIndex", trackIndex);
+
+    mDecoderContext[trackIndex] = new DecoderContext(notify);
+    looper()->registerHandler(mDecoderContext[trackIndex]);
+
+    CHECK_EQ((status_t)OK,
+             mDecoderContext[trackIndex]->init(
+                 format, trackIndex == 0 ? mSurfaceTex : NULL));
+
+    if (trackIndex == 1) {
+        // Audio
+        mAudioRenderer = new AudioRenderer(mDecoderContext[1]);
+        looper()->registerHandler(mAudioRenderer);
+    }
+}
+
+void DirectRenderer::queueAccessUnit(
+        size_t trackIndex, const sp<ABuffer> &accessUnit) {
+    CHECK_LT(trackIndex, 2u);
+
+    if (mDecoderContext[trackIndex] == NULL) {
+        CHECK_EQ(trackIndex, 0u);
+
+        sp<AMessage> format = new AMessage;
+        format->setString("mime", "video/avc");
+        format->setInt32("width", 640);
+        format->setInt32("height", 360);
+
+        setFormat(trackIndex, format);
+    }
+
+    mDecoderContext[trackIndex]->queueInputBuffer(accessUnit);
+}
+
+void DirectRenderer::onDecoderNotify(const sp<AMessage> &msg) {
+    size_t trackIndex;
+    CHECK(msg->findSize("trackIndex", &trackIndex));
+
+    int32_t what;
+    CHECK(msg->findInt32("what", &what));
+
+    switch (what) {
+        case DecoderContext::kWhatOutputBufferReady:
+        {
+            size_t index;
+            CHECK(msg->findSize("index", &index));
+
+            int64_t timeUs;
+            CHECK(msg->findInt64("timeUs", &timeUs));
+
+            sp<ABuffer> buffer;
+            CHECK(msg->findBuffer("buffer", &buffer));
+
+            queueOutputBuffer(trackIndex, index, timeUs, buffer);
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
+void DirectRenderer::queueOutputBuffer(
+        size_t trackIndex,
+        size_t index, int64_t timeUs, const sp<ABuffer> &buffer) {
+    if (trackIndex == 1) {
+        // Audio
+        mAudioRenderer->queueInputBuffer(index, timeUs, buffer);
+        return;
+    }
+
+    OutputInfo info;
+    info.mIndex = index;
+    info.mTimeUs = timeUs;
+    info.mBuffer = buffer;
+    mVideoOutputBuffers.push_back(info);
+
+    scheduleVideoRenderIfNecessary();
+}
+
+void DirectRenderer::scheduleVideoRenderIfNecessary() {
+    if (mVideoRenderPending || mVideoOutputBuffers.empty()) {
+        return;
+    }
+
+    mVideoRenderPending = true;
+
+    int64_t timeUs = (*mVideoOutputBuffers.begin()).mTimeUs;
+    int64_t nowUs = ALooper::GetNowUs();
+
+    int64_t delayUs = timeUs - nowUs;
+
+    (new AMessage(kWhatRenderVideo, id()))->post(delayUs);
+}
+
+void DirectRenderer::onRenderVideo() {
+    mVideoRenderPending = false;
+
+    int64_t nowUs = ALooper::GetNowUs();
+
+    while (!mVideoOutputBuffers.empty()) {
+        const OutputInfo &info = *mVideoOutputBuffers.begin();
+
+        if (info.mTimeUs > nowUs) {
+            break;
+        }
+
+        if (info.mTimeUs + 15000ll < nowUs) {
+            ++mNumFramesLate;
+        }
+        ++mNumFrames;
+
+        status_t err =
+            mDecoderContext[0]->renderOutputBufferAndRelease(info.mIndex);
+        CHECK_EQ(err, (status_t)OK);
+
+        mVideoOutputBuffers.erase(mVideoOutputBuffers.begin());
+    }
+
+    scheduleVideoRenderIfNecessary();
+}
+
+}  // namespace android
+
diff --git a/media/libstagefright/wifi-display/sink/DirectRenderer.h b/media/libstagefright/wifi-display/sink/DirectRenderer.h
new file mode 100644
index 0000000..c5a4a83
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/DirectRenderer.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DIRECT_RENDERER_H_
+
+#define DIRECT_RENDERER_H_
+
+#include <media/stagefright/foundation/AHandler.h>
+
+namespace android {
+
+struct ABuffer;
+struct AudioTrack;
+struct IGraphicBufferProducer;
+struct MediaCodec;
+
// Renders audio and video data queued by calls to "queueAccessUnit".
// Video frames are rendered to the supplied IGraphicBufferProducer at
// their presentation time; audio is handed to an internal AudioRenderer.
struct DirectRenderer : public AHandler {
    DirectRenderer(const sp<IGraphicBufferProducer> &bufferProducer);

    // Supplies the media format for a track; call before (or alongside)
    // queuing access units for that track.
    void setFormat(size_t trackIndex, const sp<AMessage> &format);
    void queueAccessUnit(size_t trackIndex, const sp<ABuffer> &accessUnit);

protected:
    virtual void onMessageReceived(const sp<AMessage> &msg);
    virtual ~DirectRenderer();

private:
    struct DecoderContext;
    struct AudioRenderer;

    enum {
        kWhatDecoderNotify,
        kWhatRenderVideo,
    };

    // A decoded video buffer waiting for its presentation time.
    struct OutputInfo {
        size_t mIndex;      // decoder output-buffer index
        int64_t mTimeUs;    // presentation time
        sp<ABuffer> mBuffer;
    };

    sp<IGraphicBufferProducer> mSurfaceTex;

    // One context per track; index 0 is video, index 1 is audio.
    sp<DecoderContext> mDecoderContext[2];
    List<OutputInfo> mVideoOutputBuffers;  // pending video frames, in order

    bool mVideoRenderPending;  // a kWhatRenderVideo wakeup is in flight

    sp<AudioRenderer> mAudioRenderer;

    // Late-frame statistics (frames rendered >15ms past their deadline).
    int32_t mNumFramesLate;
    int32_t mNumFrames;

    void onDecoderNotify(const sp<AMessage> &msg);

    void queueOutputBuffer(
            size_t trackIndex,
            size_t index, int64_t timeUs, const sp<ABuffer> &buffer);

    void scheduleVideoRenderIfNecessary();
    void onRenderVideo();

    DISALLOW_EVIL_CONSTRUCTORS(DirectRenderer);
};
+
+}  // namespace android
+
+#endif  // DIRECT_RENDERER_H_
diff --git a/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp b/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp
new file mode 100644
index 0000000..5db2099
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/WifiDisplaySink.cpp
@@ -0,0 +1,917 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "WifiDisplaySink"
+#include <utils/Log.h>
+
+#include "WifiDisplaySink.h"
+
+#include "DirectRenderer.h"
+#include "MediaReceiver.h"
+#include "ParsedMessage.h"
+#include "TimeSyncer.h"
+
+#include <cutils/properties.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
// static
// Advertised via the User-Agent header on every outgoing RTSP message
// (see AppendCommonResponse).
const AString WifiDisplaySink::sUserAgent = MakeUserAgent();
+
// Construction only records configuration and seeds the supported video
// formats; all network activity is deferred until start() is called.
WifiDisplaySink::WifiDisplaySink(
        uint32_t flags,
        const sp<ANetworkSession> &netSession,
        const sp<IGraphicBufferProducer> &bufferProducer,
        const sp<AMessage> &notify)
    : mState(UNDEFINED),
      mFlags(flags),
      mNetSession(netSession),
      mSurfaceTex(bufferProducer),
      mNotify(notify),
      mUsingTCPTransport(false),
      mUsingTCPInterleaving(false),
      mSessionID(0),
      mNextCSeq(1),
      mIDRFrameRequestPending(false),
      mTimeOffsetUs(0ll),
      mTimeOffsetValid(false),
      mSetupDeferred(false),
      mLatencyCount(0),
      mLatencySumUs(0ll),
      mLatencyMaxUs(0ll),
      mMaxDelayMs(-1ll) {
    // We support any and all resolutions, but prefer 720p30
    mSinkSupportedVideoFormats.setNativeResolution(
            VideoFormats::RESOLUTION_CEA, 5);  // 1280 x 720 p30

    mSinkSupportedVideoFormats.enableAll();
}
+
WifiDisplaySink::~WifiDisplaySink() {
    // All members are sp<>-managed; nothing to release explicitly.
}
+
+void WifiDisplaySink::start(const char *sourceHost, int32_t sourcePort) {
+    sp<AMessage> msg = new AMessage(kWhatStart, id());
+    msg->setString("sourceHost", sourceHost);
+    msg->setInt32("sourcePort", sourcePort);
+    msg->post();
+}
+
+void WifiDisplaySink::start(const char *uri) {
+    sp<AMessage> msg = new AMessage(kWhatStart, id());
+    msg->setString("setupURI", uri);
+    msg->post();
+}
+
+// static
+bool WifiDisplaySink::ParseURL(
+        const char *url, AString *host, int32_t *port, AString *path,
+        AString *user, AString *pass) {
+    host->clear();
+    *port = 0;
+    path->clear();
+    user->clear();
+    pass->clear();
+
+    if (strncasecmp("rtsp://", url, 7)) {
+        return false;
+    }
+
+    const char *slashPos = strchr(&url[7], '/');
+
+    if (slashPos == NULL) {
+        host->setTo(&url[7]);
+        path->setTo("/");
+    } else {
+        host->setTo(&url[7], slashPos - &url[7]);
+        path->setTo(slashPos);
+    }
+
+    ssize_t atPos = host->find("@");
+
+    if (atPos >= 0) {
+        // Split of user:pass@ from hostname.
+
+        AString userPass(*host, 0, atPos);
+        host->erase(0, atPos + 1);
+
+        ssize_t colonPos = userPass.find(":");
+
+        if (colonPos < 0) {
+            *user = userPass;
+        } else {
+            user->setTo(userPass, 0, colonPos);
+            pass->setTo(userPass, colonPos + 1, userPass.size() - colonPos - 1);
+        }
+    }
+
+    const char *colonPos = strchr(host->c_str(), ':');
+
+    if (colonPos != NULL) {
+        char *end;
+        unsigned long x = strtoul(colonPos + 1, &end, 10);
+
+        if (end == colonPos + 1 || *end != '\0' || x >= 65536) {
+            return false;
+        }
+
+        *port = x;
+
+        size_t colonOffset = colonPos - host->c_str();
+        size_t trailing = host->size() - colonOffset;
+        host->erase(colonOffset, trailing);
+    } else {
+        *port = 554;
+    }
+
+    return true;
+}
+
+void WifiDisplaySink::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatStart:
+        {
+            sleep(2);  // XXX
+
+            int32_t sourcePort;
+            CHECK(msg->findString("sourceHost", &mRTSPHost));
+            CHECK(msg->findInt32("sourcePort", &sourcePort));
+
+            sp<AMessage> notify = new AMessage(kWhatRTSPNotify, id());
+
+            status_t err = mNetSession->createRTSPClient(
+                    mRTSPHost.c_str(), sourcePort, notify, &mSessionID);
+            CHECK_EQ(err, (status_t)OK);
+
+            mState = CONNECTING;
+            break;
+        }
+
+        case kWhatRTSPNotify:
+        {
+            int32_t reason;
+            CHECK(msg->findInt32("reason", &reason));
+
+            switch (reason) {
+                case ANetworkSession::kWhatError:
+                {
+                    int32_t sessionID;
+                    CHECK(msg->findInt32("sessionID", &sessionID));
+
+                    int32_t err;
+                    CHECK(msg->findInt32("err", &err));
+
+                    AString detail;
+                    CHECK(msg->findString("detail", &detail));
+
+                    ALOGE("An error occurred in session %d (%d, '%s/%s').",
+                          sessionID,
+                          err,
+                          detail.c_str(),
+                          strerror(-err));
+
+                    if (sessionID == mSessionID) {
+                        ALOGI("Lost control connection.");
+
+                        // The control connection is dead now.
+                        mNetSession->destroySession(mSessionID);
+                        mSessionID = 0;
+
+                        if (mNotify == NULL) {
+                            looper()->stop();
+                        } else {
+                            sp<AMessage> notify = mNotify->dup();
+                            notify->setInt32("what", kWhatDisconnected);
+                            notify->post();
+                        }
+                    }
+                    break;
+                }
+
+                case ANetworkSession::kWhatConnected:
+                {
+                    ALOGI("We're now connected.");
+                    mState = CONNECTED;
+
+                    if (mFlags & FLAG_SPECIAL_MODE) {
+                        sp<AMessage> notify = new AMessage(
+                                kWhatTimeSyncerNotify, id());
+
+                        mTimeSyncer = new TimeSyncer(mNetSession, notify);
+                        looper()->registerHandler(mTimeSyncer);
+
+                        mTimeSyncer->startClient(mRTSPHost.c_str(), 8123);
+                    }
+                    break;
+                }
+
+                case ANetworkSession::kWhatData:
+                {
+                    onReceiveClientData(msg);
+                    break;
+                }
+
+                default:
+                    TRESPASS();
+            }
+            break;
+        }
+
+        case kWhatStop:
+        {
+            looper()->stop();
+            break;
+        }
+
+        case kWhatMediaReceiverNotify:
+        {
+            onMediaReceiverNotify(msg);
+            break;
+        }
+
+        case kWhatTimeSyncerNotify:
+        {
+            int32_t what;
+            CHECK(msg->findInt32("what", &what));
+
+            if (what == TimeSyncer::kWhatTimeOffset) {
+                CHECK(msg->findInt64("offset", &mTimeOffsetUs));
+                mTimeOffsetValid = true;
+
+                if (mSetupDeferred) {
+                    CHECK_EQ((status_t)OK,
+                             sendSetup(
+                                mSessionID,
+                                "rtsp://x.x.x.x:x/wfd1.0/streamid=0"));
+
+                    mSetupDeferred = false;
+                }
+            }
+            break;
+        }
+
+        case kWhatReportLateness:
+        {
+            if (mLatencyCount > 0) {
+                int64_t avgLatencyUs = mLatencySumUs / mLatencyCount;
+
+                ALOGV("avg. latency = %lld ms (max %lld ms)",
+                      avgLatencyUs / 1000ll,
+                      mLatencyMaxUs / 1000ll);
+
+                sp<AMessage> params = new AMessage;
+                params->setInt64("avgLatencyUs", avgLatencyUs);
+                params->setInt64("maxLatencyUs", mLatencyMaxUs);
+                mMediaReceiver->informSender(0 /* trackIndex */, params);
+            }
+
+            mLatencyCount = 0;
+            mLatencySumUs = 0ll;
+            mLatencyMaxUs = 0ll;
+
+            msg->post(kReportLatenessEveryUs);
+            break;
+        }
+
+        default:
+            TRESPASS();
+    }
+}
+
// Debug helper: logs how far behind "now" the given timestamp is as a
// crude ASCII bar chart, and tracks the worst delay seen so far in
// mMaxDelayMs. (trackIndex is currently unused.)
void WifiDisplaySink::dumpDelay(size_t trackIndex, int64_t timeUs) {
    int64_t delayMs = (ALooper::GetNowUs() - timeUs) / 1000ll;

    if (delayMs > mMaxDelayMs) {
        mMaxDelayMs = delayMs;
    }

    // The bar spans delays in [kMinDelayMs, kMaxDelayMs].
    static const int64_t kMinDelayMs = 0;
    static const int64_t kMaxDelayMs = 300;

    const char *kPattern = "########################################";
    size_t kPatternSize = strlen(kPattern);

    // Scale the delay into [0, kPatternSize] bar characters, clamping.
    int n = (kPatternSize * (delayMs - kMinDelayMs))
                / (kMaxDelayMs - kMinDelayMs);

    if (n < 0) {
        n = 0;
    } else if ((size_t)n > kPatternSize) {
        n = kPatternSize;
    }

    ALOGI("[%lld]: (%4lld ms / %4lld ms) %s",
          timeUs / 1000,
          delayMs,
          mMaxDelayMs,
          kPattern + kPatternSize - n);
}
+
// Dispatches notifications from the MediaReceiver: init results, errors,
// incoming access units (forwarded to the renderer after clock-offset
// adjustment) and packet-loss reports.
void WifiDisplaySink::onMediaReceiverNotify(const sp<AMessage> &msg) {
    int32_t what;
    CHECK(msg->findInt32("what", &what));

    switch (what) {
        case MediaReceiver::kWhatInitDone:
        {
            status_t err;
            CHECK(msg->findInt32("err", &err));

            ALOGI("MediaReceiver initialization completed w/ err %d", err);
            break;
        }

        case MediaReceiver::kWhatError:
        {
            status_t err;
            CHECK(msg->findInt32("err", &err));

            ALOGE("MediaReceiver signaled error %d", err);
            break;
        }

        case MediaReceiver::kWhatAccessUnit:
        {
            // The renderer is created lazily on the first access unit.
            if (mRenderer == NULL) {
                mRenderer = new DirectRenderer(mSurfaceTex);
                looper()->registerHandler(mRenderer);
            }

            sp<ABuffer> accessUnit;
            CHECK(msg->findBuffer("accessUnit", &accessUnit));

            int64_t timeUs;
            CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));

            // Outside of special mode there is no explicit time sync;
            // derive the offset from the first access unit instead.
            if (!mTimeOffsetValid && !(mFlags & FLAG_SPECIAL_MODE)) {
                mTimeOffsetUs = timeUs - ALooper::GetNowUs();
                mTimeOffsetValid = true;
            }

            CHECK(mTimeOffsetValid);

            // We are the timesync _client_,
            // client time = server time - time offset.
            timeUs -= mTimeOffsetUs;

            size_t trackIndex;
            CHECK(msg->findSize("trackIndex", &trackIndex));

            int64_t nowUs = ALooper::GetNowUs();
            int64_t delayUs = nowUs - timeUs;

            // Accumulate latency stats; reported by kWhatReportLateness.
            mLatencySumUs += delayUs;
            if (mLatencyCount == 0 || delayUs > mLatencyMaxUs) {
                mLatencyMaxUs = delayUs;
            }
            ++mLatencyCount;

            // dumpDelay(trackIndex, timeUs);

            timeUs += 220000ll;  // Assume 220 ms of latency
            accessUnit->meta()->setInt64("timeUs", timeUs);

            sp<AMessage> format;
            if (msg->findMessage("format", &format)) {
                mRenderer->setFormat(trackIndex, format);
            }

            mRenderer->queueAccessUnit(trackIndex, accessUnit);
            break;
        }

        case MediaReceiver::kWhatPacketLost:
        {
#if 0
            if (!mIDRFrameRequestPending) {
                ALOGI("requesting IDR frame");

                sendIDRFrameRequest(mSessionID);
            }
#endif
            break;
        }

        default:
            TRESPASS();
    }
}
+
+void WifiDisplaySink::registerResponseHandler(
+        int32_t sessionID, int32_t cseq, HandleRTSPResponseFunc func) {
+    ResponseID id;
+    id.mSessionID = sessionID;
+    id.mCSeq = cseq;
+    mResponseHandlers.add(id, func);
+}
+
+status_t WifiDisplaySink::sendM2(int32_t sessionID) {
+    AString request = "OPTIONS * RTSP/1.0\r\n";
+    AppendCommonResponse(&request, mNextCSeq);
+
+    request.append(
+            "Require: org.wfa.wfd1.0\r\n"
+            "\r\n");
+
+    status_t err =
+        mNetSession->sendRequest(sessionID, request.c_str(), request.size());
+
+    if (err != OK) {
+        return err;
+    }
+
+    registerResponseHandler(
+            sessionID, mNextCSeq, &WifiDisplaySink::onReceiveM2Response);
+
+    ++mNextCSeq;
+
+    return OK;
+}
+
+status_t WifiDisplaySink::onReceiveM2Response(
+        int32_t sessionID, const sp<ParsedMessage> &msg) {
+    int32_t statusCode;
+    if (!msg->getStatusCode(&statusCode)) {
+        return ERROR_MALFORMED;
+    }
+
+    if (statusCode != 200) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    return OK;
+}
+
+status_t WifiDisplaySink::onReceiveSetupResponse(
+        int32_t sessionID, const sp<ParsedMessage> &msg) {
+    int32_t statusCode;
+    if (!msg->getStatusCode(&statusCode)) {
+        return ERROR_MALFORMED;
+    }
+
+    if (statusCode != 200) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    if (!msg->findString("session", &mPlaybackSessionID)) {
+        return ERROR_MALFORMED;
+    }
+
+    if (!ParsedMessage::GetInt32Attribute(
+                mPlaybackSessionID.c_str(),
+                "timeout",
+                &mPlaybackSessionTimeoutSecs)) {
+        mPlaybackSessionTimeoutSecs = -1;
+    }
+
+    ssize_t colonPos = mPlaybackSessionID.find(";");
+    if (colonPos >= 0) {
+        // Strip any options from the returned session id.
+        mPlaybackSessionID.erase(
+                colonPos, mPlaybackSessionID.size() - colonPos);
+    }
+
+    status_t err = configureTransport(msg);
+
+    if (err != OK) {
+        return err;
+    }
+
+    mState = PAUSED;
+
+    return sendPlay(
+            sessionID,
+            "rtsp://x.x.x.x:x/wfd1.0/streamid=0");
+}
+
// Extracts the source's RTP/RTCP endpoint from the SETUP response's
// Transport header and connects the media receiver's track 0 to it.
// Plain TCP transport needs no connect step here at all.
status_t WifiDisplaySink::configureTransport(const sp<ParsedMessage> &msg) {
    if (mUsingTCPTransport && !(mFlags & FLAG_SPECIAL_MODE)) {
        // In "special" mode we still use a UDP RTCP back-channel that
        // needs connecting.
        return OK;
    }

    AString transport;
    if (!msg->findString("transport", &transport)) {
        ALOGE("Missing 'transport' field in SETUP response.");
        return ERROR_MALFORMED;
    }

    // Fall back to the RTSP control host when the Transport header gives
    // no explicit source address.
    AString sourceHost;
    if (!ParsedMessage::GetAttribute(
                transport.c_str(), "source", &sourceHost)) {
        sourceHost = mRTSPHost;
    }

    AString serverPortStr;
    if (!ParsedMessage::GetAttribute(
                transport.c_str(), "server_port", &serverPortStr)) {
        ALOGE("Missing 'server_port' in Transport field.");
        return ERROR_MALFORMED;
    }

    // Expect "rtp-rtcp" with rtcp == rtp + 1 and both ports in range.
    int rtpPort, rtcpPort;
    if (sscanf(serverPortStr.c_str(), "%d-%d", &rtpPort, &rtcpPort) != 2
            || rtpPort <= 0 || rtpPort > 65535
            || rtcpPort <=0 || rtcpPort > 65535
            || rtcpPort != rtpPort + 1) {
        ALOGE("Invalid server_port description '%s'.",
                serverPortStr.c_str());

        return ERROR_MALFORMED;
    }

    if (rtpPort & 1) {
        ALOGW("Server picked an odd numbered RTP port.");
    }

    return mMediaReceiver->connectTrack(
            0 /* trackIndex */, sourceHost.c_str(), rtpPort, rtcpPort);
}
+
+status_t WifiDisplaySink::onReceivePlayResponse(
+        int32_t sessionID, const sp<ParsedMessage> &msg) {
+    int32_t statusCode;
+    if (!msg->getStatusCode(&statusCode)) {
+        return ERROR_MALFORMED;
+    }
+
+    if (statusCode != 200) {
+        return ERROR_UNSUPPORTED;
+    }
+
+    mState = PLAYING;
+
+    (new AMessage(kWhatReportLateness, id()))->post(kReportLatenessEveryUs);
+
+    return OK;
+}
+
+status_t WifiDisplaySink::onReceiveIDRFrameRequestResponse(
+        int32_t sessionID, const sp<ParsedMessage> &msg) {
+    CHECK(mIDRFrameRequestPending);
+    mIDRFrameRequestPending = false;
+
+    return OK;
+}
+
// Entry point for all data arriving on the RTSP control connection.
// Responses are dispatched to the handler registered for their
// (session, CSeq); requests are dispatched by method name.
void WifiDisplaySink::onReceiveClientData(const sp<AMessage> &msg) {
    int32_t sessionID;
    CHECK(msg->findInt32("sessionID", &sessionID));

    sp<RefBase> obj;
    CHECK(msg->findObject("data", &obj));

    sp<ParsedMessage> data =
        static_cast<ParsedMessage *>(obj.get());

    ALOGV("session %d received '%s'",
          sessionID, data->debugString().c_str());

    AString method;
    AString uri;
    data->getRequestField(0, &method);

    // Every valid RTSP message must carry a CSeq.
    int32_t cseq;
    if (!data->findInt32("cseq", &cseq)) {
        sendErrorResponse(sessionID, "400 Bad Request", -1 /* cseq */);
        return;
    }

    if (method.startsWith("RTSP/")) {
        // This is a response.

        ResponseID id;
        id.mSessionID = sessionID;
        id.mCSeq = cseq;

        ssize_t index = mResponseHandlers.indexOfKey(id);

        if (index < 0) {
            ALOGW("Received unsolicited server response, cseq %d", cseq);
            return;
        }

        // Handlers are one-shot: deregister before invoking.
        HandleRTSPResponseFunc func = mResponseHandlers.valueAt(index);
        mResponseHandlers.removeItemsAt(index);

        status_t err = (this->*func)(sessionID, data);
        CHECK_EQ(err, (status_t)OK);
    } else {
        // This is a request; field 2 of the request line is the version.
        AString version;
        data->getRequestField(2, &version);
        if (!(version == AString("RTSP/1.0"))) {
            sendErrorResponse(sessionID, "505 RTSP Version not supported", cseq);
            return;
        }

        if (method == "OPTIONS") {
            onOptionsRequest(sessionID, cseq, data);
        } else if (method == "GET_PARAMETER") {
            onGetParameterRequest(sessionID, cseq, data);
        } else if (method == "SET_PARAMETER") {
            onSetParameterRequest(sessionID, cseq, data);
        } else {
            sendErrorResponse(sessionID, "405 Method Not Allowed", cseq);
        }
    }
}
+
+void WifiDisplaySink::onOptionsRequest(
+        int32_t sessionID,
+        int32_t cseq,
+        const sp<ParsedMessage> &data) {
+    AString response = "RTSP/1.0 200 OK\r\n";
+    AppendCommonResponse(&response, cseq);
+    response.append("Public: org.wfa.wfd1.0, GET_PARAMETER, SET_PARAMETER\r\n");
+    response.append("\r\n");
+
+    status_t err = mNetSession->sendRequest(sessionID, response.c_str());
+    CHECK_EQ(err, (status_t)OK);
+
+    err = sendM2(sessionID);
+    CHECK_EQ(err, (status_t)OK);
+}
+
+void WifiDisplaySink::onGetParameterRequest(
+        int32_t sessionID,
+        int32_t cseq,
+        const sp<ParsedMessage> &data) {
+    AString body;
+
+    if (mState == CONNECTED) {
+        mUsingTCPTransport = false;
+        mUsingTCPInterleaving = false;
+
+        char val[PROPERTY_VALUE_MAX];
+        if (property_get("media.wfd-sink.tcp-mode", val, NULL)) {
+            if (!strcasecmp("true", val) || !strcmp("1", val)) {
+                ALOGI("Using TCP unicast transport.");
+                mUsingTCPTransport = true;
+                mUsingTCPInterleaving = false;
+            } else if (!strcasecmp("interleaved", val)) {
+                ALOGI("Using TCP interleaved transport.");
+                mUsingTCPTransport = true;
+                mUsingTCPInterleaving = true;
+            }
+        } else if (mFlags & FLAG_SPECIAL_MODE) {
+            mUsingTCPTransport = true;
+        }
+
+        body = "wfd_video_formats: ";
+        body.append(mSinkSupportedVideoFormats.getFormatSpec());
+
+        body.append(
+                "\r\nwfd_audio_codecs: AAC 0000000F 00\r\n"
+                "wfd_client_rtp_ports: RTP/AVP/");
+
+        if (mUsingTCPTransport) {
+            body.append("TCP;");
+            if (mUsingTCPInterleaving) {
+                body.append("interleaved");
+            } else {
+                body.append("unicast 19000 0");
+            }
+        } else {
+            body.append("UDP;unicast 19000 0");
+        }
+
+        body.append(" mode=play\r\n");
+    }
+
+    AString response = "RTSP/1.0 200 OK\r\n";
+    AppendCommonResponse(&response, cseq);
+    response.append("Content-Type: text/parameters\r\n");
+    response.append(StringPrintf("Content-Length: %d\r\n", body.size()));
+    response.append("\r\n");
+    response.append(body);
+
+    status_t err = mNetSession->sendRequest(sessionID, response.c_str());
+    CHECK_EQ(err, (status_t)OK);
+}
+
// Spins up the MediaReceiver on its own looper with the chosen RTP
// transport, then sends the RTSP SETUP request advertising the local
// client port(s). The response is handled by onReceiveSetupResponse().
status_t WifiDisplaySink::sendSetup(int32_t sessionID, const char *uri) {
    sp<AMessage> notify = new AMessage(kWhatMediaReceiverNotify, id());

    mMediaReceiverLooper = new ALooper;
    mMediaReceiverLooper->setName("media_receiver");

    // Elevated priority: this looper moves time-sensitive media data.
    mMediaReceiverLooper->start(
            false /* runOnCallingThread */,
            false /* canCallJava */,
            PRIORITY_AUDIO);

    mMediaReceiver = new MediaReceiver(mNetSession, notify);
    mMediaReceiverLooper->registerHandler(mMediaReceiver);

    RTPReceiver::TransportMode rtpMode = RTPReceiver::TRANSPORT_UDP;
    if (mUsingTCPTransport) {
        if (mUsingTCPInterleaving) {
            rtpMode = RTPReceiver::TRANSPORT_TCP_INTERLEAVED;
        } else {
            rtpMode = RTPReceiver::TRANSPORT_TCP;
        }
    }

    int32_t localRTPPort;
    status_t err = mMediaReceiver->addTrack(
            rtpMode, RTPReceiver::TRANSPORT_UDP /* rtcpMode */, &localRTPPort);

    if (err == OK) {
        err = mMediaReceiver->initAsync(MediaReceiver::MODE_TRANSPORT_STREAM);
    }

    if (err != OK) {
        // Tear down the half-initialized receiver and its looper.
        mMediaReceiverLooper->unregisterHandler(mMediaReceiver->id());
        mMediaReceiver.clear();

        mMediaReceiverLooper->stop();
        mMediaReceiverLooper.clear();

        return err;
    }

    AString request = StringPrintf("SETUP %s RTSP/1.0\r\n", uri);

    AppendCommonResponse(&request, mNextCSeq);

    // The Transport header depends on the negotiated RTP mode.
    if (rtpMode == RTPReceiver::TRANSPORT_TCP_INTERLEAVED) {
        request.append("Transport: RTP/AVP/TCP;interleaved=0-1\r\n");
    } else if (rtpMode == RTPReceiver::TRANSPORT_TCP) {
        if (mFlags & FLAG_SPECIAL_MODE) {
            // This isn't quite true, since the RTP connection is through TCP
            // and the RTCP connection through UDP...
            request.append(
                    StringPrintf(
                        "Transport: RTP/AVP/TCP;unicast;client_port=%d-%d\r\n",
                        localRTPPort, localRTPPort + 1));
        } else {
            request.append(
                    StringPrintf(
                        "Transport: RTP/AVP/TCP;unicast;client_port=%d\r\n",
                        localRTPPort));
        }
    } else {
        request.append(
                StringPrintf(
                    "Transport: RTP/AVP/UDP;unicast;client_port=%d-%d\r\n",
                    localRTPPort,
                    localRTPPort + 1));
    }

    request.append("\r\n");

    ALOGV("request = '%s'", request.c_str());

    err = mNetSession->sendRequest(sessionID, request.c_str(), request.size());

    if (err != OK) {
        return err;
    }

    registerResponseHandler(
            sessionID, mNextCSeq, &WifiDisplaySink::onReceiveSetupResponse);

    ++mNextCSeq;

    return OK;
}
+
+status_t WifiDisplaySink::sendPlay(int32_t sessionID, const char *uri) {
+    AString request = StringPrintf("PLAY %s RTSP/1.0\r\n", uri);
+
+    AppendCommonResponse(&request, mNextCSeq);
+
+    request.append(StringPrintf("Session: %s\r\n", mPlaybackSessionID.c_str()));
+    request.append("\r\n");
+
+    status_t err =
+        mNetSession->sendRequest(sessionID, request.c_str(), request.size());
+
+    if (err != OK) {
+        return err;
+    }
+
+    registerResponseHandler(
+            sessionID, mNextCSeq, &WifiDisplaySink::onReceivePlayResponse);
+
+    ++mNextCSeq;
+
+    return OK;
+}
+
+status_t WifiDisplaySink::sendIDRFrameRequest(int32_t sessionID) {
+    CHECK(!mIDRFrameRequestPending);
+
+    AString request = "SET_PARAMETER rtsp://localhost/wfd1.0 RTSP/1.0\r\n";
+
+    AppendCommonResponse(&request, mNextCSeq);
+
+    AString content = "wfd_idr_request\r\n";
+
+    request.append(StringPrintf("Session: %s\r\n", mPlaybackSessionID.c_str()));
+    request.append(StringPrintf("Content-Length: %d\r\n", content.size()));
+    request.append("\r\n");
+    request.append(content);
+
+    status_t err =
+        mNetSession->sendRequest(sessionID, request.c_str(), request.size());
+
+    if (err != OK) {
+        return err;
+    }
+
+    registerResponseHandler(
+            sessionID,
+            mNextCSeq,
+            &WifiDisplaySink::onReceiveIDRFrameRequestResponse);
+
+    ++mNextCSeq;
+
+    mIDRFrameRequestPending = true;
+
+    return OK;
+}
+
+void WifiDisplaySink::onSetParameterRequest(
+        int32_t sessionID,
+        int32_t cseq,
+        const sp<ParsedMessage> &data) {
+    const char *content = data->getContent();
+
+    if (strstr(content, "wfd_trigger_method: SETUP\r\n") != NULL) {
+        if ((mFlags & FLAG_SPECIAL_MODE) && !mTimeOffsetValid) {
+            mSetupDeferred = true;
+        } else {
+            status_t err =
+                sendSetup(
+                        sessionID,
+                        "rtsp://x.x.x.x:x/wfd1.0/streamid=0");
+
+            CHECK_EQ(err, (status_t)OK);
+        }
+    }
+
+    AString response = "RTSP/1.0 200 OK\r\n";
+    AppendCommonResponse(&response, cseq);
+    response.append("\r\n");
+
+    status_t err = mNetSession->sendRequest(sessionID, response.c_str());
+    CHECK_EQ(err, (status_t)OK);
+}
+
+void WifiDisplaySink::sendErrorResponse(
+        int32_t sessionID,
+        const char *errorDetail,
+        int32_t cseq) {
+    AString response;
+    response.append("RTSP/1.0 ");
+    response.append(errorDetail);
+    response.append("\r\n");
+
+    AppendCommonResponse(&response, cseq);
+
+    response.append("\r\n");
+
+    status_t err = mNetSession->sendRequest(sessionID, response.c_str());
+    CHECK_EQ(err, (status_t)OK);
+}
+
+// static
+void WifiDisplaySink::AppendCommonResponse(AString *response, int32_t cseq) {
+    time_t now = time(NULL);
+    struct tm *now2 = gmtime(&now);
+    char buf[128];
+    strftime(buf, sizeof(buf), "%a, %d %b %Y %H:%M:%S %z", now2);
+
+    response->append("Date: ");
+    response->append(buf);
+    response->append("\r\n");
+
+    response->append(StringPrintf("User-Agent: %s\r\n", sUserAgent.c_str()));
+
+    if (cseq >= 0) {
+        response->append(StringPrintf("CSeq: %d\r\n", cseq));
+    }
+}
+
+}  // namespace android
diff --git a/media/libstagefright/wifi-display/sink/WifiDisplaySink.h b/media/libstagefright/wifi-display/sink/WifiDisplaySink.h
new file mode 100644
index 0000000..adb9d89
--- /dev/null
+++ b/media/libstagefright/wifi-display/sink/WifiDisplaySink.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WIFI_DISPLAY_SINK_H_
+
+#define WIFI_DISPLAY_SINK_H_
+
+#include "ANetworkSession.h"
+
+#include "VideoFormats.h"
+
+#include <gui/Surface.h>
+#include <media/stagefright/foundation/AHandler.h>
+
+namespace android {
+
+struct AMessage;
+struct DirectRenderer;
+struct MediaReceiver;
+struct ParsedMessage;
+struct TimeSyncer;
+
+// Represents the RTSP client acting as a wifi display sink.
+// Connects to a wifi display source and renders the incoming
+// transport stream using a MediaPlayer instance.
+struct WifiDisplaySink : public AHandler {
+    enum {
+        kWhatDisconnected,
+    };
+
+    enum Flags {
+        FLAG_SPECIAL_MODE = 1,
+    };
+
+    // If no notification message is specified (notify == NULL)
+    // the sink will stop its looper() once the session ends,
+    // otherwise it will post an appropriate notification but leave
+    // the looper() running.
+    WifiDisplaySink(
+            uint32_t flags,
+            const sp<ANetworkSession> &netSession,
+            const sp<IGraphicBufferProducer> &bufferProducer = NULL,
+            const sp<AMessage> &notify = NULL);
+
+    void start(const char *sourceHost, int32_t sourcePort);
+    void start(const char *uri);
+
+protected:
+    virtual ~WifiDisplaySink();
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum State {
+        UNDEFINED,
+        CONNECTING,
+        CONNECTED,
+        PAUSED,
+        PLAYING,
+    };
+
+    enum {
+        kWhatStart,
+        kWhatRTSPNotify,
+        kWhatStop,
+        kWhatMediaReceiverNotify,
+        kWhatTimeSyncerNotify,
+        kWhatReportLateness,
+    };
+
+    struct ResponseID {
+        int32_t mSessionID;
+        int32_t mCSeq;
+
+        bool operator<(const ResponseID &other) const {
+            return mSessionID < other.mSessionID
+                || (mSessionID == other.mSessionID
+                        && mCSeq < other.mCSeq);
+        }
+    };
+
+    typedef status_t (WifiDisplaySink::*HandleRTSPResponseFunc)(
+            int32_t sessionID, const sp<ParsedMessage> &msg);
+
+    static const int64_t kReportLatenessEveryUs = 1000000ll;
+
+    static const AString sUserAgent;
+
+    State mState;
+    uint32_t mFlags;
+    VideoFormats mSinkSupportedVideoFormats;
+    sp<ANetworkSession> mNetSession;
+    sp<IGraphicBufferProducer> mSurfaceTex;
+    sp<AMessage> mNotify;
+    sp<TimeSyncer> mTimeSyncer;
+    bool mUsingTCPTransport;
+    bool mUsingTCPInterleaving;
+    AString mRTSPHost;
+    int32_t mSessionID;
+
+    int32_t mNextCSeq;
+
+    KeyedVector<ResponseID, HandleRTSPResponseFunc> mResponseHandlers;
+
+    sp<ALooper> mMediaReceiverLooper;
+    sp<MediaReceiver> mMediaReceiver;
+    sp<DirectRenderer> mRenderer;
+
+    AString mPlaybackSessionID;
+    int32_t mPlaybackSessionTimeoutSecs;
+
+    bool mIDRFrameRequestPending;
+
+    int64_t mTimeOffsetUs;
+    bool mTimeOffsetValid;
+
+    bool mSetupDeferred;
+
+    size_t mLatencyCount;
+    int64_t mLatencySumUs;
+    int64_t mLatencyMaxUs;
+
+    int64_t mMaxDelayMs;
+
+    status_t sendM2(int32_t sessionID);
+    status_t sendSetup(int32_t sessionID, const char *uri);
+    status_t sendPlay(int32_t sessionID, const char *uri);
+    status_t sendIDRFrameRequest(int32_t sessionID);
+
+    status_t onReceiveM2Response(
+            int32_t sessionID, const sp<ParsedMessage> &msg);
+
+    status_t onReceiveSetupResponse(
+            int32_t sessionID, const sp<ParsedMessage> &msg);
+
+    status_t configureTransport(const sp<ParsedMessage> &msg);
+
+    status_t onReceivePlayResponse(
+            int32_t sessionID, const sp<ParsedMessage> &msg);
+
+    status_t onReceiveIDRFrameRequestResponse(
+            int32_t sessionID, const sp<ParsedMessage> &msg);
+
+    void registerResponseHandler(
+            int32_t sessionID, int32_t cseq, HandleRTSPResponseFunc func);
+
+    void onReceiveClientData(const sp<AMessage> &msg);
+
+    void onOptionsRequest(
+            int32_t sessionID,
+            int32_t cseq,
+            const sp<ParsedMessage> &data);
+
+    void onGetParameterRequest(
+            int32_t sessionID,
+            int32_t cseq,
+            const sp<ParsedMessage> &data);
+
+    void onSetParameterRequest(
+            int32_t sessionID,
+            int32_t cseq,
+            const sp<ParsedMessage> &data);
+
+    void onMediaReceiverNotify(const sp<AMessage> &msg);
+
+    void sendErrorResponse(
+            int32_t sessionID,
+            const char *errorDetail,
+            int32_t cseq);
+
+    static void AppendCommonResponse(AString *response, int32_t cseq);
+
+    bool ParseURL(
+            const char *url, AString *host, int32_t *port, AString *path,
+            AString *user, AString *pass);
+
+    void dumpDelay(size_t trackIndex, int64_t timeUs);
+
+    DISALLOW_EVIL_CONSTRUCTORS(WifiDisplaySink);
+};
+
+}  // namespace android
+
+#endif  // WIFI_DISPLAY_SINK_H_
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index 5344623..0214520 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -40,14 +40,13 @@
 Converter::Converter(
         const sp<AMessage> &notify,
         const sp<ALooper> &codecLooper,
-        const sp<AMessage> &format,
-        bool usePCMAudio)
+        const sp<AMessage> &outputFormat)
     : mInitCheck(NO_INIT),
       mNotify(notify),
       mCodecLooper(codecLooper),
-      mInputFormat(format),
+      mOutputFormat(outputFormat),
       mIsVideo(false),
-      mIsPCMAudio(usePCMAudio),
+      mIsPCMAudio(false),
       mNeedToManuallyPrependSPSPPS(false),
       mDoMoreWorkPending(false)
 #if ENABLE_SILENCE_DETECTION
@@ -58,14 +57,14 @@
       ,mNumFramesToDrop(0)
     {
     AString mime;
-    CHECK(mInputFormat->findString("mime", &mime));
+    CHECK(mOutputFormat->findString("mime", &mime));
 
     if (!strncasecmp("video/", mime.c_str(), 6)) {
         mIsVideo = true;
+    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mime.c_str())) {
+        mIsPCMAudio = true;
     }
 
-    CHECK(!usePCMAudio || !mIsVideo);
-
     mInitCheck = initEncoder();
 
     if (mInitCheck != OK) {
@@ -152,23 +151,10 @@
 }
 
 status_t Converter::initEncoder() {
-    AString inputMIME;
-    CHECK(mInputFormat->findString("mime", &inputMIME));
-
     AString outputMIME;
-    bool isAudio = false;
-    if (!strcasecmp(inputMIME.c_str(), MEDIA_MIMETYPE_AUDIO_RAW)) {
-        if (mIsPCMAudio) {
-            outputMIME = MEDIA_MIMETYPE_AUDIO_RAW;
-        } else {
-            outputMIME = MEDIA_MIMETYPE_AUDIO_AAC;
-        }
-        isAudio = true;
-    } else if (!strcasecmp(inputMIME.c_str(), MEDIA_MIMETYPE_VIDEO_RAW)) {
-        outputMIME = MEDIA_MIMETYPE_VIDEO_AVC;
-    } else {
-        TRESPASS();
-    }
+    CHECK(mOutputFormat->findString("mime", &outputMIME));
+
+    bool isAudio = !strncasecmp(outputMIME.c_str(), "audio/", 6);
 
     if (!mIsPCMAudio) {
         mEncoder = MediaCodec::CreateByType(
@@ -179,14 +165,10 @@
         }
     }
 
-    mOutputFormat = mInputFormat->dup();
-
     if (mIsPCMAudio) {
         return OK;
     }
 
-    mOutputFormat->setString("mime", outputMIME.c_str());
-
     int32_t audioBitrate = GetInt32Property("media.wfd.audio-bitrate", 128000);
     int32_t videoBitrate = GetInt32Property("media.wfd.video-bitrate", 5000000);
     mPrevVideoBitrate = videoBitrate;
@@ -427,7 +409,7 @@
             releaseEncoder();
 
             AString mime;
-            CHECK(mInputFormat->findString("mime", &mime));
+            CHECK(mOutputFormat->findString("mime", &mime));
             ALOGI("encoder (%s) shut down.", mime.c_str());
             break;
         }
@@ -438,6 +420,17 @@
             break;
         }
 
+        case kWhatReleaseOutputBuffer:
+        {
+            if (mEncoder != NULL) {
+                size_t bufferIndex;
+                CHECK(msg->findInt32("bufferIndex", (int32_t*)&bufferIndex));
+                CHECK(bufferIndex < mEncoderOutputBuffers.size());
+                mEncoder->releaseOutputBuffer(bufferIndex);
+            }
+            break;
+        }
+
         default:
             TRESPASS();
     }
@@ -645,6 +638,7 @@
         size_t size;
         int64_t timeUs;
         uint32_t flags;
+        native_handle_t* handle = NULL;
         err = mEncoder->dequeueOutputBuffer(
                 &bufferIndex, &offset, &size, &timeUs, &flags);
 
@@ -667,18 +661,52 @@
             notify->setInt32("what", kWhatEOS);
             notify->post();
         } else {
-            sp<ABuffer> buffer = new ABuffer(size);
+#if 0
+            if (mIsVideo) {
+                int32_t videoBitrate = GetInt32Property(
+                        "media.wfd.video-bitrate", 5000000);
+
+                setVideoBitrate(videoBitrate);
+            }
+#endif
+
+            sp<ABuffer> buffer;
+            sp<ABuffer> outbuf = mEncoderOutputBuffers.itemAt(bufferIndex);
+
+            if (outbuf->meta()->findPointer("handle", (void**)&handle) &&
+                    handle != NULL) {
+                int32_t rangeLength, rangeOffset;
+                CHECK(outbuf->meta()->findInt32("rangeOffset", &rangeOffset));
+                CHECK(outbuf->meta()->findInt32("rangeLength", &rangeLength));
+                outbuf->meta()->setPointer("handle", NULL);
+
+                // MediaSender will post the following message when HDCP
+                // is done, to release the output buffer back to encoder.
+                sp<AMessage> notify(new AMessage(
+                        kWhatReleaseOutputBuffer, id()));
+                notify->setInt32("bufferIndex", bufferIndex);
+
+                buffer = new ABuffer(
+                        rangeLength > (int32_t)size ? rangeLength : size);
+                buffer->meta()->setPointer("handle", handle);
+                buffer->meta()->setInt32("rangeOffset", rangeOffset);
+                buffer->meta()->setInt32("rangeLength", rangeLength);
+                buffer->meta()->setMessage("notify", notify);
+            } else {
+                buffer = new ABuffer(size);
+            }
+
             buffer->meta()->setInt64("timeUs", timeUs);
 
             ALOGV("[%s] time %lld us (%.2f secs)",
                   mIsVideo ? "video" : "audio", timeUs, timeUs / 1E6);
 
-            memcpy(buffer->data(),
-                   mEncoderOutputBuffers.itemAt(bufferIndex)->base() + offset,
-                   size);
+            memcpy(buffer->data(), outbuf->base() + offset, size);
 
             if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
-                mOutputFormat->setBuffer("csd-0", buffer);
+                if (!handle) {
+                    mOutputFormat->setBuffer("csd-0", buffer);
+                }
             } else {
                 sp<AMessage> notify = mNotify->dup();
                 notify->setInt32("what", kWhatAccessUnit);
@@ -687,7 +715,9 @@
             }
         }
 
-        mEncoder->releaseOutputBuffer(bufferIndex);
+        if (!handle) {
+            mEncoder->releaseOutputBuffer(bufferIndex);
+        }
 
         if (flags & MediaCodec::BUFFER_FLAG_EOS) {
             break;
diff --git a/media/libstagefright/wifi-display/source/Converter.h b/media/libstagefright/wifi-display/source/Converter.h
index ba297c4..76c8b19 100644
--- a/media/libstagefright/wifi-display/source/Converter.h
+++ b/media/libstagefright/wifi-display/source/Converter.h
@@ -33,11 +33,9 @@
 // media access unit of a different format.
 // Right now this'll convert raw video into H.264 and raw audio into AAC.
 struct Converter : public AHandler {
-    Converter(
-            const sp<AMessage> &notify,
-            const sp<ALooper> &codecLooper,
-            const sp<AMessage> &format,
-            bool usePCMAudio);
+    Converter(const sp<AMessage> &notify,
+              const sp<ALooper> &codecLooper,
+              const sp<AMessage> &outputFormat);
 
     status_t initCheck() const;
 
@@ -66,6 +64,7 @@
         kWhatMediaPullerNotify,
         kWhatEncoderActivity,
         kWhatDropAFrame,
+        kWhatReleaseOutputBuffer,
     };
 
     void shutdownAsync();
@@ -83,10 +82,9 @@
     status_t mInitCheck;
     sp<AMessage> mNotify;
     sp<ALooper> mCodecLooper;
-    sp<AMessage> mInputFormat;
+    sp<AMessage> mOutputFormat;
     bool mIsVideo;
     bool mIsPCMAudio;
-    sp<AMessage> mOutputFormat;
     bool mNeedToManuallyPrependSPSPPS;
 
     sp<MediaCodec> mEncoder;
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index 3d7b865..a15fbac 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -378,7 +378,9 @@
         bool usePCMAudio,
         bool enableVideo,
         VideoFormats::ResolutionType videoResolutionType,
-        size_t videoResolutionIndex) {
+        size_t videoResolutionIndex,
+        VideoFormats::ProfileType videoProfileType,
+        VideoFormats::LevelType videoLevelType) {
     sp<AMessage> notify = new AMessage(kWhatMediaSenderNotify, id());
     mMediaSender = new MediaSender(mNetSession, notify);
     looper()->registerHandler(mMediaSender);
@@ -390,7 +392,9 @@
             usePCMAudio,
             enableVideo,
             videoResolutionType,
-            videoResolutionIndex);
+            videoResolutionIndex,
+            videoProfileType,
+            videoLevelType);
 
     if (err == OK) {
         err = mMediaSender->initAsync(
@@ -559,6 +563,8 @@
                         converter->dropAFrame();
                     }
                 }
+            } else if (what == MediaSender::kWhatInformSender) {
+                onSinkFeedback(msg);
             } else {
                 TRESPASS();
             }
@@ -654,6 +660,89 @@
     }
 }
 
+void WifiDisplaySource::PlaybackSession::onSinkFeedback(const sp<AMessage> &msg) {
+    int64_t avgLatencyUs;
+    CHECK(msg->findInt64("avgLatencyUs", &avgLatencyUs));
+
+    int64_t maxLatencyUs;
+    CHECK(msg->findInt64("maxLatencyUs", &maxLatencyUs));
+
+    ALOGI("sink reports avg. latency of %lld ms (max %lld ms)",
+          avgLatencyUs / 1000ll,
+          maxLatencyUs / 1000ll);
+
+    if (mVideoTrackIndex >= 0) {
+        const sp<Track> &videoTrack = mTracks.valueFor(mVideoTrackIndex);
+        sp<Converter> converter = videoTrack->converter();
+
+        if (converter != NULL) {
+            int32_t videoBitrate =
+                Converter::GetInt32Property("media.wfd.video-bitrate", -1);
+
+            char val[PROPERTY_VALUE_MAX];
+            if (videoBitrate < 0
+                    && property_get("media.wfd.video-bitrate", val, NULL)
+                    && !strcasecmp("adaptive", val)) {
+                videoBitrate = converter->getVideoBitrate();
+
+                if (avgLatencyUs > 300000ll) {
+                    videoBitrate *= 0.6;
+                } else if (avgLatencyUs < 100000ll) {
+                    videoBitrate *= 1.1;
+                }
+            }
+
+            if (videoBitrate > 0) {
+                if (videoBitrate < 500000) {
+                    videoBitrate = 500000;
+                } else if (videoBitrate > 10000000) {
+                    videoBitrate = 10000000;
+                }
+
+                if (videoBitrate != converter->getVideoBitrate()) {
+                    ALOGI("setting video bitrate to %d bps", videoBitrate);
+
+                    converter->setVideoBitrate(videoBitrate);
+                }
+            }
+        }
+
+        sp<RepeaterSource> repeaterSource = videoTrack->repeaterSource();
+        if (repeaterSource != NULL) {
+            double rateHz =
+                Converter::GetInt32Property(
+                        "media.wfd.video-framerate", -1);
+
+            char val[PROPERTY_VALUE_MAX];
+            if (rateHz < 0.0
+                    && property_get("media.wfd.video-framerate", val, NULL)
+                    && !strcasecmp("adaptive", val)) {
+                 rateHz = repeaterSource->getFrameRate();
+
+                if (avgLatencyUs > 300000ll) {
+                    rateHz *= 0.9;
+                } else if (avgLatencyUs < 200000ll) {
+                    rateHz *= 1.1;
+                }
+            }
+
+            if (rateHz > 0) {
+                if (rateHz < 5.0) {
+                    rateHz = 5.0;
+                } else if (rateHz > 30.0) {
+                    rateHz = 30.0;
+                }
+
+                if (rateHz != repeaterSource->getFrameRate()) {
+                    ALOGI("setting frame rate to %.2f Hz", rateHz);
+
+                    repeaterSource->setFrameRate(rateHz);
+                }
+            }
+        }
+    }
+}
+
 status_t WifiDisplaySource::PlaybackSession::setupMediaPacketizer(
         bool enableAudio, bool enableVideo) {
     DataSource::RegisterDefaultSniffers();
@@ -785,7 +874,9 @@
         bool usePCMAudio,
         bool enableVideo,
         VideoFormats::ResolutionType videoResolutionType,
-        size_t videoResolutionIndex) {
+        size_t videoResolutionIndex,
+        VideoFormats::ProfileType videoProfileType,
+        VideoFormats::LevelType videoLevelType) {
     CHECK(enableAudio || enableVideo);
 
     if (!mMediaPath.empty()) {
@@ -794,7 +885,8 @@
 
     if (enableVideo) {
         status_t err = addVideoSource(
-                videoResolutionType, videoResolutionIndex);
+                videoResolutionType, videoResolutionIndex, videoProfileType,
+                videoLevelType);
 
         if (err != OK) {
             return err;
@@ -810,9 +902,13 @@
 
 status_t WifiDisplaySource::PlaybackSession::addSource(
         bool isVideo, const sp<MediaSource> &source, bool isRepeaterSource,
-        bool usePCMAudio, size_t *numInputBuffers) {
+        bool usePCMAudio, unsigned profileIdc, unsigned levelIdc,
+        unsigned constraintSet, size_t *numInputBuffers) {
     CHECK(!usePCMAudio || !isVideo);
     CHECK(!isRepeaterSource || isVideo);
+    CHECK(!profileIdc || isVideo);
+    CHECK(!levelIdc || isVideo);
+    CHECK(!constraintSet || isVideo);
 
     sp<ALooper> pullLooper = new ALooper;
     pullLooper->setName("pull_looper");
@@ -841,17 +937,25 @@
     CHECK_EQ(err, (status_t)OK);
 
     if (isVideo) {
+        format->setString("mime", MEDIA_MIMETYPE_VIDEO_AVC);
         format->setInt32("store-metadata-in-buffers", true);
-
+        format->setInt32("store-metadata-in-buffers-output", (mHDCP != NULL));
         format->setInt32(
                 "color-format", OMX_COLOR_FormatAndroidOpaque);
+        format->setInt32("profile-idc", profileIdc);
+        format->setInt32("level-idc", levelIdc);
+        format->setInt32("constraint-set", constraintSet);
+    } else {
+        format->setString(
+                "mime",
+                usePCMAudio
+                    ? MEDIA_MIMETYPE_AUDIO_RAW : MEDIA_MIMETYPE_AUDIO_AAC);
     }
 
     notify = new AMessage(kWhatConverterNotify, id());
     notify->setSize("trackIndex", trackIndex);
 
-    sp<Converter> converter =
-        new Converter(notify, codecLooper, format, usePCMAudio);
+    sp<Converter> converter = new Converter(notify, codecLooper, format);
 
     err = converter->initCheck();
     if (err != OK) {
@@ -905,7 +1009,9 @@
 
 status_t WifiDisplaySource::PlaybackSession::addVideoSource(
         VideoFormats::ResolutionType videoResolutionType,
-        size_t videoResolutionIndex) {
+        size_t videoResolutionIndex,
+        VideoFormats::ProfileType videoProfileType,
+        VideoFormats::LevelType videoLevelType) {
     size_t width, height, framesPerSecond;
     bool interlaced;
     CHECK(VideoFormats::GetConfiguration(
@@ -916,6 +1022,14 @@
                 &framesPerSecond,
                 &interlaced));
 
+    unsigned profileIdc, levelIdc, constraintSet;
+    CHECK(VideoFormats::GetProfileLevel(
+                videoProfileType,
+                videoLevelType,
+                &profileIdc,
+                &levelIdc,
+                &constraintSet));
+
     sp<SurfaceMediaSource> source = new SurfaceMediaSource(width, height);
 
     source->setUseAbsoluteTimestamps();
@@ -926,7 +1040,8 @@
     size_t numInputBuffers;
     status_t err = addSource(
             true /* isVideo */, videoSource, true /* isRepeaterSource */,
-            false /* usePCMAudio */, &numInputBuffers);
+            false /* usePCMAudio */, profileIdc, levelIdc, constraintSet,
+            &numInputBuffers);
 
     if (err != OK) {
         return err;
@@ -949,7 +1064,8 @@
     if (audioSource->initCheck() == OK) {
         return addSource(
                 false /* isVideo */, audioSource, false /* isRepeaterSource */,
-                usePCMAudio, NULL /* numInputBuffers */);
+                usePCMAudio, 0 /* profileIdc */, 0 /* levelIdc */,
+                0 /* constraintSet */, NULL /* numInputBuffers */);
     }
 
     ALOGW("Unable to instantiate audio source");
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.h b/media/libstagefright/wifi-display/source/PlaybackSession.h
index 39086a1..5c8ee94 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.h
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.h
@@ -53,7 +53,9 @@
             bool usePCMAudio,
             bool enableVideo,
             VideoFormats::ResolutionType videoResolutionType,
-            size_t videoResolutionIndex);
+            size_t videoResolutionIndex,
+            VideoFormats::ProfileType videoProfileType,
+            VideoFormats::LevelType videoLevelType);
 
     void destroyAsync();
 
@@ -130,18 +132,25 @@
             bool usePCMAudio,
             bool enableVideo,
             VideoFormats::ResolutionType videoResolutionType,
-            size_t videoResolutionIndex);
+            size_t videoResolutionIndex,
+            VideoFormats::ProfileType videoProfileType,
+            VideoFormats::LevelType videoLevelType);
 
     status_t addSource(
             bool isVideo,
             const sp<MediaSource> &source,
             bool isRepeaterSource,
             bool usePCMAudio,
+            unsigned profileIdc,
+            unsigned levelIdc,
+            unsigned constraintSet,
             size_t *numInputBuffers);
 
     status_t addVideoSource(
             VideoFormats::ResolutionType videoResolutionType,
-            size_t videoResolutionIndex);
+            size_t videoResolutionIndex,
+            VideoFormats::ProfileType videoProfileType,
+            VideoFormats::LevelType videoLevelType);
 
     status_t addAudioSource(bool usePCMAudio);
 
diff --git a/media/libstagefright/wifi-display/source/TSPacketizer.cpp b/media/libstagefright/wifi-display/source/TSPacketizer.cpp
index 2c4a373..c674700 100644
--- a/media/libstagefright/wifi-display/source/TSPacketizer.cpp
+++ b/media/libstagefright/wifi-display/source/TSPacketizer.cpp
@@ -261,12 +261,24 @@
             data[0] = 40;  // descriptor_tag
             data[1] = 4;  // descriptor_length
 
-            CHECK_GE(mCSD.size(), 1u);
-            const sp<ABuffer> &sps = mCSD.itemAt(0);
-            CHECK(!memcmp("\x00\x00\x00\x01", sps->data(), 4));
-            CHECK_GE(sps->size(), 7u);
-            // profile_idc, constraint_set*, level_idc
-            memcpy(&data[2], sps->data() + 4, 3);
+            if (mCSD.size() > 0) {
+                CHECK_GE(mCSD.size(), 1u);
+                const sp<ABuffer> &sps = mCSD.itemAt(0);
+                CHECK(!memcmp("\x00\x00\x00\x01", sps->data(), 4));
+                CHECK_GE(sps->size(), 7u);
+                // profile_idc, constraint_set*, level_idc
+                memcpy(&data[2], sps->data() + 4, 3);
+            } else {
+                int32_t profileIdc, levelIdc, constraintSet;
+                CHECK(mFormat->findInt32("profile-idc", &profileIdc));
+                CHECK(mFormat->findInt32("level-idc", &levelIdc));
+                CHECK(mFormat->findInt32("constraint-set", &constraintSet));
+                CHECK_GE(profileIdc, 0u);
+                CHECK_GE(levelIdc, 0u);
+                data[2] = profileIdc;    // profile_idc
+                data[3] = constraintSet; // constraint_set*
+                data[4] = levelIdc;      // level_idc
+            }
 
             // AVC_still_present=0, AVC_24_hour_picture_flag=0, reserved
             data[5] = 0x3f;
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index 22dd0b1..b421b35 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -23,6 +23,7 @@
 #include "Parameters.h"
 #include "ParsedMessage.h"
 #include "rtp/RTPSender.h"
+#include "TimeSyncer.h"
 
 #include <binder/IServiceManager.h>
 #include <gui/IGraphicBufferProducer.h>
@@ -73,6 +74,12 @@
 
     mSupportedSourceVideoFormats.setNativeResolution(
             VideoFormats::RESOLUTION_CEA, 5);  // 1280x720 p30
+
+    // Enable all resolutions up to 1280x720p30
+    mSupportedSourceVideoFormats.enableResolutionUpto(
+            VideoFormats::RESOLUTION_CEA, 5,
+            VideoFormats::PROFILE_CHP,  // Constrained High Profile
+            VideoFormats::LEVEL_32);    // Level 3.2
 }
 
 WifiDisplaySource::~WifiDisplaySource() {
@@ -164,6 +171,14 @@
                 } else {
                     err = -EINVAL;
                 }
+            }
+
+            if (err == OK) {
+                sp<AMessage> notify = new AMessage(kWhatTimeSyncerNotify, id());
+                mTimeSyncer = new TimeSyncer(mNetSession, notify);
+                looper()->registerHandler(mTimeSyncer);
+
+                mTimeSyncer->startServer(8123);
 
                 mState = AWAITING_CLIENT_CONNECTION;
             }
@@ -539,6 +554,11 @@
             break;
         }
 
+        case kWhatTimeSyncerNotify:
+        {
+            break;
+        }
+
         default:
             TRESPASS();
     }
@@ -617,6 +637,9 @@
         chosenVideoFormat.disableAll();
         chosenVideoFormat.setNativeResolution(
                 mChosenVideoResolutionType, mChosenVideoResolutionIndex);
+        chosenVideoFormat.setProfileLevel(
+                mChosenVideoResolutionType, mChosenVideoResolutionIndex,
+                mChosenVideoProfile, mChosenVideoLevel);
 
         body.append(chosenVideoFormat.getFormatSpec(true /* forM4Message */));
         body.append("\r\n");
@@ -729,6 +752,8 @@
 
     ++mNextCSeq;
 
+    scheduleKeepAlive(sessionID);
+
     return OK;
 }
 
@@ -845,7 +870,9 @@
                     mSupportedSinkVideoFormats,
                     mSupportedSourceVideoFormats,
                     &mChosenVideoResolutionType,
-                    &mChosenVideoResolutionIndex)) {
+                    &mChosenVideoResolutionIndex,
+                    &mChosenVideoProfile,
+                    &mChosenVideoLevel)) {
             ALOGE("Sink and source share no commonly supported video "
                   "formats.");
 
@@ -864,6 +891,9 @@
 
         ALOGI("Picked video resolution %u x %u %c%u",
               width, height, interlaced ? 'i' : 'p', framesPerSecond);
+
+        ALOGI("Picked AVC profile %d, level %d",
+              mChosenVideoProfile, mChosenVideoLevel);
     } else {
         ALOGI("Sink doesn't support video at all.");
     }
@@ -994,8 +1024,6 @@
 
     if (mClientInfo.mPlaybackSession != NULL) {
         mClientInfo.mPlaybackSession->updateLiveness();
-
-        scheduleKeepAlive(sessionID);
     }
 
     return OK;
@@ -1257,7 +1285,9 @@
             mUsingPCMAudio,
             mSinkSupportsVideo,
             mChosenVideoResolutionType,
-            mChosenVideoResolutionIndex);
+            mChosenVideoResolutionIndex,
+            mChosenVideoProfile,
+            mChosenVideoLevel);
 
     if (err != OK) {
         looper()->unregisterHandler(playbackSession->id());
@@ -1340,7 +1370,9 @@
         return ERROR_MALFORMED;
     }
 
-    if (mState != AWAITING_CLIENT_PLAY) {
+    if (mState != AWAITING_CLIENT_PLAY
+     && mState != PAUSED_TO_PLAYING
+     && mState != PAUSED) {
         ALOGW("Received PLAY request but we're in state %d", mState);
 
         sendErrorResponse(
@@ -1367,7 +1399,7 @@
         return err;
     }
 
-    if (mState == PAUSED_TO_PLAYING) {
+    if (mState == PAUSED_TO_PLAYING || mPlaybackSessionEstablished) {
         mState = PLAYING;
         return OK;
     }
@@ -1401,7 +1433,7 @@
 
     ALOGI("Received PAUSE request.");
 
-    if (mState != PLAYING_TO_PAUSED) {
+    if (mState != PLAYING_TO_PAUSED && mState != PLAYING) {
         return INVALID_OPERATION;
     }
 
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.h b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
index 44d3e4d..64186fc 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.h
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
@@ -30,6 +30,7 @@
 struct IHDCP;
 struct IRemoteDisplayClient;
 struct ParsedMessage;
+struct TimeSyncer;
 
 // Represents the RTSP server acting as a wifi display source.
 // Manages incoming connections, sets up Playback sessions as necessary.
@@ -82,6 +83,7 @@
         kWhatHDCPNotify,
         kWhatFinishStop2,
         kWhatTeardownTriggerTimedOut,
+        kWhatTimeSyncerNotify,
     };
 
     struct ResponseID {
@@ -118,6 +120,7 @@
     sp<ANetworkSession> mNetSession;
     sp<IRemoteDisplayClient> mClient;
     AString mMediaPath;
+    sp<TimeSyncer> mTimeSyncer;
     struct in_addr mInterfaceAddr;
     int32_t mSessionID;
 
@@ -131,6 +134,8 @@
 
     VideoFormats::ResolutionType mChosenVideoResolutionType;
     size_t mChosenVideoResolutionIndex;
+    VideoFormats::ProfileType mChosenVideoProfile;
+    VideoFormats::LevelType mChosenVideoLevel;
 
     bool mSinkSupportsAudio;
 
diff --git a/media/libstagefright/wifi-display/udptest.cpp b/media/libstagefright/wifi-display/udptest.cpp
new file mode 100644
index 0000000..111846d
--- /dev/null
+++ b/media/libstagefright/wifi-display/udptest.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NEBUG 0
+#define LOG_TAG "udptest"
+#include <utils/Log.h>
+
+#include "ANetworkSession.h"
+#include "TimeSyncer.h"
+
+#include <binder/ProcessState.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+namespace android {
+
+}  // namespace android
+
+static void usage(const char *me) {
+    fprintf(stderr,
+            "usage: %s -c host[:port]\tconnect to test server\n"
+            "           -l            \tcreate a test server\n",
+            me);
+}
+
+int main(int argc, char **argv) {
+    using namespace android;
+
+    ProcessState::self()->startThreadPool();
+
+    int32_t localPort = -1;
+    int32_t connectToPort = -1;
+    AString connectToHost;
+
+    int res;
+    while ((res = getopt(argc, argv, "hc:l:")) >= 0) {
+        switch (res) {
+            case 'c':
+            {
+                const char *colonPos = strrchr(optarg, ':');
+
+                if (colonPos == NULL) {
+                    connectToHost = optarg;
+                    connectToPort = 49152;
+                } else {
+                    connectToHost.setTo(optarg, colonPos - optarg);
+
+                    char *end;
+                    connectToPort = strtol(colonPos + 1, &end, 10);
+
+                    if (*end != '\0' || end == colonPos + 1
+                            || connectToPort < 1 || connectToPort > 65535) {
+                        fprintf(stderr, "Illegal port specified.\n");
+                        exit(1);
+                    }
+                }
+                break;
+            }
+
+            case 'l':
+            {
+                char *end;
+                localPort = strtol(optarg, &end, 10);
+
+                if (*end != '\0' || end == optarg
+                        || localPort < 1 || localPort > 65535) {
+                    fprintf(stderr, "Illegal port specified.\n");
+                    exit(1);
+                }
+                break;
+            }
+
+            case '?':
+            case 'h':
+                usage(argv[0]);
+                exit(1);
+        }
+    }
+
+    if (localPort < 0 && connectToPort < 0) {
+        fprintf(stderr,
+                "You need to select either client or server mode.\n");
+        exit(1);
+    }
+
+    sp<ANetworkSession> netSession = new ANetworkSession;
+    netSession->start();
+
+    sp<ALooper> looper = new ALooper;
+
+    sp<TimeSyncer> handler = new TimeSyncer(netSession, NULL /* notify */);
+    looper->registerHandler(handler);
+
+    if (localPort >= 0) {
+        handler->startServer(localPort);
+    } else {
+        handler->startClient(connectToHost.c_str(), connectToPort);
+    }
+
+    looper->start(true /* runOnCallingThread */);
+
+    return 0;
+}
+
diff --git a/media/libstagefright/wifi-display/wfd.cpp b/media/libstagefright/wifi-display/wfd.cpp
index c947765..9fee4d0 100644
--- a/media/libstagefright/wifi-display/wfd.cpp
+++ b/media/libstagefright/wifi-display/wfd.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "wfd"
 #include <utils/Log.h>
 
+#include "sink/WifiDisplaySink.h"
 #include "source/WifiDisplaySource.h"
 
 #include <binder/ProcessState.h>
@@ -38,8 +39,12 @@
 static void usage(const char *me) {
     fprintf(stderr,
             "usage:\n"
-            "           %s -l iface[:port]\tcreate a wifi display source\n"
-            "               -f(ilename)  \tstream media\n",
+            "           %s -c host[:port]\tconnect to wifi source\n"
+            "               -u uri        \tconnect to an rtsp uri\n"
+            "               -l ip[:port] \tlisten on the specified port "
+            "               -f(ilename)  \tstream media "
+            "(create a sink)\n"
+            "               -s(pecial)   \trun in 'special' mode\n",
             me);
 }
 
@@ -209,14 +214,48 @@
 
     DataSource::RegisterDefaultSniffers();
 
+    AString connectToHost;
+    int32_t connectToPort = -1;
+    AString uri;
+
     AString listenOnAddr;
     int32_t listenOnPort = -1;
 
     AString path;
 
+    bool specialMode = false;
+
     int res;
-    while ((res = getopt(argc, argv, "hl:f:")) >= 0) {
+    while ((res = getopt(argc, argv, "hc:l:u:f:s")) >= 0) {
         switch (res) {
+            case 'c':
+            {
+                const char *colonPos = strrchr(optarg, ':');
+
+                if (colonPos == NULL) {
+                    connectToHost = optarg;
+                    connectToPort = WifiDisplaySource::kWifiDisplayDefaultPort;
+                } else {
+                    connectToHost.setTo(optarg, colonPos - optarg);
+
+                    char *end;
+                    connectToPort = strtol(colonPos + 1, &end, 10);
+
+                    if (*end != '\0' || end == colonPos + 1
+                            || connectToPort < 1 || connectToPort > 65535) {
+                        fprintf(stderr, "Illegal port specified.\n");
+                        exit(1);
+                    }
+                }
+                break;
+            }
+
+            case 'u':
+            {
+                uri = optarg;
+                break;
+            }
+
             case 'f':
             {
                 path = optarg;
@@ -245,6 +284,12 @@
                 break;
             }
 
+            case 's':
+            {
+                specialMode = true;
+                break;
+            }
+
             case '?':
             case 'h':
             default:
@@ -253,6 +298,13 @@
         }
     }
 
+    if (connectToPort >= 0 && listenOnPort >= 0) {
+        fprintf(stderr,
+                "You can connect to a source or create one, "
+                "but not both at the same time.\n");
+        exit(1);
+    }
+
     if (listenOnPort >= 0) {
         if (path.empty()) {
             createSource(listenOnAddr, listenOnPort);
@@ -263,7 +315,72 @@
         exit(0);
     }
 
-    usage(argv[0]);
+    if (connectToPort < 0 && uri.empty()) {
+        fprintf(stderr,
+                "You need to select either source host or uri.\n");
+
+        exit(1);
+    }
+
+    if (connectToPort >= 0 && !uri.empty()) {
+        fprintf(stderr,
+                "You need to either connect to a wfd host or an rtsp url, "
+                "not both.\n");
+        exit(1);
+    }
+
+    sp<SurfaceComposerClient> composerClient = new SurfaceComposerClient;
+    CHECK_EQ(composerClient->initCheck(), (status_t)OK);
+
+    sp<IBinder> display(SurfaceComposerClient::getBuiltInDisplay(
+            ISurfaceComposer::eDisplayIdMain));
+    DisplayInfo info;
+    SurfaceComposerClient::getDisplayInfo(display, &info);
+    ssize_t displayWidth = info.w;
+    ssize_t displayHeight = info.h;
+
+    ALOGV("display is %d x %d\n", displayWidth, displayHeight);
+
+    sp<SurfaceControl> control =
+        composerClient->createSurface(
+                String8("A Surface"),
+                displayWidth,
+                displayHeight,
+                PIXEL_FORMAT_RGB_565,
+                0);
+
+    CHECK(control != NULL);
+    CHECK(control->isValid());
+
+    SurfaceComposerClient::openGlobalTransaction();
+    CHECK_EQ(control->setLayer(INT_MAX), (status_t)OK);
+    CHECK_EQ(control->show(), (status_t)OK);
+    SurfaceComposerClient::closeGlobalTransaction();
+
+    sp<Surface> surface = control->getSurface();
+    CHECK(surface != NULL);
+
+    sp<ANetworkSession> session = new ANetworkSession;
+    session->start();
+
+    sp<ALooper> looper = new ALooper;
+
+    sp<WifiDisplaySink> sink = new WifiDisplaySink(
+            specialMode ? WifiDisplaySink::FLAG_SPECIAL_MODE : 0 /* flags */,
+            session,
+            surface->getIGraphicBufferProducer());
+
+    looper->registerHandler(sink);
+
+    if (connectToPort >= 0) {
+        sink->start(connectToHost.c_str(), connectToPort);
+    } else {
+        sink->start(uri.c_str());
+    }
+
+    looper->start(true /* runOnCallingThread */);
+
+    composerClient->dispose();
 
     return 0;
 }
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 061a079..714854e 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -81,6 +81,8 @@
     LOCAL_CFLAGS += -DANDROID_SMP=0
 endif
 
+LOCAL_CFLAGS += -fvisibility=hidden
+
 include $(BUILD_SHARED_LIBRARY)
 
 #
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 87eb6aa..a6edb77 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1453,10 +1453,18 @@
         }
         mPlaybackThreads.add(id, thread);
 
-        if (pSamplingRate != NULL) *pSamplingRate = config.sample_rate;
-        if (pFormat != NULL) *pFormat = config.format;
-        if (pChannelMask != NULL) *pChannelMask = config.channel_mask;
-        if (pLatencyMs != NULL) *pLatencyMs = thread->latency();
+        if (pSamplingRate != NULL) {
+            *pSamplingRate = config.sample_rate;
+        }
+        if (pFormat != NULL) {
+            *pFormat = config.format;
+        }
+        if (pChannelMask != NULL) {
+            *pChannelMask = config.channel_mask;
+        }
+        if (pLatencyMs != NULL) {
+            *pLatencyMs = thread->latency();
+        }
 
         // notify client processes of the new output creation
         thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED);
@@ -1698,9 +1706,15 @@
                                   );
         mRecordThreads.add(id, thread);
         ALOGV("openInput() created record thread: ID %d thread %p", id, thread);
-        if (pSamplingRate != NULL) *pSamplingRate = reqSamplingRate;
-        if (pFormat != NULL) *pFormat = config.format;
-        if (pChannelMask != NULL) *pChannelMask = reqChannels;
+        if (pSamplingRate != NULL) {
+            *pSamplingRate = reqSamplingRate;
+        }
+        if (pFormat != NULL) {
+            *pFormat = config.format;
+        }
+        if (pChannelMask != NULL) {
+            *pChannelMask = reqChannels;
+        }
 
         // notify client processes of the new input creation
         thread->audioConfigChanged_l(AudioSystem::INPUT_OPENED);
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index b0efef6..05dbab1 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -24,6 +24,8 @@
 
 #include <common_time/cc_helper.h>
 
+#include <cutils/compiler.h>
+
 #include <media/IAudioFlinger.h>
 #include <media/IAudioFlingerClient.h>
 #include <media/IAudioTrack.h>
@@ -54,6 +56,7 @@
 #include <powermanager/IPowerManager.h>
 
 #include <media/nbaio/NBLog.h>
+#include <private/media/AudioTrackShared.h>
 
 namespace android {
 
@@ -89,7 +92,7 @@
 {
     friend class BinderService<AudioFlinger>;   // for AudioFlinger()
 public:
-    static const char* getServiceName() { return "media.audio_flinger"; }
+    static const char* getServiceName() ANDROID_API { return "media.audio_flinger"; }
 
     virtual     status_t    dump(int fd, const Vector<String16>& args);
 
@@ -278,7 +281,7 @@
 
                 bool        btNrecIsOff() const { return mBtNrecIsOff; }
 
-                            AudioFlinger();
+                            AudioFlinger() ANDROID_API;
     virtual                 ~AudioFlinger();
 
     // call in any IAudioFlinger method that accesses mPrimaryHardwareDev
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index 35cf368..53238fa 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -19,6 +19,7 @@
 
 #include <cutils/misc.h>
 #include <cutils/config_utils.h>
+#include <cutils/compiler.h>
 #include <utils/String8.h>
 #include <utils/Vector.h>
 #include <utils/SortedVector.h>
@@ -44,7 +45,7 @@
 
 public:
     // for BinderService
-    static const char *getServiceName() { return "media.audio_policy"; }
+    static const char *getServiceName() ANDROID_API { return "media.audio_policy"; }
 
     virtual status_t    dump(int fd, const Vector<String16>& args);
 
@@ -137,7 +138,7 @@
     virtual status_t setVoiceVolume(float volume, int delayMs = 0);
 
 private:
-                        AudioPolicyService();
+                        AudioPolicyService() ANDROID_API;
     virtual             ~AudioPolicyService();
 
             status_t dumpInternals(int fd);
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index 2b8694f..29dc5b6 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -19,13 +19,14 @@
 
 #include <stdint.h>
 #include <sys/types.h>
+#include <cutils/compiler.h>
 
 #include <media/AudioBufferProvider.h>
 
 namespace android {
 // ----------------------------------------------------------------------------
 
-class AudioResampler {
+class ANDROID_API AudioResampler {
 public:
     // Determines quality of SRC.
     //  LOW_QUALITY: linear interpolator (1st order)
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index a749d7a..b1286d3 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -46,6 +46,8 @@
             void        destroy();
             int         name() const { return mName; }
 
+    virtual uint32_t    sampleRate() const;
+
             audio_stream_type_t streamType() const {
                 return mStreamType;
             }
@@ -139,6 +141,7 @@
                                         // 'volatile' means accessed without lock or
                                         // barrier, but is read/written atomically
     bool                mIsInvalid; // non-resettable latch, set by invalidate()
+    AudioTrackServerProxy*  mAudioTrackServerProxy;
 };  // end of Track
 
 class TimedTrack : public Track {
@@ -255,10 +258,6 @@
 
 private:
 
-    enum {
-        NO_MORE_BUFFERS = 0x80000001,   // same in AudioTrack.h, ok to be different value
-    };
-
     status_t            obtainBuffer(AudioBufferProvider::Buffer* buffer,
                                      uint32_t waitTimeMs);
     void                clearBufferQueue();
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 6c0d1d3..ffe3e9f 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -57,4 +57,5 @@
     // releaseBuffer() not overridden
 
     bool                mOverflow;  // overflow on most recent attempt to fill client buffer
+    AudioRecordServerProxy* mAudioRecordServerProxy;
 };
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 97f66f4..0773534 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -139,7 +139,7 @@
 // FIXME It would be better for client to tell AudioFlinger whether it wants double-buffering or
 // N-buffering, so AudioFlinger could allocate the right amount of memory.
 // See the client's minBufCount and mNotificationFramesAct calculations for details.
-static const int kFastTrackMultiplier = 2;
+static const int kFastTrackMultiplier = 1;
 
 // ----------------------------------------------------------------------------
 
@@ -495,7 +495,8 @@
         sp<IBinder> binder = new BBinder();
         status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
                                                          binder,
-                                                         String16(mName));
+                                                         String16(mName),
+                                                         String16("media"));
         if (status == NO_ERROR) {
             mWakeLockToken = binder;
         }
@@ -1326,7 +1327,7 @@
         // the track is newly added, make sure it fills up all its
         // buffers before playing. This is to ensure the client will
         // effectively get the latency it requested.
-        track->mFillingUpStatus = Track::FS_FILLING;
+        track->mFillingUpStatus = track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
         track->mResetDone = false;
         track->mPresentationCompleteFrames = 0;
         mActiveTracks.add(track);
@@ -1713,7 +1714,7 @@
 
 void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
 {
-    ALOGV ("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
+    ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
             this,  streamType, mTracks.size());
     Mutex::Autolock _l(mLock);
 
@@ -2595,24 +2596,35 @@
         // app does not call stop() and relies on underrun to stop:
         // hence the test on (mMixerStatus == MIXER_TRACKS_READY) meaning the track was mixed
         // during last round
+        size_t desiredFrames;
+        if (t->sampleRate() == mSampleRate) {
+            desiredFrames = mNormalFrameCount;
+        } else {
+            // +1 for rounding and +1 for additional sample needed for interpolation
+            desiredFrames = (mNormalFrameCount * t->sampleRate()) / mSampleRate + 1 + 1;
+            // add frames already consumed but not yet released by the resampler
+            // because cblk->framesReady() will include these frames
+            desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
+            // the minimum track buffer size is normally twice the number of frames necessary
+            // to fill one buffer and the resampler should not leave more than one buffer worth
+            // of unreleased frames after each pass, but just in case...
+            ALOG_ASSERT(desiredFrames <= cblk->frameCount_);
+        }
         uint32_t minFrames = 1;
         if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
                 (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) {
-            if (t->sampleRate() == mSampleRate) {
-                minFrames = mNormalFrameCount;
-            } else {
-                // +1 for rounding and +1 for additional sample needed for interpolation
-                minFrames = (mNormalFrameCount * t->sampleRate()) / mSampleRate + 1 + 1;
-                // add frames already consumed but not yet released by the resampler
-                // because cblk->framesReady() will include these frames
-                minFrames += mAudioMixer->getUnreleasedFrames(track->name());
-                // the minimum track buffer size is normally twice the number of frames necessary
-                // to fill one buffer and the resampler should not leave more than one buffer worth
-                // of unreleased frames after each pass, but just in case...
-                ALOG_ASSERT(minFrames <= cblk->frameCount_);
-            }
+            minFrames = desiredFrames;
         }
-        if ((track->framesReady() >= minFrames) && track->isReady() &&
+        // It's not safe to call framesReady() for a static buffer track, so assume it's ready
+        size_t framesReady;
+        if (track->sharedBuffer() == 0) {
+            framesReady = track->framesReady();
+        } else if (track->isStopped()) {
+            framesReady = 0;
+        } else {
+            framesReady = 1;
+        }
+        if ((framesReady >= minFrames) && track->isReady() &&
                 !track->isPaused() && !track->isTerminated())
         {
             ALOGVV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server,
@@ -2663,7 +2675,7 @@
                 // read original volumes with volume control
                 float typeVolume = mStreamTypes[track->streamType()].volume;
                 float v = masterVolume * typeVolume;
-                ServerProxy *proxy = track->mServerProxy;
+                AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
                 uint32_t vlr = proxy->getVolumeLR();
                 vl = vlr & 0xFFFF;
                 vr = vlr >> 16;
@@ -2736,7 +2748,7 @@
                 AudioMixer::CHANNEL_MASK, (void *)track->channelMask());
             // limit track sample rate to 2 x output sample rate, which changes at re-configuration
             uint32_t maxSampleRate = mSampleRate * 2;
-            uint32_t reqSampleRate = track->mServerProxy->getSampleRate();
+            uint32_t reqSampleRate = track->mAudioTrackServerProxy->getSampleRate();
             if (reqSampleRate == 0) {
                 reqSampleRate = mSampleRate;
             } else if (reqSampleRate > maxSampleRate) {
@@ -2767,6 +2779,13 @@
                 mixerStatus = MIXER_TRACKS_READY;
             }
         } else {
+            // only implemented for normal tracks, not fast tracks
+            if (framesReady < desiredFrames && !track->isStopped() && !track->isPaused()) {
+                // we missed desiredFrames whatever the actual number of frames missing was
+                cblk->u.mStreaming.mUnderrunFrames += desiredFrames;
+                // FIXME also wake futex so that underrun is noticed more quickly
+                (void) android_atomic_or(CBLK_UNDERRUN, &cblk->flags);
+            }
             // clear effect chain input buffer if an active track underruns to avoid sending
             // previous audio buffer again to effects
             chain = getEffectChain_l(track->sessionId());
@@ -3169,7 +3188,7 @@
             } else {
                 float typeVolume = mStreamTypes[track->streamType()].volume;
                 float v = mMasterVolume * typeVolume;
-                uint32_t vlr = track->mServerProxy->getVolumeLR();
+                uint32_t vlr = track->mAudioTrackServerProxy->getVolumeLR();
                 float v_clamped = v * (vlr & 0xFFFF);
                 if (v_clamped > MAX_GAIN) {
                     v_clamped = MAX_GAIN;
@@ -3695,7 +3714,8 @@
             }
 
             buffer.frameCount = mFrameCount;
-            if (CC_LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
+            status_t status = mActiveTrack->getNextBuffer(&buffer);
+            if (CC_LIKELY(status == NO_ERROR)) {
                 readOnce = true;
                 size_t framesOut = buffer.frameCount;
                 if (mResampler == NULL) {
@@ -3969,6 +3989,7 @@
         ALOGV("Record started OK");
         return status;
     }
+
 startError:
     AudioSystem::stopInput(mId);
     clearSyncStartEvent();
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index fac7071..55d96fa 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -74,7 +74,7 @@
 
     audio_channel_mask_t channelMask() const { return mChannelMask; }
 
-    uint32_t sampleRate() const; // FIXME inline after cblk sr moved
+    virtual uint32_t sampleRate() const { return mSampleRate; }
 
     // Return a pointer to the start of a contiguous slice of the track buffer.
     // Parameter 'offset' is the requested start position, expressed in
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 5ac3129..bfc197c 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -98,7 +98,7 @@
 
     // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
     size_t size = sizeof(audio_track_cblk_t);
-    size_t bufferSize = frameCount * mFrameSize;
+    size_t bufferSize = (sharedBuffer == 0 ? roundup(frameCount) : frameCount) * mFrameSize;
     if (sharedBuffer == 0) {
         size += bufferSize;
     }
@@ -124,22 +124,16 @@
         new(mCblk) audio_track_cblk_t();
         // clear all buffers
         mCblk->frameCount_ = frameCount;
-// uncomment the following lines to quickly test 32-bit wraparound
-//      mCblk->user = 0xffff0000;
-//      mCblk->server = 0xffff0000;
-//      mCblk->userBase = 0xffff0000;
-//      mCblk->serverBase = 0xffff0000;
         if (sharedBuffer == 0) {
             mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
             memset(mBuffer, 0, bufferSize);
-            // Force underrun condition to avoid false underrun callback until first data is
-            // written to buffer (other flags are cleared)
-            mCblk->flags = CBLK_UNDERRUN;
         } else {
             mBuffer = sharedBuffer->pointer();
+#if 0
+            mCblk->flags = CBLK_FORCEREADY;     // FIXME hack, need to fix the track ready logic
+#endif
         }
         mBufferEnd = (uint8_t *)mBuffer + bufferSize;
-        mServerProxy = new ServerProxy(mCblk, mBuffer, frameCount, mFrameSize, isOut);
 
 #ifdef TEE_SINK
         if (mTeeSinkTrackEnabled) {
@@ -199,51 +193,17 @@
     }
 #endif
 
-    buffer->raw = NULL;
-    mStepCount = buffer->frameCount;
-    // FIXME See note at getNextBuffer()
-    (void) step();      // ignore return value of step()
+    ServerProxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    buf.mRaw = buffer->raw;
     buffer->frameCount = 0;
-}
-
-bool AudioFlinger::ThreadBase::TrackBase::step() {
-    bool result = mServerProxy->step(mStepCount);
-    if (!result) {
-        ALOGV("stepServer failed acquiring cblk mutex");
-        mStepServerFailed = true;
-    }
-    return result;
+    buffer->raw = NULL;
+    mServerProxy->releaseBuffer(&buf);
 }
 
 void AudioFlinger::ThreadBase::TrackBase::reset() {
-    audio_track_cblk_t* cblk = this->cblk();
-
-    cblk->user = 0;
-    cblk->server = 0;
-    cblk->userBase = 0;
-    cblk->serverBase = 0;
-    mStepServerFailed = false;
     ALOGV("TrackBase::reset");
-}
-
-uint32_t AudioFlinger::ThreadBase::TrackBase::sampleRate() const {
-    return mServerProxy->getSampleRate();
-}
-
-void* AudioFlinger::ThreadBase::TrackBase::getBuffer(uint32_t offset, uint32_t frames) const {
-    audio_track_cblk_t* cblk = this->cblk();
-    int8_t *bufferStart = (int8_t *)mBuffer + (offset-cblk->serverBase) * mFrameSize;
-    int8_t *bufferEnd = bufferStart + frames * mFrameSize;
-
-    // Check validity of returned pointer in case the track control block would have been corrupted.
-    ALOG_ASSERT(!(bufferStart < mBuffer || bufferStart > bufferEnd || bufferEnd > mBufferEnd),
-            "TrackBase::getBuffer buffer out of range:\n"
-                "    start: %p, end %p , mBuffer %p mBufferEnd %p\n"
-                "    server %u, serverBase %u, user %u, userBase %u, frameSize %u",
-                bufferStart, bufferEnd, mBuffer, mBufferEnd,
-                cblk->server, cblk->serverBase, cblk->user, cblk->userBase, mFrameSize);
-
-    return bufferStart;
+    // FIXME still needed?
 }
 
 status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
@@ -362,9 +322,18 @@
     mFastIndex(-1),
     mUnderrunCount(0),
     mCachedVolume(1.0),
-    mIsInvalid(false)
+    mIsInvalid(false),
+    mAudioTrackServerProxy(NULL)
 {
     if (mCblk != NULL) {
+        if (sharedBuffer == 0) {
+            mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
+                    mFrameSize);
+        } else {
+            mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
+                    mFrameSize);
+        }
+        mServerProxy = mAudioTrackServerProxy;
         // to avoid leaking a track name, do not allocate one unless there is an mCblk
         mName = thread->getTrackName_l(channelMask, sessionId);
         mCblk->mName = mName;
@@ -374,6 +343,7 @@
         }
         // only allocate a fast track index if we were able to allocate a normal track name
         if (flags & IAudioFlinger::TRACK_FAST) {
+            mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
             ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
             int i = __builtin_ctz(thread->mFastTrackAvailMask);
             ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
@@ -432,12 +402,12 @@
 /*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
 {
     result.append("   Name Client Type Fmt Chn mask   Session StpCnt fCount S F SRate  "
-                  "L dB  R dB    Server      User     Main buf    Aux Buf  Flags Underruns\n");
+                  "L dB  R dB    Server    Main buf    Aux Buf  Flags Underruns\n");
 }
 
 void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
 {
-    uint32_t vlr = mServerProxy->getVolumeLR();
+    uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
     if (isFastTrack()) {
         sprintf(buffer, "   F %2d", mFastIndex);
     } else {
@@ -496,7 +466,7 @@
         break;
     }
     snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u %6u %1c %1d %5u %5.2g %5.2g  "
-            "0x%08x 0x%08x 0x%08x 0x%08x %#5x %9u%c\n",
+            "0x%08x 0x%08x 0x%08x %#5x %9u%c\n",
             (mClient == 0) ? getpid_cached : mClient->pid(),
             mStreamType,
             mFormat,
@@ -506,11 +476,10 @@
             mFrameCount,
             stateChar,
             mFillingUpStatus,
-            mServerProxy->getSampleRate(),
+            mAudioTrackServerProxy->getSampleRate(),
             20.0 * log10((vlr & 0xFFFF) / 4096.0),
             20.0 * log10((vlr >> 16) / 4096.0),
             mCblk->server,
-            mCblk->user,
             (int)mMainBuffer,
             (int)mAuxBuffer,
             mCblk->flags,
@@ -518,53 +487,27 @@
             nowInUnderrun);
 }
 
+uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
+    return mAudioTrackServerProxy->getSampleRate();
+}
+
 // AudioBufferProvider interface
 status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
         AudioBufferProvider::Buffer* buffer, int64_t pts)
 {
-    audio_track_cblk_t* cblk = this->cblk();
-    uint32_t framesReady;
-    uint32_t framesReq = buffer->frameCount;
-
-    // Check if last stepServer failed, try to step now
-    if (mStepServerFailed) {
-        // FIXME When called by fast mixer, this takes a mutex with tryLock().
-        //       Since the fast mixer is higher priority than client callback thread,
-        //       it does not result in priority inversion for client.
-        //       But a non-blocking solution would be preferable to avoid
-        //       fast mixer being unable to tryLock(), and
-        //       to avoid the extra context switches if the client wakes up,
-        //       discovers the mutex is locked, then has to wait for fast mixer to unlock.
-        if (!step())  goto getNextBuffer_exit;
-        ALOGV("stepServer recovered");
-        mStepServerFailed = false;
+    ServerProxy::Buffer buf;
+    size_t desiredFrames = buffer->frameCount;
+    buf.mFrameCount = desiredFrames;
+    status_t status = mServerProxy->obtainBuffer(&buf);
+    buffer->frameCount = buf.mFrameCount;
+    buffer->raw = buf.mRaw;
+    if (buf.mFrameCount == 0) {
+        // only implemented so far for normal tracks, not fast tracks
+        mCblk->u.mStreaming.mUnderrunFrames += desiredFrames;
+        // FIXME also wake futex so that underrun is noticed more quickly
+        (void) android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
     }
-
-    // FIXME Same as above
-    framesReady = mServerProxy->framesReady();
-
-    if (CC_LIKELY(framesReady)) {
-        uint32_t s = cblk->server;
-        uint32_t bufferEnd = cblk->serverBase + mFrameCount;
-
-        bufferEnd = (cblk->loopEnd < bufferEnd) ? cblk->loopEnd : bufferEnd;
-        if (framesReq > framesReady) {
-            framesReq = framesReady;
-        }
-        if (framesReq > bufferEnd - s) {
-            framesReq = bufferEnd - s;
-        }
-
-        buffer->raw = getBuffer(s, framesReq);
-        buffer->frameCount = framesReq;
-        return NO_ERROR;
-    }
-
-getNextBuffer_exit:
-    buffer->raw = NULL;
-    buffer->frameCount = 0;
-    ALOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get());
-    return NOT_ENOUGH_DATA;
+    return status;
 }
 
 // Note that framesReady() takes a mutex on the control block using tryLock().
@@ -576,7 +519,7 @@
 // the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
 // FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
 size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
-    return mServerProxy->framesReady();
+    return mAudioTrackServerProxy->framesReady();
 }
 
 // Don't call for fast tracks; the framesReady() could result in priority inversion
@@ -732,7 +675,6 @@
         // Force underrun condition to avoid false underrun callback until first data is
         // written to buffer
         android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags);
-        android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
         mFillingUpStatus = FS_FILLING;
         mResetDone = true;
         if (mState == FLUSHED) {
@@ -833,7 +775,7 @@
 {
     // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
     ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
-    uint32_t vlr = mServerProxy->getVolumeLR();
+    uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
     uint32_t vl = vlr & 0xFFFF;
     uint32_t vr = vlr >> 16;
     // track volumes come from shared memory, so can't be trusted and must be clamped
@@ -870,9 +812,12 @@
 
 void AudioFlinger::PlaybackThread::Track::invalidate()
 {
-    // FIXME should use proxy
-    android_atomic_or(CBLK_INVALID, &mCblk->flags);
-    mCblk->cv.signal();
+    // FIXME should use proxy, and needs work
+    audio_track_cblk_t* cblk = mCblk;
+    android_atomic_or(CBLK_INVALID, &cblk->flags);
+    android_atomic_release_store(0x40000000, &cblk->mFutex);
+    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
+    (void) __futex_syscall3(&cblk->mFutex, FUTEX_WAKE, INT_MAX);
     mIsInvalid = true;
 }
 
@@ -1418,6 +1363,8 @@
         mClientProxy->setVolumeLR((uint32_t(uint16_t(0x1000)) << 16) | uint16_t(0x1000));
         mClientProxy->setSendLevel(0.0);
         mClientProxy->setSampleRate(sampleRate);
+        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
+                true /*clientInServer*/);
     } else {
         ALOGW("Error creating output track on thread %p", playbackThread);
     }
@@ -1477,7 +1424,7 @@
                     memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
                     mBufferQueue.add(pInBuffer);
                 } else {
-                    ALOGW ("OutputTrack::write() %p no more buffers in queue", this);
+                    ALOGW("OutputTrack::write() %p no more buffers in queue", this);
                 }
             }
         }
@@ -1498,9 +1445,10 @@
         if (mOutBuffer.frameCount == 0) {
             mOutBuffer.frameCount = pInBuffer->frameCount;
             nsecs_t startTime = systemTime();
-            if (obtainBuffer(&mOutBuffer, waitTimeLeftMs) == (status_t)NO_MORE_BUFFERS) {
-                ALOGV ("OutputTrack::write() %p thread %p no more output buffers", this,
-                        mThread.unsafe_get());
+            status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
+            if (status != NO_ERROR) {
+                ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
+                        mThread.unsafe_get(), status);
                 outputBufferFull = true;
                 break;
             }
@@ -1515,7 +1463,10 @@
         uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
                 pInBuffer->frameCount;
         memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
-        mClientProxy->stepUser(outFrames);
+        Proxy::Buffer buf;
+        buf.mFrameCount = outFrames;
+        buf.mRaw = NULL;
+        mClientProxy->releaseBuffer(&buf);
         pInBuffer->frameCount -= outFrames;
         pInBuffer->i16 += outFrames * channelCount;
         mOutBuffer.frameCount -= outFrames;
@@ -1559,8 +1510,10 @@
     // If no more buffers are pending, fill output track buffer to make sure it is started
     // by output mixer.
     if (frames == 0 && mBufferQueue.size() == 0) {
-        if (mCblk->user < mFrameCount) {
-            frames = mFrameCount - mCblk->user;
+        // FIXME borken, replace by getting framesReady() from proxy
+        size_t user = 0;    // was mCblk->user
+        if (user < mFrameCount) {
+            frames = mFrameCount - user;
             pInBuffer = new Buffer;
             pInBuffer->mBuffer = new int16_t[frames * channelCount];
             pInBuffer->frameCount = frames;
@@ -1578,46 +1531,17 @@
 status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
         AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
 {
-    audio_track_cblk_t* cblk = mCblk;
-    uint32_t framesReq = buffer->frameCount;
-
-    ALOGVV("OutputTrack::obtainBuffer user %d, server %d", cblk->user, cblk->server);
-    buffer->frameCount  = 0;
-
-    size_t framesAvail;
-    {
-        Mutex::Autolock _l(cblk->lock);
-
-        // read the server count again
-        while (!(framesAvail = mClientProxy->framesAvailable_l())) {
-            if (CC_UNLIKELY(!mActive)) {
-                ALOGV("Not active and NO_MORE_BUFFERS");
-                return NO_MORE_BUFFERS;
-            }
-            status_t result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
-            if (result != NO_ERROR) {
-                return NO_MORE_BUFFERS;
-            }
-        }
-    }
-
-    if (framesReq > framesAvail) {
-        framesReq = framesAvail;
-    }
-
-    uint32_t u = cblk->user;
-    uint32_t bufferEnd = cblk->userBase + mFrameCount;
-
-    if (framesReq > bufferEnd - u) {
-        framesReq = bufferEnd - u;
-    }
-
-    buffer->frameCount  = framesReq;
-    buffer->raw         = mClientProxy->buffer(u);
-    return NO_ERROR;
+    ClientProxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    struct timespec timeout;
+    timeout.tv_sec = waitTimeMs / 1000;
+    timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
+    status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
+    buffer->frameCount = buf.mFrameCount;
+    buffer->raw = buf.mRaw;
+    return status;
 }
 
-
 void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
 {
     size_t size = mBufferQueue.size();
@@ -1688,6 +1612,11 @@
         mOverflow(false)
 {
     ALOGV("RecordTrack constructor, size %d", (int)mBufferEnd - (int)mBuffer);
+    if (mCblk != NULL) {
+        mAudioRecordServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
+                mFrameSize);
+        mServerProxy = mAudioRecordServerProxy;
+    }
 }
 
 AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
@@ -1699,42 +1628,16 @@
 status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
         int64_t pts)
 {
-    audio_track_cblk_t* cblk = this->cblk();
-    uint32_t framesAvail;
-    uint32_t framesReq = buffer->frameCount;
-
-    // Check if last stepServer failed, try to step now
-    if (mStepServerFailed) {
-        if (!step()) {
-            goto getNextBuffer_exit;
-        }
-        ALOGV("stepServer recovered");
-        mStepServerFailed = false;
+    ServerProxy::Buffer buf;
+    buf.mFrameCount = buffer->frameCount;
+    status_t status = mServerProxy->obtainBuffer(&buf);
+    buffer->frameCount = buf.mFrameCount;
+    buffer->raw = buf.mRaw;
+    if (buf.mFrameCount == 0) {
+        // FIXME also wake futex so that overrun is noticed more quickly
+        (void) android_atomic_or(CBLK_OVERRUN, &mCblk->flags);
     }
-
-    // FIXME lock is not actually held, so overrun is possible
-    framesAvail = mServerProxy->framesAvailableIn_l();
-
-    if (CC_LIKELY(framesAvail)) {
-        uint32_t s = cblk->server;
-        uint32_t bufferEnd = cblk->serverBase + mFrameCount;
-
-        if (framesReq > framesAvail) {
-            framesReq = framesAvail;
-        }
-        if (framesReq > bufferEnd - s) {
-            framesReq = bufferEnd - s;
-        }
-
-        buffer->raw = getBuffer(s, framesReq);
-        buffer->frameCount = framesReq;
-        return NO_ERROR;
-    }
-
-getNextBuffer_exit:
-    buffer->raw = NULL;
-    buffer->frameCount = 0;
-    return NOT_ENOUGH_DATA;
+    return status;
 }
 
 status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
@@ -1790,12 +1693,12 @@
 
 /*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
 {
-    result.append("   Clien Fmt Chn mask   Session Step S Serv     User   FrameCount\n");
+    result.append("   Clien Fmt Chn mask   Session Step S Serv   FrameCount\n");
 }
 
 void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
 {
-    snprintf(buffer, size, "   %05d %03u 0x%08x %05d   %04u %01d %08x %08x %05d\n",
+    snprintf(buffer, size, "   %05d %03u 0x%08x %05d   %04u %01d %08x %05d\n",
             (mClient == 0) ? getpid_cached : mClient->pid(),
             mFormat,
             mChannelMask,
@@ -1803,7 +1706,6 @@
             mStepCount,
             mState,
             mCblk->server,
-            mCblk->user,
             mFrameCount);
 }
 
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp
index a1971e3..d1ab7eb 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/Camera2Client.cpp
@@ -616,17 +616,31 @@
         params.previewCallbackOneShot = true;
     }
     if (params.previewCallbackFlags != (uint32_t)flag) {
+
+        if (flag != CAMERA_FRAME_CALLBACK_FLAG_NOOP) {
+            // Disable any existing preview callback window when enabling
+            // preview callback flags
+            res = mCallbackProcessor->setCallbackWindow(NULL);
+            if (res != OK) {
+                ALOGE("%s: Camera %d: Unable to clear preview callback surface:"
+                        " %s (%d)", __FUNCTION__, mCameraId, strerror(-res), res);
+                return;
+            }
+            params.previewCallbackSurface = false;
+        }
+
         params.previewCallbackFlags = flag;
+
         switch(params.state) {
-        case Parameters::PREVIEW:
-            res = startPreviewL(params, true);
-            break;
-        case Parameters::RECORD:
-        case Parameters::VIDEO_SNAPSHOT:
-            res = startRecordingL(params, true);
-            break;
-        default:
-            break;
+            case Parameters::PREVIEW:
+                res = startPreviewL(params, true);
+                break;
+            case Parameters::RECORD:
+            case Parameters::VIDEO_SNAPSHOT:
+                res = startRecordingL(params, true);
+                break;
+            default:
+                break;
         }
         if (res != OK) {
             ALOGE("%s: Camera %d: Unable to refresh request in state %s",
@@ -637,6 +651,59 @@
 
 }
 
+status_t Camera2Client::setPreviewCallbackTarget(
+        const sp<IGraphicBufferProducer>& callbackProducer) {
+    ATRACE_CALL();
+    ALOGV("%s: E", __FUNCTION__);
+    Mutex::Autolock icl(mBinderSerializationLock);
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    sp<ANativeWindow> window;
+    if (callbackProducer != 0) {
+        window = new Surface(callbackProducer);
+    }
+
+    res = mCallbackProcessor->setCallbackWindow(window);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to set preview callback surface: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+
+    SharedParameters::Lock l(mParameters);
+
+    if (window != NULL) {
+        // Disable traditional callbacks when a valid callback target is given
+        l.mParameters.previewCallbackFlags = CAMERA_FRAME_CALLBACK_FLAG_NOOP;
+        l.mParameters.previewCallbackOneShot = false;
+        l.mParameters.previewCallbackSurface = true;
+    } else {
+        // Disable callback target if given a NULL interface.
+        l.mParameters.previewCallbackSurface = false;
+    }
+
+    switch(l.mParameters.state) {
+        case Parameters::PREVIEW:
+            res = startPreviewL(l.mParameters, true);
+            break;
+        case Parameters::RECORD:
+        case Parameters::VIDEO_SNAPSHOT:
+            res = startRecordingL(l.mParameters, true);
+            break;
+        default:
+            break;
+    }
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable to refresh request in state %s",
+                __FUNCTION__, mCameraId,
+                Parameters::getStateName(l.mParameters.state));
+    }
+
+    return OK;
+}
+
+
 status_t Camera2Client::startPreview() {
     ATRACE_CALL();
     ALOGV("%s: E", __FUNCTION__);
@@ -699,8 +766,10 @@
     }
 
     Vector<uint8_t> outputStreams;
-    bool callbacksEnabled = params.previewCallbackFlags &
-        CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
+    bool callbacksEnabled = (params.previewCallbackFlags &
+            CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) ||
+            params.previewCallbackSurface;
+
     if (callbacksEnabled) {
         res = mCallbackProcessor->updateStream(params);
         if (res != OK) {
@@ -909,8 +978,10 @@
     }
 
     Vector<uint8_t> outputStreams;
-    bool callbacksEnabled = params.previewCallbackFlags &
-        CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK;
+    bool callbacksEnabled = (params.previewCallbackFlags &
+            CAMERA_FRAME_CALLBACK_FLAG_ENABLE_MASK) ||
+            params.previewCallbackSurface;
+
     if (callbacksEnabled) {
         res = mCallbackProcessor->updateStream(params);
         if (res != OK) {
diff --git a/services/camera/libcameraservice/Camera2Client.h b/services/camera/libcameraservice/Camera2Client.h
index 8ab46b1..078e3a3 100644
--- a/services/camera/libcameraservice/Camera2Client.h
+++ b/services/camera/libcameraservice/Camera2Client.h
@@ -51,6 +51,9 @@
     virtual status_t        setPreviewTexture(
         const sp<IGraphicBufferProducer>& bufferProducer);
     virtual void            setPreviewCallbackFlag(int flag);
+    virtual status_t        setPreviewCallbackTarget(
+        const sp<IGraphicBufferProducer>& callbackProducer);
+
     virtual status_t        startPreview();
     virtual void            stopPreview();
     virtual bool            previewEnabled();
diff --git a/services/camera/libcameraservice/Camera3Device.cpp b/services/camera/libcameraservice/Camera3Device.cpp
index cc7802b..73bf30c 100644
--- a/services/camera/libcameraservice/Camera3Device.cpp
+++ b/services/camera/libcameraservice/Camera3Device.cpp
@@ -128,7 +128,10 @@
 
     /** Initialize device with callback functions */
 
+    ATRACE_BEGIN("camera3->initialize");
     res = device->ops->initialize(device, this);
+    ATRACE_END();
+
     if (res != OK) {
         SET_ERR_L("Unable to initialize HAL device: %s (%d)",
                 strerror(-res), res);
@@ -140,7 +143,9 @@
 
     mVendorTagOps.get_camera_vendor_section_name = NULL;
 
+    ATRACE_BEGIN("camera3->get_metadata_vendor_tag_ops");
     device->ops->get_metadata_vendor_tag_ops(device, &mVendorTagOps);
+    ATRACE_END();
 
     if (mVendorTagOps.get_camera_vendor_section_name != NULL) {
         res = set_camera_metadata_vendor_tag_ops(&mVendorTagOps);
@@ -736,7 +741,7 @@
 status_t Camera3Device::createDefaultRequest(int templateId,
         CameraMetadata *request) {
     ATRACE_CALL();
-    ALOGV("%s: E", __FUNCTION__);
+    ALOGV("%s: for template %d", __FUNCTION__, templateId);
     Mutex::Autolock l(mLock);
 
     switch (mStatus) {
@@ -756,8 +761,10 @@
     }
 
     const camera_metadata_t *rawRequest;
+    ATRACE_BEGIN("camera3->construct_default_request_settings");
     rawRequest = mHal3Device->ops->construct_default_request_settings(
         mHal3Device, templateId);
+    ATRACE_END();
     if (rawRequest == NULL) {
         SET_ERR_L("HAL is unable to construct default settings for template %d",
                 templateId);
@@ -1068,8 +1075,9 @@
 
     // Do the HAL configuration; will potentially touch stream
     // max_buffers, usage, priv fields.
-
+    ATRACE_BEGIN("camera3->configure_streams");
     res = mHal3Device->ops->configure_streams(mHal3Device, &config);
+    ATRACE_END();
 
     if (res != OK) {
         SET_ERR_L("Unable to configure streams with HAL: %s (%d)",
@@ -1224,6 +1232,7 @@
         }
 
         if (request.haveResultMetadata && request.numBuffersLeft == 0) {
+            ATRACE_ASYNC_END("frame capture", frameNumber);
             mInFlightMap.removeItemsAt(idx, 1);
         }
 
@@ -1274,8 +1283,7 @@
         if (entry.count == 0) {
             SET_ERR("No timestamp provided by HAL for frame %d!",
                     frameNumber);
-        }
-        if (timestamp != entry.data.i64[0]) {
+        } else if (timestamp != entry.data.i64[0]) {
             SET_ERR("Timestamp mismatch between shutter notify and result"
                     " metadata for frame %d (%lld vs %lld respectively)",
                     frameNumber, timestamp, entry.data.i64[0]);
@@ -1374,6 +1382,7 @@
 }
 
 void Camera3Device::notify(const camera3_notify_msg *msg) {
+    ATRACE_CALL();
     NotificationListener *listener;
     {
         Mutex::Autolock l(mOutputLock);
@@ -1394,6 +1403,9 @@
                                   msg->message.error.error_stream);
                 streamId = stream->getId();
             }
+            ALOGV("Camera %d: %s: HAL error, frame %d, stream %d: %d",
+                    mId, __FUNCTION__, msg->message.error.frame_number,
+                    streamId, msg->message.error.error_code);
             if (listener != NULL) {
                 listener->notifyError(msg->message.error.error_code,
                         msg->message.error.frame_number, streamId);
@@ -1429,7 +1441,8 @@
                         frameNumber);
                 break;
             }
-
+            ALOGVV("Camera %d: %s: Shutter fired for frame %d at %lld",
+                    mId, __FUNCTION__, frameNumber, timestamp);
             // Call listener, if any
             if (listener != NULL) {
                 listener->notifyShutter(frameNumber, timestamp);
@@ -1550,6 +1563,7 @@
 }
 
 status_t Camera3Device::RequestThread::waitUntilPaused(nsecs_t timeout) {
+    ATRACE_CALL();
     status_t res;
     Mutex::Autolock l(mPauseLock);
     while (!mPaused) {
@@ -1696,8 +1710,11 @@
     }
 
     // Submit request and block until ready for next one
-
+    ATRACE_ASYNC_BEGIN("frame capture", request.frame_number);
+    ATRACE_BEGIN("camera3->process_capture_request");
     res = mHal3Device->ops->process_capture_request(mHal3Device, &request);
+    ATRACE_END();
+
     if (res != OK) {
         SET_ERR("RequestThread: Unable to submit capture request %d to HAL"
                 " device: %s (%d)", request.frame_number, strerror(-res), res);
diff --git a/services/camera/libcameraservice/CameraClient.cpp b/services/camera/libcameraservice/CameraClient.cpp
index e577fa3..be78f69 100644
--- a/services/camera/libcameraservice/CameraClient.cpp
+++ b/services/camera/libcameraservice/CameraClient.cpp
@@ -347,6 +347,12 @@
     }
 }
 
+status_t CameraClient::setPreviewCallbackTarget(
+        const sp<IGraphicBufferProducer>& callbackProducer) {
+    ALOGE("%s: Unimplemented!", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+
 // start preview mode
 status_t CameraClient::startPreview() {
     LOG1("startPreview (pid %d)", getCallingPid());
diff --git a/services/camera/libcameraservice/CameraClient.h b/services/camera/libcameraservice/CameraClient.h
index 7f0cb29..abde75a 100644
--- a/services/camera/libcameraservice/CameraClient.h
+++ b/services/camera/libcameraservice/CameraClient.h
@@ -40,6 +40,8 @@
     virtual status_t        setPreviewDisplay(const sp<Surface>& surface);
     virtual status_t        setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer);
     virtual void            setPreviewCallbackFlag(int flag);
+    virtual status_t        setPreviewCallbackTarget(
+            const sp<IGraphicBufferProducer>& callbackProducer);
     virtual status_t        startPreview();
     virtual void            stopPreview();
     virtual bool            previewEnabled();
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 710f164..eaa316a 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -190,6 +190,8 @@
         virtual status_t      setPreviewDisplay(const sp<Surface>& surface) = 0;
         virtual status_t      setPreviewTexture(const sp<IGraphicBufferProducer>& bufferProducer)=0;
         virtual void          setPreviewCallbackFlag(int flag) = 0;
+        virtual status_t      setPreviewCallbackTarget(
+                const sp<IGraphicBufferProducer>& callbackProducer) = 0;
         virtual status_t      startPreview() = 0;
         virtual void          stopPreview() = 0;
         virtual bool          previewEnabled() = 0;
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
index 98673ff..bc81409 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.cpp
@@ -37,6 +37,7 @@
         mDevice(client->getCameraDevice()),
         mId(client->getCameraId()),
         mCallbackAvailable(false),
+        mCallbackToApp(false),
         mCallbackStreamId(NO_STREAM) {
 }
 
@@ -53,6 +54,35 @@
     }
 }
 
+status_t CallbackProcessor::setCallbackWindow(
+        sp<ANativeWindow> callbackWindow) {
+    ATRACE_CALL();
+    status_t res;
+
+    Mutex::Autolock l(mInputMutex);
+
+    sp<Camera2Client> client = mClient.promote();
+    if (client == 0) return OK;
+    sp<CameraDeviceBase> device = client->getCameraDevice();
+
+    // If the window is changing, clear out stream if it already exists
+    if (mCallbackWindow != callbackWindow && mCallbackStreamId != NO_STREAM) {
+        res = device->deleteStream(mCallbackStreamId);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Unable to delete old stream "
+                    "for callbacks: %s (%d)", __FUNCTION__,
+                    client->getCameraId(), strerror(-res), res);
+            return res;
+        }
+        mCallbackStreamId = NO_STREAM;
+        mCallbackConsumer.clear();
+    }
+    mCallbackWindow = callbackWindow;
+    mCallbackToApp = (mCallbackWindow != NULL);
+
+    return OK;
+}
+
 status_t CallbackProcessor::updateStream(const Parameters &params) {
     ATRACE_CALL();
     status_t res;
@@ -67,14 +97,18 @@
 
     // If possible, use the flexible YUV format
     int32_t callbackFormat = params.previewFormat;
-    if (params.fastInfo.useFlexibleYuv &&
+    if (mCallbackToApp) {
+        // TODO: etalvala: This should use the flexible YUV format as well, but
+        // need to reconcile HAL2/HAL3 requirements.
+        callbackFormat = HAL_PIXEL_FORMAT_YV12;
+    } else if(params.fastInfo.useFlexibleYuv &&
             (params.previewFormat == HAL_PIXEL_FORMAT_YCrCb_420_SP ||
              params.previewFormat == HAL_PIXEL_FORMAT_YV12) ) {
         callbackFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
     }
 
-    if (mCallbackConsumer == 0) {
-        // Create CPU buffer queue endpoint
+    if (!mCallbackToApp && mCallbackConsumer == 0) {
+        // Create CPU buffer queue endpoint, since app hasn't given us one
         mCallbackConsumer = new CpuConsumer(kCallbackHeapCount);
         mCallbackConsumer->setFrameAvailableListener(this);
         mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer"));
@@ -104,8 +138,8 @@
             res = device->deleteStream(mCallbackStreamId);
             if (res != OK) {
                 ALOGE("%s: Camera %d: Unable to delete old output stream "
-                        "for callbacks: %s (%d)", __FUNCTION__, mId,
-                        strerror(-res), res);
+                        "for callbacks: %s (%d)", __FUNCTION__,
+                        mId, strerror(-res), res);
                 return res;
             }
             mCallbackStreamId = NO_STREAM;
diff --git a/services/camera/libcameraservice/camera2/CallbackProcessor.h b/services/camera/libcameraservice/camera2/CallbackProcessor.h
index d851a84..17dcfb1 100644
--- a/services/camera/libcameraservice/camera2/CallbackProcessor.h
+++ b/services/camera/libcameraservice/camera2/CallbackProcessor.h
@@ -45,6 +45,8 @@
 
     void onFrameAvailable();
 
+    // Set to NULL to disable the direct-to-app callback window
+    status_t setCallbackWindow(sp<ANativeWindow> callbackWindow);
     status_t updateStream(const Parameters &params);
     status_t deleteStream();
     int getStreamId() const;
@@ -64,6 +66,9 @@
         NO_STREAM = -1
     };
 
+    // True if mCallbackWindow is a remote consumer, false if just the local
+    // mCallbackConsumer
+    bool mCallbackToApp;
     int mCallbackStreamId;
     static const size_t kCallbackHeapCount = 6;
     sp<CpuConsumer>    mCallbackConsumer;
diff --git a/services/camera/libcameraservice/camera2/JpegCompressor.cpp b/services/camera/libcameraservice/camera2/JpegCompressor.cpp
index c9af71e..2f0c67d 100644
--- a/services/camera/libcameraservice/camera2/JpegCompressor.cpp
+++ b/services/camera/libcameraservice/camera2/JpegCompressor.cpp
@@ -210,7 +210,8 @@
     return true;
 }
 
-void JpegCompressor::jpegTermDestination(j_compress_ptr /*cinfo*/) {
+void JpegCompressor::jpegTermDestination(j_compress_ptr cinfo) {
+    (void) cinfo; // TODO: clean up
     ALOGV("%s", __FUNCTION__);
     ALOGV("%s: Done writing JPEG data. %d bytes left in buffer",
             __FUNCTION__, cinfo->dest->free_in_buffer);
diff --git a/services/camera/libcameraservice/camera2/Parameters.cpp b/services/camera/libcameraservice/camera2/Parameters.cpp
index a248b76..0459866 100644
--- a/services/camera/libcameraservice/camera2/Parameters.cpp
+++ b/services/camera/libcameraservice/camera2/Parameters.cpp
@@ -292,8 +292,11 @@
             CameraParameters::WHITE_BALANCE_AUTO);
 
     camera_metadata_ro_entry_t availableWhiteBalanceModes =
-        staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES);
-    {
+        staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES, 0, 0, false);
+    if (!availableWhiteBalanceModes.count) {
+        params.set(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE,
+                CameraParameters::WHITE_BALANCE_AUTO);
+    } else {
         String8 supportedWhiteBalance;
         bool addComma = false;
         for (size_t i=0; i < availableWhiteBalanceModes.count; i++) {
@@ -353,9 +356,11 @@
             CameraParameters::EFFECT_NONE);
 
     camera_metadata_ro_entry_t availableEffects =
-        staticInfo(ANDROID_CONTROL_AVAILABLE_EFFECTS);
-    if (!availableEffects.count) return NO_INIT;
-    {
+        staticInfo(ANDROID_CONTROL_AVAILABLE_EFFECTS, 0, 0, false);
+    if (!availableEffects.count) {
+        params.set(CameraParameters::KEY_SUPPORTED_EFFECTS,
+                CameraParameters::EFFECT_NONE);
+    } else {
         String8 supportedEffects;
         bool addComma = false;
         for (size_t i=0; i < availableEffects.count; i++) {
@@ -413,9 +418,11 @@
             CameraParameters::ANTIBANDING_AUTO);
 
     camera_metadata_ro_entry_t availableAntibandingModes =
-        staticInfo(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES);
-    if (!availableAntibandingModes.count) return NO_INIT;
-    {
+        staticInfo(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, 0, 0, false);
+    if (!availableAntibandingModes.count) {
+        params.set(CameraParameters::KEY_SUPPORTED_ANTIBANDING,
+                CameraParameters::ANTIBANDING_OFF);
+    } else {
         String8 supportedAntibanding;
         bool addComma = false;
         for (size_t i=0; i < availableAntibandingModes.count; i++) {
@@ -455,9 +462,10 @@
             CameraParameters::SCENE_MODE_AUTO);
 
     camera_metadata_ro_entry_t availableSceneModes =
-        staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
-    if (!availableSceneModes.count) return NO_INIT;
-    {
+        staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, 0, 0, false);
+    if (!availableSceneModes.count) {
+        params.remove(CameraParameters::KEY_SCENE_MODE);
+    } else {
         String8 supportedSceneModes(CameraParameters::SCENE_MODE_AUTO);
         bool addComma = true;
         bool noSceneModes = false;
@@ -548,15 +556,17 @@
         }
     }
 
+    bool isFlashAvailable = false;
     camera_metadata_ro_entry_t flashAvailable =
-        staticInfo(ANDROID_FLASH_INFO_AVAILABLE, 1, 1);
-    if (!flashAvailable.count) return NO_INIT;
+        staticInfo(ANDROID_FLASH_INFO_AVAILABLE, 0, 1, false);
+    if (flashAvailable.count) {
+        isFlashAvailable = flashAvailable.data.u8[0];
+    }
 
     camera_metadata_ro_entry_t availableAeModes =
-        staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES);
-    if (!availableAeModes.count) return NO_INIT;
+        staticInfo(ANDROID_CONTROL_AE_AVAILABLE_MODES, 0, 0, false);
 
-    if (flashAvailable.data.u8[0]) {
+    if (isFlashAvailable) {
         flashMode = Parameters::FLASH_MODE_OFF;
         params.set(CameraParameters::KEY_FLASH_MODE,
                 CameraParameters::FLASH_MODE_OFF);
@@ -585,14 +595,12 @@
     }
 
     camera_metadata_ro_entry_t minFocusDistance =
-        staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 1, 1);
-    if (!minFocusDistance.count) return NO_INIT;
+        staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 1, false);
 
     camera_metadata_ro_entry_t availableAfModes =
-        staticInfo(ANDROID_CONTROL_AF_AVAILABLE_MODES);
-    if (!availableAfModes.count) return NO_INIT;
+        staticInfo(ANDROID_CONTROL_AF_AVAILABLE_MODES, 0, 0, false);
 
-    if (minFocusDistance.data.f[0] == 0) {
+    if (!minFocusDistance.count || minFocusDistance.data.f[0] == 0) {
         // Fixed-focus lens
         focusMode = Parameters::FOCUS_MODE_FIXED;
         params.set(CameraParameters::KEY_FOCUS_MODE,
@@ -662,7 +670,7 @@
     focusingAreas.add(Parameters::Area(0,0,0,0,0));
 
     camera_metadata_ro_entry_t availableFocalLengths =
-        staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
+        staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, 0, 0, false);
     if (!availableFocalLengths.count) return NO_INIT;
 
     float minFocalLength = availableFocalLengths.data.f[0];
@@ -768,8 +776,8 @@
             CameraParameters::FALSE);
 
     camera_metadata_ro_entry_t availableVideoStabilizationModes =
-        staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES);
-    if (!availableVideoStabilizationModes.count) return NO_INIT;
+        staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, 0, 0,
+                false);
 
     if (availableVideoStabilizationModes.count > 1) {
         params.set(CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED,
@@ -794,9 +802,10 @@
 
     previewCallbackFlags = 0;
     previewCallbackOneShot = false;
+    previewCallbackSurface = false;
 
     camera_metadata_ro_entry_t supportedHardwareLevel =
-        staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL);
+        staticInfo(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL, 0, 0, false);
     if (!supportedHardwareLevel.count || (supportedHardwareLevel.data.u8[0] ==
             ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED)) {
         ALOGI("Camera %d: ZSL mode disabled for limited mode HALs", cameraId);
@@ -828,14 +837,23 @@
 status_t Parameters::buildFastInfo() {
 
     camera_metadata_ro_entry_t activeArraySize =
-        staticInfo(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, 2, 2);
+        staticInfo(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, 2, 4);
     if (!activeArraySize.count) return NO_INIT;
-    int32_t arrayWidth = activeArraySize.data.i32[0];
-    int32_t arrayHeight = activeArraySize.data.i32[1];
+    int32_t arrayWidth;
+    int32_t arrayHeight;
+    if (activeArraySize.count == 2) {
+        ALOGW("%s: Camera %d: activeArraySize is missing xmin/ymin!",
+                __FUNCTION__, cameraId);
+        arrayWidth = activeArraySize.data.i32[0];
+        arrayHeight = activeArraySize.data.i32[1];
+    } else if (activeArraySize.count == 4) {
+        arrayWidth = activeArraySize.data.i32[2];
+        arrayHeight = activeArraySize.data.i32[3];
+    } else return NO_INIT;
 
     camera_metadata_ro_entry_t availableFaceDetectModes =
-        staticInfo(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES);
-    if (!availableFaceDetectModes.count) return NO_INIT;
+        staticInfo(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, 0, 0,
+                false);
 
     uint8_t bestFaceDetectMode =
         ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
@@ -862,19 +880,21 @@
         }
     }
 
+    int32_t maxFaces = 0;
     camera_metadata_ro_entry_t maxFacesDetected =
-        staticInfo(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, 1, 1);
-    if (!maxFacesDetected.count) return NO_INIT;
-
-    int32_t maxFaces = maxFacesDetected.data.i32[0];
+        staticInfo(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, 0, 1, false);
+    if (maxFacesDetected.count) {
+        maxFaces = maxFacesDetected.data.i32[0];
+    }
 
     camera_metadata_ro_entry_t availableSceneModes =
-        staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
+        staticInfo(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, 0, 0, false);
     camera_metadata_ro_entry_t sceneModeOverrides =
-        staticInfo(ANDROID_CONTROL_SCENE_MODE_OVERRIDES);
+        staticInfo(ANDROID_CONTROL_SCENE_MODE_OVERRIDES, 0, 0, false);
     camera_metadata_ro_entry_t minFocusDistance =
-        staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE);
-    bool fixedLens = (minFocusDistance.data.f[0] == 0);
+        staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 0, false);
+    bool fixedLens = minFocusDistance.count == 0 ||
+        minFocusDistance.data.f[0] == 0;
 
     camera_metadata_ro_entry_t availableFocalLengths =
         staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
@@ -1465,7 +1485,7 @@
     }
     if (validatedParams.wbMode != wbMode) {
         camera_metadata_ro_entry_t availableWbModes =
-            staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES);
+            staticInfo(ANDROID_CONTROL_AWB_AVAILABLE_MODES, 0, 0, false);
         for (i = 0; i < availableWbModes.count; i++) {
             if (validatedParams.wbMode == availableWbModes.data.u8[i]) break;
         }
@@ -1496,8 +1516,9 @@
         validatedParams.currentAfTriggerId = -1;
         if (validatedParams.focusMode != Parameters::FOCUS_MODE_FIXED) {
             camera_metadata_ro_entry_t minFocusDistance =
-                staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE);
-            if (minFocusDistance.data.f[0] == 0) {
+                staticInfo(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, 0, 0,
+                        false);
+            if (minFocusDistance.count && minFocusDistance.data.f[0] == 0) {
                 ALOGE("%s: Requested focus mode \"%s\" is not available: "
                         "fixed focus lens",
                         __FUNCTION__,
@@ -1617,7 +1638,8 @@
     validatedParams.videoStabilization = boolFromString(
         newParams.get(CameraParameters::KEY_VIDEO_STABILIZATION) );
     camera_metadata_ro_entry_t availableVideoStabilizationModes =
-        staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES);
+        staticInfo(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, 0, 0,
+                false);
     if (validatedParams.videoStabilization &&
             availableVideoStabilizationModes.count == 1) {
         ALOGE("%s: Video stabilization not supported", __FUNCTION__);
@@ -2544,10 +2566,6 @@
             staticInfo(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 2, 2);
     if (!sensorSize.count) return NO_INIT;
 
-    camera_metadata_ro_entry_t availableFocalLengths =
-            staticInfo(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS);
-    if (!availableFocalLengths.count) return NO_INIT;
-
     float arrayAspect = static_cast<float>(fastInfo.arrayWidth) /
             fastInfo.arrayHeight;
     float stillAspect = static_cast<float>(pictureWidth) / pictureHeight;
diff --git a/services/camera/libcameraservice/camera2/Parameters.h b/services/camera/libcameraservice/camera2/Parameters.h
index be05b54..464830c 100644
--- a/services/camera/libcameraservice/camera2/Parameters.h
+++ b/services/camera/libcameraservice/camera2/Parameters.h
@@ -142,6 +142,7 @@
 
     uint32_t previewCallbackFlags;
     bool previewCallbackOneShot;
+    bool previewCallbackSurface;
 
     bool zslMode;
 
diff --git a/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp b/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp
index 2efeede..f085443 100644
--- a/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/camera3/Camera3OutputStream.cpp
@@ -301,8 +301,13 @@
         return res;
     }
 
-    ALOGV("%s: Consumer wants %d buffers", __FUNCTION__,
-            maxConsumerBuffers);
+    ALOGV("%s: Consumer wants %d buffers, HAL wants %d", __FUNCTION__,
+            maxConsumerBuffers, camera3_stream::max_buffers);
+    if (camera3_stream::max_buffers == 0) {
        ALOGE("%s: Camera HAL requested no max_buffers, requires at least 1",
                __FUNCTION__);
+        return INVALID_OPERATION;
+    }
 
     mTotalBufferCount = maxConsumerBuffers + camera3_stream::max_buffers;
     mDequeuedBufferCount = 0;
diff --git a/services/camera/libcameraservice/camera3/Camera3Stream.cpp b/services/camera/libcameraservice/camera3/Camera3Stream.cpp
index f05658a..ab563df 100644
--- a/services/camera/libcameraservice/camera3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/camera3/Camera3Stream.cpp
@@ -312,8 +312,10 @@
         // Got all buffers, register with HAL
         ALOGV("%s: Registering %d buffers with camera HAL",
                 __FUNCTION__, bufferCount);
+        ATRACE_BEGIN("camera3->register_stream_buffers");
         res = hal3Device->ops->register_stream_buffers(hal3Device,
                 &bufferSet);
+        ATRACE_END();
     }
 
     // Return all valid buffers to stream, in ERROR state to indicate
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
index cd39bad..dfa1066 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
@@ -214,7 +214,11 @@
         // In case the object was never pinned, pass the acquire fence
         // back to the release fence. If the fence was already waited on,
         // it'll just be a no-op to wait on it again.
-        err = addReleaseFenceLocked(item.mBuf, item.mFence);
+
+        // item.mGraphicBuffer was populated with the proper graphic-buffer
+        // at acquire even if it was previously acquired
+        err = addReleaseFenceLocked(item.mBuf,
+                item.mGraphicBuffer, item.mFence);
 
         if (err != OK) {
             BI_LOGE("Failed to add release fence to buffer "
@@ -226,7 +230,9 @@
         BI_LOGV("Attempting to release buffer timestamp %lld, frame %lld",
                 item.mTimestamp, item.mFrameNumber);
 
-        err = releaseBufferLocked(item.mBuf,
+        // item.mGraphicBuffer was populated with the proper graphic-buffer
+        // at acquire even if it was previously acquired
+        err = releaseBufferLocked(item.mBuf, item.mGraphicBuffer,
                                   EGL_NO_DISPLAY,
                                   EGL_NO_SYNC_KHR);
         if (err != OK) {
@@ -310,7 +316,8 @@
 
         RingBufferItem& find = *it;
         if (item.mGraphicBuffer == find.mGraphicBuffer) {
-            status_t res = addReleaseFenceLocked(item.mBuf, item.mFence);
+            status_t res = addReleaseFenceLocked(item.mBuf,
+                    item.mGraphicBuffer, item.mFence);
 
             if (res != OK) {
                 BI_LOGE("Failed to add release fence to buffer "