Merge "Limit the amount of audio record data in each buffer"
diff --git a/include/common_time/ICommonClock.h b/include/common_time/ICommonClock.h
new file mode 100644
index 0000000..d7073f1
--- /dev/null
+++ b/include/common_time/ICommonClock.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ICOMMONCLOCK_H
+#define ANDROID_ICOMMONCLOCK_H
+
+#include <stdint.h>
+#include <linux/socket.h>
+
+#include <binder/IInterface.h>
+#include <binder/IServiceManager.h>
+
+namespace android {
+
+class ICommonClockListener : public IInterface {
+  public:
+    DECLARE_META_INTERFACE(CommonClockListener);
+
+    virtual void onTimelineChanged(uint64_t timelineID) = 0;
+};
+
+class BnCommonClockListener : public BnInterface<ICommonClockListener> {
+  public:
+    virtual status_t onTransact(uint32_t code, const Parcel& data,
+                                Parcel* reply, uint32_t flags = 0);
+};
+
+class ICommonClock : public IInterface {
+  public:
+    DECLARE_META_INTERFACE(CommonClock);
+
+    // Name of the ICommonClock service registered with the service manager.
+    static const String16 kServiceName;
+
+    // a reserved invalid timeline ID
+    static const uint64_t kInvalidTimelineID;
+
+    // a reserved invalid error estimate
+    static const int32_t kErrorEstimateUnknown;
+
+    enum State {
+        // the device just came up and is trying to discover the master
+        STATE_INITIAL,
+
+        // the device is a client of a master
+        STATE_CLIENT,
+
+        // the device is acting as master
+        STATE_MASTER,
+
+        // the device has lost contact with its master and needs to participate
+        // in the election of a new master
+        STATE_RONIN,
+
+        // the device is waiting for announcement of the newly elected master
+        STATE_WAIT_FOR_ELECTION,
+    };
+
+    virtual status_t isCommonTimeValid(bool* valid, uint32_t* timelineID) = 0;
+    virtual status_t commonTimeToLocalTime(int64_t commonTime,
+                                           int64_t* localTime) = 0;
+    virtual status_t localTimeToCommonTime(int64_t localTime,
+                                           int64_t* commonTime) = 0;
+    virtual status_t getCommonTime(int64_t* commonTime) = 0;
+    virtual status_t getCommonFreq(uint64_t* freq) = 0;
+    virtual status_t getLocalTime(int64_t* localTime) = 0;
+    virtual status_t getLocalFreq(uint64_t* freq) = 0;
+    virtual status_t getEstimatedError(int32_t* estimate) = 0;
+    virtual status_t getTimelineID(uint64_t* id) = 0;
+    virtual status_t getState(State* state) = 0;
+    virtual status_t getMasterAddr(struct sockaddr_storage* addr) = 0;
+
+    virtual status_t registerListener(
+            const sp<ICommonClockListener>& listener) = 0;
+    virtual status_t unregisterListener(
+            const sp<ICommonClockListener>& listener) = 0;
+
+    // Simple helper to make it easier to connect to the CommonClock service.
+    static inline sp<ICommonClock> getInstance() {
+        sp<IBinder> binder = defaultServiceManager()->checkService(
+                ICommonClock::kServiceName);
+        sp<ICommonClock> clk = interface_cast<ICommonClock>(binder);
+        return clk;
+    }
+};
+
+class BnCommonClock : public BnInterface<ICommonClock> {
+  public:
+    virtual status_t onTransact(uint32_t code, const Parcel& data,
+                                Parcel* reply, uint32_t flags = 0);
+};
+
+};  // namespace android
+
+#endif  // ANDROID_ICOMMONCLOCK_H
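For orientation, a minimal client-side sketch of querying the service through the interface declared above; the helper name sampleCommonTime() and its error-code choices are illustrative only and are not part of this change.

// Hypothetical usage sketch, assuming only the declarations above.
#include <common_time/ICommonClock.h>

namespace android {

static status_t sampleCommonTime(int64_t* outCommonTime) {
    sp<ICommonClock> cc = ICommonClock::getInstance();
    if (cc == NULL)
        return NO_INIT;                // common_time.clock service not running

    bool valid = false;
    uint32_t timelineID = 0;
    status_t res = cc->isCommonTimeValid(&valid, &timelineID);
    if (res != OK)
        return res;
    if (!valid)
        return INVALID_OPERATION;      // no synchronized timeline established yet

    return cc->getCommonTime(outCommonTime);
}

}  // namespace android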
diff --git a/include/common_time/ICommonTimeConfig.h b/include/common_time/ICommonTimeConfig.h
new file mode 100644
index 0000000..497b666
--- /dev/null
+++ b/include/common_time/ICommonTimeConfig.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_ICOMMONTIMECONFIG_H
+#define ANDROID_ICOMMONTIMECONFIG_H
+
+#include <stdint.h>
+#include <linux/socket.h>
+
+#include <binder/IInterface.h>
+#include <binder/IServiceManager.h>
+
+namespace android {
+
+class String16;
+
+class ICommonTimeConfig : public IInterface {
+  public:
+    DECLARE_META_INTERFACE(CommonTimeConfig);
+
+    // Name of the ICommonTimeConfig service registered with the service
+    // manager.
+    static const String16 kServiceName;
+
+    virtual status_t getMasterElectionPriority(uint8_t *priority) = 0;
+    virtual status_t setMasterElectionPriority(uint8_t priority) = 0;
+    virtual status_t getMasterElectionEndpoint(struct sockaddr_storage *addr) = 0;
+    virtual status_t setMasterElectionEndpoint(const struct sockaddr_storage *addr) = 0;
+    virtual status_t getMasterElectionGroupId(uint64_t *id) = 0;
+    virtual status_t setMasterElectionGroupId(uint64_t id) = 0;
+    virtual status_t getInterfaceBinding(String16& ifaceName) = 0;
+    virtual status_t setInterfaceBinding(const String16& ifaceName) = 0;
+    virtual status_t getMasterAnnounceInterval(int *interval) = 0;
+    virtual status_t setMasterAnnounceInterval(int interval) = 0;
+    virtual status_t getClientSyncInterval(int *interval) = 0;
+    virtual status_t setClientSyncInterval(int interval) = 0;
+    virtual status_t getPanicThreshold(int *threshold) = 0;
+    virtual status_t setPanicThreshold(int threshold) = 0;
+    virtual status_t getAutoDisable(bool *autoDisable) = 0;
+    virtual status_t setAutoDisable(bool autoDisable) = 0;
+    virtual status_t forceNetworklessMasterMode() = 0;
+
+    // Simple helper to make it easier to connect to the CommonTimeConfig service.
+    static inline sp<ICommonTimeConfig> getInstance() {
+        sp<IBinder> binder = defaultServiceManager()->checkService(
+                ICommonTimeConfig::kServiceName);
+        sp<ICommonTimeConfig> clk = interface_cast<ICommonTimeConfig>(binder);
+        return clk;
+    }
+};
+
+class BnCommonTimeConfig : public BnInterface<ICommonTimeConfig> {
+  public:
+    virtual status_t onTransact(uint32_t code, const Parcel& data,
+                                Parcel* reply, uint32_t flags = 0);
+};
+
+};  // namespace android
+
+#endif  // ANDROID_ICOMMONTIMECONFIG_H
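Similarly, a small configuration sketch built only from the methods declared above; the helper name and the particular settings chosen are illustrative.

// Hypothetical configuration sketch, assuming only the declarations above.
#include <common_time/ICommonTimeConfig.h>

namespace android {

static status_t configureCommonTime(uint8_t priority, bool autoDisable) {
    sp<ICommonTimeConfig> cfg = ICommonTimeConfig::getInstance();
    if (cfg == NULL)
        return NO_INIT;                // common_time.config service not running

    status_t res = cfg->setMasterElectionPriority(priority);
    if (res != OK)
        return res;

    return cfg->setAutoDisable(autoDisable);
}

}  // namespace android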
diff --git a/include/common_time/cc_helper.h b/include/common_time/cc_helper.h
new file mode 100644
index 0000000..8c4d5c0
--- /dev/null
+++ b/include/common_time/cc_helper.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CC_HELPER_H__
+#define __CC_HELPER_H__
+
+#include <stdint.h>
+#include <common_time/ICommonClock.h>
+#include <utils/threads.h>
+
+namespace android {
+
+// CCHelper is a simple wrapper class to help with centralizing access to the
+// Common Clock service and implementing lifetime management, as well as to
+// implement a simple policy of making a basic attempt to reconnect to the
+// common clock service when things go wrong.
+//
+// On platforms which run the native common_time service in auto-disable mode,
+// the service will go into networkless mode whenever it has no active clients.
+// It tracks active clients using registered CommonClockListeners (the callback
+// interface for onTimelineChanged) since this provides a convenient death
+// handler notification for when the service's clients die unexpectedly.  This
+// means that users of the common time service should really always have a
+// CommonClockListener, unless they know that the time service is not running in
+// auto-disable mode, or that there is at least one other registered listener
+// active in the system.  The CCHelper makes this a little easier by sharing a
+// ref counted ICommonClock interface across all clients and automatically
+// registering and unregistering a listener whenever there are CCHelper
+// instances active in the process.
+class CCHelper {
+  public:
+    CCHelper();
+    ~CCHelper();
+
+    status_t isCommonTimeValid(bool* valid, uint32_t* timelineID);
+    status_t commonTimeToLocalTime(int64_t commonTime, int64_t* localTime);
+    status_t localTimeToCommonTime(int64_t localTime, int64_t* commonTime);
+    status_t getCommonTime(int64_t* commonTime);
+    status_t getCommonFreq(uint64_t* freq);
+    status_t getLocalTime(int64_t* localTime);
+    status_t getLocalFreq(uint64_t* freq);
+
+  private:
+    class CommonClockListener : public BnCommonClockListener {
+      public:
+        void onTimelineChanged(uint64_t timelineID);
+    };
+
+    static bool verifyClock_l();
+
+    static Mutex lock_;
+    static sp<ICommonClock> common_clock_;
+    static sp<ICommonClockListener> common_clock_listener_;
+    static uint32_t ref_count_;
+};
+
+
+}  // namespace android
+#endif  // __CC_HELPER_H__
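A short usage sketch of the policy described in the comment above: simply holding a CCHelper instance keeps the shared listener registered (and therefore keeps an auto-disable common_time service active). The helper name is illustrative.

// Hypothetical usage sketch, assuming only the CCHelper API above.
#include <common_time/cc_helper.h>

namespace android {

static status_t localToCommon(int64_t localTime, int64_t* commonTime) {
    CCHelper helper;   // registers the process-wide listener if not already registered
    return helper.localTimeToCommonTime(localTime, commonTime);
}

}  // namespace android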
diff --git a/include/common_time/local_clock.h b/include/common_time/local_clock.h
new file mode 100644
index 0000000..845d1c2
--- /dev/null
+++ b/include/common_time/local_clock.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __LOCAL_CLOCK_H__
+#define __LOCAL_CLOCK_H__
+
+#include <stdint.h>
+
+#include <hardware/local_time_hal.h>
+#include <utils/Errors.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class LocalClock {
+  public:
+     LocalClock();
+
+    bool initCheck();
+
+    int64_t  getLocalTime();
+    uint64_t getLocalFreq();
+    status_t setLocalSlew(int16_t rate);
+    int32_t  getDebugLog(struct local_time_debug_event* records,
+                         int max_records);
+
+  private:
+    static Mutex dev_lock_;
+    static local_time_hw_device_t* dev_;
+};
+
+}  // namespace android
+#endif  // __LOCAL_CLOCK_H__
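As a worked example of the tick/frequency pairing above, a sketch converting the raw local-time counter into nanoseconds; the helper name is illustrative.

// Hypothetical usage sketch, assuming only the LocalClock API above.
#include <common_time/local_clock.h>

namespace android {

static int64_t localTimeNowNs() {
    LocalClock clock;
    if (!clock.initCheck())
        return -1;                          // local time HAL failed to load

    uint64_t freq = clock.getLocalFreq();   // counter ticks per second
    if (freq == 0)
        return -1;

    int64_t ticks = clock.getLocalTime();
    // Split the conversion to avoid overflowing 64 bits for large tick counts.
    int64_t secs = ticks / static_cast<int64_t>(freq);
    int64_t rem  = ticks % static_cast<int64_t>(freq);
    return secs * 1000000000LL + (rem * 1000000000LL) / static_cast<int64_t>(freq);
}

}  // namespace android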
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index ca57f9e..437a89c 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -341,7 +341,7 @@
     private:
         friend class AudioRecord;
         virtual bool        threadLoop();
-        virtual status_t    readyToRun() { return NO_ERROR; }
+        virtual status_t    readyToRun();
         virtual void        onFirstRef() {}
         AudioRecord& mReceiver;
     };
@@ -359,7 +359,9 @@
     sp<IAudioRecord>        mAudioRecord;
     sp<IMemory>             mCblkMemory;
     sp<ClientRecordThread>  mClientRecordThread;
+    status_t                mReadyToRun;
     Mutex                   mLock;
+    Condition               mCondition;
 
     uint32_t                mFrameCount;
 
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index da99620..1916ac5 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -131,7 +131,7 @@
         NUM_CONFIG_EVENTS
     };
 
-    // audio output descritor used to cache output configurations in client process to avoid frequent calls
+    // audio output descriptor used to cache output configurations in client process to avoid frequent calls
     // through IAudioFlinger
     class OutputDescriptor {
     public:
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 11db81b..9f2bd3a 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -58,8 +58,8 @@
         EVENT_BUFFER_END = 5        // Playback head is at the end of the buffer.
     };
 
-    /* Create Buffer on the stack and pass it to obtainBuffer()
-     * and releaseBuffer().
+    /* Client should declare Buffer on the stack and pass address to obtainBuffer()
+     * and releaseBuffer().  See also callback_t for EVENT_MORE_DATA.
      */
 
     class Buffer
@@ -68,12 +68,16 @@
         enum {
             MUTE    = 0x00000001
         };
-        uint32_t    flags;
+        uint32_t    flags;        // 0 or MUTE
         audio_format_t format; // but AUDIO_FORMAT_PCM_8_BIT -> AUDIO_FORMAT_PCM_16_BIT
         // accessed directly by WebKit ANP callback
         int         channelCount; // will be removed in the future, do not use
-        size_t      frameCount;
-        size_t      size;
+
+        size_t      frameCount;   // number of sample frames corresponding to size;
+                                  // on input it is the number of frames desired,
+                                  // on output it is the number of frames actually filled
+
+        size_t      size;         // input/output in byte units
         union {
             void*       raw;
             short*      i16;    // signed 16-bit
@@ -84,15 +88,15 @@
 
     /* As a convenience, if a callback is supplied, a handler thread
      * is automatically created with the appropriate priority. This thread
-     * invokes the callback when a new buffer becomes available or an underrun condition occurs.
+     * invokes the callback when a new buffer becomes available or various conditions occur.
      * Parameters:
      *
      * event:   type of event notified (see enum AudioTrack::event_type).
      * user:    Pointer to context for use by the callback receiver.
      * info:    Pointer to optional parameter according to event type:
      *          - EVENT_MORE_DATA: pointer to AudioTrack::Buffer struct. The callback must not write
-     *          more bytes than indicated by 'size' field and update 'size' if less bytes are
-     *          written.
+     *            more bytes than indicated by 'size' field and update 'size' if fewer bytes are
+     *            written.
      *          - EVENT_UNDERRUN: unused.
      *          - EVENT_LOOP_END: pointer to an int indicating the number of loops remaining.
      *          - EVENT_MARKER: pointer to an uint32_t containing the marker position in frames.
@@ -225,7 +229,7 @@
      */
             uint32_t     latency() const;
 
-    /* getters, see constructor */
+    /* getters, see constructors and set() */
 
             audio_stream_type_t streamType() const;
             audio_format_t format() const;
@@ -299,7 +303,6 @@
      *          (loopEnd-loopStart) <= framecount()
      */
             status_t    setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount);
-            status_t    getLoop(uint32_t *loopStart, uint32_t *loopEnd, int *loopCount) const;
 
     /* Sets marker position. When playback reaches the number of frames specified, a callback with
      * event type EVENT_MARKER is called. Calling setMarkerPosition with marker == 0 cancels marker
@@ -401,13 +404,19 @@
             status_t    attachAuxEffect(int effectId);
 
     /* Obtains a buffer of "frameCount" frames. The buffer must be
-     * filled entirely. If the track is stopped, obtainBuffer() returns
+     * filled entirely, and then released with releaseBuffer().
+     * If the track is stopped, obtainBuffer() returns
      * STOPPED instead of NO_ERROR as long as there are buffers available,
      * at which point NO_MORE_BUFFERS is returned.
      * Buffers will be returned until the pool (buffercount())
      * is exhausted, at which point obtainBuffer() will either block
      * or return WOULD_BLOCK depending on the value of the "blocking"
      * parameter.
+     *
+     * Interpretation of waitCount:
+     *  +n  limits wait time to n * WAIT_PERIOD_MS,
+     *  -1  causes an (almost) infinite wait time,
+     *   0  non-blocking.
      */
 
         enum {
@@ -416,12 +425,19 @@
         };
 
             status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount);
+
+    /* Release a filled buffer of "frameCount" frames for AudioFlinger to process. */
             void        releaseBuffer(Buffer* audioBuffer);
 
     /* As a convenience we provide a write() interface to the audio buffer.
-     * This is implemented on top of lockBuffer/unlockBuffer. For best
-     * performance use callbacks. Return actual number of bytes written.
-     *
+     * This is implemented on top of obtainBuffer/releaseBuffer. For best
+     * performance use callbacks. Returns actual number of bytes written >= 0,
+     * or one of the following negative status codes:
+     *      INVALID_OPERATION   AudioTrack is configured for shared buffer mode
+     *      BAD_VALUE           size is invalid
+     *      STOPPED             AudioTrack was stopped during the write
+     *      NO_MORE_BUFFERS     when obtainBuffer() returns the same error
+     *      or any other error code returned by IAudioTrack::start() or restoreTrack_l().
      */
             ssize_t     write(const void* buffer, size_t size);
 
@@ -430,7 +446,7 @@
      */
             status_t dump(int fd, const Vector<String16>& args) const;
 
-private:
+protected:
     /* copying audio tracks is not allowed */
                         AudioTrack(const AudioTrack& other);
             AudioTrack& operator = (const AudioTrack& other);
@@ -448,6 +464,7 @@
         AudioTrack& mReceiver;
     };
 
+            // body of AudioTrackThread::threadLoop()
             bool processAudioBuffer(const sp<AudioTrackThread>& thread);
             status_t createTrack_l(audio_stream_type_t streamType,
                                  uint32_t sampleRate,
@@ -484,7 +501,7 @@
 
     bool                    mActive;                // protected by mLock
 
-    callback_t              mCbf;
+    callback_t              mCbf;                   // callback handler for events, or NULL
     void*                   mUserData;
     uint32_t                mNotificationFramesReq; // requested number of frames between each notification callback
     uint32_t                mNotificationFramesAct; // actual number of frames between each notification callback
@@ -501,10 +518,33 @@
     int                     mAuxEffectId;
     mutable Mutex           mLock;
     status_t                mRestoreStatus;
+    bool                    mIsTimed;
     int                     mPreviousPriority;          // before start()
     int                     mPreviousSchedulingGroup;
 };
 
+class TimedAudioTrack : public AudioTrack
+{
+public:
+    TimedAudioTrack();
+
+    /* allocate a shared memory buffer that can be passed to queueTimedBuffer */
+    status_t allocateTimedBuffer(size_t size, sp<IMemory>* buffer);
+
+    /* queue a buffer obtained via allocateTimedBuffer for playback at the
+       given timestamp.  PTS units are microseconds on the media time timeline.
+       The media time transform set by the audio producer (via
+       setMediaTimeTransform) handles converting from media time to local time
+       (perhaps going through the common time timeline in the case of
+       synchronized multiroom audio). */
+    status_t queueTimedBuffer(const sp<IMemory>& buffer, int64_t pts);
+
+    /* define a transform between media time and either common time or
+       local time */
+    enum TargetTimeline {LOCAL_TIME, COMMON_TIME};
+    status_t setMediaTimeTransform(const LinearTransform& xform,
+                                   TargetTimeline target);
+};
 
 }; // namespace android
 
diff --git a/include/media/EffectsFactoryApi.h b/include/media/EffectsFactoryApi.h
index df83995..65c26f4 100644
--- a/include/media/EffectsFactoryApi.h
+++ b/include/media/EffectsFactoryApi.h
@@ -87,7 +87,7 @@
 //    Description:    Creates an effect engine of the specified type and returns an
 //          effect control interface on this engine. The function will allocate the
 //          resources for an instance of the requested effect engine and return
-//          a handler on the effect control interface.
+//          a handle on the effect control interface.
 //
 //    Input:
 //          pEffectUuid:    pointer to the effect uuid.
@@ -115,17 +115,17 @@
 //
 //    Function:       EffectRelease
 //
-//    Description:    Releases the effect engine whose handler is given as argument.
+//    Description:    Releases the effect engine whose handle is given as argument.
 //          All resources allocated to this particular instance of the effect are
 //          released.
 //
 //    Input:
-//          handle:    handler on the effect interface to be released.
+//          handle:    handle on the effect interface to be released.
 //
 //    Output:
 //        returned value:    0          successful operation.
 //                          -ENODEV     factory failed to initialize
-//                          -EINVAL     invalid interface handler
+//                          -EINVAL     invalid interface handle
 //
 ////////////////////////////////////////////////////////////////////////////////
 int EffectRelease(effect_handle_t handle);
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 433ce7c..7a2ada0 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -55,6 +55,7 @@
                                 uint32_t flags,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_io_handle_t output,
+                                bool isTimed,
                                 int *sessionId,
                                 status_t *status) = 0;
 
diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h
index 46735de..7869020 100644
--- a/include/media/IAudioRecord.h
+++ b/include/media/IAudioRecord.h
@@ -37,8 +37,9 @@
 
     /* After it's created the track is not active. Call start() to
      * make it active. If set, the callback will start being called.
+     * tid identifies the client callback thread, or 0 if not needed.
      */
-    virtual status_t    start() = 0;
+    virtual status_t    start(pid_t tid) = 0;
 
     /* Stop a track. If set, the callback will cease being called and
      * obtainBuffer will return an error. Buffers that are already released 
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
index b83e552..77f3e21 100644
--- a/include/media/IAudioTrack.h
+++ b/include/media/IAudioTrack.h
@@ -24,7 +24,7 @@
 #include <utils/Errors.h>
 #include <binder/IInterface.h>
 #include <binder/IMemory.h>
-
+#include <utils/LinearTransform.h>
 
 namespace android {
 
@@ -40,17 +40,18 @@
 
     /* After it's created the track is not active. Call start() to
      * make it active. If set, the callback will start being called.
+     * tid identifies the client callback thread, or 0 if not needed.
      */
-    virtual status_t    start() = 0;
+    virtual status_t    start(pid_t tid) = 0;
 
     /* Stop a track. If set, the callback will cease being called and
      * obtainBuffer will return an error. Buffers that are already released 
-     * will be processed, unless flush() is called.
+     * will continue to be processed, unless/until flush() is called.
      */
     virtual void        stop() = 0;
 
-    /* Flush a stopped track. All pending buffers are discarded.
-     * This function has no effect if the track is not stopped.
+    /* Flush a stopped or paused track. All pending/released buffers are discarded.
+     * This function has no effect if the track is not stopped or paused.
      */
     virtual void        flush() = 0;
 
@@ -61,7 +62,7 @@
     
     /* Pause a track. If set, the callback will cease being called and
      * obtainBuffer will return an error. Buffers that are already released 
-     * will be processed, unless flush() is called.
+     * will continue to be processed, unless/until flush() is called.
      */
     virtual void        pause() = 0;
 
@@ -70,6 +71,23 @@
      */
     virtual status_t    attachAuxEffect(int effectId) = 0;
 
+
+    /* Allocate a shared memory buffer suitable for holding timed audio
+       samples */
+    virtual status_t    allocateTimedBuffer(size_t size,
+                                            sp<IMemory>* buffer) = 0;
+
+    /* Queue a buffer obtained via allocateTimedBuffer for playback at the given
+       timestamp */
+    virtual status_t    queueTimedBuffer(const sp<IMemory>& buffer,
+                                         int64_t pts) = 0;
+
+    /* Define the linear transform that will be applied to the timestamps
+       given to queueTimedBuffer (which are expressed in media time).
+       Target specifies whether this transform converts media time to local time
+       or common time. The values for target are defined in AudioTrack.h */
+    virtual status_t    setMediaTimeTransform(const LinearTransform& xform,
+                                              int target) = 0;
 };
 
 // ----------------------------------------------------------------------------
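A producer-side sketch of the timed-buffer flow introduced above, using the TimedAudioTrack wrapper from AudioTrack.h. The LinearTransform field names (a_zero, b_zero, a_to_b_numer, a_to_b_denom) are assumed from utils/LinearTransform.h; the helper name and the 1:1 rate are illustrative.

// Hypothetical timed-buffer producer sketch (names and rates are assumptions).
#include <media/AudioTrack.h>
#include <binder/IMemory.h>
#include <utils/LinearTransform.h>
#include <string.h>

namespace android {

static status_t queuePcmAt(TimedAudioTrack& track,
                           const void* pcm, size_t bytes,
                           int64_t mediaTimeUs, int64_t commonTimeAtZeroUs) {
    // Map media time (us) 1:1 onto the common timeline, offset so that media
    // time 0 plays at commonTimeAtZeroUs.
    LinearTransform xform;
    xform.a_zero = 0;                    // media-time origin
    xform.b_zero = commonTimeAtZeroUs;   // common time of the media-time origin
    xform.a_to_b_numer = 1;
    xform.a_to_b_denom = 1;
    status_t res = track.setMediaTimeTransform(xform, TimedAudioTrack::COMMON_TIME);
    if (res != OK)
        return res;

    sp<IMemory> buffer;
    res = track.allocateTimedBuffer(bytes, &buffer);
    if (res != OK)
        return res;

    memcpy(buffer->pointer(), pcm, bytes);    // fill the shared-memory buffer
    return track.queueTimedBuffer(buffer, mediaTimeUs);
}

}  // namespace android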
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 77c82b2..23a3e49 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -46,6 +46,9 @@
     // The shared library with the test player is passed as an
     // argument to the 'test:' url in the setDataSource call.
     TEST_PLAYER = 5,
+
+    AAH_RX_PLAYER = 100,
+    AAH_TX_PLAYER = 101,
 };
 
 
diff --git a/include/media/MemoryLeakTrackUtil.h b/include/media/MemoryLeakTrackUtil.h
index 290b748..ac0f6b2 100644
--- a/include/media/MemoryLeakTrackUtil.h
+++ b/include/media/MemoryLeakTrackUtil.h
@@ -19,7 +19,7 @@
 
 namespace android {
 /*
- * Dump the memory adddress of the calling process to the given fd.
+ * Dump the memory address of the calling process to the given fd.
  */
 extern void dumpMemoryAddresses(int fd);
 
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 23226c0..af2db93 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -62,16 +62,23 @@
     // are in the same line of data cache.
                 Mutex       lock;           // sizeof(int)
                 Condition   cv;             // sizeof(int)
+
+                // next 4 are offsets within "buffers"
     volatile    uint32_t    user;
     volatile    uint32_t    server;
                 uint32_t    userBase;
                 uint32_t    serverBase;
+
+                // if there is a shared buffer, "buffers" is the value of pointer() for the shared
+                // buffer, otherwise "buffers" points immediately after the control block
                 void*       buffers;
                 uint32_t    frameCount;
+
                 // Cache line boundary
+
                 uint32_t    loopStart;
-                uint32_t    loopEnd;
-                int         loopCount;
+                uint32_t    loopEnd;        // read-only for server, read/write for client
+                int         loopCount;      // read/write for client
 
                 // Channel volumes are fixed point U4.12, so 0x1000 means 1.0.
                 // Left channel is in [0:15], right channel is in [16:31].
@@ -82,29 +89,39 @@
 public:
 
                 uint32_t    sampleRate;
+
                 // NOTE: audio_track_cblk_t::frameSize is not equal to AudioTrack::frameSize() for
                 // 8 bit PCM data: in this case,  mCblk->frameSize is based on a sample size of
                 // 16 bit because data is converted to 16 bit before being stored in buffer
 
+                // read-only for client, server writes once at initialization and is then read-only
                 uint8_t     frameSize;       // would normally be size_t, but 8 bits is plenty
+
+                // never used
                 uint8_t     pad1;
+
+                // used by client only
                 uint16_t    bufferTimeoutMs; // Maximum cumulated timeout before restarting audioflinger
 
-                uint16_t    waitTimeMs;      // Cumulated wait time
+                uint16_t    waitTimeMs;      // Cumulated wait time, used by client only
 private:
+                // client write-only, server read-only
                 uint16_t    mSendLevel;      // Fixed point U4.12 so 0x1000 means 1.0
 public:
     volatile    int32_t     flags;
 
                 // Cache line boundary (32 bytes)
 
+                // Since the control block is always located in shared memory, this constructor
+                // is only used for placement new().  It is never used for regular new() or stack.
                             audio_track_cblk_t();
-                uint32_t    stepUser(uint32_t frameCount);
-                bool        stepServer(uint32_t frameCount);
+                uint32_t    stepUser(uint32_t frameCount);      // called by client only, where
+                // client includes regular AudioTrack and AudioFlinger::PlaybackThread::OutputTrack
+                bool        stepServer(uint32_t frameCount);    // called by server only
                 void*       buffer(uint32_t offset) const;
                 uint32_t    framesAvailable();
                 uint32_t    framesAvailable_l();
-                uint32_t    framesReady();
+                uint32_t    framesReady();                      // called by server only
                 bool        tryLock();
 
                 // No barriers on the following operations, so the ordering of loads/stores
diff --git a/media/common_time/Android.mk b/media/common_time/Android.mk
new file mode 100644
index 0000000..526f17b
--- /dev/null
+++ b/media/common_time/Android.mk
@@ -0,0 +1,21 @@
+LOCAL_PATH:= $(call my-dir)
+#
+# libcommon_time_client
+# (binder marshalers for ICommonClock as well as common clock and local clock
+# helper code)
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libcommon_time_client
+LOCAL_MODULE_TAGS := optional
+LOCAL_SRC_FILES := cc_helper.cpp \
+                   local_clock.cpp \
+                   ICommonClock.cpp \
+                   ICommonTimeConfig.cpp \
+                   utils.cpp
+LOCAL_SHARED_LIBRARIES := libbinder \
+                          libhardware \
+                          libutils
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/common_time/ICommonClock.cpp b/media/common_time/ICommonClock.cpp
new file mode 100644
index 0000000..28b43ac
--- /dev/null
+++ b/media/common_time/ICommonClock.cpp
@@ -0,0 +1,432 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <linux/socket.h>
+
+#include <common_time/ICommonClock.h>
+#include <binder/Parcel.h>
+
+#include "utils.h"
+
+namespace android {
+
+/***** ICommonClock *****/
+
+enum {
+    IS_COMMON_TIME_VALID = IBinder::FIRST_CALL_TRANSACTION,
+    COMMON_TIME_TO_LOCAL_TIME,
+    LOCAL_TIME_TO_COMMON_TIME,
+    GET_COMMON_TIME,
+    GET_COMMON_FREQ,
+    GET_LOCAL_TIME,
+    GET_LOCAL_FREQ,
+    GET_ESTIMATED_ERROR,
+    GET_TIMELINE_ID,
+    GET_STATE,
+    GET_MASTER_ADDRESS,
+    REGISTER_LISTENER,
+    UNREGISTER_LISTENER,
+};
+
+const String16 ICommonClock::kServiceName("common_time.clock");
+const uint64_t ICommonClock::kInvalidTimelineID = 0;
+const int32_t ICommonClock::kErrorEstimateUnknown = 0x7FFFFFFF;
+
+class BpCommonClock : public BpInterface<ICommonClock>
+{
+  public:
+    BpCommonClock(const sp<IBinder>& impl)
+        : BpInterface<ICommonClock>(impl) {}
+
+    virtual status_t isCommonTimeValid(bool* valid, uint32_t* timelineID) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(IS_COMMON_TIME_VALID,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *valid = reply.readInt32();
+                *timelineID = reply.readInt32();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t commonTimeToLocalTime(int64_t commonTime,
+            int64_t* localTime) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        data.writeInt64(commonTime);
+        status_t status = remote()->transact(COMMON_TIME_TO_LOCAL_TIME,
+                data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *localTime = reply.readInt64();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t localTimeToCommonTime(int64_t localTime,
+            int64_t* commonTime) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        data.writeInt64(localTime);
+        status_t status = remote()->transact(LOCAL_TIME_TO_COMMON_TIME,
+                data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *commonTime = reply.readInt64();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getCommonTime(int64_t* commonTime) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_COMMON_TIME, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *commonTime = reply.readInt64();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getCommonFreq(uint64_t* freq) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_COMMON_FREQ, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *freq = reply.readInt64();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getLocalTime(int64_t* localTime) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_LOCAL_TIME, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *localTime = reply.readInt64();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getLocalFreq(uint64_t* freq) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_LOCAL_FREQ, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *freq = reply.readInt64();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getEstimatedError(int32_t* estimate) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_ESTIMATED_ERROR, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *estimate = reply.readInt32();
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getTimelineID(uint64_t* id) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_TIMELINE_ID, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *id = static_cast<uint64_t>(reply.readInt64());
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getState(State* state) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_STATE, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *state = static_cast<State>(reply.readInt32());
+            }
+        }
+        return status;
+    }
+
+    virtual status_t getMasterAddr(struct sockaddr_storage* addr) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_MASTER_ADDRESS, data, &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK)
+                deserializeSockaddr(&reply, addr);
+        }
+        return status;
+    }
+
+    virtual status_t registerListener(
+            const sp<ICommonClockListener>& listener) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        data.writeStrongBinder(listener->asBinder());
+
+        status_t status = remote()->transact(REGISTER_LISTENER, data, &reply);
+
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t unregisterListener(
+            const sp<ICommonClockListener>& listener) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor());
+        data.writeStrongBinder(listener->asBinder());
+        status_t status = remote()->transact(UNREGISTER_LISTENER, data, &reply);
+
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+};
+
+IMPLEMENT_META_INTERFACE(CommonClock, "android.os.ICommonClock");
+
+status_t BnCommonClock::onTransact(uint32_t code,
+                                   const Parcel& data,
+                                   Parcel* reply,
+                                   uint32_t flags) {
+    switch(code) {
+        case IS_COMMON_TIME_VALID: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            bool valid;
+            uint32_t timelineID;
+            status_t status = isCommonTimeValid(&valid, &timelineID);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(valid);
+                reply->writeInt32(timelineID);
+            }
+            return OK;
+        } break;
+
+        case COMMON_TIME_TO_LOCAL_TIME: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            int64_t commonTime = data.readInt64();
+            int64_t localTime;
+            status_t status = commonTimeToLocalTime(commonTime, &localTime);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(localTime);
+            }
+            return OK;
+        } break;
+
+        case LOCAL_TIME_TO_COMMON_TIME: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            int64_t localTime = data.readInt64();
+            int64_t commonTime;
+            status_t status = localTimeToCommonTime(localTime, &commonTime);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(commonTime);
+            }
+            return OK;
+        } break;
+
+        case GET_COMMON_TIME: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            int64_t commonTime;
+            status_t status = getCommonTime(&commonTime);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(commonTime);
+            }
+            return OK;
+        } break;
+
+        case GET_COMMON_FREQ: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            uint64_t freq;
+            status_t status = getCommonFreq(&freq);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(freq);
+            }
+            return OK;
+        } break;
+
+        case GET_LOCAL_TIME: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            int64_t localTime;
+            status_t status = getLocalTime(&localTime);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(localTime);
+            }
+            return OK;
+        } break;
+
+        case GET_LOCAL_FREQ: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            uint64_t freq;
+            status_t status = getLocalFreq(&freq);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(freq);
+            }
+            return OK;
+        } break;
+
+        case GET_ESTIMATED_ERROR: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            int32_t error;
+            status_t status = getEstimatedError(&error);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(error);
+            }
+            return OK;
+        } break;
+
+        case GET_TIMELINE_ID: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            uint64_t id;
+            status_t status = getTimelineID(&id);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(static_cast<int64_t>(id));
+            }
+            return OK;
+        } break;
+
+        case GET_STATE: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            State state;
+            status_t status = getState(&state);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(static_cast<int32_t>(state));
+            }
+            return OK;
+        } break;
+
+        case GET_MASTER_ADDRESS: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            struct sockaddr_storage addr;
+            status_t status = getMasterAddr(&addr);
+
+            if ((status == OK) && !canSerializeSockaddr(&addr)) {
+                status = UNKNOWN_ERROR;
+            }
+
+            reply->writeInt32(status);
+
+            if (status == OK) {
+                serializeSockaddr(reply, &addr);
+            }
+
+            return OK;
+        } break;
+
+        case REGISTER_LISTENER: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            sp<ICommonClockListener> listener =
+                interface_cast<ICommonClockListener>(data.readStrongBinder());
+            status_t status = registerListener(listener);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case UNREGISTER_LISTENER: {
+            CHECK_INTERFACE(ICommonClock, data, reply);
+            sp<ICommonClockListener> listener =
+                interface_cast<ICommonClockListener>(data.readStrongBinder());
+            status_t status = unregisterListener(listener);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+    }
+    return BBinder::onTransact(code, data, reply, flags);
+}
+
+/***** ICommonClockListener *****/
+
+enum {
+    ON_TIMELINE_CHANGED = IBinder::FIRST_CALL_TRANSACTION,
+};
+
+class BpCommonClockListener : public BpInterface<ICommonClockListener>
+{
+  public:
+    BpCommonClockListener(const sp<IBinder>& impl)
+        : BpInterface<ICommonClockListener>(impl) {}
+
+    virtual void onTimelineChanged(uint64_t timelineID) {
+        Parcel data, reply;
+        data.writeInterfaceToken(
+                ICommonClockListener::getInterfaceDescriptor());
+        data.writeInt64(timelineID);
+        remote()->transact(ON_TIMELINE_CHANGED, data, &reply);
+    }
+};
+
+IMPLEMENT_META_INTERFACE(CommonClockListener,
+                         "android.os.ICommonClockListener");
+
+status_t BnCommonClockListener::onTransact(
+        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
+    switch(code) {
+        case ON_TIMELINE_CHANGED: {
+            CHECK_INTERFACE(ICommonClockListener, data, reply);
+            uint64_t timelineID = data.readInt64();
+            onTimelineChanged(timelineID);
+            return NO_ERROR;
+        } break;
+    }
+
+    return BBinder::onTransact(code, data, reply, flags);
+}
+
+}; // namespace android
diff --git a/media/common_time/ICommonTimeConfig.cpp b/media/common_time/ICommonTimeConfig.cpp
new file mode 100644
index 0000000..8eb37cb
--- /dev/null
+++ b/media/common_time/ICommonTimeConfig.cpp
@@ -0,0 +1,508 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <linux/socket.h>
+
+#include <common_time/ICommonTimeConfig.h>
+#include <binder/Parcel.h>
+
+#include "utils.h"
+
+namespace android {
+
+/***** ICommonTimeConfig *****/
+
+enum {
+    GET_MASTER_ELECTION_PRIORITY = IBinder::FIRST_CALL_TRANSACTION,
+    SET_MASTER_ELECTION_PRIORITY,
+    GET_MASTER_ELECTION_ENDPOINT,
+    SET_MASTER_ELECTION_ENDPOINT,
+    GET_MASTER_ELECTION_GROUP_ID,
+    SET_MASTER_ELECTION_GROUP_ID,
+    GET_INTERFACE_BINDING,
+    SET_INTERFACE_BINDING,
+    GET_MASTER_ANNOUNCE_INTERVAL,
+    SET_MASTER_ANNOUNCE_INTERVAL,
+    GET_CLIENT_SYNC_INTERVAL,
+    SET_CLIENT_SYNC_INTERVAL,
+    GET_PANIC_THRESHOLD,
+    SET_PANIC_THRESHOLD,
+    GET_AUTO_DISABLE,
+    SET_AUTO_DISABLE,
+    FORCE_NETWORKLESS_MASTER_MODE,
+};
+
+const String16 ICommonTimeConfig::kServiceName("common_time.config");
+
+class BpCommonTimeConfig : public BpInterface<ICommonTimeConfig>
+{
+  public:
+    BpCommonTimeConfig(const sp<IBinder>& impl)
+        : BpInterface<ICommonTimeConfig>(impl) {}
+
+    virtual status_t getMasterElectionPriority(uint8_t *priority) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_MASTER_ELECTION_PRIORITY,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *priority = static_cast<uint8_t>(reply.readInt32());
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setMasterElectionPriority(uint8_t priority) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeInt32(static_cast<int32_t>(priority));
+        status_t status = remote()->transact(SET_MASTER_ELECTION_PRIORITY,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getMasterElectionEndpoint(struct sockaddr_storage *addr) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_MASTER_ELECTION_ENDPOINT,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                deserializeSockaddr(&reply, addr);
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setMasterElectionEndpoint(
+            const struct sockaddr_storage *addr) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        if (NULL == addr) {
+            data.writeInt32(0);
+        } else {
+            if (!canSerializeSockaddr(addr))
+                return BAD_VALUE;
+            data.writeInt32(1);
+            serializeSockaddr(&data, addr);
+        }
+        status_t status = remote()->transact(SET_MASTER_ELECTION_ENDPOINT,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getMasterElectionGroupId(uint64_t *id) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_MASTER_ELECTION_GROUP_ID,
+                                             data,
+                                             &reply);
+
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *id = static_cast<uint64_t>(reply.readInt64());
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setMasterElectionGroupId(uint64_t id) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeInt64(id);
+        status_t status = remote()->transact(SET_MASTER_ELECTION_GROUP_ID,
+                                             data,
+                                             &reply);
+
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getInterfaceBinding(String16& ifaceName) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_INTERFACE_BINDING,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                ifaceName = reply.readString16();
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setInterfaceBinding(const String16& ifaceName) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeString16(ifaceName);
+        status_t status = remote()->transact(SET_INTERFACE_BINDING,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getMasterAnnounceInterval(int *interval) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_MASTER_ANNOUNCE_INTERVAL,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *interval = reply.readInt32();
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setMasterAnnounceInterval(int interval) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeInt32(interval);
+        status_t status = remote()->transact(SET_MASTER_ANNOUNCE_INTERVAL,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getClientSyncInterval(int *interval) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_CLIENT_SYNC_INTERVAL,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *interval = reply.readInt32();
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setClientSyncInterval(int interval) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeInt32(interval);
+        status_t status = remote()->transact(SET_CLIENT_SYNC_INTERVAL,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getPanicThreshold(int *threshold) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_PANIC_THRESHOLD,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *threshold = reply.readInt32();
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setPanicThreshold(int threshold) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeInt32(threshold);
+        status_t status = remote()->transact(SET_PANIC_THRESHOLD,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t getAutoDisable(bool *autoDisable) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_AUTO_DISABLE,
+                                             data,
+                                             &reply);
+        if (status == OK) {
+            status = reply.readInt32();
+            if (status == OK) {
+                *autoDisable = (0 != reply.readInt32());
+            }
+        }
+
+        return status;
+    }
+
+    virtual status_t setAutoDisable(bool autoDisable) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        data.writeInt32(autoDisable ? 1 : 0);
+        status_t status = remote()->transact(SET_AUTO_DISABLE,
+                                             data,
+                                             &reply);
+
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+
+    virtual status_t forceNetworklessMasterMode() {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor());
+        status_t status = remote()->transact(FORCE_NETWORKLESS_MASTER_MODE,
+                                             data,
+                                             &reply);
+
+        if (status == OK) {
+            status = reply.readInt32();
+        }
+
+        return status;
+    }
+};
+
+IMPLEMENT_META_INTERFACE(CommonTimeConfig, "android.os.ICommonTimeConfig");
+
+status_t BnCommonTimeConfig::onTransact(uint32_t code,
+                                   const Parcel& data,
+                                   Parcel* reply,
+                                   uint32_t flags) {
+    switch(code) {
+        case GET_MASTER_ELECTION_PRIORITY: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            uint8_t priority;
+            status_t status = getMasterElectionPriority(&priority);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(static_cast<int32_t>(priority));
+            }
+            return OK;
+        } break;
+
+        case SET_MASTER_ELECTION_PRIORITY: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            uint8_t priority = static_cast<uint8_t>(data.readInt32());
+            status_t status = setMasterElectionPriority(priority);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_MASTER_ELECTION_ENDPOINT: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            struct sockaddr_storage addr;
+            status_t status = getMasterElectionEndpoint(&addr);
+
+            if ((status == OK) && !canSerializeSockaddr(&addr)) {
+                status = UNKNOWN_ERROR;
+            }
+
+            reply->writeInt32(status);
+
+            if (status == OK) {
+                serializeSockaddr(reply, &addr);
+            }
+
+            return OK;
+        } break;
+
+        case SET_MASTER_ELECTION_ENDPOINT: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            struct sockaddr_storage addr;
+            int hasAddr = data.readInt32();
+
+            status_t status;
+            if (hasAddr) {
+                deserializeSockaddr(&data, &addr);
+                status = setMasterElectionEndpoint(&addr);
+            } else {
+                status = setMasterElectionEndpoint(NULL);
+            }
+
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_MASTER_ELECTION_GROUP_ID: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            uint64_t id;
+            status_t status = getMasterElectionGroupId(&id);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt64(id);
+            }
+            return OK;
+        } break;
+
+        case SET_MASTER_ELECTION_GROUP_ID: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            uint64_t id = static_cast<uint64_t>(data.readInt64());
+            status_t status = setMasterElectionGroupId(id);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_INTERFACE_BINDING: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            String16 ret;
+            status_t status = getInterfaceBinding(ret);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeString16(ret);
+            }
+            return OK;
+        } break;
+
+        case SET_INTERFACE_BINDING: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            String16 ifaceName;
+            ifaceName = data.readString16();
+            status_t status = setInterfaceBinding(ifaceName);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_MASTER_ANNOUNCE_INTERVAL: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            int interval;
+            status_t status = getMasterAnnounceInterval(&interval);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(interval);
+            }
+            return OK;
+        } break;
+
+        case SET_MASTER_ANNOUNCE_INTERVAL: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            int interval = data.readInt32();
+            status_t status = setMasterAnnounceInterval(interval);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_CLIENT_SYNC_INTERVAL: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            int interval;
+            status_t status = getClientSyncInterval(&interval);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(interval);
+            }
+            return OK;
+        } break;
+
+        case SET_CLIENT_SYNC_INTERVAL: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            int interval = data.readInt32();
+            status_t status = setClientSyncInterval(interval);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_PANIC_THRESHOLD: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            int threshold;
+            status_t status = getPanicThreshold(&threshold);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(threshold);
+            }
+            return OK;
+        } break;
+
+        case SET_PANIC_THRESHOLD: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            int threshold = data.readInt32();
+            status_t status = setPanicThreshold(threshold);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case GET_AUTO_DISABLE: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            bool autoDisable;
+            status_t status = getAutoDisable(&autoDisable);
+            reply->writeInt32(status);
+            if (status == OK) {
+                reply->writeInt32(autoDisable ? 1 : 0);
+            }
+            return OK;
+        } break;
+
+        case SET_AUTO_DISABLE: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            bool autoDisable = (0 != data.readInt32());
+            status_t status = setAutoDisable(autoDisable);
+            reply->writeInt32(status);
+            return OK;
+        } break;
+
+        case FORCE_NETWORKLESS_MASTER_MODE: {
+            CHECK_INTERFACE(ICommonTimeConfig, data, reply);
+            status_t status = forceNetworklessMasterMode();
+            reply->writeInt32(status);
+            return OK;
+        } break;
+    }
+    return BBinder::onTransact(code, data, reply, flags);
+}
+
+}; // namespace android
+
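
For orientation, here is a minimal sketch of how a configuration client might reach this interface over binder. It is illustrative only: the service name string and the header location are assumptions (both live in the ICommonTimeConfig header, which is outside this hunk), and error handling is abbreviated.

    // Hypothetical client-side usage; not part of this change.
    #include <binder/IServiceManager.h>
    #include <common_time/ICommonTimeConfig.h>   // assumed header location
    #include <utils/Errors.h>

    using namespace android;

    static status_t setAnnounceInterval(int msec) {
        // Assumption: the config service registers under this name.
        sp<IBinder> binder = defaultServiceManager()->checkService(
                String16("common_time.config"));
        if (binder == NULL) {
            return NAME_NOT_FOUND;
        }

        sp<ICommonTimeConfig> cfg = interface_cast<ICommonTimeConfig>(binder);

        // This lands in BpCommonTimeConfig::setMasterAnnounceInterval above,
        // which marshals the argument and returns the remote status.
        return cfg->setMasterAnnounceInterval(msec);
    }
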
diff --git a/media/common_time/cc_helper.cpp b/media/common_time/cc_helper.cpp
new file mode 100644
index 0000000..8d8556c
--- /dev/null
+++ b/media/common_time/cc_helper.cpp
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <common_time/cc_helper.h>
+#include <common_time/ICommonClock.h>
+#include <utils/threads.h>
+
+namespace android {
+
+Mutex CCHelper::lock_;
+sp<ICommonClock> CCHelper::common_clock_;
+sp<ICommonClockListener> CCHelper::common_clock_listener_;
+uint32_t CCHelper::ref_count_ = 0;
+
+bool CCHelper::verifyClock_l() {
+    bool ret = false;
+
+    if (common_clock_ == NULL) {
+        common_clock_ = ICommonClock::getInstance();
+        if (common_clock_ == NULL)
+            goto bailout;
+    }
+
+    if (ref_count_ > 0) {
+        if (common_clock_listener_ == NULL) {
+            common_clock_listener_ = new CommonClockListener();
+            if (common_clock_listener_ == NULL)
+                goto bailout;
+
+            if (OK != common_clock_->registerListener(common_clock_listener_))
+                goto bailout;
+        }
+    }
+
+    ret = true;
+
+bailout:
+    if (!ret) {
+        common_clock_listener_ = NULL;
+        common_clock_ = NULL;
+    }
+    return ret;
+}
+
+CCHelper::CCHelper() {
+    Mutex::Autolock lock(&lock_);
+    ref_count_++;
+    verifyClock_l();
+}
+
+CCHelper::~CCHelper() {
+    Mutex::Autolock lock(&lock_);
+
+    assert(ref_count_ > 0);
+    ref_count_--;
+
+    // If we were the last CCHelper instance in the system, and we had
+    // previously registered a listener, unregister it now so that the common
+    // time service has the chance to go into auto-disabled mode.
+    if (!ref_count_ &&
+       (common_clock_ != NULL) &&
+       (common_clock_listener_ != NULL)) {
+        common_clock_->unregisterListener(common_clock_listener_);
+        common_clock_listener_ = NULL;
+    }
+}
+
+void CCHelper::CommonClockListener::onTimelineChanged(uint64_t timelineID) {
+    // do nothing; listener is only really used as a token so the server can
+    // find out when clients die.
+}
+
+// Helper methods which attempt to make calls to the common time binder
+// service.  If the first attempt fails with DEAD_OBJECT, the helpers will
+// attempt to make a connection to the service again (assuming that the process
+// hosting the service had crashed and the client proxy we are holding is
+// dead).  If the second attempt fails, or no connection can be made, we let
+// the error propagate up the stack and let the caller deal with the situation
+// as best they can.
+#define CCHELPER_METHOD(decl, call)                 \
+    status_t CCHelper::decl {                       \
+        Mutex::Autolock lock(&lock_);               \
+                                                    \
+        if (!verifyClock_l())                       \
+            return DEAD_OBJECT;                     \
+                                                    \
+        status_t status = common_clock_->call;      \
+        if (DEAD_OBJECT == status) {                \
+            if (!verifyClock_l())                   \
+                return DEAD_OBJECT;                 \
+            status = common_clock_->call;           \
+        }                                           \
+                                                    \
+        return status;                              \
+    }
+
+#define VERIFY_CLOCK()
+
+CCHELPER_METHOD(isCommonTimeValid(bool* valid, uint32_t* timelineID),
+                isCommonTimeValid(valid, timelineID))
+CCHELPER_METHOD(commonTimeToLocalTime(int64_t commonTime, int64_t* localTime),
+                commonTimeToLocalTime(commonTime, localTime))
+CCHELPER_METHOD(localTimeToCommonTime(int64_t localTime, int64_t* commonTime),
+                localTimeToCommonTime(localTime, commonTime))
+CCHELPER_METHOD(getCommonTime(int64_t* commonTime),
+                getCommonTime(commonTime))
+CCHELPER_METHOD(getCommonFreq(uint64_t* freq),
+                getCommonFreq(freq))
+CCHELPER_METHOD(getLocalTime(int64_t* localTime),
+                getLocalTime(localTime))
+CCHELPER_METHOD(getLocalFreq(uint64_t* freq),
+                getLocalFreq(freq))
+
+}  // namespace android
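
A quick sketch of a hypothetical CCHelper caller, assuming the helper is used exactly as the macro expansion above suggests; the function name and log tag are illustrative.

    #define LOG_TAG "cc_helper_example"   // illustrative tag
    #include <utils/Log.h>

    #include <common_time/cc_helper.h>
    #include <utils/Errors.h>

    using namespace android;

    static status_t logCommonTime() {
        // Constructing a CCHelper bumps the shared refcount and (re)registers
        // the listener; the call below transparently retries once on
        // DEAD_OBJECT.
        CCHelper helper;

        int64_t now;
        status_t res = helper.getCommonTime(&now);
        if (res != OK) {
            // Either the service is absent or it died on both attempts.
            return res;
        }

        ALOGI("common time is %lld", static_cast<long long>(now));
        return OK;
    }
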
diff --git a/media/common_time/local_clock.cpp b/media/common_time/local_clock.cpp
new file mode 100644
index 0000000..a7c61fc
--- /dev/null
+++ b/media/common_time/local_clock.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "common_time"
+#include <utils/Log.h>
+
+#include <assert.h>
+#include <stdint.h>
+
+#include <common_time/local_clock.h>
+#include <hardware/hardware.h>
+#include <hardware/local_time_hal.h>
+#include <utils/Errors.h>
+#include <utils/threads.h>
+
+namespace android {
+
+Mutex LocalClock::dev_lock_;
+local_time_hw_device_t* LocalClock::dev_ = NULL;
+
+LocalClock::LocalClock() {
+    int res;
+    const hw_module_t* mod;
+
+    AutoMutex lock(&dev_lock_);
+
+    if (dev_ != NULL)
+        return;
+
+    res = hw_get_module_by_class(LOCAL_TIME_HARDWARE_MODULE_ID, NULL, &mod);
+    if (res) {
+        ALOGE("Failed to open local time HAL module (res = %d)", res);
+    } else {
+        res = local_time_hw_device_open(mod, &dev_);
+        if (res) {
+            ALOGE("Failed to open local time HAL device (res = %d)", res);
+            dev_ = NULL;
+        }
+    }
+}
+
+bool LocalClock::initCheck() {
+    return (NULL != dev_);
+}
+
+int64_t LocalClock::getLocalTime() {
+    assert(NULL != dev_);
+    assert(NULL != dev_->get_local_time);
+
+    return dev_->get_local_time(dev_);
+}
+
+uint64_t LocalClock::getLocalFreq() {
+    assert(NULL != dev_);
+    assert(NULL != dev_->get_local_freq);
+
+    return dev_->get_local_freq(dev_);
+}
+
+status_t LocalClock::setLocalSlew(int16_t rate) {
+    assert(NULL != dev_);
+
+    if (!dev_->set_local_slew)
+        return INVALID_OPERATION;
+
+    return static_cast<status_t>(dev_->set_local_slew(dev_, rate));
+}
+
+int32_t LocalClock::getDebugLog(struct local_time_debug_event* records,
+                                int max_records) {
+    assert(NULL != dev_);
+
+    if (!dev_->get_debug_log)
+        return INVALID_OPERATION;
+
+    return dev_->get_debug_log(dev_, records, max_records);
+}
+
+}  // namespace android
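
A minimal sketch of a hypothetical LocalClock consumer, using only the methods defined above; the function name is illustrative.

    #include <stdint.h>

    #include <common_time/local_clock.h>
    #include <utils/Errors.h>

    using namespace android;

    static status_t readLocalTicks(int64_t* ticks, uint64_t* hz) {
        LocalClock clock;

        // Bail out before touching methods that assert on a missing HAL device.
        if (!clock.initCheck()) {
            return NO_INIT;
        }

        *ticks = clock.getLocalTime();  // raw counter value, in HAL ticks
        *hz    = clock.getLocalFreq();  // counter frequency, in ticks per second
        return OK;
    }
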
diff --git a/media/common_time/utils.cpp b/media/common_time/utils.cpp
new file mode 100644
index 0000000..6539171
--- /dev/null
+++ b/media/common_time/utils.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <arpa/inet.h>
+#include <linux/socket.h>
+
+#include <binder/Parcel.h>
+
+namespace android {
+
+bool canSerializeSockaddr(const struct sockaddr_storage* addr) {
+    switch (addr->ss_family) {
+        case AF_INET:
+        case AF_INET6:
+            return true;
+        default:
+            return false;
+    }
+}
+
+void serializeSockaddr(Parcel* p, const struct sockaddr_storage* addr) {
+    switch (addr->ss_family) {
+        case AF_INET: {
+            const struct sockaddr_in* s =
+                reinterpret_cast<const struct sockaddr_in*>(addr);
+            p->writeInt32(AF_INET);
+            p->writeInt32(ntohl(s->sin_addr.s_addr));
+            p->writeInt32(static_cast<int32_t>(ntohs(s->sin_port)));
+        } break;
+
+        case AF_INET6: {
+            const struct sockaddr_in6* s =
+                reinterpret_cast<const struct sockaddr_in6*>(addr);
+            const int32_t* a =
+                reinterpret_cast<const int32_t*>(s->sin6_addr.s6_addr);
+            p->writeInt32(AF_INET6);
+            p->writeInt32(ntohl(a[0]));
+            p->writeInt32(ntohl(a[1]));
+            p->writeInt32(ntohl(a[2]));
+            p->writeInt32(ntohl(a[3]));
+            p->writeInt32(static_cast<int32_t>(ntohs(s->sin6_port)));
+            p->writeInt32(ntohl(s->sin6_flowinfo));
+            p->writeInt32(ntohl(s->sin6_scope_id));
+        } break;
+    }
+}
+
+void deserializeSockaddr(const Parcel* p, struct sockaddr_storage* addr) {
+    memset(addr, 0, sizeof(*addr));
+
+    addr->ss_family = p->readInt32();
+    switch(addr->ss_family) {
+        case AF_INET: {
+            struct sockaddr_in* s =
+                reinterpret_cast<struct sockaddr_in*>(addr);
+            s->sin_addr.s_addr = htonl(p->readInt32());
+            s->sin_port = htons(static_cast<uint16_t>(p->readInt32()));
+        } break;
+
+        case AF_INET6: {
+            struct sockaddr_in6* s =
+                reinterpret_cast<struct sockaddr_in6*>(addr);
+            int32_t* a = reinterpret_cast<int32_t*>(s->sin6_addr.s6_addr);
+
+            a[0] = htonl(p->readInt32());
+            a[1] = htonl(p->readInt32());
+            a[2] = htonl(p->readInt32());
+            a[3] = htonl(p->readInt32());
+            s->sin6_port = htons(static_cast<uint16_t>(p->readInt32()));
+            s->sin6_flowinfo = htonl(p->readInt32());
+            s->sin6_scope_id = htonl(p->readInt32());
+        } break;
+    }
+}
+
+}  // namespace android
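
A small round-trip sketch of the helpers above. It assumes utils.h is reachable from the caller's include path; the multicast address and port are placeholders.

    #include <arpa/inet.h>
    #include <string.h>

    #include <binder/Parcel.h>

    #include "utils.h"  // canSerializeSockaddr / serializeSockaddr / deserializeSockaddr

    using namespace android;

    static bool sockaddrRoundTrip() {
        struct sockaddr_in v4;
        memset(&v4, 0, sizeof(v4));
        v4.sin_family = AF_INET;
        v4.sin_port   = htons(5353);                      // placeholder port
        inet_pton(AF_INET, "239.76.84.1", &v4.sin_addr);  // placeholder address

        struct sockaddr_storage src, dst;
        memset(&src, 0, sizeof(src));
        memcpy(&src, &v4, sizeof(v4));

        if (!canSerializeSockaddr(&src)) {
            return false;
        }

        Parcel p;
        serializeSockaddr(&p, &src);

        p.setDataPosition(0);  // rewind before reading the fields back out
        deserializeSockaddr(&p, &dst);

        return (dst.ss_family == AF_INET);
    }
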
diff --git a/media/common_time/utils.h b/media/common_time/utils.h
new file mode 100644
index 0000000..ce79d0d
--- /dev/null
+++ b/media/common_time/utils.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_LIBCOMMONCLOCK_UTILS_H
+#define ANDROID_LIBCOMMONCLOCK_UTILS_H
+
+#include <linux/socket.h>
+
+#include <binder/Parcel.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+extern bool canSerializeSockaddr(const struct sockaddr_storage* addr);
+extern void serializeSockaddr(Parcel* p, const struct sockaddr_storage* addr);
+extern void deserializeSockaddr(const Parcel* p,
+                                struct sockaddr_storage* addr);
+
+};  // namespace android
+
+#endif  // ANDROID_LIBCOMMONCLOCK_UTILS_H
diff --git a/media/libaah_rtp/Android.mk b/media/libaah_rtp/Android.mk
new file mode 100644
index 0000000..54fd9ec
--- /dev/null
+++ b/media/libaah_rtp/Android.mk
@@ -0,0 +1,40 @@
+LOCAL_PATH:= $(call my-dir)
+#
+# libaah_rtp
+#
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libaah_rtp
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SRC_FILES := \
+    aah_decoder_pump.cpp \
+    aah_rx_player.cpp \
+    aah_rx_player_core.cpp \
+    aah_rx_player_ring_buffer.cpp \
+    aah_rx_player_substream.cpp \
+    aah_tx_packet.cpp \
+    aah_tx_player.cpp \
+    aah_tx_sender.cpp \
+    pipe_event.cpp
+
+LOCAL_C_INCLUDES := \
+    frameworks/base/include \
+    frameworks/base/include/media/stagefright/openmax \
+    frameworks/base/media \
+    frameworks/base/media/libstagefright
+
+LOCAL_SHARED_LIBRARIES := \
+    libcommon_time_client \
+    libbinder \
+    libmedia \
+    libstagefright \
+    libstagefright_foundation \
+    libutils
+
+LOCAL_LDLIBS := \
+    -lpthread
+
+include $(BUILD_SHARED_LIBRARY)
+
diff --git a/media/libaah_rtp/aah_decoder_pump.cpp b/media/libaah_rtp/aah_decoder_pump.cpp
new file mode 100644
index 0000000..72fe43b
--- /dev/null
+++ b/media/libaah_rtp/aah_decoder_pump.cpp
@@ -0,0 +1,520 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <poll.h>
+#include <pthread.h>
+
+#include <common_time/cc_helper.h>
+#include <media/AudioSystem.h>
+#include <media/AudioTrack.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/Utils.h>
+#include <utils/Timers.h>
+#include <utils/threads.h>
+
+#include "aah_decoder_pump.h"
+
+namespace android {
+
+static const long long kLongDecodeErrorThreshold = 1000000ll;
+static const uint32_t kMaxLongErrorsBeforeFatal = 3;
+static const uint32_t kMaxErrorsBeforeFatal = 60;
+
+AAH_DecoderPump::AAH_DecoderPump(OMXClient& omx)
+    : omx_(omx)
+    , thread_status_(OK)
+    , renderer_(NULL)
+    , last_queued_pts_valid_(false)
+    , last_queued_pts_(0)
+    , last_ts_transform_valid_(false)
+    , last_volume_(0xFF) {
+    thread_ = new ThreadWrapper(this);
+}
+
+AAH_DecoderPump::~AAH_DecoderPump() {
+    shutdown();
+}
+
+status_t AAH_DecoderPump::initCheck() {
+    if (thread_ == NULL) {
+        ALOGE("Failed to allocate thread");
+        return NO_MEMORY;
+    }
+
+    return OK;
+}
+
+status_t AAH_DecoderPump::queueForDecode(MediaBuffer* buf) {
+    if (NULL == buf) {
+        return BAD_VALUE;
+    }
+
+    if (OK != thread_status_) {
+        return thread_status_;
+    }
+
+    {   // Explicit scope for AutoMutex pattern.
+        AutoMutex lock(&thread_lock_);
+        in_queue_.push_back(buf);
+    }
+
+    thread_cond_.signal();
+
+    return OK;
+}
+
+void AAH_DecoderPump::queueToRenderer(MediaBuffer* decoded_sample) {
+    Mutex::Autolock lock(&render_lock_);
+    sp<MetaData> meta;
+    int64_t ts;
+    status_t res;
+
+    // Fetch the metadata and make sure the sample has a timestamp.  We
+    // cannot render samples which are missing PTSs.
+    meta = decoded_sample->meta_data();
+    if ((meta == NULL) || (!meta->findInt64(kKeyTime, &ts))) {
+        ALOGV("Decoded sample missing timestamp, cannot render.");
+        CHECK(false);
+    } else {
+        // If we currently are not holding on to a renderer, go ahead and
+        // make one now.
+        if (NULL == renderer_) {
+            renderer_ = new TimedAudioTrack();
+            if (NULL != renderer_) {
+                int frameCount;
+                AudioTrack::getMinFrameCount(&frameCount,
+                        AUDIO_STREAM_DEFAULT,
+                        static_cast<int>(format_sample_rate_));
+                int ch_format = (format_channels_ == 1)
+                    ? AUDIO_CHANNEL_OUT_MONO
+                    : AUDIO_CHANNEL_OUT_STEREO;
+
+                res = renderer_->set(AUDIO_STREAM_DEFAULT,
+                        format_sample_rate_,
+                        AUDIO_FORMAT_PCM_16_BIT,
+                        ch_format,
+                        frameCount);
+                if (res != OK) {
+                    ALOGE("Failed to setup audio renderer. (res = %d)", res);
+                    delete renderer_;
+                    renderer_ = NULL;
+                } else {
+                    CHECK(last_ts_transform_valid_);
+
+                    res = renderer_->setMediaTimeTransform(
+                            last_ts_transform_, TimedAudioTrack::COMMON_TIME);
+                    if (res != NO_ERROR) {
+                        ALOGE("Failed to set media time transform on AudioTrack"
+                              " (res = %d)", res);
+                        delete renderer_;
+                        renderer_ = NULL;
+                    } else {
+                        float volume = static_cast<float>(last_volume_)
+                                     / 255.0f;
+                        if (renderer_->setVolume(volume, volume) != OK) {
+                            ALOGW("%s: setVolume failed", __FUNCTION__);
+                        }
+
+                        renderer_->start();
+                    }
+                }
+            } else {
+                ALOGE("Failed to allocate AudioTrack to use as a renderer.");
+            }
+        }
+
+        if (NULL != renderer_) {
+            uint8_t* decoded_data =
+                reinterpret_cast<uint8_t*>(decoded_sample->data());
+            uint32_t decoded_amt  = decoded_sample->range_length();
+            decoded_data += decoded_sample->range_offset();
+
+            sp<IMemory> pcm_payload;
+            res = renderer_->allocateTimedBuffer(decoded_amt, &pcm_payload);
+            if (res != OK) {
+                ALOGE("Failed to allocate %d byte audio track buffer."
+                      " (res = %d)", decoded_amt, res);
+            } else {
+                memcpy(pcm_payload->pointer(), decoded_data, decoded_amt);
+
+                res = renderer_->queueTimedBuffer(pcm_payload, ts);
+                if (res != OK) {
+                    ALOGE("Failed to queue %d byte audio track buffer with media"
+                          " PTS %lld. (res = %d)", decoded_amt, ts, res);
+                } else {
+                    last_queued_pts_valid_ = true;
+                    last_queued_pts_ = ts;
+                }
+            }
+
+        } else {
+            ALOGE("No renderer, dropping audio payload.");
+        }
+    }
+}
+
+void AAH_DecoderPump::stopAndCleanupRenderer() {
+    if (NULL == renderer_) {
+        return;
+    }
+
+    renderer_->stop();
+    delete renderer_;
+    renderer_ = NULL;
+}
+
+void AAH_DecoderPump::setRenderTSTransform(const LinearTransform& trans) {
+    Mutex::Autolock lock(&render_lock_);
+
+    if (last_ts_transform_valid_ && !memcmp(&trans,
+                                            &last_ts_transform_,
+                                            sizeof(trans))) {
+        return;
+    }
+
+    last_ts_transform_       = trans;
+    last_ts_transform_valid_ = true;
+
+    if (NULL != renderer_) {
+        status_t res = renderer_->setMediaTimeTransform(
+                last_ts_transform_, TimedAudioTrack::COMMON_TIME);
+        if (res != NO_ERROR) {
+            ALOGE("Failed to set media time transform on AudioTrack"
+                  " (res = %d)", res);
+        }
+    }
+}
+
+void AAH_DecoderPump::setRenderVolume(uint8_t volume) {
+    Mutex::Autolock lock(&render_lock_);
+
+    if (volume == last_volume_) {
+        return;
+    }
+
+    last_volume_ = volume;
+    if (renderer_ != NULL) {
+        float volume = static_cast<float>(last_volume_) / 255.0f;
+        if (renderer_->setVolume(volume, volume) != OK) {
+            ALOGW("%s: setVolume failed", __FUNCTION__);
+        }
+    }
+}
+
+// isAboutToUnderflow is something of a hack used to figure out when it might be
+// time to give up on trying to fill in a gap in the RTP sequence and simply
+// move on with a discontinuity.  If we had perfect knowledge of when we were
+// going to underflow, it would not be a hack, but unfortunately we do not.
+// Right now, we just take the PTS of the last sample queued, and check to see
+// if its presentation time is within kAboutToUnderflowThreshold from now.  If
+// it is, then we say that we are about to underflow.  This decision is based on
+// two (possibly invalid) assumptions.
+//
+// 1) The transmitter is leading the clock by more than
+//    kAboutToUnderflowThreshold.
+// 2) The delta between the PTS of the last sample queued and the next sample
+//    is less than the transmitter's clock lead amount.
+//
+// Right now, the default transmitter lead time is 1 second, which is a pretty
+// large number and greater than the 50mSec that kAboutToUnderflowThreshold is
+// currently set to.  This should satisfy assumption #1 for now, but changes to
+// the transmitter clock lead time could affect this.
+//
+// For non-sparse streams with a homogeneous sample rate (the vast majority of
+// streams in the world), the delta between any two adjacent PTSs will always be
+// the homogeneous sample period.  It is very uncommon to see a sample period
+// greater than the 1 second clock lead we are currently using, and you
+// certainly will not see it in an MP3 file which should satisfy assumption #2.
+// Sparse audio streams (where no audio is transmitted for long periods of
+// silence) and extremely low framerate video streams (like an MPEG-2 slideshow
+// or the video stream for a pay TV audio channel) are examples of streams which
+// might violate assumption #2.
+bool AAH_DecoderPump::isAboutToUnderflow(int64_t threshold) {
+    Mutex::Autolock lock(&render_lock_);
+
+    // If we have never queued anything to the decoder, we really don't know if
+    // we are going to underflow or not.
+    if (!last_queued_pts_valid_ || !last_ts_transform_valid_) {
+        return false;
+    }
+
+    // If we don't have access to Common Time, then things are Very Bad
+    // elsewhere in the system; it pretty much does not matter what we do here.
+    // Since we cannot really tell whether we are about to underflow, it's
+    // probably best to assume that we are not and proceed accordingly.
+    int64_t tt_now;
+    if (OK != cc_helper_.getCommonTime(&tt_now)) {
+        return false;
+    }
+
+    // Transform from media time to common time.
+    int64_t last_queued_pts_tt;
+    if (!last_ts_transform_.doForwardTransform(last_queued_pts_,
+                &last_queued_pts_tt)) {
+        return false;
+    }
+
+    // Check to see if we are underflowing.
+    return ((tt_now + threshold - last_queued_pts_tt) > 0);
+}
+
+void* AAH_DecoderPump::workThread() {
+    // No need to lock when accessing decoder_ from the thread.  The
+    // implementations of init and shutdown ensure that other threads never
+    // touch decoder_ while the work thread is running.
+    CHECK(decoder_ != NULL);
+    CHECK(format_  != NULL);
+
+    // Start the decoder and note its result code.  If something goes horribly
+    // wrong, callers of queueForDecode and getOutput will be able to detect
+    // that the thread encountered a fatal error and shut down by examining
+    // thread_status_.
+    thread_status_ = decoder_->start(format_.get());
+    if (OK != thread_status_) {
+        ALOGE("AAH_DecoderPump's work thread failed to start decoder (res = %d)",
+                thread_status_);
+        return NULL;
+    }
+
+    DurationTimer decode_timer;
+    uint32_t consecutive_long_errors = 0;
+    uint32_t consecutive_errors = 0;
+
+    while (!thread_->exitPending()) {
+        status_t res;
+        MediaBuffer* bufOut = NULL;
+
+        decode_timer.start();
+        res = decoder_->read(&bufOut);
+        decode_timer.stop();
+
+        if (res == INFO_FORMAT_CHANGED) {
+            // Format has changed.  Destroy our current renderer so that a new
+            // one can be created during queueToRenderer with the proper format.
+            //
+            // TODO : In order to transition seamlessly, we should change this
+            // to put the old renderer in a queue to play out completely before
+            // we destroy it.  We can still create a new renderer; the timed
+            // nature of the renderer should ensure a seamless splice.
+            stopAndCleanupRenderer();
+            res = OK;
+        }
+
+        // Try to be a little nuanced in our handling of actual decode errors.
+        // Errors could happen because of minor stream corruption or because of
+        // transient resource limitations.  In these cases, we would rather drop
+        // a little bit of output and ride out the unpleasantness than throw up
+        // our hands and abort everything.
+        //
+        // OTOH - When things are really bad (like we have a non-transient
+        // resource or bookkeeping issue, or the stream being fed to us is just
+        // complete and total garbage) we really want to terminate playback and
+        // raise an error condition all the way up to the application level so
+        // they can deal with it.
+        //
+        // Unfortunately, the error codes returned by the decoder can be a
+        // little non-specific.  For example, if an OMXCodec times out
+        // attempting to obtain an output buffer, the error we get back is a
+        // generic -1.  Try to distinguish between this resource timeout error
+        // and ES corruption error by timing how long the decode operation
+        // takes.  Maintain accounting for both errors and "long errors".  If we
+        // get more than a certain number of consecutive errors of either type,
+        // consider it fatal and shut down (which will cause the error to
+        // propagate all of the way up to the application level).  The threshold
+        // for "long errors" is deliberately much lower than that of normal
+        // decode errors, both because of how long they take to happen and
+        // because they generally indicate resource limitation errors which are
+        // unlikely to go away in pathologically bad cases (in contrast to
+        // stream corruption errors which might happen 20 times in a row and
+        // then be suddenly OK again).
+        if (res != OK) {
+            consecutive_errors++;
+            if (decode_timer.durationUsecs() >= kLongDecodeErrorThreshold)
+                consecutive_long_errors++;
+
+            CHECK(NULL == bufOut);
+
+            ALOGW("%s: Failed to decode data (res = %d)",
+                    __PRETTY_FUNCTION__, res);
+
+            if ((consecutive_errors      >= kMaxErrorsBeforeFatal) ||
+                (consecutive_long_errors >= kMaxLongErrorsBeforeFatal)) {
+                ALOGE("%s: Maximum decode error threshold has been reached."
+                      " There have been %d consecutive decode errors, and %d"
+                      " consecutive decode operations which resulted in errors"
+                      " and took more than %lld uSec to process.  The last"
+                      " decode operation took %lld uSec.",
+                      __PRETTY_FUNCTION__,
+                      consecutive_errors, consecutive_long_errors,
+                      kLongDecodeErrorThreshold, decode_timer.durationUsecs());
+                thread_status_ = res;
+                break;
+            }
+
+            continue;
+        }
+
+        if (NULL == bufOut) {
+            ALOGW("%s: Successful decode, but no buffer produced",
+                    __PRETTY_FUNCTION__);
+            continue;
+        }
+
+        // Successful decode (with actual output produced).  Clear the error
+        // counters.
+        consecutive_errors = 0;
+        consecutive_long_errors = 0;
+
+        queueToRenderer(bufOut);
+        bufOut->release();
+    }
+
+    decoder_->stop();
+    stopAndCleanupRenderer();
+
+    return NULL;
+}
+
+status_t AAH_DecoderPump::init(const sp<MetaData>& params) {
+    Mutex::Autolock lock(&init_lock_);
+
+    if (decoder_ != NULL) {
+        // already inited
+        return OK;
+    }
+
+    if (params == NULL) {
+        return BAD_VALUE;
+    }
+
+    if (!params->findInt32(kKeyChannelCount, &format_channels_)) {
+        return BAD_VALUE;
+    }
+
+    if (!params->findInt32(kKeySampleRate, &format_sample_rate_)) {
+        return BAD_VALUE;
+    }
+
+    CHECK(OK == thread_status_);
+    CHECK(decoder_ == NULL);
+
+    status_t ret_val = UNKNOWN_ERROR;
+
+    // Cache the format and attempt to create the decoder.
+    format_  = params;
+    decoder_ = OMXCodec::Create(
+            omx_.interface(),       // IOMX Handle
+            format_,                // Metadata for substream (indicates codec)
+            false,                  // Make a decoder, not an encoder
+            sp<MediaSource>(this)); // We will be the source for this codec.
+
+    if (decoder_ == NULL) {
+      ALOGE("Failed to allocate decoder in %s", __PRETTY_FUNCTION__);
+      goto bailout;
+    }
+
+    // Fire up the pump thread.  It will take care of starting and stopping the
+    // decoder.
+    ret_val = thread_->run("aah_decode_pump", ANDROID_PRIORITY_AUDIO);
+    if (OK != ret_val) {
+        ALOGE("Failed to start work thread in %s (res = %d)",
+                __PRETTY_FUNCTION__, ret_val);
+        goto bailout;
+    }
+
+bailout:
+    if (OK != ret_val) {
+        decoder_ = NULL;
+        format_  = NULL;
+    }
+
+    return ret_val;
+}
+
+status_t AAH_DecoderPump::shutdown() {
+    Mutex::Autolock lock(&init_lock_);
+    return shutdown_l();
+}
+
+status_t AAH_DecoderPump::shutdown_l() {
+    thread_->requestExit();
+    thread_cond_.signal();
+    thread_->requestExitAndWait();
+
+    for (MBQueue::iterator iter = in_queue_.begin();
+         iter != in_queue_.end();
+         ++iter) {
+        (*iter)->release();
+    }
+    in_queue_.clear();
+
+    last_queued_pts_valid_   = false;
+    last_ts_transform_valid_ = false;
+    last_volume_             = 0xFF;
+    thread_status_           = OK;
+
+    decoder_ = NULL;
+    format_  = NULL;
+
+    return OK;
+}
+
+status_t AAH_DecoderPump::read(MediaBuffer **buffer,
+                               const ReadOptions *options) {
+    if (!buffer) {
+        return BAD_VALUE;
+    }
+
+    *buffer = NULL;
+
+    // While it's not time to shut down, and we have no data to process, wait.
+    AutoMutex lock(&thread_lock_);
+    while (!thread_->exitPending() && in_queue_.empty())
+        thread_cond_.wait(thread_lock_);
+
+    // At this point, if it's not time to shut down then we must have something
+    // to process.  Go ahead and pop the front of the queue for processing.
+    if (!thread_->exitPending()) {
+        CHECK(!in_queue_.empty());
+
+        *buffer = *(in_queue_.begin());
+        in_queue_.erase(in_queue_.begin());
+    }
+
+    // If we managed to get a buffer, then everything must be OK.  If not, then
+    // we must be shutting down.
+    return (NULL == *buffer) ? INVALID_OPERATION : OK;
+}
+
+AAH_DecoderPump::ThreadWrapper::ThreadWrapper(AAH_DecoderPump* owner)
+    : Thread(false /* canCallJava*/ )
+    , owner_(owner) {
+}
+
+bool AAH_DecoderPump::ThreadWrapper::threadLoop() {
+    CHECK(NULL != owner_);
+    owner_->workThread();
+    return false;
+}
+
+}  // namespace android
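
To make the pump's lifecycle concrete, here is a hedged sketch of a hypothetical driver. In the real change this role belongs to the RX substream (not part of this file), so the function below is purely illustrative.

    #include <media/stagefright/MediaBuffer.h>
    #include <media/stagefright/MetaData.h>
    #include <media/stagefright/OMXClient.h>
    #include <utils/LinearTransform.h>

    #include "aah_decoder_pump.h"

    using namespace android;

    static status_t pumpOneBuffer(OMXClient& omx,
                                  const sp<MetaData>& format,
                                  MediaBuffer* encoded_sample,
                                  const LinearTransform& media_to_common) {
        sp<AAH_DecoderPump> pump = new AAH_DecoderPump(omx);
        status_t res = pump->initCheck();
        if (res != OK) {
            return res;
        }

        // The renderer needs a media-time-to-common-time transform before the
        // first decoded sample arrives, or queueToRenderer will assert.
        pump->setRenderTSTransform(media_to_common);
        pump->setRenderVolume(0xFF);

        // Describe the substream (codec, channel count, sample rate) and start
        // the decode thread.
        res = pump->init(format);
        if (res != OK) {
            return res;
        }

        // Ownership of the buffer passes to the pump's input queue.
        res = pump->queueForDecode(encoded_sample);

        // Stops the work thread and releases anything still queued.
        pump->shutdown();
        return res;
    }
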
diff --git a/media/libaah_rtp/aah_decoder_pump.h b/media/libaah_rtp/aah_decoder_pump.h
new file mode 100644
index 0000000..f5a6529
--- /dev/null
+++ b/media/libaah_rtp/aah_decoder_pump.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __DECODER_PUMP_H__
+#define __DECODER_PUMP_H__
+
+#include <pthread.h>
+
+#include <common_time/cc_helper.h>
+#include <media/stagefright/MediaSource.h>
+#include <utils/LinearTransform.h>
+#include <utils/List.h>
+#include <utils/threads.h>
+
+namespace android {
+
+class MetaData;
+class OMXClient;
+class TimedAudioTrack;
+
+class AAH_DecoderPump : public MediaSource {
+  public:
+    explicit AAH_DecoderPump(OMXClient& omx);
+    status_t initCheck();
+
+    status_t queueForDecode(MediaBuffer* buf);
+
+    status_t init(const sp<MetaData>& params);
+    status_t shutdown();
+
+    void setRenderTSTransform(const LinearTransform& trans);
+    void setRenderVolume(uint8_t volume);
+    bool isAboutToUnderflow(int64_t threshold);
+    status_t getStatus() const { return thread_status_; }
+
+    // MediaSource methods
+    virtual status_t     start(MetaData *params) { return OK; }
+    virtual sp<MetaData> getFormat() { return format_; }
+    virtual status_t     stop() { return OK; }
+    virtual status_t     read(MediaBuffer **buffer,
+                              const ReadOptions *options);
+
+  protected:
+    virtual ~AAH_DecoderPump();
+
+  private:
+    class ThreadWrapper : public Thread {
+      public:
+        friend class AAH_DecoderPump;
+        explicit ThreadWrapper(AAH_DecoderPump* owner);
+
+      private:
+        virtual bool threadLoop();
+        AAH_DecoderPump* owner_;
+
+        DISALLOW_EVIL_CONSTRUCTORS(ThreadWrapper);
+    };
+
+    void* workThread();
+    virtual status_t shutdown_l();
+    void queueToRenderer(MediaBuffer* decoded_sample);
+    void stopAndCleanupRenderer();
+
+    sp<MetaData>        format_;
+    int32_t             format_channels_;
+    int32_t             format_sample_rate_;
+
+    sp<MediaSource>     decoder_;
+    OMXClient&          omx_;
+    Mutex               init_lock_;
+
+    sp<ThreadWrapper>   thread_;
+    Condition           thread_cond_;
+    Mutex               thread_lock_;
+    status_t            thread_status_;
+
+    Mutex               render_lock_;
+    TimedAudioTrack*    renderer_;
+    bool                last_queued_pts_valid_;
+    int64_t             last_queued_pts_;
+    bool                last_ts_transform_valid_;
+    LinearTransform     last_ts_transform_;
+    uint8_t             last_volume_;
+    CCHelper            cc_helper_;
+
+    // protected by the thread_lock_
+    typedef List<MediaBuffer*> MBQueue;
+    MBQueue in_queue_;
+
+    DISALLOW_EVIL_CONSTRUCTORS(AAH_DecoderPump);
+};
+
+}  // namespace android
+#endif  // __DECODER_PUMP_H__
diff --git a/media/libaah_rtp/aah_rx_player.cpp b/media/libaah_rtp/aah_rx_player.cpp
new file mode 100644
index 0000000..9dd79fd
--- /dev/null
+++ b/media/libaah_rtp/aah_rx_player.cpp
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+//#define LOG_NDEBUG 0
+
+#include <binder/IServiceManager.h>
+#include <media/MediaPlayerInterface.h>
+#include <utils/Log.h>
+
+#include "aah_rx_player.h"
+
+namespace android {
+
+const uint32_t AAH_RXPlayer::kRTPRingBufferSize = 1 << 10;
+
+sp<MediaPlayerBase> createAAH_RXPlayer() {
+    sp<MediaPlayerBase> ret = new AAH_RXPlayer();
+    return ret;
+}
+
+AAH_RXPlayer::AAH_RXPlayer()
+        : ring_buffer_(kRTPRingBufferSize)
+        , substreams_(NULL) {
+    thread_wrapper_ = new ThreadWrapper(*this);
+
+    is_playing_          = false;
+    multicast_joined_    = false;
+    transmitter_known_   = false;
+    current_epoch_known_ = false;
+    data_source_set_     = false;
+    sock_fd_             = -1;
+
+    substreams_.setCapacity(4);
+
+    memset(&listen_addr_,      0, sizeof(listen_addr_));
+    memset(&transmitter_addr_, 0, sizeof(transmitter_addr_));
+
+    fetchAudioFlinger();
+}
+
+AAH_RXPlayer::~AAH_RXPlayer() {
+    reset_l();
+    CHECK(substreams_.size() == 0);
+    omx_.disconnect();
+}
+
+status_t AAH_RXPlayer::initCheck() {
+    if (thread_wrapper_ == NULL) {
+        ALOGE("Failed to allocate thread wrapper!");
+        return NO_MEMORY;
+    }
+
+    if (!ring_buffer_.initCheck()) {
+        ALOGE("Failed to allocate reassembly ring buffer!");
+        return NO_MEMORY;
+    }
+
+    // Check for the presence of the common time service by attempting to query
+    // for CommonTime's frequency.  If we get an error back, we cannot talk to
+    // the service at all and should abort now.
+    status_t res;
+    uint64_t freq;
+    res = cc_helper_.getCommonFreq(&freq);
+    if (OK != res) {
+        ALOGE("Failed to connect to common time service!");
+        return res;
+    }
+
+    return omx_.connect();
+}
+
+status_t AAH_RXPlayer::setDataSource(
+        const char *url,
+        const KeyedVector<String8, String8> *headers) {
+    AutoMutex api_lock(&api_lock_);
+    uint32_t a, b, c, d;
+    uint16_t port;
+
+    if (data_source_set_) {
+        return INVALID_OPERATION;
+    }
+
+    if (NULL == url) {
+        return BAD_VALUE;
+    }
+
+    if (5 != sscanf(url, "%*[^:/]://%u.%u.%u.%u:%hu", &a, &b, &c, &d, &port)) {
+        ALOGE("Failed to parse URL \"%s\"", url);
+        return BAD_VALUE;
+    }
+
+    if ((a > 255) || (b > 255) || (c > 255) || (d > 255) || (port == 0)) {
+        ALOGE("Bad multicast address \"%s\"", url);
+        return BAD_VALUE;
+    }
+
+    ALOGI("setDataSource :: %u.%u.%u.%u:%hu", a, b, c, d, port);
+
+    a = (a << 24) | (b << 16) | (c <<  8) | d;
+
+    memset(&listen_addr_, 0, sizeof(listen_addr_));
+    listen_addr_.sin_family      = AF_INET;
+    listen_addr_.sin_port        = htons(port);
+    listen_addr_.sin_addr.s_addr = htonl(a);
+    data_source_set_ = true;
+
+    return OK;
+}
+
+status_t AAH_RXPlayer::setDataSource(int fd, int64_t offset, int64_t length) {
+    return INVALID_OPERATION;
+}
+
+status_t AAH_RXPlayer::setVideoSurface(const sp<Surface>& surface) {
+    return OK;
+}
+
+status_t AAH_RXPlayer::setVideoSurfaceTexture(
+        const sp<ISurfaceTexture>& surfaceTexture) {
+    return OK;
+}
+
+status_t AAH_RXPlayer::prepare() {
+    return OK;
+}
+
+status_t AAH_RXPlayer::prepareAsync() {
+    sendEvent(MEDIA_PREPARED);
+    return OK;
+}
+
+status_t AAH_RXPlayer::start() {
+    AutoMutex api_lock(&api_lock_);
+
+    if (is_playing_) {
+        return OK;
+    }
+
+    status_t res = startWorkThread();
+    is_playing_ = (res == OK);
+    return res;
+}
+
+status_t AAH_RXPlayer::stop() {
+    return pause();
+}
+
+status_t AAH_RXPlayer::pause() {
+    AutoMutex api_lock(&api_lock_);
+    stopWorkThread();
+    CHECK(sock_fd_ < 0);
+    is_playing_ = false;
+    return OK;
+}
+
+bool AAH_RXPlayer::isPlaying() {
+    AutoMutex api_lock(&api_lock_);
+    return is_playing_;
+}
+
+status_t AAH_RXPlayer::seekTo(int msec) {
+    sendEvent(MEDIA_SEEK_COMPLETE);
+    return OK;
+}
+
+status_t AAH_RXPlayer::getCurrentPosition(int *msec) {
+    if (NULL != msec) {
+        *msec = 0;
+    }
+    return OK;
+}
+
+status_t AAH_RXPlayer::getDuration(int *msec) {
+    if (NULL != msec) {
+        *msec = 1;
+    }
+    return OK;
+}
+
+status_t AAH_RXPlayer::reset() {
+    AutoMutex api_lock(&api_lock_);
+    reset_l();
+    return OK;
+}
+
+void AAH_RXPlayer::reset_l() {
+    stopWorkThread();
+    CHECK(sock_fd_ < 0);
+    CHECK(!multicast_joined_);
+    is_playing_ = false;
+    data_source_set_ = false;
+    transmitter_known_ = false;
+    memset(&listen_addr_, 0, sizeof(listen_addr_));
+}
+
+status_t AAH_RXPlayer::setLooping(int loop) {
+    return OK;
+}
+
+player_type AAH_RXPlayer::playerType() {
+    return AAH_RX_PLAYER;
+}
+
+status_t AAH_RXPlayer::setParameter(int key, const Parcel &request) {
+    return ERROR_UNSUPPORTED;
+}
+
+status_t AAH_RXPlayer::getParameter(int key, Parcel *reply) {
+    return ERROR_UNSUPPORTED;
+}
+
+status_t AAH_RXPlayer::invoke(const Parcel& request, Parcel *reply) {
+    if (!reply) {
+        return BAD_VALUE;
+    }
+
+    int32_t magic;
+    status_t err = request.readInt32(&magic);
+    if (err != OK) {
+        reply->writeInt32(err);
+        return OK;
+    }
+
+    if (magic != 0x12345) {
+        reply->writeInt32(BAD_VALUE);
+        return OK;
+    }
+
+    int32_t methodID;
+    err = request.readInt32(&methodID);
+    if (err != OK) {
+        reply->writeInt32(err);
+        return OK;
+    }
+
+    switch (methodID) {
+        // Get Volume
+        case INVOKE_GET_MASTER_VOLUME: {
+            if (audio_flinger_ != NULL) {
+                reply->writeInt32(OK);
+                reply->writeFloat(audio_flinger_->masterVolume());
+            } else {
+                reply->writeInt32(UNKNOWN_ERROR);
+            }
+        } break;
+
+        // Set Volume
+        case INVOKE_SET_MASTER_VOLUME: {
+            float targetVol = request.readFloat();
+            if (audio_flinger_ != NULL) {
+                reply->writeInt32(audio_flinger_->setMasterVolume(targetVol));
+            } else {
+                reply->writeInt32(UNKNOWN_ERROR);
+            }
+        } break;
+
+        default: return BAD_VALUE;
+    }
+
+    return OK;
+}
+
+void AAH_RXPlayer::fetchAudioFlinger() {
+    if (audio_flinger_ == NULL) {
+        sp<IServiceManager> sm = defaultServiceManager();
+        sp<IBinder> binder;
+        binder = sm->getService(String16("media.audio_flinger"));
+
+        if (binder == NULL) {
+            ALOGW("AAH_RXPlayer failed to fetch handle to audio flinger."
+                  " Master volume control will not be possible.");
+        }
+
+        audio_flinger_ = interface_cast<IAudioFlinger>(binder);
+    }
+}
+
+}  // namespace android
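
For completeness, a hedged sketch of the client side of the invoke() protocol handled above. The 0x12345 magic and the INVOKE_* selectors come from the player code; the MediaPlayer plumbing, and the assumption that the INVOKE_* constants are visible to the caller, are illustrative.

    #include <binder/Parcel.h>
    #include <media/mediaplayer.h>

    using namespace android;

    static status_t getReceiverMasterVolume(const sp<MediaPlayer>& player,
                                            float* volume) {
        Parcel request, reply;

        request.writeInt32(0x12345);                   // protocol magic checked above
        request.writeInt32(INVOKE_GET_MASTER_VOLUME);  // method selector

        status_t res = player->invoke(request, &reply);
        if (res != OK) {
            return res;
        }

        res = reply.readInt32();  // status written back by AAH_RXPlayer::invoke
        if (res == OK) {
            *volume = reply.readFloat();
        }
        return res;
    }
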
diff --git a/media/libaah_rtp/aah_rx_player.h b/media/libaah_rtp/aah_rx_player.h
new file mode 100644
index 0000000..7a1b6e3
--- /dev/null
+++ b/media/libaah_rtp/aah_rx_player.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __AAH_RX_PLAYER_H__
+#define __AAH_RX_PLAYER_H__
+
+#include <common_time/cc_helper.h>
+#include <media/MediaPlayerInterface.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXClient.h>
+#include <netinet/in.h>
+#include <utils/KeyedVector.h>
+#include <utils/LinearTransform.h>
+#include <utils/threads.h>
+
+#include "aah_decoder_pump.h"
+#include "pipe_event.h"
+
+namespace android {
+
+class AAH_RXPlayer : public MediaPlayerInterface {
+  public:
+    AAH_RXPlayer();
+
+    virtual status_t    initCheck();
+    virtual status_t    setDataSource(const char *url,
+                                      const KeyedVector<String8, String8>*
+                                      headers);
+    virtual status_t    setDataSource(int fd, int64_t offset, int64_t length);
+    virtual status_t    setVideoSurface(const sp<Surface>& surface);
+    virtual status_t    setVideoSurfaceTexture(const sp<ISurfaceTexture>&
+                                               surfaceTexture);
+    virtual status_t    prepare();
+    virtual status_t    prepareAsync();
+    virtual status_t    start();
+    virtual status_t    stop();
+    virtual status_t    pause();
+    virtual bool        isPlaying();
+    virtual status_t    seekTo(int msec);
+    virtual status_t    getCurrentPosition(int *msec);
+    virtual status_t    getDuration(int *msec);
+    virtual status_t    reset();
+    virtual status_t    setLooping(int loop);
+    virtual player_type playerType();
+    virtual status_t    setParameter(int key, const Parcel &request);
+    virtual status_t    getParameter(int key, Parcel *reply);
+    virtual status_t    invoke(const Parcel& request, Parcel *reply);
+
+  protected:
+    virtual ~AAH_RXPlayer();
+
+  private:
+    class ThreadWrapper : public Thread {
+      public:
+        friend class AAH_RXPlayer;
+        explicit ThreadWrapper(AAH_RXPlayer& player)
+            : Thread(false /* canCallJava */ )
+            , player_(player) { }
+
+        virtual bool threadLoop() { return player_.threadLoop(); }
+
+      private:
+        AAH_RXPlayer& player_;
+
+        DISALLOW_EVIL_CONSTRUCTORS(ThreadWrapper);
+    };
+
+#pragma pack(push, 1)
+    // PacketBuffers are structures used by the RX ring buffer.  The ring buffer
+    // is a ring of pointers to PacketBuffer structures which act as variable
+    // length byte arrays and hold the contents of received UDP packets.  Rather
+    // than make this a structure which hold a length and a pointer to another
+    // allocated structure (which would require two allocations), this struct
+    // uses a structure overlay pattern where allocation for the byte array
+    // consists of allocating (arrayLen + sizeof(ssize_t)) bytes of data from
+    // whatever pool/heap the packet buffer pulls from, and then overlaying the
+    // packed PacketBuffer structure on top of the allocation.  The one-byte
+    // array at the end of the structure serves as an offset to the data
+    // portion of the allocation; packet buffers are never allocated on the
+    // stack or using the new operator.  Instead, the static allocate-byte-array
+    // and destroy methods handle the allocate and overlay pattern.  They also
+    // allow for a potential future optimization where instead of just
+    // allocating blocks from the process global heap and overlaying, the
+    // allocator is replaced with a different implementation (private heap,
+    // free-list, circular buffer, etc) which reduces potential heap
+    // fragmentation issues which might arise from the frequent allocation and
+    // destruction of the received UDP traffic.
+    struct PacketBuffer {
+        ssize_t length_;
+        uint8_t data_[1];
+
+        // TODO : consider changing this to be some form of ring buffer or free
+        // pool system instead of just using the heap in order to avoid heap
+        // fragmentation.
+        static PacketBuffer* allocate(ssize_t length);
+        static void destroy(PacketBuffer* pb);
+
+      private:
+        // Force people to use allocate/destroy instead of new/delete.
+        PacketBuffer() { }
+        ~PacketBuffer() { }
+    };
+
+    struct RetransRequest {
+        uint32_t magic_;
+        uint32_t mcast_ip_;
+        uint16_t mcast_port_;
+        uint16_t start_seq_;
+        uint16_t end_seq_;
+    };
+#pragma pack(pop)
+
+    enum GapStatus {
+        kGS_NoGap = 0,
+        kGS_NormalGap,
+        kGS_FastStartGap,
+    };
+
+    struct SeqNoGap {
+        uint16_t start_seq_;
+        uint16_t end_seq_;
+    };
+
+    class RXRingBuffer {
+      public:
+        explicit RXRingBuffer(uint32_t capacity);
+        ~RXRingBuffer();
+
+        bool initCheck() const { return (ring_ != NULL); }
+        void reset();
+
+        // Push a packet buffer with a given sequence number into the ring
+        // buffer.  pushBuffer will always consume the buffer pushed to it,
+        // either destroying it because it was a duplicate or overflow, or
+        // holding on to it in the ring.  Callers should not hold any references
+        // to PacketBuffers after they have been pushed to the ring.  Returns
+        // false in the case of a serious error (such as ring overflow).
+        // Callers should consider resetting the pipeline entirely in the event
+        // of a serious error.
+        bool pushBuffer(PacketBuffer* buf, uint16_t seq);
+
+        // Fetch the next buffer in the RTP sequence.  Returns NULL if there is
+        // no buffer to fetch.  If a non-NULL PacketBuffer is returned,
+        // is_discon will be set to indicate whether or not this PacketBuffer is
+        // discontinuous with any previously returned packet buffers.  Packet
+        // buffers returned by fetchBuffer are the caller's responsibility; they
+        // must be certain to destroy the buffers when they are done.
+        PacketBuffer* fetchBuffer(bool* is_discon);
+
+        // Fills out the gap structure and returns its type (normal or fast
+        // start) if the read pointer of the ring buffer is currently pointing
+        // to a gap which would stall a fetchBuffer operation.  Returns
+        // kGS_NoGap if the read pointer is not currently pointing to a gap in
+        // the sequence.
+        GapStatus fetchCurrentGap(SeqNoGap* gap);
+
+        // Causes the read pointer to skip over any portion of a gap indicated
+        // by nak.  If nak is NULL, any gap currently blocking the read pointer
+        // will be completely skipped.  If any portion of a gap is skipped, the
+        // next successful read from fetch buffer will indicate a discontinuity.
+        void processNAK(const SeqNoGap* nak = NULL);
+
+        // Compute the number of milliseconds until the inactivity timer for
+        // this RTP stream expires.  Returns -1 if there is no active timeout,
+        // or 0 if the system has already timed out.
+        int computeInactivityTimeout();
+
+      private:
+        Mutex          lock_;
+        PacketBuffer** ring_;
+        uint32_t       capacity_;
+        uint32_t       rd_;
+        uint32_t       wr_;
+
+        uint16_t       rd_seq_;
+        bool           rd_seq_known_;
+        bool           waiting_for_fast_start_;
+        bool           fetched_first_packet_;
+
+        uint64_t       rtp_activity_timeout_;
+        bool           rtp_activity_timeout_valid_;
+
+        DISALLOW_EVIL_CONSTRUCTORS(RXRingBuffer);
+    };
+
+    class Substream : public virtual RefBase {
+      public:
+        Substream(uint32_t ssrc, OMXClient& omx);
+
+        void cleanupBufferInProgress();
+        void shutdown();
+        void processPayloadStart(uint8_t* buf,
+                                 uint32_t amt,
+                                 int32_t ts_lower);
+        void processPayloadCont (uint8_t* buf,
+                                 uint32_t amt);
+        void processTSTransform(const LinearTransform& trans);
+
+        bool     isAboutToUnderflow();
+        uint32_t getSSRC()      const { return ssrc_; }
+        uint16_t getProgramID() const { return (ssrc_ >> 5) & 0x1F; }
+        status_t getStatus() const { return status_; }
+
+      protected:
+        virtual ~Substream() {
+            shutdown();
+        }
+
+      private:
+        void                cleanupDecoder();
+        bool                shouldAbort(const char* log_tag);
+        void                processCompletedBuffer();
+        bool                setupSubstreamType(uint8_t substream_type,
+                                               uint8_t codec_type);
+
+        uint32_t            ssrc_;
+        bool                waiting_for_rap_;
+        status_t            status_;
+
+        bool                substream_details_known_;
+        uint8_t             substream_type_;
+        uint8_t             codec_type_;
+        sp<MetaData>        substream_meta_;
+
+        MediaBuffer*        buffer_in_progress_;
+        uint32_t            expected_buffer_size_;
+        uint32_t            buffer_filled_;
+
+        sp<AAH_DecoderPump> decoder_;
+
+        static int64_t      kAboutToUnderflowThreshold;
+
+        DISALLOW_EVIL_CONSTRUCTORS(Substream);
+    };
+
+    typedef DefaultKeyedVector< uint32_t, sp<Substream> > SubstreamVec;
+
+    status_t            startWorkThread();
+    void                stopWorkThread();
+    virtual bool        threadLoop();
+    bool                setupSocket();
+    void                cleanupSocket();
+    void                resetPipeline();
+    void                reset_l();
+    bool                processRX(PacketBuffer* pb);
+    void                processRingBuffer();
+    void                processCommandPacket(PacketBuffer* pb);
+    bool                processGaps();
+    int                 computeNextGapRetransmitTimeout();
+    void                fetchAudioFlinger();
+
+    PipeEvent           wakeup_work_thread_evt_;
+    sp<ThreadWrapper>   thread_wrapper_;
+    Mutex               api_lock_;
+    bool                is_playing_;
+    bool                data_source_set_;
+
+    struct sockaddr_in  listen_addr_;
+    int                 sock_fd_;
+    bool                multicast_joined_;
+
+    struct sockaddr_in  transmitter_addr_;
+    bool                transmitter_known_;
+
+    uint32_t            current_epoch_;
+    bool                current_epoch_known_;
+
+    SeqNoGap            current_gap_;
+    GapStatus           current_gap_status_;
+    uint64_t            next_retrans_req_time_;
+
+    RXRingBuffer        ring_buffer_;
+    SubstreamVec        substreams_;
+    OMXClient           omx_;
+    CCHelper            cc_helper_;
+
+    // Connection to audio flinger used to hack a path to setMasterVolume.
+    sp<IAudioFlinger>   audio_flinger_;
+
+    static const uint32_t kRTPRingBufferSize;
+    static const uint32_t kRetransRequestMagic;
+    static const uint32_t kFastStartRequestMagic;
+    static const uint32_t kRetransNAKMagic;
+    static const uint32_t kGapRerequestTimeoutUSec;
+    static const uint32_t kFastStartTimeoutUSec;
+    static const uint32_t kRTPActivityTimeoutUSec;
+
+    static const uint32_t INVOKE_GET_MASTER_VOLUME = 3;
+    static const uint32_t INVOKE_SET_MASTER_VOLUME = 4;
+
+    static uint64_t monotonicUSecNow();
+
+    DISALLOW_EVIL_CONSTRUCTORS(AAH_RXPlayer);
+};
+
+}  // namespace android
+
+#endif  // __AAH_RX_PLAYER_H__
diff --git a/media/libaah_rtp/aah_rx_player_core.cpp b/media/libaah_rtp/aah_rx_player_core.cpp
new file mode 100644
index 0000000..d2b3386
--- /dev/null
+++ b/media/libaah_rtp/aah_rx_player_core.cpp
@@ -0,0 +1,807 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <time.h>
+#include <utils/misc.h>
+
+#include <media/stagefright/Utils.h>
+
+#include "aah_rx_player.h"
+#include "aah_tx_packet.h"
+
+namespace android {
+
+const uint32_t AAH_RXPlayer::kRetransRequestMagic =
+    FOURCC('T','r','e','q');
+const uint32_t AAH_RXPlayer::kRetransNAKMagic =
+    FOURCC('T','n','a','k');
+const uint32_t AAH_RXPlayer::kFastStartRequestMagic =
+    FOURCC('T','f','s','t');
+const uint32_t AAH_RXPlayer::kGapRerequestTimeoutUSec = 75000;
+const uint32_t AAH_RXPlayer::kFastStartTimeoutUSec = 800000;
+const uint32_t AAH_RXPlayer::kRTPActivityTimeoutUSec = 10000000;
+
+static inline int16_t fetchInt16(uint8_t* data) {
+    return static_cast<int16_t>(U16_AT(data));
+}
+
+static inline int32_t fetchInt32(uint8_t* data) {
+    return static_cast<int32_t>(U32_AT(data));
+}
+
+static inline int64_t fetchInt64(uint8_t* data) {
+    return static_cast<int64_t>(U64_AT(data));
+}
+
+uint64_t AAH_RXPlayer::monotonicUSecNow() {
+    struct timespec now;
+    int res = clock_gettime(CLOCK_MONOTONIC, &now);
+    CHECK(res >= 0);
+
+    uint64_t ret = static_cast<uint64_t>(now.tv_sec) * 1000000;
+    ret += now.tv_nsec / 1000;
+
+    return ret;
+}
+
+status_t AAH_RXPlayer::startWorkThread() {
+    status_t res;
+    stopWorkThread();
+    res = thread_wrapper_->run("TRX_Player", PRIORITY_AUDIO);
+
+    if (res != OK) {
+        ALOGE("Failed to start work thread (res = %d)", res);
+    }
+
+    return res;
+}
+
+void AAH_RXPlayer::stopWorkThread() {
+    thread_wrapper_->requestExit();  // set the exit pending flag
+    wakeup_work_thread_evt_.setEvent();
+
+    status_t res;
+    res = thread_wrapper_->requestExitAndWait(); // block until thread exit.
+    if (res != OK) {
+        ALOGE("Failed to stop work thread (res = %d)", res);
+    }
+
+    wakeup_work_thread_evt_.clearPendingEvents();
+}
+
+void AAH_RXPlayer::cleanupSocket() {
+    if (sock_fd_ >= 0) {
+        if (multicast_joined_) {
+            int res;
+            struct ip_mreq mreq;
+            mreq.imr_multiaddr = listen_addr_.sin_addr;
+            mreq.imr_interface.s_addr = htonl(INADDR_ANY);
+            res = setsockopt(sock_fd_,
+                             IPPROTO_IP,
+                             IP_DROP_MEMBERSHIP,
+                             &mreq, sizeof(mreq));
+            if (res < 0) {
+                ALOGW("Failed to leave multicast group. (%d, %d)", res, errno);
+            }
+            multicast_joined_ = false;
+        }
+
+        close(sock_fd_);
+        sock_fd_ = -1;
+    }
+
+    resetPipeline();
+}
+
+void AAH_RXPlayer::resetPipeline() {
+    ring_buffer_.reset();
+
+    // Explicitly shut down all of the active substreams, then clear out the
+    // collection.  Failure to clear out a substream can result in its decoder
+    // holding a reference to itself and therefore not going away when the
+    // collection is cleared.
+    for (size_t i = 0; i < substreams_.size(); ++i)
+        substreams_.valueAt(i)->shutdown();
+
+    substreams_.clear();
+
+    current_gap_status_ = kGS_NoGap;
+}
+
+bool AAH_RXPlayer::setupSocket() {
+    long flags;
+    int  res, buf_size;
+    socklen_t opt_size;
+
+    cleanupSocket();
+    CHECK(sock_fd_ < 0);
+
+    // Make the socket
+    sock_fd_ = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+    if (sock_fd_ < 0) {
+        ALOGE("Failed to create listen socket (errno %d)", errno);
+        goto bailout;
+    }
+
+    // Set non-blocking operation
+    flags = fcntl(sock_fd_, F_GETFL);
+    res   = fcntl(sock_fd_, F_SETFL, flags | O_NONBLOCK);
+    if (res < 0) {
+        ALOGE("Failed to set socket (%d) to non-blocking mode (errno %d)",
+              sock_fd_, errno);
+        goto bailout;
+    }
+
+    // Bind to our port
+    struct sockaddr_in bind_addr;
+    memset(&bind_addr, 0, sizeof(bind_addr));
+    bind_addr.sin_family = AF_INET;
+    bind_addr.sin_addr.s_addr = INADDR_ANY;
+    bind_addr.sin_port = listen_addr_.sin_port;
+    res = bind(sock_fd_,
+               reinterpret_cast<const sockaddr*>(&bind_addr),
+               sizeof(bind_addr));
+    if (res < 0) {
+        uint32_t a = ntohl(bind_addr.sin_addr.s_addr);
+        uint16_t p = ntohs(bind_addr.sin_port);
+        ALOGE("Failed to bind socket (%d) to %d.%d.%d.%d:%hd. (errno %d)",
+              sock_fd_,
+              (a >> 24) & 0xFF,
+              (a >> 16) & 0xFF,
+              (a >>  8) & 0xFF,
+              (a      ) & 0xFF,
+              p,
+              errno);
+
+        goto bailout;
+    }
+
+    buf_size = 1 << 16;   // 64k
+    res = setsockopt(sock_fd_,
+                     SOL_SOCKET, SO_RCVBUF,
+                     &buf_size, sizeof(buf_size));
+    if (res < 0) {
+        ALOGW("Failed to increase socket buffer size to %d.  (errno %d)",
+              buf_size, errno);
+    }
+
+    buf_size = 0;
+    opt_size = sizeof(buf_size);
+    res = getsockopt(sock_fd_,
+                     SOL_SOCKET, SO_RCVBUF,
+                     &buf_size, &opt_size);
+    if (res < 0) {
+        ALOGW("Failed to fetch socket buffer size.  (errno %d)", errno);
+    } else {
+        ALOGI("RX socket buffer size is now %d bytes",  buf_size);
+    }
+
+    if (listen_addr_.sin_addr.s_addr) {
+        // Join the multicast group and we should be good to go.
+        struct ip_mreq mreq;
+        mreq.imr_multiaddr = listen_addr_.sin_addr;
+        mreq.imr_interface.s_addr = htonl(INADDR_ANY);
+        res = setsockopt(sock_fd_,
+                         IPPROTO_IP,
+                         IP_ADD_MEMBERSHIP,
+                         &mreq, sizeof(mreq));
+        if (res < 0) {
+            ALOGE("Failed to join multicast group. (errno %d)", errno);
+            goto bailout;
+        }
+        multicast_joined_ = true;
+    }
+
+    return true;
+
+bailout:
+    cleanupSocket();
+    return false;
+}
+
+bool AAH_RXPlayer::threadLoop() {
+    struct pollfd poll_fds[2];
+    bool process_more_right_now = false;
+
+    if (!setupSocket()) {
+        sendEvent(MEDIA_ERROR);
+        goto bailout;
+    }
+
+    while (!thread_wrapper_->exitPending()) {
+        // Step 1: Wait until there is something to do.
+        int gap_timeout = computeNextGapRetransmitTimeout();
+        int ring_timeout = ring_buffer_.computeInactivityTimeout();
+        int timeout = -1;
+
+        if (!ring_timeout) {
+            ALOGW("RTP inactivity timeout reached, resetting pipeline.");
+            resetPipeline();
+            timeout = gap_timeout;
+        } else {
+            if (gap_timeout < 0) {
+                timeout = ring_timeout;
+            } else if (ring_timeout < 0) {
+                timeout = gap_timeout;
+            } else {
+                timeout = (gap_timeout < ring_timeout) ? gap_timeout
+                                                       : ring_timeout;
+            }
+        }
+
+        if ((0 != timeout) && (!process_more_right_now)) {
+            // Set up the events to wait on.  Start with the wakeup pipe.
+            memset(&poll_fds, 0, sizeof(poll_fds));
+            poll_fds[0].fd     = wakeup_work_thread_evt_.getWakeupHandle();
+            poll_fds[0].events = POLLIN;
+
+            // Add the RX socket.
+            poll_fds[1].fd     = sock_fd_;
+            poll_fds[1].events = POLLIN;
+
+            // Wait for something interesting to happen.
+            int poll_res = poll(poll_fds, NELEM(poll_fds), timeout);
+            if (poll_res < 0) {
+                ALOGE("Fatal error (%d,%d) while waiting on events",
+                      poll_res, errno);
+                sendEvent(MEDIA_ERROR);
+                goto bailout;
+            }
+        }
+
+        if (thread_wrapper_->exitPending()) {
+            break;
+        }
+
+        wakeup_work_thread_evt_.clearPendingEvents();
+        process_more_right_now = false;
+
+        // Step 2: Do we have data waiting in the socket?  If so, drain the
+        // socket moving valid RTP information into the ring buffer to be
+        // processed.
+        if (poll_fds[1].revents) {
+            struct sockaddr_in from;
+            socklen_t from_len;
+
+            ssize_t res = 0;
+            while (!thread_wrapper_->exitPending()) {
+                // Check the size of any pending packet.
+                res = recv(sock_fd_, NULL, 0, MSG_PEEK | MSG_TRUNC);
+
+                // Error?
+                if (res < 0) {
+                    // If the error is anything other than would block,
+                    // something has gone very wrong.
+                    if ((errno != EAGAIN) && (errno != EWOULDBLOCK)) {
+                        ALOGE("Fatal socket error during recvfrom (%d, %d)",
+                              (int)res, errno);
+                        goto bailout;
+                    }
+
+                    // Socket is out of data, just break out of processing and
+                    // wait for more.
+                    break;
+                }
+
+                // Allocate a payload.
+                PacketBuffer* pb = PacketBuffer::allocate(res);
+                if (NULL == pb) {
+                    ALOGE("Fatal error, failed to allocate packet buffer of"
+                          " length %u", static_cast<uint32_t>(res));
+                    goto bailout;
+                }
+
+                // Fetch the data.
+                from_len = sizeof(from);
+                res = recvfrom(sock_fd_, pb->data_, pb->length_, 0,
+                               reinterpret_cast<struct sockaddr*>(&from),
+                               &from_len);
+                if (res != pb->length_) {
+                    ALOGE("Fatal error, fetched packet length (%d) does not"
+                          " match peeked packet length (%u).  This should never"
+                          " happen.  (errno = %d)",
+                          static_cast<int>(res),
+                          static_cast<uint32_t>(pb->length_),
+                          errno);
+                }
+
+                bool drop_packet = false;
+                if (transmitter_known_) {
+                    if (from.sin_addr.s_addr !=
+                        transmitter_addr_.sin_addr.s_addr) {
+                        uint32_t a = ntohl(from.sin_addr.s_addr);
+                        uint16_t p = ntohs(from.sin_port);
+                        ALOGV("Dropping packet from unknown transmitter"
+                              " %u.%u.%u.%u:%hu",
+                              ((a >> 24) & 0xFF),
+                              ((a >> 16) & 0xFF),
+                              ((a >>  8) & 0xFF),
+                              ( a        & 0xFF),
+                              p);
+
+                        drop_packet = true;
+                    } else {
+                        transmitter_addr_.sin_port = from.sin_port;
+                    }
+                } else {
+                    memcpy(&transmitter_addr_, &from, sizeof(from));
+                    transmitter_known_ = true;
+                }
+
+                if (!drop_packet) {
+                    bool serious_error = !processRX(pb);
+
+                    if (serious_error) {
+                        // Something went "seriously wrong".  Currently, the
+                        // only trigger for this should be a ring buffer
+                        // overflow.  The current failsafe behavior for when
+                        // something goes seriously wrong is to just reset the
+                        // pipeline.  The system should behave as if this
+                        // AAH_RXPlayer was just set up for the first time.
+                        ALOGE("Something just went seriously wrong with the"
+                              " pipeline.  Resetting.");
+                        resetPipeline();
+                    }
+                } else {
+                    PacketBuffer::destroy(pb);
+                }
+            }
+        }
+
+        // Step 3: Process any data we may have accumulated in the ring buffer
+        // so far.
+        if (!thread_wrapper_->exitPending()) {
+            processRingBuffer();
+        }
+
+        // Step 4: At this point in time, the ring buffer should either be
+        // empty, or stalled in front of a gap caused by some dropped packets.
+        // Check on the current gap situation and deal with it in an appropriate
+        // fashion.  If processGaps returns true, it means that it has given up
+        // on a gap and that we should try to process some more data
+        // immediately.
+        if (!thread_wrapper_->exitPending()) {
+            process_more_right_now = processGaps();
+        }
+
+        // Step 5: Check for fatal errors.  If any of our substreams has
+        // encountered a fatal, unrecoverable, error, then propagate the error
+        // up to user level and shut down.
+        for (size_t i = 0; i < substreams_.size(); ++i) {
+            status_t status;
+            CHECK(substreams_.valueAt(i) != NULL);
+
+            status = substreams_.valueAt(i)->getStatus();
+            if (OK != status) {
+                ALOGE("Substream index %d has encountered an unrecoverable"
+                      " error (%d).  Signalling application level and shutting"
+                      " down.", i, status);
+                sendEvent(MEDIA_ERROR);
+                goto bailout;
+            }
+        }
+    }
+
+bailout:
+    cleanupSocket();
+    return false;
+}
+
+bool AAH_RXPlayer::processRX(PacketBuffer* pb) {
+    CHECK(NULL != pb);
+
+    uint8_t* data = pb->data_;
+    ssize_t  amt  = pb->length_;
+    uint32_t nak_magic;
+    uint16_t seq_no;
+    uint32_t epoch;
+
+    // Every packet either starts with an RTP header which is at least 12 bytes
+    // long or is a retry NAK which is 14 bytes long.  If there are fewer than
+    // 12 bytes here, this cannot be a proper RTP packet.
+    if (amt < 12) {
+        ALOGV("Dropping packet, too short to contain RTP header (%u bytes)",
+              static_cast<uint32_t>(amt));
+        goto drop_packet;
+    }
+
+    // Check to see if this is the special case of a NAK packet.
+    nak_magic = ntohl(*(reinterpret_cast<uint32_t*>(data)));
+    if (nak_magic == kRetransNAKMagic) {
+        // Looks like a NAK packet; make sure it's long enough.
+
+        if (amt < static_cast<ssize_t>(sizeof(RetransRequest))) {
+            ALOGV("Dropping packet, too short to contain NAK payload (%u bytes)",
+                  static_cast<uint32_t>(amt));
+            goto drop_packet;
+        }
+
+        SeqNoGap gap;
+        RetransRequest* rtr = reinterpret_cast<RetransRequest*>(data);
+        gap.start_seq_ = ntohs(rtr->start_seq_);
+        gap.end_seq_   = ntohs(rtr->end_seq_);
+
+        ALOGV("Process NAK for gap at [%hu, %hu]", gap.start_seq_, gap.end_seq_);
+        ring_buffer_.processNAK(&gap);
+
+        return true;
+    }
+
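+    // For reference, the parsing code below assumes the standard 12-byte RTP
+    // header layout (summarized from the checks which follow):
+    //
+    //   data[0]     : V/P/X/CC       (must be 0x80 for TRTP)
+    //   data[1]     : M bit + payload type (TRTP always uses payload type 100)
+    //   data[2-3]   : 16-bit sequence number
+    //   data[4-7]   : 32-bit RTP timestamp (lower 32 bits of the media time)
+    //   data[8-11]  : SSRC, with the TRTP epoch packed into its upper 22 bits
+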
+    // According to the TRTP spec, version should be 2, padding should be 0,
+    // extension should be 0 and CSRCCnt should be 0.  If any of these tests
+    // fail, we chuck the packet.
+    if (data[0] != 0x80) {
+        ALOGV("Dropping packet, bad V/P/X/CSRCCnt field (0x%02x)",
+              data[0]);
+        goto drop_packet;
+    }
+
+    // Check the payload type.  For TRTP, it should always be 100.
+    if ((data[1] & 0x7F) != 100) {
+        ALOGV("Dropping packet, bad payload type. (%u)",
+              data[1] & 0x7F);
+        goto drop_packet;
+    }
+
+    // Check whether the transmitter has begun a new epoch.
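+    // The epoch occupies the upper 22 bits of the SSRC field (bits 10-31 of
+    // the word at offset 8).  A change in epoch invalidates everything we have
+    // buffered so far, so the pipeline is reset below when one is seen.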
+    epoch = (U32_AT(data + 8) >> 10) & 0x3FFFFF;
+    if (current_epoch_known_) {
+        if (epoch != current_epoch_) {
+            ALOGV("%s: new epoch %u", __PRETTY_FUNCTION__, epoch);
+            current_epoch_ = epoch;
+            resetPipeline();
+        }
+    } else {
+        current_epoch_ = epoch;
+        current_epoch_known_ = true;
+    }
+
+    // Extract the sequence number and hand the packet off to the ring buffer
+    // for dropped packet detection and later processing.
+    seq_no = U16_AT(data + 2);
+    return ring_buffer_.pushBuffer(pb, seq_no);
+
+drop_packet:
+    PacketBuffer::destroy(pb);
+    return true;
+}
+
+void AAH_RXPlayer::processRingBuffer() {
+    PacketBuffer* pb;
+    bool is_discon;
+    sp<Substream> substream;
+    LinearTransform trans;
+    bool foundTrans = false;
+
+    while (NULL != (pb = ring_buffer_.fetchBuffer(&is_discon))) {
+        if (is_discon) {
+            // Abort all partially assembled payloads.
+            for (size_t i = 0; i < substreams_.size(); ++i) {
+                CHECK(substreams_.valueAt(i) != NULL);
+                substreams_.valueAt(i)->cleanupBufferInProgress();
+            }
+        }
+
+        uint8_t* data = pb->data_;
+        ssize_t  amt  = pb->length_;
+
+        // Should not have any non-RTP packets in the ring buffer.  RTP packets
+        // must be at least 12 bytes long.
+        CHECK(amt >= 12);
+
+        // Extract the marker bit and the SSRC field.
+        bool     marker = (data[1] & 0x80) != 0;
+        uint32_t ssrc   = U32_AT(data + 8);
+
+        // Is this the start of a new TRTP payload?  If so, the marker bit
+        // should be set and there are some things we should be checking for.
+        if (marker) {
+            // TRTP headers need to have at least a byte for version, a byte for
+            // payload type and flags, and 4 bytes for length.
+            if (amt < 18) {
+                ALOGV("Dropping packet, too short to contain TRTP header"
+                      " (%u bytes)", static_cast<uint32_t>(amt));
+                goto process_next_packet;
+            }
+
+            // Check the TRTP version and extract the payload type/flags.
+            uint8_t trtp_version =  data[12];
+            uint8_t payload_type = (data[13] >> 4) & 0xF;
+            uint8_t trtp_flags   =  data[13]       & 0xF;
+
+            if (1 != trtp_version) {
+                ALOGV("Dropping packet, bad trtp version %hhu", trtp_version);
+                goto process_next_packet;
+            }
+
+            // Is there a timestamp transformation present on this packet?  If
+            // so, extract it and pass it to the appropriate substreams.
+            if (trtp_flags & 0x02) {
+                ssize_t offset = 18 + ((trtp_flags & 0x01) ? 4 : 0);
+                if (amt < (offset + 24)) {
+                    ALOGV("Dropping packet, too short to contain TRTP Timestamp"
+                          " Transformation (%u bytes)",
+                          static_cast<uint32_t>(amt));
+                    goto process_next_packet;
+                }
+
+                trans.a_zero = fetchInt64(data + offset);
+                trans.b_zero = fetchInt64(data + offset + 16);
+                trans.a_to_b_numer = static_cast<int32_t>(
+                        fetchInt32 (data + offset + 8));
+                trans.a_to_b_denom = U32_AT(data + offset + 12);
+                foundTrans = true;
+
+                uint32_t program_id = (ssrc >> 5) & 0x1F;
+                for (size_t i = 0; i < substreams_.size(); ++i) {
+                    sp<Substream> iter = substreams_.valueAt(i);
+                    CHECK(iter != NULL);
+
+                    if (iter->getProgramID() == program_id) {
+                        iter->processTSTransform(trans);
+                    }
+                }
+            }
+
+            // Is this a command packet?  If so, it's not necessarily associated
+            // with one particular substream.  Just give it to the command
+            // packet handler and then move on.
+            if (4 == payload_type) {
+                processCommandPacket(pb);
+                goto process_next_packet;
+            }
+        }
+
+        // If we got to here, then we are a normal packet.  Find (or allocate)
+        // the substream we belong to and send the packet off to be processed.
+        substream = substreams_.valueFor(ssrc);
+        if (substream == NULL) {
+            substream = new Substream(ssrc, omx_);
+            if (substream == NULL) {
+                ALOGE("Failed to allocate substream for SSRC 0x%08x", ssrc);
+                goto process_next_packet;
+            }
+            substreams_.add(ssrc, substream);
+
+            if (foundTrans) {
+                substream->processTSTransform(trans);
+            }
+        }
+
+        CHECK(substream != NULL);
+
+        if (marker) {
+            // Start of a new TRTP payload for this substream.  Extract the
+            // lower 32 bits of the timestamp and hand the buffer to the
+            // substream for processing.
+            uint32_t ts_lower = U32_AT(data + 4);
+            substream->processPayloadStart(data + 12, amt - 12, ts_lower);
+        } else {
+            // Continuation of an existing TRTP payload.  Just hand it off to
+            // the substream for processing.
+            substream->processPayloadCont(data + 12, amt - 12);
+        }
+
+process_next_packet:
+        PacketBuffer::destroy(pb);
+    }  // end of main processing while loop.
+}
+
+void AAH_RXPlayer::processCommandPacket(PacketBuffer* pb) {
+    CHECK(NULL != pb);
+
+    uint8_t* data = pb->data_;
+    ssize_t  amt  = pb->length_;
+
+    // verify that this packet meets the minimum length of a command packet
+    if (amt < 20) {
+        return;
+    }
+
+    uint8_t trtp_version =  data[12];
+    uint8_t trtp_flags   =  data[13]       & 0xF;
+
+    if (1 != trtp_version) {
+        ALOGV("Dropping packet, bad trtp version %hhu", trtp_version);
+        return;
+    }
+
+    // calculate the start of the command payload
+    ssize_t offset = 18;
+    if (trtp_flags & 0x01) {
+        // timestamp is present (4 bytes)
+        offset += 4;
+    }
+    if (trtp_flags & 0x02) {
+        // transform is present (24 bytes)
+        offset += 24;
+    }
+
+    // the packet must contain 2 bytes of command payload beyond the TRTP header
+    if (amt < offset + 2) {
+        return;
+    }
+
+    uint16_t command_id = U16_AT(data + offset);
+
+    switch (command_id) {
+        case TRTPControlPacket::kCommandNop:
+            break;
+
+        case TRTPControlPacket::kCommandEOS:
+        case TRTPControlPacket::kCommandFlush: {
+            uint16_t program_id = (U32_AT(data + 8) >> 5) & 0x1F;
+            ALOGI("*** %s flushing program_id=%d",
+                  __PRETTY_FUNCTION__, program_id);
+
+            Vector<uint32_t> substreams_to_remove;
+            for (size_t i = 0; i < substreams_.size(); ++i) {
+                sp<Substream> iter = substreams_.valueAt(i);
+                if (iter->getProgramID() == program_id) {
+                    iter->shutdown();
+                    substreams_to_remove.add(iter->getSSRC());
+                }
+            }
+
+            for (size_t i = 0; i < substreams_to_remove.size(); ++i) {
+                substreams_.removeItem(substreams_to_remove[i]);
+            }
+        } break;
+    }
+}
+
+bool AAH_RXPlayer::processGaps() {
+    // Deal with the current gap situation.  Specifically...
+    //
+    // 1) If a new gap has shown up, send a retransmit request to the
+    //    transmitter.
+    // 2) If a gap we were working on has had a packet in the middle or at
+    //    the end filled in, send another retransmit request for the beginning
+    //    portion of the gap.  TRTP was designed for LANs where packet
+    //    re-ordering is very unlikely, so seeing the middle or end of a gap
+    //    filled in before the beginning is an almost certain indication that
+    //    a retransmission packet was also dropped.
+    // 3) If we have been working on a gap for a while and it still has not
+    //    been filled in, send another retransmit request.
+    // 4) If there are no more gaps in the ring, clear the current_gap_status_
+    //    flag to indicate that all is well again.
+
+    // Start by fetching the active gap status.
+    SeqNoGap gap;
+    bool send_retransmit_request = false;
+    bool ret_val = false;
+    GapStatus gap_status;
+    if (kGS_NoGap != (gap_status = ring_buffer_.fetchCurrentGap(&gap))) {
+        // Note: checking for a change in the end sequence number should cover
+        // moving on to an entirely new gap for case #1 as well as resending the
+        // beginning of a gap range for case #2.
+        send_retransmit_request = (kGS_NoGap == current_gap_status_) ||
+                                  (current_gap_.end_seq_ != gap.end_seq_);
+
+        // If this is the same gap we have been working on, and it has timed
+        // out, then check to see if our substreams are about to underflow.  If
+        // so, instead of sending another retransmit request, just give up on
+        // this gap and move on.
+        if (!send_retransmit_request &&
+           (kGS_NoGap != current_gap_status_) &&
+           (0 == computeNextGapRetransmitTimeout())) {
+
+            // If our current gap is the fast-start gap, don't bother to skip
+            // it just because substreams look like they are about to underflow.
+            if ((kGS_FastStartGap != gap_status) ||
+                (current_gap_.end_seq_ != gap.end_seq_)) {
+                for (size_t i = 0; i < substreams_.size(); ++i) {
+                    if (substreams_.valueAt(i)->isAboutToUnderflow()) {
+                        ALOGV("About to underflow, giving up on gap [%hu, %hu]",
+                              gap.start_seq_, gap.end_seq_);
+                        ring_buffer_.processNAK();
+                        current_gap_status_ = kGS_NoGap;
+                        return true;
+                    }
+                }
+            }
+
+            // Looks like no one is about to underflow.  Just go ahead and send
+            // the request.
+            send_retransmit_request = true;
+        }
+    } else {
+        current_gap_status_ = kGS_NoGap;
+    }
+
+    if (send_retransmit_request) {
+        // If we have been working on a fast start, and it is still not filled
+        // in, even after the extended retransmit timeout, give up and skip it.
+        // The system should fall back into its normal slow-start behavior.
+        if ((kGS_FastStartGap == current_gap_status_) &&
+            (current_gap_.end_seq_ == gap.end_seq_)) {
+            ALOGV("Fast start is taking forever; giving up.");
+            ring_buffer_.processNAK();
+            current_gap_status_ = kGS_NoGap;
+            return true;
+        }
+
+        // Send the request.
+        RetransRequest req;
+        uint32_t magic  = (kGS_FastStartGap == gap_status)
+                        ? kFastStartRequestMagic
+                        : kRetransRequestMagic;
+        req.magic_      = htonl(magic);
+        req.mcast_ip_   = listen_addr_.sin_addr.s_addr;
+        req.mcast_port_ = listen_addr_.sin_port;
+        req.start_seq_  = htons(gap.start_seq_);
+        req.end_seq_    = htons(gap.end_seq_);
+
+        {
+            uint32_t a = ntohl(transmitter_addr_.sin_addr.s_addr);
+            uint16_t p = ntohs(transmitter_addr_.sin_port);
+            ALOGV("Sending to transmitter %u.%u.%u.%u:%hu",
+                    ((a >> 24) & 0xFF),
+                    ((a >> 16) & 0xFF),
+                    ((a >>  8) & 0xFF),
+                    ( a        & 0xFF),
+                    p);
+        }
+
+        int res = sendto(sock_fd_, &req, sizeof(req), 0,
+                         reinterpret_cast<struct sockaddr*>(&transmitter_addr_),
+                         sizeof(transmitter_addr_));
+        if (res < 0) {
+            ALOGE("Error when sending retransmit request (%d)", errno);
+        } else {
+            ALOGV("%s request for range [%hu, %hu] sent",
+                  (kGS_FastStartGap == gap_status) ? "Fast Start" : "Retransmit",
+                  gap.start_seq_, gap.end_seq_);
+        }
+
+        // Update the current gap info.
+        current_gap_ = gap;
+        current_gap_status_ = gap_status;
+        next_retrans_req_time_ = monotonicUSecNow() +
+                               ((kGS_FastStartGap == current_gap_status_)
+                                ? kFastStartTimeoutUSec
+                                : kGapRerequestTimeoutUSec);
+    }
+
+    return false;
+}
+
+// Compute the number of milliseconds until it is time to send the next gap
+// retransmission request.  Returns < 0 for an infinite timeout (no gap) and 0
+// if it is time to retransmit right now.
+int AAH_RXPlayer::computeNextGapRetransmitTimeout() {
+    if (kGS_NoGap == current_gap_status_) {
+        return -1;
+    }
+
+    int64_t timeout_delta = next_retrans_req_time_ - monotonicUSecNow();
+
+    timeout_delta /= 1000;
+    if (timeout_delta <= 0) {
+        return 0;
+    }
+
+    return static_cast<int>(timeout_delta);
+}
+
+}  // namespace android
diff --git a/media/libaah_rtp/aah_rx_player_ring_buffer.cpp b/media/libaah_rtp/aah_rx_player_ring_buffer.cpp
new file mode 100644
index 0000000..0d8b31f
--- /dev/null
+++ b/media/libaah_rtp/aah_rx_player_ring_buffer.cpp
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "aah_rx_player.h"
+
+namespace android {
+
+AAH_RXPlayer::RXRingBuffer::RXRingBuffer(uint32_t capacity) {
+    capacity_ = capacity;
+    rd_ = wr_ = 0;
+    ring_ = new PacketBuffer*[capacity];
+    memset(ring_, 0, sizeof(PacketBuffer*) * capacity);
+    reset();
+}
+
+AAH_RXPlayer::RXRingBuffer::~RXRingBuffer() {
+    reset();
+    delete[] ring_;
+}
+
+void AAH_RXPlayer::RXRingBuffer::reset() {
+    AutoMutex lock(&lock_);
+
+    if (NULL != ring_) {
+        while (rd_ != wr_) {
+            CHECK(rd_ < capacity_);
+            if (NULL != ring_[rd_]) {
+                PacketBuffer::destroy(ring_[rd_]);
+                ring_[rd_] = NULL;
+            }
+            rd_ = (rd_ + 1) % capacity_;
+        }
+    }
+
+    rd_ = wr_ = 0;
+    rd_seq_known_ = false;
+    waiting_for_fast_start_ = true;
+    fetched_first_packet_ = false;
+    rtp_activity_timeout_valid_ = false;
+}
+
+bool AAH_RXPlayer::RXRingBuffer::pushBuffer(PacketBuffer* buf,
+                                                uint16_t seq) {
+    AutoMutex lock(&lock_);
+    CHECK(NULL != ring_);
+    CHECK(NULL != buf);
+
+    rtp_activity_timeout_valid_ = true;
+    rtp_activity_timeout_ = monotonicUSecNow() + kRTPActivityTimeoutUSec;
+
+    // If the ring buffer is totally reset (we have never received a single
+    // payload) then we don't know the rd sequence number and this should be
+    // simple.  We just store the payload, advance the wr pointer and record the
+    // initial sequence number.
+    if (!rd_seq_known_) {
+        CHECK(rd_ == wr_);
+        CHECK(NULL == ring_[wr_]);
+        CHECK(wr_ < capacity_);
+
+        ring_[wr_] = buf;
+        wr_ = (wr_ + 1) % capacity_;
+        rd_seq_ = seq;
+        rd_seq_known_ = true;
+        return true;
+    }
+
+    // Compute the sequence number of this payload and of the write pointer,
+    // normalized around the read pointer.  IOW - transform the payload seq no
+    // and the wr pointer seq no into a space where the rd pointer seq no is
+    // zero.  This will define 4 cases we can consider...
+    //
+    // 1) norm_seq == norm_wr_seq
+    //    This payload is contiguous with the last.  All is good.
+    //
+    // 2)  ((norm_seq <  norm_wr_seq) && (norm_seq >= norm_rd_seq))
+    // aka ((norm_seq <  norm_wr_seq) && (norm_seq >= 0))
+    //    This payload is in the past, in the unprocessed region of the ring
+    //    buffer.  It is probably a retransmit intended to fill in a dropped
+    //    payload; it may be a duplicate.
+    //
+    // 3) ((norm_seq - norm_wr_seq) & 0x8000) != 0
+    //    This payload is in the past compared to the write pointer (or so very
+    //    far in the future that it has wrapped the seq no space), but not in
+    //    the unprocessed region of the ring buffer.  This could be a duplicate
+    //    retransmit; we just drop these payloads unless we are waiting for our
+    //    first fast start packet.  If we are waiting for fast start, then this
+    //    packet is probably the first packet of the fast start retransmission.
+    //    If it will fit in the buffer, back up the read pointer to its position
+    //    and clear the fast start flag, otherwise just drop it.
+    //
+    // 4) ((norm_seq - norm_wr_seq) & 0x8000) == 0
+    //    This payload is ahead of the next write pointer.  This indicates
+    //    that we have missed some payloads and need to request a retransmit.
+    //    If norm_seq >= (capacity - 1), then the gap is so large that it would
+    //    overflow the ring buffer and we should probably start to panic.
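+    //
+    // Worked example of the normalization (illustrative numbers): if
+    // rd_seq_ == 65530 and an incoming payload has seq == 3, then
+    // norm_seq == (uint16_t)(3 - 65530) == 9, so the payload lands 9 slots
+    // ahead of the read pointer even though the 16-bit sequence space has
+    // wrapped.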
+
+    uint16_t norm_wr_seq = ((wr_ + capacity_ - rd_) % capacity_);
+    uint16_t norm_seq    = seq - rd_seq_;
+
+    // Check for overflow first.
+    if ((!(norm_seq & 0x8000)) && (norm_seq >= (capacity_ - 1))) {
+        ALOGW("Ring buffer overflow; cap = %u, [rd, wr] = [%hu, %hu], seq = %hu",
+              capacity_, rd_seq_, norm_wr_seq + rd_seq_, seq);
+        PacketBuffer::destroy(buf);
+        return false;
+    }
+
+    // Check for case #1
+    if (norm_seq == norm_wr_seq) {
+        CHECK(wr_ < capacity_);
+        CHECK(NULL == ring_[wr_]);
+
+        ring_[wr_] = buf;
+        wr_ = (wr_ + 1) % capacity_;
+
+        CHECK(wr_ != rd_);
+        return true;
+    }
+
+    // Check case #2
+    uint32_t ring_pos = (rd_ + norm_seq) % capacity_;
+    if ((norm_seq < norm_wr_seq) && (!(norm_seq & 0x8000))) {
+        // Do we already have a payload for this slot?  If so, then this looks
+        // like a duplicate retransmit.  Just ignore it.
+        if (NULL != ring_[ring_pos]) {
+            ALOGD("RXed duplicate retransmit, seq = %hu", seq);
+            PacketBuffer::destroy(buf);
+        } else {
+            // Looks like we were missing this payload.  Go ahead and store it.
+            ring_[ring_pos] = buf;
+        }
+
+        return true;
+    }
+
+    // Check case #3
+    if ((norm_seq - norm_wr_seq) & 0x8000) {
+        if (!waiting_for_fast_start_) {
+            ALOGD("RXed duplicate retransmit from before rd pointer, seq = %hu",
+                  seq);
+            PacketBuffer::destroy(buf);
+        } else {
+            // Looks like a fast start fill-in.  Go ahead and store it, assuming
+            // that we can fit it in the buffer.
+            uint32_t implied_ring_size = static_cast<uint32_t>(norm_wr_seq)
+                                       + (rd_seq_ - seq);
+
+            if (implied_ring_size >= (capacity_ - 1)) {
+                ALOGD("RXed what looks like a fast start packet (seq = %hu),"
+                      " but packet is too far in the past to fit into the ring"
+                      "  buffer.  Dropping.", seq);
+                PacketBuffer::destroy(buf);
+            } else {
+                ring_pos = (rd_ + capacity_ + seq - rd_seq_) % capacity_;
+                rd_seq_ = seq;
+                rd_ = ring_pos;
+                waiting_for_fast_start_ = false;
+
+                CHECK(ring_pos < capacity_);
+                CHECK(NULL == ring_[ring_pos]);
+                ring_[ring_pos] = buf;
+            }
+
+        }
+        return true;
+    }
+
+    // Must be in case #4 with no overflow.  This packet fits in the current
+    // ring buffer, but is discontiguous.  Advance the write pointer leaving a
+    // gap behind.
+    uint32_t gap_len = (ring_pos + capacity_ - wr_) % capacity_;
+    ALOGD("Drop detected; %u packets, seq_range [%hu, %hu]",
+          gap_len,
+          rd_seq_ + norm_wr_seq,
+          rd_seq_ + norm_wr_seq + gap_len - 1);
+
+    CHECK(NULL == ring_[ring_pos]);
+    ring_[ring_pos] = buf;
+    wr_ = (ring_pos + 1) % capacity_;
+    CHECK(wr_ != rd_);
+
+    return true;
+}
+
+AAH_RXPlayer::PacketBuffer*
+AAH_RXPlayer::RXRingBuffer::fetchBuffer(bool* is_discon) {
+    AutoMutex lock(&lock_);
+    CHECK(NULL != ring_);
+    CHECK(NULL != is_discon);
+
+    // If the read sequence number is not known, then this ring buffer has not
+    // received a packet since being reset and there cannot be any packets to
+    // return.  If we are still waiting for the first fast start packet to show
+    // up, we don't want to let any buffer be consumed yet because we expect a
+    // packet from before the initial read sequence number to show up shortly.
+    if (!rd_seq_known_ || waiting_for_fast_start_) {
+        *is_discon = false;
+        return NULL;
+    }
+
+    PacketBuffer* ret = NULL;
+    *is_discon = !fetched_first_packet_;
+
+    while ((rd_ != wr_) && (NULL == ret)) {
+        CHECK(rd_ < capacity_);
+
+        // If we hit a gap, stall and do not advance the read pointer.  Let the
+        // higher level code deal with requesting retries and/or deciding to
+        // skip the current gap.
+        ret = ring_[rd_];
+        if (NULL == ret) {
+            break;
+        }
+
+        ring_[rd_] = NULL;
+        rd_ = (rd_ + 1) % capacity_;
+        ++rd_seq_;
+    }
+
+    if (NULL != ret) {
+        fetched_first_packet_ = true;
+    }
+
+    return ret;
+}
+
+AAH_RXPlayer::GapStatus
+AAH_RXPlayer::RXRingBuffer::fetchCurrentGap(SeqNoGap* gap) {
+    AutoMutex lock(&lock_);
+    CHECK(NULL != ring_);
+    CHECK(NULL != gap);
+
+    // If the read sequence number is not known, then this ring buffer has not
+    // received a packet since being reset and there cannot be any gaps.
+    if (!rd_seq_known_) {
+        return kGS_NoGap;
+    }
+
+    // If we are waiting for fast start, then the current gap is a fast start
+    // gap and it includes all packets before the read sequence number.
+    if (waiting_for_fast_start_) {
+        gap->start_seq_ =
+        gap->end_seq_   = rd_seq_ - 1;
+        return kGS_FastStartGap;
+    }
+
+    // If rd == wr, then the buffer is empty and there cannot be any gaps.
+    if (rd_ == wr_) {
+        return kGS_NoGap;
+    }
+
+    // If rd_ is currently pointing at an unprocessed packet, then there is no
+    // current gap.
+    CHECK(rd_ < capacity_);
+    if (NULL != ring_[rd_]) {
+        return kGS_NoGap;
+    }
+
+    // Looks like there must be a gap here.  The start of the gap is the current
+    // rd sequence number, all we need to do now is determine its length in
+    // order to compute the end sequence number.
+    gap->start_seq_ = rd_seq_;
+    uint16_t end = rd_seq_;
+    uint32_t tmp = (rd_ + 1) % capacity_;
+    while ((tmp != wr_) && (NULL == ring_[tmp])) {
+        ++end;
+        tmp = (tmp + 1) % capacity_;
+    }
+    gap->end_seq_ = end;
+
+    return kGS_NormalGap;
+}
+
+void AAH_RXPlayer::RXRingBuffer::processNAK(const SeqNoGap* nak) {
+    AutoMutex lock(&lock_);
+    CHECK(NULL != ring_);
+
+    // If we were waiting for our first fast start fill-in packet, and we
+    // received a NAK, then apparently we are not getting our fast start.  Just
+    // clear the waiting flag and go back to normal behavior.
+    if (waiting_for_fast_start_) {
+        waiting_for_fast_start_ = false;
+    }
+
+    // If we have not received a packet since last reset, or there is no data in
+    // the ring, then there is nothing to skip.
+    if ((!rd_seq_known_) || (rd_ == wr_)) {
+        return;
+    }
+
+    // If rd_ is currently pointing at an unprocessed packet, then there is no
+    // gap to skip.
+    CHECK(rd_ < capacity_);
+    if (NULL != ring_[rd_]) {
+        return;
+    }
+
+    // Looks like there must be a gap here.  Advance rd until we have passed
+    // over the portion of it indicated by nak (or all of the gap if nak is
+    // NULL).  Then reset fetched_first_packet_ so that the next read will show
+    // up as being discontiguous.
+    uint16_t seq_after_gap = (NULL == nak) ? 0 : nak->end_seq_ + 1;
+    while ((rd_ != wr_) &&
+           (NULL == ring_[rd_]) &&
+          ((NULL == nak) || (seq_after_gap != rd_seq_))) {
+        rd_ = (rd_ + 1) % capacity_;
+        ++rd_seq_;
+    }
+    fetched_first_packet_ = false;
+}
+
+int AAH_RXPlayer::RXRingBuffer::computeInactivityTimeout() {
+    AutoMutex lock(&lock_);
+
+    if (!rtp_activity_timeout_valid_) {
+        return -1;
+    }
+
+    uint64_t now = monotonicUSecNow();
+    if (rtp_activity_timeout_ <= now) {
+        return 0;
+    }
+
+    return (rtp_activity_timeout_ - now) / 1000;
+}
+
+AAH_RXPlayer::PacketBuffer*
+AAH_RXPlayer::PacketBuffer::allocate(ssize_t length) {
+    if (length <= 0) {
+        return NULL;
+    }
+
+    uint32_t alloc_len = sizeof(PacketBuffer) + length;
+    PacketBuffer* ret = reinterpret_cast<PacketBuffer*>(
+                        new uint8_t[alloc_len]);
+
+    if (NULL != ret) {
+        ret->length_ = length;
+    }
+
+    return ret;
+}
+
+void AAH_RXPlayer::PacketBuffer::destroy(PacketBuffer* pb) {
+    uint8_t* kill_me = reinterpret_cast<uint8_t*>(pb);
+    delete[] kill_me;
+}
+
+}  // namespace android
diff --git a/media/libaah_rtp/aah_rx_player_substream.cpp b/media/libaah_rtp/aah_rx_player_substream.cpp
new file mode 100644
index 0000000..1e4c784
--- /dev/null
+++ b/media/libaah_rtp/aah_rx_player_substream.cpp
@@ -0,0 +1,498 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include <include/avc_utils.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/Utils.h>
+
+#include "aah_rx_player.h"
+
+namespace android {
+
+int64_t AAH_RXPlayer::Substream::kAboutToUnderflowThreshold =
+    50ull * 1000;
+
+AAH_RXPlayer::Substream::Substream(uint32_t ssrc, OMXClient& omx) {
+    ssrc_ = ssrc;
+    substream_details_known_ = false;
+    buffer_in_progress_ = NULL;
+    status_ = OK;
+
+    decoder_ = new AAH_DecoderPump(omx);
+    if (decoder_ == NULL) {
+        ALOGE("%s failed to allocate decoder pump!", __PRETTY_FUNCTION__);
+    }
+    if (OK != decoder_->initCheck()) {
+        ALOGE("%s failed to initialize decoder pump!", __PRETTY_FUNCTION__);
+    }
+
+    // cleanupBufferInProgress will reset most of the internal state variables.
+    // Just need to make sure that buffer_in_progress_ is NULL before calling.
+    cleanupBufferInProgress();
+}
+
+
+void AAH_RXPlayer::Substream::shutdown() {
+    substream_meta_ = NULL;
+    status_ = OK;
+    cleanupBufferInProgress();
+    cleanupDecoder();
+}
+
+void AAH_RXPlayer::Substream::cleanupBufferInProgress() {
+    if (NULL != buffer_in_progress_) {
+        buffer_in_progress_->release();
+        buffer_in_progress_ = NULL;
+    }
+
+    expected_buffer_size_ = 0;
+    buffer_filled_ = 0;
+    waiting_for_rap_ = true;
+}
+
+void AAH_RXPlayer::Substream::cleanupDecoder() {
+    if (decoder_ != NULL) {
+        decoder_->shutdown();
+    }
+}
+
+bool AAH_RXPlayer::Substream::shouldAbort(const char* log_tag) {
+    // If we have already encountered a fatal error, do nothing.  We are just
+    // waiting for our owner to shut us down now.
+    if (OK != status_) {
+        ALOGV("Skipping %s, substream has encountered fatal error (%d).",
+                log_tag, status_);
+        return true;
+    }
+
+    return false;
+}
+
+void AAH_RXPlayer::Substream::processPayloadStart(uint8_t* buf,
+                                                  uint32_t amt,
+                                                  int32_t ts_lower) {
+    uint32_t min_length = 6;
+
+    if (shouldAbort(__PRETTY_FUNCTION__)) {
+        return;
+    }
+
+    // Do we have a buffer in progress already?  If so, abort the buffer.  In
+    // theory, this should never happen.  If there were a discontinuity in the
+    // stream, the discon in the seq_nos at the RTP level should have already
+    // triggered a cleanup of the buffer in progress.  To see a problem at this
+    // level is an indication either of a bug in the transmitter, or some form
+    // of terrible corruption/tampering on the wire.
+    if (NULL != buffer_in_progress_) {
+        ALOGE("processPayloadStart is aborting payload already in progress.");
+        cleanupBufferInProgress();
+    }
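+
+    // For reference, the TRTP header layout this parser expects (derived from
+    // the field extraction below):
+    //
+    //   buf[0]       : TRTP version (must be 0x01)
+    //   buf[1]       : substream type in the high nibble; flag bits in the low
+    //                  nibble (0x1 = upper timestamp bits present, 0x2 =
+    //                  timestamp transformation present)
+    //   buf[2-5]     : 32-bit TRTP payload length (big endian)
+    //   optional +4  : upper 32 bits of the 64-bit timestamp
+    //   optional +24 : timestamp transformation (not parsed here)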
+
+    // Parse enough of the header to know where we stand.  Since this is a
+    // payload start, it should begin with a TRTP header which has to be at
+    // least 6 bytes long.
+    if (amt < min_length) {
+        ALOGV("Discarding payload too short to contain TRTP header (len = %u)",
+                amt);
+        return;
+    }
+
+    // Check the TRTP version number.
+    if (0x01 != buf[0]) {
+        ALOGV("Unexpected TRTP version (%u) in header.  Expected %u.",
+                buf[0], 1);
+        return;
+    }
+
+    // Extract the substream type field and make sure it's one we understand
+    // (and one that does not conflict with any previously received substream
+    // type).
+    uint8_t header_type = (buf[1] >> 4) & 0xF;
+    switch (header_type) {
+        case 0x01:
+            // Audio, yay!  Just break.  We understand audio payloads.
+            break;
+        case 0x02:
+            ALOGV("RXed packet with unhandled TRTP header type (Video).");
+            return;
+        case 0x03:
+            ALOGV("RXed packet with unhandled TRTP header type (Subpicture).");
+            return;
+        case 0x04:
+            ALOGV("RXed packet with unhandled TRTP header type (Control).");
+            return;
+        default:
+            ALOGV("RXed packet with unhandled TRTP header type (%u).",
+                    header_type);
+            return;
+    }
+
+    if (substream_details_known_ && (header_type != substream_type_)) {
+        ALOGV("RXed TRTP Payload for SSRC=0x%08x where header type (%u) does not"
+              " match previously received header type (%u)",
+              ssrc_, header_type, substream_type_);
+        return;
+    }
+
+    // Check the flags to see if there is another 32 bits of timestamp present.
+    uint32_t trtp_header_len = 6;
+    bool ts_valid = buf[1] & 0x1;
+    if (ts_valid) {
+        min_length += 4;
+        trtp_header_len += 4;
+        if (amt < min_length) {
+            ALOGV("Discarding payload too short to contain TRTP timestamp"
+                  " (len = %u)", amt);
+            return;
+        }
+    }
+
+    // Extract the TRTP length field and sanity check it.
+    uint32_t trtp_len;
+    trtp_len = (static_cast<uint32_t>(buf[2]) << 24) |
+        (static_cast<uint32_t>(buf[3]) << 16) |
+        (static_cast<uint32_t>(buf[4]) <<  8) |
+        static_cast<uint32_t>(buf[5]);
+    if (trtp_len < min_length) {
+        ALOGV("TRTP length (%u) is too short to be valid.  Must be at least %u"
+              " bytes.", trtp_len, min_length);
+        return;
+    }
+
+    // Extract the rest of the timestamp field if valid.
+    int64_t ts = 0;
+    uint32_t parse_offset = 6;
+    if (ts_valid) {
+        ts = (static_cast<int64_t>(buf[parse_offset    ]) << 56) |
+            (static_cast<int64_t>(buf[parse_offset + 1]) << 48) |
+            (static_cast<int64_t>(buf[parse_offset + 2]) << 40) |
+            (static_cast<int64_t>(buf[parse_offset + 3]) << 32);
+        // Merge in the lower 32 bits without sign-extending them.
+        ts |= static_cast<uint32_t>(ts_lower);
+        parse_offset += 4;
+    }
+
+    // Check the flags to see if there is another 24 bytes of timestamp
+    // transformation present.
+    if (buf[1] & 0x2) {
+        min_length += 24;
+        parse_offset += 24;
+        trtp_header_len += 24;
+        if (amt < min_length) {
+            ALOGV("Discarding payload too short to contain TRTP timestamp"
+                  " transformation (len = %u)", amt);
+            return;
+        }
+    }
+
+    // TODO : break the parsing into individual parsers for the different
+    // payload types (audio, video, etc).
+    //
+    // At this point in time, we know that this is audio.  Go ahead and parse
+    // the basic header, check the codec type, and find the payload portion of
+    // the packet.
+    min_length += 3;
+    if (trtp_len < min_length) {
+        ALOGV("TRTP length (%u) is too short to be a valid audio payload.  Must"
+              " be at least %u bytes.", trtp_len, min_length);
+        return;
+    }
+
+    if (amt < min_length) {
+        ALOGV("TRTP porttion of RTP payload (%u bytes) too small to contain"
+              " entire TRTP header.  TRTP does not currently support fragmenting"
+              " TRTP headers across RTP payloads", amt);
+        return;
+    }
+
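+    // The three-byte audio-specific header: codec type, payload flags (bit 3
+    // marks a random access point, bit 4 indicates aux codec data follows; see
+    // the checks below), and the render volume handed to the decoder pump.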
+    uint8_t codec_type = buf[parse_offset    ];
+    uint8_t flags      = buf[parse_offset + 1];
+    uint8_t volume     = buf[parse_offset + 2];
+    parse_offset += 3;
+    trtp_header_len += 3;
+
+    if (!setupSubstreamType(header_type, codec_type)) {
+        return;
+    }
+
+    if (decoder_ != NULL) {
+        decoder_->setRenderVolume(volume);
+    }
+
+    // TODO : move all of the constant flag and offset definitions for TRTP up
+    // into some sort of common header file.
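+    //
+    // For reference, the audio payload flag bits as packed by
+    // TRTPAudioPacket::pack() are: 0x01 = end of stream, 0x02 = discontinuity,
+    // 0x04 = dropable, 0x08 = random access point.  0x10 indicates that aux
+    // codec data is present (not supported by this receiver).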
+    if (waiting_for_rap_ && !(flags & 0x08)) {
+        ALOGV("Dropping non-RAP TRTP Audio Payload while waiting for RAP.");
+        return;
+    }
+
+    if (flags & 0x10) {
+        ALOGV("Dropping TRTP Audio Payload with aux codec data present (only"
+              " handle MP3 right now, and it has no aux data)");
+        return;
+    }
+
+    // OK - everything left is just payload.  Compute the payload size, start
+    // the buffer in progress and pack as much payload as we can into it.  If
+    // the payload is finished once we are done, go ahead and send the payload
+    // to the decoder.
+    expected_buffer_size_ = trtp_len - trtp_header_len;
+    if (!expected_buffer_size_) {
+        ALOGV("Dropping TRTP Audio Payload with 0 Access Unit length");
+        return;
+    }
+
+    CHECK(amt >= trtp_header_len);
+    uint32_t todo = amt - trtp_header_len;
+    if (expected_buffer_size_ < todo) {
+        ALOGV("Extra data (%u > %u) present in initial TRTP Audio Payload;"
+              " dropping payload.", todo, expected_buffer_size_);
+        return;
+    }
+
+    buffer_filled_ = 0;
+    buffer_in_progress_ = new MediaBuffer(expected_buffer_size_);
+    if ((NULL == buffer_in_progress_) ||
+            (NULL == buffer_in_progress_->data())) {
+        ALOGV("Failed to allocate MediaBuffer of length %u",
+                expected_buffer_size_);
+        cleanupBufferInProgress();
+        return;
+    }
+
+    sp<MetaData> meta = buffer_in_progress_->meta_data();
+    if (meta == NULL) {
+        ALOGV("Missing metadata structure in allocated MediaBuffer; dropping"
+              " payload");
+        cleanupBufferInProgress();
+        return;
+    }
+
+    // TODO : set this based on the codec type indicated in the TRTP stream.
+    // Right now, we only support MP3, so the choice is obvious.
+    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
+    if (ts_valid) {
+        meta->setInt64(kKeyTime, ts);
+    }
+
+    if (todo > 0) {
+        uint8_t* tgt =
+            reinterpret_cast<uint8_t*>(buffer_in_progress_->data());
+        memcpy(tgt + buffer_filled_, buf + trtp_header_len, todo);
+        buffer_filled_ += todo;
+    }
+
+    if (buffer_filled_ >= expected_buffer_size_) {
+        processCompletedBuffer();
+    }
+}
+
+void AAH_RXPlayer::Substream::processPayloadCont(uint8_t* buf,
+                                                 uint32_t amt) {
+    if (shouldAbort(__PRETTY_FUNCTION__)) {
+        return;
+    }
+
+    if (NULL == buffer_in_progress_) {
+        ALOGV("TRTP Receiver skipping payload continuation; no buffer currently"
+              " in progress.");
+        return;
+    }
+
+    CHECK(buffer_filled_ < expected_buffer_size_);
+    uint32_t buffer_left = expected_buffer_size_ - buffer_filled_;
+    if (amt > buffer_left) {
+        ALOGV("Extra data (%u > %u) present in continued TRTP Audio Payload;"
+              " dropping payload.", amt, buffer_left);
+        cleanupBufferInProgress();
+        return;
+    }
+
+    if (amt > 0) {
+        uint8_t* tgt =
+            reinterpret_cast<uint8_t*>(buffer_in_progress_->data());
+        memcpy(tgt + buffer_filled_, buf, amt);
+        buffer_filled_ += amt;
+    }
+
+    if (buffer_filled_ >= expected_buffer_size_) {
+        processCompletedBuffer();
+    }
+}
+
+void AAH_RXPlayer::Substream::processCompletedBuffer() {
+    const uint8_t* buffer_data = NULL;
+    int sample_rate;
+    int channel_count;
+    size_t frame_size;
+    status_t res;
+
+    CHECK(NULL != buffer_in_progress_);
+
+    if (decoder_ == NULL) {
+        ALOGV("Dropping complete buffer, no decoder pump allocated");
+        goto bailout;
+    }
+
+    buffer_data = reinterpret_cast<const uint8_t*>(buffer_in_progress_->data());
+    if (buffer_in_progress_->size() < 4) {
+        ALOGV("MP3 payload too short to contain header, dropping payload.");
+        goto bailout;
+    }
+
+    // Extract the channel count and the sample rate from the MP3 header.  The
+    // stagefright MP3 decoder requires that these be delivered before decoding
+    // can begin.
+    if (!GetMPEGAudioFrameSize(U32_AT(buffer_data),
+                               &frame_size,
+                               &sample_rate,
+                               &channel_count,
+                               NULL,
+                               NULL)) {
+        ALOGV("Failed to parse MP3 header in payload, droping payload.");
+        goto bailout;
+    }
+
+
+    // Make sure that our substream metadata is set up properly.  If there has
+    // been a format change, be sure to reset the underlying decoder.  In
+    // stagefright, it seems like the only way to do this is to destroy and
+    // recreate the decoder.
+    if (substream_meta_ == NULL) {
+        substream_meta_ = new MetaData();
+
+        if (substream_meta_ == NULL) {
+            ALOGE("Failed to allocate MetaData structure for substream");
+            goto bailout;
+        }
+
+        substream_meta_->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
+        substream_meta_->setInt32  (kKeyChannelCount, channel_count);
+        substream_meta_->setInt32  (kKeySampleRate,   sample_rate);
+    } else {
+        int32_t prev_sample_rate;
+        int32_t prev_channel_count;
+        substream_meta_->findInt32(kKeySampleRate,   &prev_sample_rate);
+        substream_meta_->findInt32(kKeyChannelCount, &prev_channel_count);
+
+        if ((prev_channel_count != channel_count) ||
+            (prev_sample_rate   != sample_rate)) {
+            ALOGW("Format change detected, forcing decoder reset.");
+            cleanupDecoder();
+
+            substream_meta_->setInt32(kKeyChannelCount, channel_count);
+            substream_meta_->setInt32(kKeySampleRate,   sample_rate);
+        }
+    }
+
+    // If our decoder has not been set up yet, do so now.
+    res = decoder_->init(substream_meta_);
+    if (OK != res) {
+        ALOGE("Failed to init decoder (res = %d)", res);
+        cleanupDecoder();
+        substream_meta_ = NULL;
+        goto bailout;
+    }
+
+    // Queue the payload for decode.
+    res = decoder_->queueForDecode(buffer_in_progress_);
+
+    if (res != OK) {
+        ALOGD("Failed to queue payload for decode, resetting decoder pump!"
+              " (res = %d)", res);
+        status_ = res;
+        cleanupDecoder();
+        cleanupBufferInProgress();
+    }
+
+    // NULL out buffer_in_progress_ before calling the cleanup helper.
+    //
+    // MediaBuffers use something of a hybrid ref-counting pattern which
+    // prevents the AAH_DecoderPump's input queue from adding its own reference
+    // to the MediaBuffer.  MediaBuffers start life with a reference count of 0
+    // and an observer which starts as NULL.  Before being given an observer,
+    // the ref count cannot be allowed to become non-zero as it will cause
+    // calls to release() to assert.  Basically, before a MediaBuffer has an
+    // observer, it behaves like a non-ref-counted object where release()
+    // serves the role of delete.  After a MediaBuffer has an observer, it
+    // becomes more like a ref-counted object where add_ref() and release() can
+    // be used, and when the ref count hits zero, the MediaBuffer is handed off
+    // to the observer.
+    //
+    // Given all of this, when we give the buffer to the decoder pump to wait
+    // in the to-be-processed queue, the decoder cannot add a ref to the buffer
+    // as it would in a traditional ref-counting system.  Instead it needs to
+    // "steal" the non-existent ref.  In the case of queue failure, we need to
+    // make certain to release this non-existent reference so that the buffer
+    // is cleaned up during the cleanupBufferInProgress helper.  In the case of
+    // a successful queue operation, we need to make certain that the
+    // cleanupBufferInProgress helper does not release the buffer since it
+    // needs to remain alive in the queue.  We accomplish this by NULLing out
+    // the buffer pointer before calling the cleanup helper.
+    buffer_in_progress_ = NULL;
+
+bailout:
+    cleanupBufferInProgress();
+}
+
+
+void AAH_RXPlayer::Substream::processTSTransform(const LinearTransform& trans) {
+    if (decoder_ != NULL) {
+        decoder_->setRenderTSTransform(trans);
+    }
+}
+
+bool AAH_RXPlayer::Substream::isAboutToUnderflow() {
+    if (decoder_ == NULL) {
+        return false;
+    }
+
+    return decoder_->isAboutToUnderflow(kAboutToUnderflowThreshold);
+}
+
+bool AAH_RXPlayer::Substream::setupSubstreamType(uint8_t substream_type,
+                                                 uint8_t codec_type) {
+    // Sanity check the codec type.  Right now we only support MP3.  Also check
+    // for conflicts with previously delivered codec types.
+    if (substream_details_known_ && (codec_type != codec_type_)) {
+        ALOGV("RXed TRTP Payload for SSRC=0x%08x where codec type (%u) does not"
+              " match previously received codec type (%u)",
+              ssrc_, codec_type, codec_type_);
+        return false;
+    }
+
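+    // 0x03 corresponds to TRTPAudioPacket::kCodecMPEG1Audio on the TX side
+    // (see aah_tx_packet.h).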
+    if (codec_type != 0x03) {
+        ALOGV("RXed TRTP Audio Payload for SSRC=0x%08x with unsupported codec"
+              " type (%u)", ssrc_, codec_type);
+        return false;
+    }
+
+    if (!substream_details_known_) {
+        substream_type_ = substream_type;
+        codec_type_ = codec_type;
+        substream_details_known_ = true;
+    }
+
+    return true;
+}
+
+}  // namespace android
diff --git a/media/libaah_rtp/aah_tx_packet.cpp b/media/libaah_rtp/aah_tx_packet.cpp
new file mode 100644
index 0000000..3f6e0e9
--- /dev/null
+++ b/media/libaah_rtp/aah_tx_packet.cpp
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+#include <utils/Log.h>
+
+#include <arpa/inet.h>
+#include <string.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+
+#include "aah_tx_packet.h"
+
+namespace android {
+
+const int TRTPPacket::kRTPHeaderLen;
+const uint32_t TRTPPacket::kTRTPEpochMask;
+
+TRTPPacket::~TRTPPacket() {
+    delete mPacket;
+}
+
+/*** TRTP packet properties ***/
+
+void TRTPPacket::setSeqNumber(uint16_t val) {
+    mSeqNumber = val;
+
+    if (mIsPacked) {
+        const int kTRTPSeqNumberOffset = 2;
+        uint16_t* buf = reinterpret_cast<uint16_t*>(
+            mPacket + kTRTPSeqNumberOffset);
+        *buf = htons(mSeqNumber);
+    }
+}
+
+uint16_t TRTPPacket::getSeqNumber() const {
+    return mSeqNumber;
+}
+
+void TRTPPacket::setPTS(int64_t val) {
+    CHECK(!mIsPacked);
+    mPTS = val;
+    mPTSValid = true;
+}
+
+int64_t TRTPPacket::getPTS() const {
+    return mPTS;
+}
+
+void TRTPPacket::setEpoch(uint32_t val) {
+    mEpoch = val;
+
+    if (mIsPacked) {
+        const int kTRTPEpochOffset = 8;
+        uint32_t* buf = reinterpret_cast<uint32_t*>(
+            mPacket + kTRTPEpochOffset);
+        uint32_t field = ntohl(*buf);
+        field &= ~(kTRTPEpochMask << kTRTPEpochShift);
+        field |= (mEpoch & kTRTPEpochMask) << kTRTPEpochShift;
+        *buf = htonl(field);
+    }
+}
+
+void TRTPPacket::setProgramID(uint16_t val) {
+    CHECK(!mIsPacked);
+    mProgramID = val;
+}
+
+void TRTPPacket::setSubstreamID(uint16_t val) {
+    CHECK(!mIsPacked);
+    mSubstreamID = val;
+}
+
+
+void TRTPPacket::setClockTransform(const LinearTransform& trans) {
+    CHECK(!mIsPacked);
+    mClockTranform = trans;
+    mClockTranformValid = true;
+}
+
+uint8_t* TRTPPacket::getPacket() const {
+    CHECK(mIsPacked);
+    return mPacket;
+}
+
+int TRTPPacket::getPacketLen() const {
+    CHECK(mIsPacked);
+    return mPacketLen;
+}
+
+void TRTPPacket::setExpireTime(nsecs_t val) {
+    CHECK(!mIsPacked);
+    mExpireTime = val;
+}
+
+nsecs_t TRTPPacket::getExpireTime() const {
+    return mExpireTime;
+}
+
+/*** TRTP audio packet properties ***/
+
+void TRTPAudioPacket::setCodecType(TRTPAudioCodecType val) {
+    CHECK(!mIsPacked);
+    mCodecType = val;
+}
+
+void TRTPAudioPacket::setRandomAccessPoint(bool val) {
+    CHECK(!mIsPacked);
+    mRandomAccessPoint = val;
+}
+
+void TRTPAudioPacket::setDropable(bool val) {
+    CHECK(!mIsPacked);
+    mDropable = val;
+}
+
+void TRTPAudioPacket::setDiscontinuity(bool val) {
+    CHECK(!mIsPacked);
+    mDiscontinuity = val;
+}
+
+void TRTPAudioPacket::setEndOfStream(bool val) {
+    CHECK(!mIsPacked);
+    mEndOfStream = val;
+}
+
+void TRTPAudioPacket::setVolume(uint8_t val) {
+    CHECK(!mIsPacked);
+    mVolume = val;
+}
+
+void TRTPAudioPacket::setAccessUnitData(void* data, int len) {
+    CHECK(!mIsPacked);
+    mAccessUnitData = data;
+    mAccessUnitLen = len;
+}
+
+/*** TRTP control packet properties ***/
+
+void TRTPControlPacket::setCommandID(TRTPCommandID val) {
+    CHECK(!mIsPacked);
+    mCommandID = val;
+}
+
+/*** TRTP packet serializers ***/
+
+void TRTPPacket::writeU8(uint8_t*& buf, uint8_t val) {
+    *buf = val;
+    buf++;
+}
+
+void TRTPPacket::writeU16(uint8_t*& buf, uint16_t val) {
+    *reinterpret_cast<uint16_t*>(buf) = htons(val);
+    buf += 2;
+}
+
+void TRTPPacket::writeU32(uint8_t*& buf, uint32_t val) {
+    *reinterpret_cast<uint32_t*>(buf) = htonl(val);
+    buf += 4;
+}
+
+void TRTPPacket::writeU64(uint8_t*& buf, uint64_t val) {
+    buf[0] = static_cast<uint8_t>(val >> 56);
+    buf[1] = static_cast<uint8_t>(val >> 48);
+    buf[2] = static_cast<uint8_t>(val >> 40);
+    buf[3] = static_cast<uint8_t>(val >> 32);
+    buf[4] = static_cast<uint8_t>(val >> 24);
+    buf[5] = static_cast<uint8_t>(val >> 16);
+    buf[6] = static_cast<uint8_t>(val >>  8);
+    buf[7] = static_cast<uint8_t>(val);
+    buf += 8;
+}
+
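+// For reference, the wire layout written below is:
+//
+//   RTP header (12 bytes)
+//     [0]     V(2) P(1) X(1) CC(4)
+//     [1]     M(1, set on the first fragment) PT(7)
+//     [2-3]   sequence number
+//     [4-7]   low 32 bits of the PTS (0 if no PTS or not the first fragment)
+//     [8-11]  epoch(22) | program ID(5) | substream ID(5)
+//   TRTP header (6+ bytes)
+//     [12]    TRTP version
+//     [13]    header type(4) | flags: 0x02 = clock transform, 0x01 = PTS
+//     [14-17] TRTP length (total packet length minus the RTP header)
+//     [18-21] upper 32 bits of the PTS (only if the PTS flag is set)
+//     then    a 24-byte clock transform (a_zero u64, numer u32, denom u32,
+//             b_zero u64), only if the clock transform flag is set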
+void TRTPPacket::writeTRTPHeader(uint8_t*& buf,
+                                 bool isFirstFragment,
+                                 int totalPacketLen) {
+    // RTP header
+    writeU8(buf,
+            ((mVersion & 0x03) << 6) |
+            (static_cast<int>(mPadding) << 5) |
+            (static_cast<int>(mExtension) << 4) |
+            (mCsrcCount & 0x0F));
+    writeU8(buf,
+            (static_cast<int>(isFirstFragment) << 7) |
+            (mPayloadType & 0x7F));
+    writeU16(buf, mSeqNumber);
+    if (isFirstFragment && mPTSValid) {
+        writeU32(buf, mPTS & 0xFFFFFFFF);
+    } else {
+        writeU32(buf, 0);
+    }
+    writeU32(buf,
+            ((mEpoch & kTRTPEpochMask) << kTRTPEpochShift) |
+            ((mProgramID & 0x1F) << 5) |
+            (mSubstreamID & 0x1F));
+
+    // TRTP header
+    writeU8(buf, mTRTPVersion);
+    writeU8(buf,
+            ((mTRTPHeaderType & 0x0F) << 4) |
+            (mClockTranformValid ? 0x02 : 0x00) |
+            (mPTSValid ? 0x01 : 0x00));
+    writeU32(buf, totalPacketLen - kRTPHeaderLen);
+    if (mPTSValid) {
+        writeU32(buf, mPTS >> 32);
+    }
+
+    if (mClockTranformValid) {
+        writeU64(buf, mClockTranform.a_zero);
+        writeU32(buf, mClockTranform.a_to_b_numer);
+        writeU32(buf, mClockTranform.a_to_b_denom);
+        writeU64(buf, mClockTranform.b_zero);
+    }
+}
+
+bool TRTPAudioPacket::pack() {
+    if (mIsPacked) {
+        return false;
+    }
+
+    int packetLen = kRTPHeaderLen +
+                    mAccessUnitLen +
+                    TRTPHeaderLen();
+
+    // TODO : support multiple fragments
+    const int kMaxUDPPayloadLen = 65507;
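+    // (65507 = 65535 max IPv4 datagram size - 20 byte IP header
+    //  - 8 byte UDP header)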
+    if (packetLen > kMaxUDPPayloadLen) {
+        return false;
+    }
+
+    mPacket = new uint8_t[packetLen];
+    if (!mPacket) {
+        return false;
+    }
+
+    mPacketLen = packetLen;
+
+    uint8_t* cur = mPacket;
+
+    writeTRTPHeader(cur, true, packetLen);
+    writeU8(cur, mCodecType);
+    writeU8(cur,
+            (static_cast<int>(mRandomAccessPoint) << 3) |
+            (static_cast<int>(mDropable) << 2) |
+            (static_cast<int>(mDiscontinuity) << 1) |
+            (static_cast<int>(mEndOfStream)));
+    writeU8(cur, mVolume);
+
+    memcpy(cur, mAccessUnitData, mAccessUnitLen);
+
+    mIsPacked = true;
+    return true;
+}
+
+int TRTPPacket::TRTPHeaderLen() const {
+    // 6 bytes for version, header type, flags and length.  An additional 4 if
+    // there are upper timestamp bits present and another 24 if there is a clock
+    // transformation present.
+    return 6 +
+           (mClockTranformValid ? 24 : 0) +
+           (mPTSValid ? 4 : 0);
+}
+
+int TRTPAudioPacket::TRTPHeaderLen() const {
+    // TRTPPacket::TRTPHeaderLen() for the base TRTP header.  3 bytes for
+    // audio's codec type, flags and volume field.  Another 5 bytes if the
+    // codec type is PCM and we are sending sample rate/channel count, as well
+    // as however long the aux data (if present) is.
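+    //
+    // Note: pack() does not currently serialize these PCM parameter bytes;
+    // they are only accounted for in the length.  At the moment the
+    // transmitter only produces kCodecMPEG1Audio payloads, so the PCM path is
+    // unexercised.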
+
+    int pcmParamLength;
+    switch(mCodecType) {
+        case kCodecPCMBigEndian:
+        case kCodecPCMLittleEndian:
+            pcmParamLength = 5;
+            break;
+
+        default:
+            pcmParamLength = 0;
+            break;
+    }
+
+
+    // TODO : properly compute aux data length.  Currently, nothing
+    // uses aux data, so its length is always 0.
+    int auxDataLength = 0;
+    return TRTPPacket::TRTPHeaderLen() +
+           3 +
+           auxDataLength +
+           pcmParamLength;
+}
+
+bool TRTPControlPacket::pack() {
+    if (mIsPacked) {
+        return false;
+    }
+
+    // command packets contain a 2-byte command ID
+    int packetLen = kRTPHeaderLen +
+                    TRTPHeaderLen() +
+                    2;
+
+    mPacket = new uint8_t[packetLen];
+    if (!mPacket) {
+        return false;
+    }
+
+    mPacketLen = packetLen;
+
+    uint8_t* cur = mPacket;
+
+    writeTRTPHeader(cur, true, packetLen);
+    writeU16(cur, mCommandID);
+
+    mIsPacked = true;
+    return true;
+}
+
+}  // namespace android
diff --git a/media/libaah_rtp/aah_tx_packet.h b/media/libaah_rtp/aah_tx_packet.h
new file mode 100644
index 0000000..833803e
--- /dev/null
+++ b/media/libaah_rtp/aah_tx_packet.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __AAH_TX_PACKET_H__
+#define __AAH_TX_PACKET_H__
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/LinearTransform.h>
+#include <utils/RefBase.h>
+#include <utils/Timers.h>
+
+namespace android {
+
+class TRTPPacket : public RefBase {
+  protected:
+    enum TRTPHeaderType {
+        kHeaderTypeAudio = 1,
+        kHeaderTypeVideo = 2,
+        kHeaderTypeSubpicture = 3,
+        kHeaderTypeControl = 4,
+    };
+
+    TRTPPacket(TRTPHeaderType headerType)
+        : mIsPacked(false)
+        , mVersion(2)
+        , mPadding(false)
+        , mExtension(false)
+        , mCsrcCount(0)
+        , mPayloadType(100)
+        , mSeqNumber(0)
+        , mPTSValid(false)
+        , mPTS(0)
+        , mEpoch(0)
+        , mProgramID(0)
+        , mSubstreamID(0)
+        , mClockTranformValid(false)
+        , mTRTPVersion(1)
+        , mTRTPLength(0)
+        , mTRTPHeaderType(headerType)
+        , mPacket(NULL)
+        , mPacketLen(0) { }
+
+  public:
+    virtual ~TRTPPacket();
+
+    void setSeqNumber(uint16_t val);
+    uint16_t getSeqNumber() const;
+
+    void setPTS(int64_t val);
+    int64_t getPTS() const;
+
+    void setEpoch(uint32_t val);
+    void setProgramID(uint16_t val);
+    void setSubstreamID(uint16_t val);
+    void setClockTransform(const LinearTransform& trans);
+
+    uint8_t* getPacket() const;
+    int getPacketLen() const;
+
+    void setExpireTime(nsecs_t val);
+    nsecs_t getExpireTime() const;
+
+    virtual bool pack() = 0;
+
+    // mask for the number of bits in a TRTP epoch
+    static const uint32_t kTRTPEpochMask = (1 << 22) - 1;
+    static const int kTRTPEpochShift = 10;
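+    // The epoch occupies the top 22 bits of the last 32-bit word of the RTP
+    // header (bytes 8-11); the program ID and substream ID occupy the
+    // remaining 10 bits (5 bits each).  See writeTRTPHeader().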
+
+  protected:
+    static const int kRTPHeaderLen = 12;
+    virtual int TRTPHeaderLen() const;
+
+    void writeTRTPHeader(uint8_t*& buf,
+                         bool isFirstFragment,
+                         int totalPacketLen);
+
+    void writeU8(uint8_t*& buf, uint8_t val);
+    void writeU16(uint8_t*& buf, uint16_t val);
+    void writeU32(uint8_t*& buf, uint32_t val);
+    void writeU64(uint8_t*& buf, uint64_t val);
+
+    bool mIsPacked;
+
+    uint8_t mVersion;
+    bool mPadding;
+    bool mExtension;
+    uint8_t mCsrcCount;
+    uint8_t mPayloadType;
+    uint16_t mSeqNumber;
+    bool mPTSValid;
+    int64_t  mPTS;
+    uint32_t mEpoch;
+    uint16_t mProgramID;
+    uint16_t mSubstreamID;
+    LinearTransform mClockTranform;
+    bool mClockTranformValid;
+    uint8_t mTRTPVersion;
+    uint32_t mTRTPLength;
+    TRTPHeaderType mTRTPHeaderType;
+
+    uint8_t* mPacket;
+    int mPacketLen;
+
+    nsecs_t mExpireTime;
+
+    DISALLOW_EVIL_CONSTRUCTORS(TRTPPacket);
+};
+
+class TRTPAudioPacket : public TRTPPacket {
+  public:
+    TRTPAudioPacket()
+        : TRTPPacket(kHeaderTypeAudio)
+        , mCodecType(kCodecInvalid)
+        , mRandomAccessPoint(false)
+        , mDropable(false)
+        , mDiscontinuity(false)
+        , mEndOfStream(false)
+        , mVolume(0)
+        , mAccessUnitData(NULL) { }
+
+    enum TRTPAudioCodecType {
+        kCodecInvalid = 0,
+        kCodecPCMBigEndian = 1,
+        kCodecPCMLittleEndian = 2,
+        kCodecMPEG1Audio = 3,
+    };
+
+    void setCodecType(TRTPAudioCodecType val);
+    void setRandomAccessPoint(bool val);
+    void setDropable(bool val);
+    void setDiscontinuity(bool val);
+    void setEndOfStream(bool val);
+    void setVolume(uint8_t val);
+    void setAccessUnitData(void* data, int len);
+
+    virtual bool pack();
+
+  protected:
+    virtual int TRTPHeaderLen() const;
+
+  private:
+    TRTPAudioCodecType mCodecType;
+    bool mRandomAccessPoint;
+    bool mDropable;
+    bool mDiscontinuity;
+    bool mEndOfStream;
+    uint8_t mVolume;
+    void* mAccessUnitData;
+    int mAccessUnitLen;
+
+    DISALLOW_EVIL_CONSTRUCTORS(TRTPAudioPacket);
+};
+
+class TRTPControlPacket : public TRTPPacket {
+  public:
+    TRTPControlPacket()
+        : TRTPPacket(kHeaderTypeControl)
+        , mCommandID(kCommandNop) {}
+
+    enum TRTPCommandID {
+        kCommandNop   = 1,
+        kCommandFlush = 2,
+        kCommandEOS   = 3,
+    };
+
+    void setCommandID(TRTPCommandID val);
+
+    virtual bool pack();
+
+  private:
+    TRTPCommandID mCommandID;
+
+    DISALLOW_EVIL_CONSTRUCTORS(TRTPControlPacket);
+};
+
+}  // namespace android
+
+#endif  // __AAH_TX_PACKET_H__
diff --git a/media/libaah_rtp/aah_tx_player.cpp b/media/libaah_rtp/aah_tx_player.cpp
new file mode 100644
index 0000000..a79a989
--- /dev/null
+++ b/media/libaah_rtp/aah_tx_player.cpp
@@ -0,0 +1,1139 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+#include <utils/Log.h>
+
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+#include <netdb.h>
+#include <netinet/ip.h>
+
+#include <common_time/cc_helper.h>
+#include <media/IMediaPlayer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/FileSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MetaData.h>
+#include <utils/Timers.h>
+
+#include "aah_tx_packet.h"
+#include "aah_tx_player.h"
+
+namespace android {
+
+static int64_t kLowWaterMarkUs = 2000000ll;  // 2secs
+static int64_t kHighWaterMarkUs = 10000000ll;  // 10secs
+static const size_t kLowWaterMarkBytes = 40000;
+static const size_t kHighWaterMarkBytes = 200000;
+
+// When we start up, how much lead time should we put on the first access unit?
+static const int64_t kAAHStartupLeadTimeUs = 300000LL;
+
+// How much time do we attempt to lead the clock by in steady state?
+static const int64_t kAAHBufferTimeUs = 1000000LL;
+
+// how long do we keep data in our retransmit buffer after sending it.
+const int64_t AAH_TXPlayer::kAAHRetryKeepAroundTimeNs =
+    kAAHBufferTimeUs * 1100;
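+// (kAAHBufferTimeUs is in microseconds; multiplying by 1100 converts to
+// nanoseconds while adding a 10% margin, i.e. roughly 1.1 seconds.)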
+
+sp<MediaPlayerBase> createAAH_TXPlayer() {
+    sp<MediaPlayerBase> ret = new AAH_TXPlayer();
+    return ret;
+}
+
+template <typename T> static T clamp(T val, T min, T max) {
+    if (val < min) {
+        return min;
+    } else if (val > max) {
+        return max;
+    } else {
+        return val;
+    }
+}
+
+struct AAH_TXEvent : public TimedEventQueue::Event {
+    AAH_TXEvent(AAH_TXPlayer *player,
+                void (AAH_TXPlayer::*method)()) : mPlayer(player)
+                                                , mMethod(method) {}
+
+  protected:
+    virtual ~AAH_TXEvent() {}
+
+    virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
+        (mPlayer->*mMethod)();
+    }
+
+  private:
+    AAH_TXPlayer *mPlayer;
+    void (AAH_TXPlayer::*mMethod)();
+
+    AAH_TXEvent(const AAH_TXEvent &);
+    AAH_TXEvent& operator=(const AAH_TXEvent &);
+};
+
+AAH_TXPlayer::AAH_TXPlayer()
+        : mQueueStarted(false)
+        , mFlags(0)
+        , mExtractorFlags(0) {
+    DataSource::RegisterDefaultSniffers();
+
+    mBufferingEvent = new AAH_TXEvent(this, &AAH_TXPlayer::onBufferingUpdate);
+    mBufferingEventPending = false;
+
+    mPumpAudioEvent = new AAH_TXEvent(this, &AAH_TXPlayer::onPumpAudio);
+    mPumpAudioEventPending = false;
+
+    reset_l();
+}
+
+AAH_TXPlayer::~AAH_TXPlayer() {
+    if (mQueueStarted) {
+        mQueue.stop();
+    }
+
+    reset_l();
+}
+
+void AAH_TXPlayer::cancelPlayerEvents(bool keepBufferingGoing) {
+    if (!keepBufferingGoing) {
+        mQueue.cancelEvent(mBufferingEvent->eventID());
+        mBufferingEventPending = false;
+
+        mQueue.cancelEvent(mPumpAudioEvent->eventID());
+        mPumpAudioEventPending = false;
+    }
+}
+
+status_t AAH_TXPlayer::initCheck() {
+    // Check for the presence of the common time service by attempting to query
+    // for CommonTime's frequency.  If we get an error back, we cannot talk to
+    // the service at all and should abort now.
+    status_t res;
+    uint64_t freq;
+    res = mCCHelper.getCommonFreq(&freq);
+    if (OK != res) {
+        ALOGE("Failed to connect to common time service! (res %d)", res);
+        return res;
+    }
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::setDataSource(
+        const char *url,
+        const KeyedVector<String8, String8> *headers) {
+    Mutex::Autolock autoLock(mLock);
+    return setDataSource_l(url, headers);
+}
+
+status_t AAH_TXPlayer::setDataSource_l(
+        const char *url,
+        const KeyedVector<String8, String8> *headers) {
+    reset_l();
+
+    // the URL must consist of "aahTX://" followed by the real URL of
+    // the data source
+    const char *kAAHPrefix = "aahTX://";
+    if (strncasecmp(url, kAAHPrefix, strlen(kAAHPrefix))) {
+        return INVALID_OPERATION;
+    }
+
+    mUri.setTo(url + strlen(kAAHPrefix));
+
+    if (headers) {
+        mUriHeaders = *headers;
+
+        ssize_t index = mUriHeaders.indexOfKey(String8("x-hide-urls-from-log"));
+        if (index >= 0) {
+            // Browser is in "incognito" mode, suppress logging URLs.
+
+            // This isn't something that should be passed to the server.
+            mUriHeaders.removeItemsAt(index);
+
+            mFlags |= INCOGNITO;
+        }
+    }
+
+    // The URL may optionally contain a "#" character followed by a Skyjam
+    // cookie.  Ideally the cookie header should just be passed in the headers
+    // argument, but the Java API for supplying headers is apparently not yet
+    // exposed in the SDK used by application developers.
+    const char kSkyjamCookieDelimiter = '#';
+    char* skyjamCookie = strrchr(mUri.string(), kSkyjamCookieDelimiter);
+    if (skyjamCookie) {
+        skyjamCookie++;
+        mUriHeaders.add(String8("Cookie"), String8(skyjamCookie));
+        mUri = String8(mUri.string(), skyjamCookie - mUri.string());
+    }
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::setDataSource(int fd, int64_t offset, int64_t length) {
+    Mutex::Autolock autoLock(mLock);
+
+    reset_l();
+
+    sp<DataSource> dataSource = new FileSource(dup(fd), offset, length);
+
+    status_t err = dataSource->initCheck();
+
+    if (err != OK) {
+        return err;
+    }
+
+    mFileSource = dataSource;
+
+    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
+
+    if (extractor == NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    return setDataSource_l(extractor);
+}
+
+status_t AAH_TXPlayer::setVideoSurface(const sp<Surface>& surface) {
+    return OK;
+}
+
+status_t AAH_TXPlayer::setVideoSurfaceTexture(
+        const sp<ISurfaceTexture>& surfaceTexture) {
+    return OK;
+}
+
+status_t AAH_TXPlayer::prepare() {
+    return INVALID_OPERATION;
+}
+
+status_t AAH_TXPlayer::prepareAsync() {
+    Mutex::Autolock autoLock(mLock);
+
+    return prepareAsync_l();
+}
+
+status_t AAH_TXPlayer::prepareAsync_l() {
+    if (mFlags & PREPARING) {
+        return UNKNOWN_ERROR;  // async prepare already pending
+    }
+
+    mAAH_Sender = AAH_TXSender::GetInstance();
+    if (mAAH_Sender == NULL) {
+        return NO_MEMORY;
+    }
+
+    if (!mQueueStarted) {
+        mQueue.start();
+        mQueueStarted = true;
+    }
+
+    mFlags |= PREPARING;
+    mAsyncPrepareEvent = new AAH_TXEvent(
+            this, &AAH_TXPlayer::onPrepareAsyncEvent);
+
+    mQueue.postEvent(mAsyncPrepareEvent);
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::finishSetDataSource_l() {
+    sp<DataSource> dataSource;
+
+    if (!strncasecmp("http://",  mUri.string(), 7) ||
+        !strncasecmp("https://", mUri.string(), 8)) {
+
+        mConnectingDataSource = HTTPBase::Create(
+                (mFlags & INCOGNITO)
+                    ? HTTPBase::kFlagIncognito
+                    : 0);
+
+        mLock.unlock();
+        status_t err = mConnectingDataSource->connect(mUri, &mUriHeaders);
+        mLock.lock();
+
+        if (err != OK) {
+            mConnectingDataSource.clear();
+
+            ALOGI("mConnectingDataSource->connect() returned %d", err);
+            return err;
+        }
+
+        mCachedSource = new NuCachedSource2(mConnectingDataSource);
+        mConnectingDataSource.clear();
+
+        dataSource = mCachedSource;
+
+        // We're going to prefill the cache before trying to instantiate
+        // the extractor below, as the latter is an operation that otherwise
+        // could block on the datasource for a significant amount of time.
+        // Without this prefill, we would be unable to abort the preparation
+        // phase during that time.
+
+        mLock.unlock();
+
+        for (;;) {
+            status_t finalStatus;
+            size_t cachedDataRemaining =
+                mCachedSource->approxDataRemaining(&finalStatus);
+
+            if (finalStatus != OK ||
+                cachedDataRemaining >= kHighWaterMarkBytes ||
+                (mFlags & PREPARE_CANCELLED)) {
+                break;
+            }
+
+            usleep(200000);
+        }
+
+        mLock.lock();
+
+        if (mFlags & PREPARE_CANCELLED) {
+            ALOGI("Prepare cancelled while waiting for initial cache fill.");
+            return UNKNOWN_ERROR;
+        }
+    } else {
+        dataSource = DataSource::CreateFromURI(mUri.string(), &mUriHeaders);
+    }
+
+    if (dataSource == NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
+
+    if (extractor == NULL) {
+        return UNKNOWN_ERROR;
+    }
+
+    return setDataSource_l(extractor);
+}
+
+status_t AAH_TXPlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
+    // Attempt to approximate overall stream bitrate by summing all
+    // tracks' individual bitrates, if not all of them advertise bitrate,
+    // we have to fail.
+
+    int64_t totalBitRate = 0;
+
+    for (size_t i = 0; i < extractor->countTracks(); ++i) {
+        sp<MetaData> meta = extractor->getTrackMetaData(i);
+
+        int32_t bitrate;
+        if (!meta->findInt32(kKeyBitRate, &bitrate)) {
+            totalBitRate = -1;
+            break;
+        }
+
+        totalBitRate += bitrate;
+    }
+
+    mBitrate = totalBitRate;
+
+    ALOGV("mBitrate = %lld bits/sec", mBitrate);
+
+    bool haveAudio = false;
+    for (size_t i = 0; i < extractor->countTracks(); ++i) {
+        sp<MetaData> meta = extractor->getTrackMetaData(i);
+
+        const char *mime;
+        CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+        if (!strncasecmp(mime, "audio/", 6)) {
+            mAudioSource = extractor->getTrack(i);
+            CHECK(mAudioSource != NULL);
+            haveAudio = true;
+            break;
+        }
+    }
+
+    if (!haveAudio) {
+        return UNKNOWN_ERROR;
+    }
+
+    mExtractorFlags = extractor->flags();
+
+    return OK;
+}
+
+void AAH_TXPlayer::abortPrepare(status_t err) {
+    CHECK(err != OK);
+
+    notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+
+    mPrepareResult = err;
+    mFlags &= ~(PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED);
+    mPreparedCondition.broadcast();
+}
+
+void AAH_TXPlayer::onPrepareAsyncEvent() {
+    Mutex::Autolock autoLock(mLock);
+
+    if (mFlags & PREPARE_CANCELLED) {
+        ALOGI("prepare was cancelled before doing anything");
+        abortPrepare(UNKNOWN_ERROR);
+        return;
+    }
+
+    if (mUri.size() > 0) {
+        status_t err = finishSetDataSource_l();
+
+        if (err != OK) {
+            abortPrepare(err);
+            return;
+        }
+    }
+
+    mAudioSource->getFormat()->findInt64(kKeyDuration, &mDurationUs);
+
+    status_t err = mAudioSource->start();
+    if (err != OK) {
+        ALOGI("failed to start audio source, err=%d", err);
+        abortPrepare(err);
+        return;
+    }
+
+    mFlags |= PREPARING_CONNECTED;
+
+    if (mCachedSource != NULL) {
+        postBufferingEvent_l();
+    } else {
+        finishAsyncPrepare_l();
+    }
+}
+
+void AAH_TXPlayer::finishAsyncPrepare_l() {
+    notifyListener_l(MEDIA_PREPARED);
+
+    mPrepareResult = OK;
+    mFlags &= ~(PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED);
+    mFlags |= PREPARED;
+    mPreparedCondition.broadcast();
+}
+
+status_t AAH_TXPlayer::start() {
+    Mutex::Autolock autoLock(mLock);
+
+    mFlags &= ~CACHE_UNDERRUN;
+
+    return play_l();
+}
+
+status_t AAH_TXPlayer::play_l() {
+    if (mFlags & PLAYING) {
+        return OK;
+    }
+
+    if (!(mFlags & PREPARED)) {
+        return INVALID_OPERATION;
+    }
+
+    {
+        Mutex::Autolock lock(mEndpointLock);
+        if (!mEndpointValid) {
+            return INVALID_OPERATION;
+        }
+        if (!mEndpointRegistered) {
+            mProgramID = mAAH_Sender->registerEndpoint(mEndpoint);
+            mEndpointRegistered = true;
+        }
+    }
+
+    mFlags |= PLAYING;
+
+    updateClockTransform_l(false);
+
+    postPumpAudioEvent_l(-1);
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::stop() {
+    status_t ret = pause();
+    sendEOS_l();
+    return ret;
+}
+
+status_t AAH_TXPlayer::pause() {
+    Mutex::Autolock autoLock(mLock);
+
+    mFlags &= ~CACHE_UNDERRUN;
+
+    return pause_l();
+}
+
+status_t AAH_TXPlayer::pause_l(bool doClockUpdate) {
+    if (!(mFlags & PLAYING)) {
+        return OK;
+    }
+
+    cancelPlayerEvents(true /* keepBufferingGoing */);
+
+    mFlags &= ~PLAYING;
+
+    if (doClockUpdate) {
+        updateClockTransform_l(true);
+    }
+
+    return OK;
+}
+
+void AAH_TXPlayer::updateClockTransform_l(bool pause) {
+    // record the new pause status so that onPumpAudio knows what rate to apply
+    // when it initializes the transform
+    mPlayRateIsPaused = pause;
+
+    // if we haven't yet established a valid clock transform, then we can't
+    // do anything here
+    if (!mCurrentClockTransformValid) {
+        return;
+    }
+
+    // sample the current common time
+    int64_t commonTimeNow;
+    if (OK != mCCHelper.getCommonTime(&commonTimeNow)) {
+        ALOGE("updateClockTransform_l get common time failed");
+        mCurrentClockTransformValid = false;
+        return;
+    }
+
+    // convert the current common time to media time using the old
+    // transform
+    int64_t mediaTimeNow;
+    if (!mCurrentClockTransform.doReverseTransform(
+            commonTimeNow, &mediaTimeNow)) {
+        ALOGE("updateClockTransform_l reverse transform failed");
+        mCurrentClockTransformValid = false;
+        return;
+    }
+
+    // calculate a new transform that preserves the old transform's
+    // result for the current time
+    mCurrentClockTransform.a_zero = mediaTimeNow;
+    mCurrentClockTransform.b_zero = commonTimeNow;
+    mCurrentClockTransform.a_to_b_numer = 1;
+    mCurrentClockTransform.a_to_b_denom = pause ? 0 : 1;
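+
+    // With a_to_b_denom set to 0, the reverse (common -> media) transform
+    // collapses to a_zero, effectively freezing the reported media time while
+    // paused; with a denom of 1, media time advances 1:1 with common time.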
+
+    // send a packet announcing the new transform
+    sp<TRTPControlPacket> packet = new TRTPControlPacket();
+    packet->setClockTransform(mCurrentClockTransform);
+    packet->setCommandID(TRTPControlPacket::kCommandNop);
+    queuePacketToSender_l(packet);
+}
+
+void AAH_TXPlayer::sendEOS_l() {
+    sp<TRTPControlPacket> packet = new TRTPControlPacket();
+    packet->setCommandID(TRTPControlPacket::kCommandEOS);
+    queuePacketToSender_l(packet);
+}
+
+bool AAH_TXPlayer::isPlaying() {
+    return (mFlags & PLAYING) || (mFlags & CACHE_UNDERRUN);
+}
+
+status_t AAH_TXPlayer::seekTo(int msec) {
+    if (mExtractorFlags & MediaExtractor::CAN_SEEK) {
+        Mutex::Autolock autoLock(mLock);
+        return seekTo_l(static_cast<int64_t>(msec) * 1000);
+    }
+
+    notifyListener_l(MEDIA_SEEK_COMPLETE);
+    return OK;
+}
+
+status_t AAH_TXPlayer::seekTo_l(int64_t timeUs) {
+    mIsSeeking = true;
+    mSeekTimeUs = timeUs;
+
+    mCurrentClockTransformValid = false;
+    mLastQueuedMediaTimePTSValid = false;
+
+    // send a flush command packet
+    sp<TRTPControlPacket> packet = new TRTPControlPacket();
+    packet->setCommandID(TRTPControlPacket::kCommandFlush);
+    queuePacketToSender_l(packet);
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::getCurrentPosition(int *msec) {
+    if (!msec) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mLock);
+
+    int position;
+
+    if (mIsSeeking) {
+        position = mSeekTimeUs / 1000;
+    } else if (mCurrentClockTransformValid) {
+        // sample the current common time
+        int64_t commonTimeNow;
+        if (OK != mCCHelper.getCommonTime(&commonTimeNow)) {
+            ALOGE("getCurrentPosition get common time failed");
+            return INVALID_OPERATION;
+        }
+
+        int64_t mediaTimeNow;
+        if (!mCurrentClockTransform.doReverseTransform(commonTimeNow,
+                    &mediaTimeNow)) {
+            ALOGE("getCurrentPosition reverse transform failed");
+            return INVALID_OPERATION;
+        }
+
+        position = static_cast<int>(mediaTimeNow / 1000);
+    } else {
+        position = 0;
+    }
+
+    int duration;
+    if (getDuration_l(&duration) == OK) {
+        *msec = clamp(position, 0, duration);
+    } else {
+        *msec = (position >= 0) ? position : 0;
+    }
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::getDuration(int* msec) {
+    if (!msec) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mLock);
+
+    return getDuration_l(msec);
+}
+
+status_t AAH_TXPlayer::getDuration_l(int* msec) {
+    if (mDurationUs < 0) {
+        return UNKNOWN_ERROR;
+    }
+
+    *msec = (mDurationUs + 500) / 1000;
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::reset() {
+    Mutex::Autolock autoLock(mLock);
+    reset_l();
+    return OK;
+}
+
+void AAH_TXPlayer::reset_l() {
+    if (mFlags & PREPARING) {
+        mFlags |= PREPARE_CANCELLED;
+        if (mConnectingDataSource != NULL) {
+            ALOGI("interrupting the connection process");
+            mConnectingDataSource->disconnect();
+        }
+
+        if (mFlags & PREPARING_CONNECTED) {
+            // We are basically done preparing, we're just buffering
+            // enough data to start playback, we can safely interrupt that.
+            finishAsyncPrepare_l();
+        }
+    }
+
+    while (mFlags & PREPARING) {
+        mPreparedCondition.wait(mLock);
+    }
+
+    cancelPlayerEvents();
+
+    sendEOS_l();
+
+    mCachedSource.clear();
+
+    if (mAudioSource != NULL) {
+        mAudioSource->stop();
+    }
+    mAudioSource.clear();
+
+    mFlags = 0;
+    mExtractorFlags = 0;
+
+    mDurationUs = -1;
+    mIsSeeking = false;
+    mSeekTimeUs = 0;
+
+    mUri.setTo("");
+    mUriHeaders.clear();
+
+    mFileSource.clear();
+
+    mBitrate = -1;
+
+    {
+        Mutex::Autolock lock(mEndpointLock);
+        if (mAAH_Sender != NULL && mEndpointRegistered) {
+            mAAH_Sender->unregisterEndpoint(mEndpoint);
+        }
+        mEndpointRegistered = false;
+        mEndpointValid = false;
+    }
+
+    mProgramID = 0;
+
+    mAAH_Sender.clear();
+    mLastQueuedMediaTimePTSValid = false;
+    mCurrentClockTransformValid = false;
+    mPlayRateIsPaused = false;
+
+    mTRTPVolume = 255;
+}
+
+status_t AAH_TXPlayer::setLooping(int loop) {
+    return OK;
+}
+
+player_type AAH_TXPlayer::playerType() {
+    return AAH_TX_PLAYER;
+}
+
+status_t AAH_TXPlayer::setParameter(int key, const Parcel &request) {
+    return ERROR_UNSUPPORTED;
+}
+
+status_t AAH_TXPlayer::getParameter(int key, Parcel *reply) {
+    return ERROR_UNSUPPORTED;
+}
+
+status_t AAH_TXPlayer::invoke(const Parcel& request, Parcel *reply) {
+    if (!reply) {
+        return BAD_VALUE;
+    }
+
+    int32_t methodID;
+    status_t err = request.readInt32(&methodID);
+    if (err != android::OK) {
+        return err;
+    }
+
+    switch (methodID) {
+        case kInvokeSetAAHDstIPPort:
+        case kInvokeSetAAHConfigBlob: {
+            if (mEndpointValid) {
+                return INVALID_OPERATION;
+            }
+
+            String8 addr;
+            uint16_t port;
+
+            if (methodID == kInvokeSetAAHDstIPPort) {
+                addr = String8(request.readString16());
+
+                int32_t port32;
+                err = request.readInt32(&port32);
+                if (err != android::OK) {
+                    return err;
+                }
+                port = static_cast<uint16_t>(port32);
+            } else {
+                String8 blob(request.readString16());
+
+                char addr_buf[101];
+                if (sscanf(blob.string(), "V1:%100s %" SCNu16,
+                           addr_buf, &port) != 2) {
+                    return BAD_VALUE;
+                }
+                if (addr.setTo(addr_buf) != OK) {
+                    return NO_MEMORY;
+                }
+            }
+
+            struct hostent* ent = gethostbyname(addr.string());
+            if (ent == NULL) {
+                return ERROR_UNKNOWN_HOST;
+            }
+            if (!(ent->h_addrtype == AF_INET && ent->h_length == 4)) {
+                return BAD_VALUE;
+            }
+
+            Mutex::Autolock lock(mEndpointLock);
+            mEndpoint = AAH_TXSender::Endpoint(
+                        reinterpret_cast<struct in_addr*>(ent->h_addr)->s_addr,
+                        port);
+            mEndpointValid = true;
+            return OK;
+        };
+
+        default:
+            return INVALID_OPERATION;
+    }
+}
+
+status_t AAH_TXPlayer::getMetadata(const media::Metadata::Filter& ids,
+                                   Parcel* records) {
+    using media::Metadata;
+
+    Metadata metadata(records);
+
+    metadata.appendBool(Metadata::kPauseAvailable, true);
+    metadata.appendBool(Metadata::kSeekBackwardAvailable, false);
+    metadata.appendBool(Metadata::kSeekForwardAvailable, false);
+    metadata.appendBool(Metadata::kSeekAvailable, false);
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::setVolume(float leftVolume, float rightVolume) {
+    if (leftVolume != rightVolume) {
+        ALOGE("%s does not support per channel volume: %f, %f",
+              __PRETTY_FUNCTION__, leftVolume, rightVolume);
+    }
+
+    float volume = clamp(leftVolume, 0.0f, 1.0f);
+
+    Mutex::Autolock lock(mLock);
+    mTRTPVolume = static_cast<uint8_t>((volume * 255.0) + 0.5);
+
+    return OK;
+}
+
+status_t AAH_TXPlayer::setAudioStreamType(audio_stream_type_t streamType) {
+    return OK;
+}
+
+void AAH_TXPlayer::notifyListener_l(int msg, int ext1, int ext2) {
+    sendEvent(msg, ext1, ext2);
+}
+
+bool AAH_TXPlayer::getBitrate_l(int64_t *bitrate) {
+    off64_t size;
+    if (mDurationUs >= 0 &&
+        mCachedSource != NULL &&
+        mCachedSource->getSize(&size) == OK) {
+        *bitrate = size * 8000000ll / mDurationUs;  // in bits/sec
+        return true;
+    }
+
+    if (mBitrate >= 0) {
+        *bitrate = mBitrate;
+        return true;
+    }
+
+    *bitrate = 0;
+
+    return false;
+}
+
+// Returns true iff cached duration is available/applicable.
+bool AAH_TXPlayer::getCachedDuration_l(int64_t *durationUs, bool *eos) {
+    int64_t bitrate;
+
+    if (mCachedSource != NULL && getBitrate_l(&bitrate)) {
+        status_t finalStatus;
+        size_t cachedDataRemaining = mCachedSource->approxDataRemaining(
+                                        &finalStatus);
+        *durationUs = cachedDataRemaining * 8000000ll / bitrate;
+        *eos = (finalStatus != OK);
+        return true;
+    }
+
+    return false;
+}
+
+void AAH_TXPlayer::ensureCacheIsFetching_l() {
+    if (mCachedSource != NULL) {
+        mCachedSource->resumeFetchingIfNecessary();
+    }
+}
+
+void AAH_TXPlayer::postBufferingEvent_l() {
+    if (mBufferingEventPending) {
+        return;
+    }
+    mBufferingEventPending = true;
+    mQueue.postEventWithDelay(mBufferingEvent, 1000000ll);
+}
+
+void AAH_TXPlayer::postPumpAudioEvent_l(int64_t delayUs) {
+    if (mPumpAudioEventPending) {
+        return;
+    }
+    mPumpAudioEventPending = true;
+    mQueue.postEventWithDelay(mPumpAudioEvent, delayUs < 0 ? 10000 : delayUs);
+}
+
+void AAH_TXPlayer::onBufferingUpdate() {
+    Mutex::Autolock autoLock(mLock);
+    if (!mBufferingEventPending) {
+        return;
+    }
+    mBufferingEventPending = false;
+
+    if (mCachedSource != NULL) {
+        status_t finalStatus;
+        size_t cachedDataRemaining = mCachedSource->approxDataRemaining(
+                                        &finalStatus);
+        bool eos = (finalStatus != OK);
+
+        if (eos) {
+            if (finalStatus == ERROR_END_OF_STREAM) {
+                notifyListener_l(MEDIA_BUFFERING_UPDATE, 100);
+            }
+            if (mFlags & PREPARING) {
+                ALOGV("cache has reached EOS, prepare is done.");
+                finishAsyncPrepare_l();
+            }
+        } else {
+            int64_t bitrate;
+            if (getBitrate_l(&bitrate)) {
+                size_t cachedSize = mCachedSource->cachedSize();
+                int64_t cachedDurationUs = cachedSize * 8000000ll / bitrate;
+
+                int percentage = (100.0 * (double) cachedDurationUs)
+                               / mDurationUs;
+                if (percentage > 100) {
+                    percentage = 100;
+                }
+
+                notifyListener_l(MEDIA_BUFFERING_UPDATE, percentage);
+            } else {
+                // We don't know the bitrate of the stream, use absolute size
+                // limits to maintain the cache.
+
+                if ((mFlags & PLAYING) &&
+                    !eos &&
+                    (cachedDataRemaining < kLowWaterMarkBytes)) {
+                    ALOGI("cache is running low (< %d) , pausing.",
+                          kLowWaterMarkBytes);
+                    mFlags |= CACHE_UNDERRUN;
+                    pause_l();
+                    ensureCacheIsFetching_l();
+                    notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_START);
+                } else if (eos || cachedDataRemaining > kHighWaterMarkBytes) {
+                    if (mFlags & CACHE_UNDERRUN) {
+                        ALOGI("cache has filled up (> %d), resuming.",
+                              kHighWaterMarkBytes);
+                        mFlags &= ~CACHE_UNDERRUN;
+                        play_l();
+                        notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_END);
+                    } else if (mFlags & PREPARING) {
+                        ALOGV("cache has filled up (> %d), prepare is done",
+                              kHighWaterMarkBytes);
+                        finishAsyncPrepare_l();
+                    }
+                }
+            }
+        }
+    }
+
+    int64_t cachedDurationUs;
+    bool eos;
+    if (getCachedDuration_l(&cachedDurationUs, &eos)) {
+        ALOGV("cachedDurationUs = %.2f secs, eos=%d",
+              cachedDurationUs / 1E6, eos);
+
+        if ((mFlags & PLAYING) &&
+            !eos &&
+            (cachedDurationUs < kLowWaterMarkUs)) {
+            ALOGI("cache is running low (%.2f secs) , pausing.",
+                  cachedDurationUs / 1E6);
+            mFlags |= CACHE_UNDERRUN;
+            pause_l();
+            ensureCacheIsFetching_l();
+            notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_START);
+        } else if (eos || cachedDurationUs > kHighWaterMarkUs) {
+            if (mFlags & CACHE_UNDERRUN) {
+                ALOGI("cache has filled up (%.2f secs), resuming.",
+                      cachedDurationUs / 1E6);
+                mFlags &= ~CACHE_UNDERRUN;
+                play_l();
+                notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_END);
+            } else if (mFlags & PREPARING) {
+                ALOGV("cache has filled up (%.2f secs), prepare is done",
+                        cachedDurationUs / 1E6);
+                finishAsyncPrepare_l();
+            }
+        }
+    }
+
+    postBufferingEvent_l();
+}
+
+void AAH_TXPlayer::onPumpAudio() {
+    while (true) {
+        Mutex::Autolock autoLock(mLock);
+        // If this flag is clear, it's because someone has externally canceled
+        // this pump operation (probably because we are resetting/shutting
+        // down).  Get out immediately, do not reschedule ourselves.
+        if (!mPumpAudioEventPending) {
+            return;
+        }
+
+        // Start by checking if there is still work to be done.  If we have
+        // never queued a payload (so we don't know what the last queued PTS
+        // is) or we have never established a MediaTime->CommonTime
+        // transformation, then we have work to do (one time through this loop
+        // should establish both).  Otherwise, we want to keep a fixed amount
+        // of presentation time worth of data buffered.  If we cannot get
+        // common time (the service is unavailable, or common time is
+        // undefined) then we don't have a lot of good options here.  For now,
+        // signal an error up to the app level and shut down the transmission
+        // pump.
+        int64_t commonTimeNow;
+        if (OK != mCCHelper.getCommonTime(&commonTimeNow)) {
+            // Failed to get common time; either the service is down or common
+            // time is not synced.  Raise an error and shutdown the player.
+            ALOGE("*** Cannot pump audio, unable to fetch common time."
+                  "  Shutting down.");
+            notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, UNKNOWN_ERROR);
+            mPumpAudioEventPending = false;
+            break;
+        }
+
+        if (mCurrentClockTransformValid && mLastQueuedMediaTimePTSValid) {
+            int64_t mediaTimeNow;
+            bool conversionResult = mCurrentClockTransform.doReverseTransform(
+                                        commonTimeNow,
+                                        &mediaTimeNow);
+            CHECK(conversionResult);
+
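+            // Stop pumping once the last queued PTS leads the current media
+            // time by at least kAAHBufferTimeUs.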
+            if ((mediaTimeNow +
+                 kAAHBufferTimeUs -
+                 mLastQueuedMediaTimePTS) <= 0) {
+                break;
+            }
+        }
+
+        MediaSource::ReadOptions options;
+        if (mIsSeeking) {
+            options.setSeekTo(mSeekTimeUs);
+        }
+
+        MediaBuffer* mediaBuffer;
+        status_t err = mAudioSource->read(&mediaBuffer, &options);
+        if (err != NO_ERROR) {
+            if (err == ERROR_END_OF_STREAM) {
+                ALOGI("*** %s reached end of stream", __PRETTY_FUNCTION__);
+                notifyListener_l(MEDIA_BUFFERING_UPDATE, 100);
+                notifyListener_l(MEDIA_PLAYBACK_COMPLETE);
+                pause_l(false);
+                sendEOS_l();
+            } else {
+                ALOGE("*** %s read failed err=%d", __PRETTY_FUNCTION__, err);
+            }
+            return;
+        }
+
+        if (mIsSeeking) {
+            mIsSeeking = false;
+            notifyListener_l(MEDIA_SEEK_COMPLETE);
+        }
+
+        uint8_t* data = (static_cast<uint8_t*>(mediaBuffer->data()) +
+                mediaBuffer->range_offset());
+        ALOGV("*** %s got media buffer data=[%02hhx %02hhx %02hhx %02hhx]"
+              " offset=%d length=%d", __PRETTY_FUNCTION__,
+              data[0], data[1], data[2], data[3],
+              mediaBuffer->range_offset(), mediaBuffer->range_length());
+
+        int64_t mediaTimeUs;
+        CHECK(mediaBuffer->meta_data()->findInt64(kKeyTime, &mediaTimeUs));
+        ALOGV("*** timeUs=%lld", mediaTimeUs);
+
+        if (!mCurrentClockTransformValid) {
+            if (OK == mCCHelper.getCommonTime(&commonTimeNow)) {
+                mCurrentClockTransform.a_zero = mediaTimeUs;
+                mCurrentClockTransform.b_zero = commonTimeNow +
+                                                kAAHStartupLeadTimeUs;
+                mCurrentClockTransform.a_to_b_numer = 1;
+                mCurrentClockTransform.a_to_b_denom = mPlayRateIsPaused ? 0 : 1;
+                mCurrentClockTransformValid = true;
+            } else {
+                // Failed to get common time; either the service is down or
+                // common time is not synced.  Raise an error and shutdown the
+                // player.
+                ALOGE("*** Cannot begin transmission, unable to fetch common"
+                      " time. Dropping sample with pts=%lld", mediaTimeUs);
+                notifyListener_l(MEDIA_ERROR,
+                                 MEDIA_ERROR_UNKNOWN,
+                                 UNKNOWN_ERROR);
+                mPumpAudioEventPending = false;
+                break;
+            }
+        }
+
+        ALOGV("*** transmitting packet with pts=%lld", mediaTimeUs);
+
+        sp<TRTPAudioPacket> packet = new TRTPAudioPacket();
+        packet->setPTS(mediaTimeUs);
+        packet->setSubstreamID(1);
+
+        packet->setCodecType(TRTPAudioPacket::kCodecMPEG1Audio);
+        packet->setVolume(mTRTPVolume);
+        // TODO : introduce a throttle for this so we can control the
+        // frequency with which transforms get sent.
+        packet->setClockTransform(mCurrentClockTransform);
+        packet->setAccessUnitData(data, mediaBuffer->range_length());
+        packet->setRandomAccessPoint(true);
+
+        queuePacketToSender_l(packet);
+        mediaBuffer->release();
+
+        mLastQueuedMediaTimePTSValid = true;
+        mLastQueuedMediaTimePTS = mediaTimeUs;
+    }
+
+    { // Explicit scope for the autolock pattern.
+        Mutex::Autolock autoLock(mLock);
+
+        // If someone externally has cleared this flag, it's because we should
+        // be shutting down.  Do not reschedule ourselves.
+        if (!mPumpAudioEventPending) {
+            return;
+        }
+
+        // Looks like no one canceled us explicitly.  Clear our flag and post a
+        // new event to ourselves.
+        mPumpAudioEventPending = false;
+        postPumpAudioEvent_l(10000);
+    }
+}
+
+void AAH_TXPlayer::queuePacketToSender_l(const sp<TRTPPacket>& packet) {
+    if (mAAH_Sender == NULL) {
+        return;
+    }
+
+    sp<AMessage> message = new AMessage(AAH_TXSender::kWhatSendPacket,
+                                        mAAH_Sender->handlerID());
+
+    {
+        Mutex::Autolock lock(mEndpointLock);
+        if (!mEndpointValid) {
+            return;
+        }
+
+        message->setInt32(AAH_TXSender::kSendPacketIPAddr, mEndpoint.addr);
+        message->setInt32(AAH_TXSender::kSendPacketPort, mEndpoint.port);
+    }
+
+    packet->setProgramID(mProgramID);
+    packet->setExpireTime(systemTime() + kAAHRetryKeepAroundTimeNs);
+    packet->pack();
+
+    message->setObject(AAH_TXSender::kSendPacketTRTPPacket, packet);
+
+    message->post();
+}
+
+}  // namespace android
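Note: the clock transform filled in by onPumpAudio() above anchors the first media PTS (timeline "a") to a point slightly in the future on the common timeline (timeline "b"), then maps later PTS values at a 1:1 rate while playing (and a zero rate while paused); each packet carries the transform via setClockTransform() so receivers can do the same mapping. A minimal standalone sketch of that mapping follows; ClockXform is a hypothetical stand-in for utils/LinearTransform with the same fields, and all numbers are illustrative.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for utils/LinearTransform, carrying the same
    // fields that onPumpAudio() fills in above.
    struct ClockXform {
        int64_t a_zero;        // media-time anchor, in microseconds
        int64_t b_zero;        // common-time anchor, in microseconds
        int32_t a_to_b_numer;  // playback rate numerator (0 while paused)
        int32_t a_to_b_denom;  // playback rate denominator

        // Map a media-side timestamp onto the common timeline.
        int64_t aToB(int64_t a) const {
            return b_zero + ((a - a_zero) * a_to_b_numer) / a_to_b_denom;
        }
    };

    int main() {
        // Anchor the first PTS (a_zero) to "common time now + startup lead"
        // (b_zero) at a 1:1 rate; the values here are illustrative only.
        ClockXform xform = { 0, 1500000, 1, 1 };
        int64_t pts = 23220;  // PTS of a later media buffer, in microseconds
        printf("media pts %lld us presents at common time %lld us\n",
               (long long) pts, (long long) xform.aToB(pts));
        return 0;
    }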
diff --git a/media/libaah_rtp/aah_tx_player.h b/media/libaah_rtp/aah_tx_player.h
new file mode 100644
index 0000000..64cf5dc
--- /dev/null
+++ b/media/libaah_rtp/aah_tx_player.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __AAH_TX_PLAYER_H__
+#define __AAH_TX_PLAYER_H__
+
+#include <common_time/cc_helper.h>
+#include <libstagefright/include/HTTPBase.h>
+#include <libstagefright/include/NuCachedSource2.h>
+#include <libstagefright/include/TimedEventQueue.h>
+#include <media/MediaPlayerInterface.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaSource.h>
+#include <utils/LinearTransform.h>
+#include <utils/String8.h>
+#include <utils/threads.h>
+
+#include "aah_tx_sender.h"
+
+namespace android {
+
+class AAH_TXPlayer : public MediaPlayerHWInterface {
+  public:
+    AAH_TXPlayer();
+
+    virtual status_t    initCheck();
+    virtual status_t    setDataSource(const char *url,
+                                      const KeyedVector<String8, String8>*
+                                      headers);
+    virtual status_t    setDataSource(int fd, int64_t offset, int64_t length);
+    virtual status_t    setVideoSurface(const sp<Surface>& surface);
+    virtual status_t    setVideoSurfaceTexture(const sp<ISurfaceTexture>&
+                                               surfaceTexture);
+    virtual status_t    prepare();
+    virtual status_t    prepareAsync();
+    virtual status_t    start();
+    virtual status_t    stop();
+    virtual status_t    pause();
+    virtual bool        isPlaying();
+    virtual status_t    seekTo(int msec);
+    virtual status_t    getCurrentPosition(int *msec);
+    virtual status_t    getDuration(int *msec);
+    virtual status_t    reset();
+    virtual status_t    setLooping(int loop);
+    virtual player_type playerType();
+    virtual status_t    setParameter(int key, const Parcel &request);
+    virtual status_t    getParameter(int key, Parcel *reply);
+    virtual status_t    invoke(const Parcel& request, Parcel *reply);
+    virtual status_t    getMetadata(const media::Metadata::Filter& ids,
+                                    Parcel* records);
+    virtual status_t    setVolume(float leftVolume, float rightVolume);
+    virtual status_t    setAudioStreamType(audio_stream_type_t streamType);
+
+    // invoke method IDs
+    enum {
+        // set the IP address and port of the A@H receiver
+        kInvokeSetAAHDstIPPort = 1,
+
+        // set the destination IP address and port (and perhaps any additional
+        // parameters added in the future) packaged in one string
+        kInvokeSetAAHConfigBlob,
+    };
+
+    static const int64_t kAAHRetryKeepAroundTimeNs;
+
+  protected:
+    virtual ~AAH_TXPlayer();
+
+  private:
+    friend struct AwesomeEvent;
+
+    enum {
+        PLAYING             = 1,
+        PREPARING           = 8,
+        PREPARED            = 16,
+        PREPARE_CANCELLED   = 64,
+        CACHE_UNDERRUN      = 128,
+
+        // We are basically done preparing but are currently buffering
+        // sufficient data to begin playback and finish the preparation
+        // phase for good.
+        PREPARING_CONNECTED = 2048,
+
+        INCOGNITO           = 32768,
+    };
+
+    status_t setDataSource_l(const char *url,
+                             const KeyedVector<String8, String8> *headers);
+    status_t setDataSource_l(const sp<MediaExtractor>& extractor);
+    status_t finishSetDataSource_l();
+    status_t prepareAsync_l();
+    void onPrepareAsyncEvent();
+    void finishAsyncPrepare_l();
+    void abortPrepare(status_t err);
+    status_t play_l();
+    status_t pause_l(bool doClockUpdate = true);
+    status_t seekTo_l(int64_t timeUs);
+    void updateClockTransform_l(bool pause);
+    void sendEOS_l();
+    void cancelPlayerEvents(bool keepBufferingGoing = false);
+    void reset_l();
+    void notifyListener_l(int msg, int ext1 = 0, int ext2 = 0);
+    bool getBitrate_l(int64_t* bitrate);
+    status_t getDuration_l(int* msec);
+    bool getCachedDuration_l(int64_t* durationUs, bool* eos);
+    void ensureCacheIsFetching_l();
+    void postBufferingEvent_l();
+    void postPumpAudioEvent_l(int64_t delayUs);
+    void onBufferingUpdate();
+    void onPumpAudio();
+    void queuePacketToSender_l(const sp<TRTPPacket>& packet);
+
+    Mutex mLock;
+
+    TimedEventQueue mQueue;
+    bool mQueueStarted;
+
+    sp<TimedEventQueue::Event> mBufferingEvent;
+    bool mBufferingEventPending;
+
+    uint32_t mFlags;
+    uint32_t mExtractorFlags;
+
+    String8 mUri;
+    KeyedVector<String8, String8> mUriHeaders;
+
+    sp<DataSource> mFileSource;
+
+    sp<TimedEventQueue::Event> mAsyncPrepareEvent;
+    Condition mPreparedCondition;
+    status_t mPrepareResult;
+
+    bool mIsSeeking;
+    int64_t mSeekTimeUs;
+
+    sp<TimedEventQueue::Event> mPumpAudioEvent;
+    bool mPumpAudioEventPending;
+
+    sp<HTTPBase> mConnectingDataSource;
+    sp<NuCachedSource2> mCachedSource;
+
+    sp<MediaSource> mAudioSource;
+    int64_t mDurationUs;
+    int64_t mBitrate;
+
+    sp<AAH_TXSender> mAAH_Sender;
+    LinearTransform  mCurrentClockTransform;
+    bool             mCurrentClockTransformValid;
+    int64_t          mLastQueuedMediaTimePTS;
+    bool             mLastQueuedMediaTimePTSValid;
+    bool             mPlayRateIsPaused;
+    CCHelper         mCCHelper;
+
+    Mutex mEndpointLock;
+    AAH_TXSender::Endpoint mEndpoint;
+    bool mEndpointValid;
+    bool mEndpointRegistered;
+    uint16_t mProgramID;
+    uint8_t mTRTPVolume;
+
+    DISALLOW_EVIL_CONSTRUCTORS(AAH_TXPlayer);
+};
+
+}  // namespace android
+
+#endif  // __AAH_TX_PLAYER_H__
diff --git a/media/libaah_rtp/aah_tx_sender.cpp b/media/libaah_rtp/aah_tx_sender.cpp
new file mode 100644
index 0000000..d991ea7
--- /dev/null
+++ b/media/libaah_rtp/aah_tx_sender.cpp
@@ -0,0 +1,602 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+#include <media/stagefright/foundation/ADebug.h>
+
+#include <netinet/in.h>
+#include <poll.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <media/stagefright/foundation/AMessage.h>
+#include <utils/misc.h>
+
+#include "aah_tx_player.h"
+#include "aah_tx_sender.h"
+
+namespace android {
+
+const char* AAH_TXSender::kSendPacketIPAddr = "ipaddr";
+const char* AAH_TXSender::kSendPacketPort = "port";
+const char* AAH_TXSender::kSendPacketTRTPPacket = "trtp";
+
+const int AAH_TXSender::kRetryTrimIntervalUs = 100000;
+const int AAH_TXSender::kHeartbeatIntervalUs = 1000000;
+const int AAH_TXSender::kRetryBufferCapacity = 100;
+const nsecs_t AAH_TXSender::kHeartbeatTimeout = 600ull * 1000000000ull;
+
+Mutex AAH_TXSender::sLock;
+wp<AAH_TXSender> AAH_TXSender::sInstance;
+uint32_t AAH_TXSender::sNextEpoch;
+bool AAH_TXSender::sNextEpochValid = false;
+
+AAH_TXSender::AAH_TXSender() : mSocket(-1) {
+    mLastSentPacketTime = systemTime();
+}
+
+sp<AAH_TXSender> AAH_TXSender::GetInstance() {
+    Mutex::Autolock autoLock(sLock);
+
+    sp<AAH_TXSender> sender = sInstance.promote();
+
+    if (sender == NULL) {
+        sender = new AAH_TXSender();
+        if (sender == NULL) {
+            return NULL;
+        }
+
+        sender->mLooper = new ALooper();
+        if (sender->mLooper == NULL) {
+            return NULL;
+        }
+
+        sender->mReflector = new AHandlerReflector<AAH_TXSender>(sender.get());
+        if (sender->mReflector == NULL) {
+            return NULL;
+        }
+
+        sender->mSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+        if (sender->mSocket == -1) {
+            ALOGW("%s unable to create socket", __PRETTY_FUNCTION__);
+            return NULL;
+        }
+
+        struct sockaddr_in bind_addr;
+        memset(&bind_addr, 0, sizeof(bind_addr));
+        bind_addr.sin_family = AF_INET;
+        if (bind(sender->mSocket,
+                 reinterpret_cast<const sockaddr*>(&bind_addr),
+                 sizeof(bind_addr)) < 0) {
+            ALOGW("%s unable to bind socket (errno %d)",
+                  __PRETTY_FUNCTION__, errno);
+            return NULL;
+        }
+
+        sender->mRetryReceiver = new RetryReceiver(sender.get());
+        if (sender->mRetryReceiver == NULL) {
+            return NULL;
+        }
+
+        sender->mLooper->setName("AAH_TXSender");
+        sender->mLooper->registerHandler(sender->mReflector);
+        sender->mLooper->start(false, false, PRIORITY_AUDIO);
+
+        if (sender->mRetryReceiver->run("AAH_TXSenderRetry", PRIORITY_AUDIO)
+                != OK) {
+            ALOGW("%s unable to start retry thread", __PRETTY_FUNCTION__);
+            return NULL;
+        }
+
+        sInstance = sender;
+    }
+
+    return sender;
+}
+
+AAH_TXSender::~AAH_TXSender() {
+    mLooper->stop();
+    mLooper->unregisterHandler(mReflector->id());
+
+    if (mRetryReceiver != NULL) {
+        mRetryReceiver->requestExit();
+        mRetryReceiver->mWakeupEvent.setEvent();
+        if (mRetryReceiver->requestExitAndWait() != OK) {
+            ALOGW("%s shutdown of retry receiver failed", __PRETTY_FUNCTION__);
+        }
+        mRetryReceiver->mSender = NULL;
+        mRetryReceiver.clear();
+    }
+
+    if (mSocket != -1) {
+        close(mSocket);
+    }
+}
+
+// Return the next epoch number usable for a newly instantiated endpoint.
+uint32_t AAH_TXSender::getNextEpoch() {
+    Mutex::Autolock autoLock(sLock);
+
+    if (sNextEpochValid) {
+        sNextEpoch = (sNextEpoch + 1) & TRTPPacket::kTRTPEpochMask;
+    } else {
+        sNextEpoch = ns2ms(systemTime()) & TRTPPacket::kTRTPEpochMask;
+        sNextEpochValid = true;
+    }
+
+    return sNextEpoch;
+}
+
+// Notify the sender that a player has started sending to this endpoint.
+// Returns a program ID for use by the calling player.
+uint16_t AAH_TXSender::registerEndpoint(const Endpoint& endpoint) {
+    Mutex::Autolock lock(mEndpointLock);
+
+    EndpointState* eps = mEndpointMap.valueFor(endpoint);
+    if (eps) {
+        eps->playerRefCount++;
+    } else {
+        eps = new EndpointState(getNextEpoch());
+        mEndpointMap.add(endpoint, eps);
+    }
+
+    // if this is the first registered endpoint, then send a message to start
+    // trimming retry buffers and a message to start sending heartbeats.
+    if (mEndpointMap.size() == 1) {
+        sp<AMessage> trimMessage = new AMessage(kWhatTrimRetryBuffers,
+                                                handlerID());
+        trimMessage->post(kRetryTrimIntervalUs);
+
+        sp<AMessage> heartbeatMessage = new AMessage(kWhatSendHeartbeats,
+                                                     handlerID());
+        heartbeatMessage->post(kHeartbeatIntervalUs);
+    }
+
+    eps->nextProgramID++;
+    return eps->nextProgramID;
+}
+
+// Notify the sender that a player has ceased sending to this endpoint.
+// An endpoint's state cannot be deleted until all of the endpoint's
+// registered players have called unregisterEndpoint.
+void AAH_TXSender::unregisterEndpoint(const Endpoint& endpoint) {
+    Mutex::Autolock lock(mEndpointLock);
+
+    EndpointState* eps = mEndpointMap.valueFor(endpoint);
+    if (eps) {
+        eps->playerRefCount--;
+        CHECK(eps->playerRefCount >= 0);
+    }
+}
+
+void AAH_TXSender::onMessageReceived(const sp<AMessage>& msg) {
+    switch (msg->what()) {
+        case kWhatSendPacket:
+            onSendPacket(msg);
+            break;
+
+        case kWhatTrimRetryBuffers:
+            trimRetryBuffers();
+            break;
+
+        case kWhatSendHeartbeats:
+            sendHeartbeats();
+            break;
+
+        default:
+            TRESPASS();
+            break;
+    }
+}
+
+void AAH_TXSender::onSendPacket(const sp<AMessage>& msg) {
+    sp<RefBase> obj;
+    CHECK(msg->findObject(kSendPacketTRTPPacket, &obj));
+    sp<TRTPPacket> packet = static_cast<TRTPPacket*>(obj.get());
+
+    uint32_t ipAddr;
+    CHECK(msg->findInt32(kSendPacketIPAddr,
+                         reinterpret_cast<int32_t*>(&ipAddr)));
+
+    int32_t port32;
+    CHECK(msg->findInt32(kSendPacketPort, &port32));
+    uint16_t port = port32;
+
+    Mutex::Autolock lock(mEndpointLock);
+    doSendPacket_l(packet, Endpoint(ipAddr, port));
+    mLastSentPacketTime = systemTime();
+}
+
+void AAH_TXSender::doSendPacket_l(const sp<TRTPPacket>& packet,
+                                  const Endpoint& endpoint) {
+    EndpointState* eps = mEndpointMap.valueFor(endpoint);
+    if (!eps) {
+        // the endpoint state has disappeared, so the player that sent this
+        // packet must be dead.
+        return;
+    }
+
+    // assign the packet's sequence number
+    packet->setEpoch(eps->epoch);
+    packet->setSeqNumber(eps->trtpSeqNumber++);
+
+    // add the packet to the retry buffer
+    RetryBuffer& retry = eps->retry;
+    retry.push_back(packet);
+
+    // send the packet
+    struct sockaddr_in addr;
+    memset(&addr, 0, sizeof(addr));
+    addr.sin_family = AF_INET;
+    addr.sin_addr.s_addr = endpoint.addr;
+    addr.sin_port = htons(endpoint.port);
+
+    ssize_t result = sendto(mSocket,
+                            packet->getPacket(),
+                            packet->getPacketLen(),
+                            0,
+                            (const struct sockaddr *) &addr,
+                            sizeof(addr));
+    if (result == -1) {
+        ALOGW("%s sendto failed", __PRETTY_FUNCTION__);
+    }
+}
+
+void AAH_TXSender::trimRetryBuffers() {
+    Mutex::Autolock lock(mEndpointLock);
+
+    nsecs_t localTimeNow = systemTime();
+
+    Vector<Endpoint> endpointsToRemove;
+
+    for (size_t i = 0; i < mEndpointMap.size(); i++) {
+        EndpointState* eps = mEndpointMap.editValueAt(i);
+        RetryBuffer& retry = eps->retry;
+
+        while (!retry.isEmpty()) {
+            if (retry[0]->getExpireTime() < localTimeNow) {
+                retry.pop_front();
+            } else {
+                break;
+            }
+        }
+
+        if (retry.isEmpty() && eps->playerRefCount == 0) {
+            endpointsToRemove.add(mEndpointMap.keyAt(i));
+        }
+    }
+
+    // remove the state for any endpoints that are no longer in use
+    for (size_t i = 0; i < endpointsToRemove.size(); i++) {
+        Endpoint& e = endpointsToRemove.editItemAt(i);
+        ALOGD("*** %s removing endpoint addr=%08x", __PRETTY_FUNCTION__, e.addr);
+        size_t index = mEndpointMap.indexOfKey(e);
+        delete mEndpointMap.valueAt(index);
+        mEndpointMap.removeItemsAt(index);
+    }
+
+    // schedule the next trim
+    if (mEndpointMap.size()) {
+        sp<AMessage> trimMessage = new AMessage(kWhatTrimRetryBuffers,
+                                                handlerID());
+        trimMessage->post(kRetryTrimIntervalUs);
+    }
+}
+
+void AAH_TXSender::sendHeartbeats() {
+    Mutex::Autolock lock(mEndpointLock);
+
+    if (shouldSendHeartbeats_l()) {
+        for (size_t i = 0; i < mEndpointMap.size(); i++) {
+            EndpointState* eps = mEndpointMap.editValueAt(i);
+            const Endpoint& ep = mEndpointMap.keyAt(i);
+
+            sp<TRTPControlPacket> packet = new TRTPControlPacket();
+            packet->setCommandID(TRTPControlPacket::kCommandNop);
+
+            packet->setExpireTime(systemTime() +
+                                  AAH_TXPlayer::kAAHRetryKeepAroundTimeNs);
+            packet->pack();
+
+            doSendPacket_l(packet, ep);
+        }
+    }
+
+    // schedule the next heartbeat
+    if (mEndpointMap.size()) {
+        sp<AMessage> heartbeatMessage = new AMessage(kWhatSendHeartbeats,
+                                                     handlerID());
+        heartbeatMessage->post(kHeartbeatIntervalUs);
+    }
+}
+
+bool AAH_TXSender::shouldSendHeartbeats_l() {
+    // assert(holding endpoint lock)
+    return (systemTime() < (mLastSentPacketTime + kHeartbeatTimeout));
+}
+
+// Receiver
+
+// initial 4-byte ID of a retry request packet
+const uint32_t AAH_TXSender::RetryReceiver::kRetryRequestID = 'Treq';
+
+// initial 4-byte ID of a retry NAK packet
+const uint32_t AAH_TXSender::RetryReceiver::kRetryNakID = 'Tnak';
+
+// initial 4-byte ID of a fast start request packet
+const uint32_t AAH_TXSender::RetryReceiver::kFastStartRequestID = 'Tfst';
+
+AAH_TXSender::RetryReceiver::RetryReceiver(AAH_TXSender* sender)
+        : Thread(false)
+        , mSender(sender) {}
+
+AAH_TXSender::RetryReceiver::~RetryReceiver() {
+    mWakeupEvent.clearPendingEvents();
+}
+
+// Returns true if val is within the interval bounded inclusively by
+// start and end.  Also handles the case where there is a rollover of the
+// range between start and end.
+template <typename T>
+static inline bool withinIntervalWithRollover(T val, T start, T end) {
+    return ((start <= end && val >= start && val <= end) ||
+            (start > end && (val >= start || val <= end)));
+}
+
+bool AAH_TXSender::RetryReceiver::threadLoop() {
+    struct pollfd pollFds[2];
+    pollFds[0].fd = mSender->mSocket;
+    pollFds[0].events = POLLIN;
+    pollFds[0].revents = 0;
+    pollFds[1].fd = mWakeupEvent.getWakeupHandle();
+    pollFds[1].events = POLLIN;
+    pollFds[1].revents = 0;
+
+    int pollResult = poll(pollFds, NELEM(pollFds), -1);
+    if (pollResult == -1) {
+        ALOGE("%s poll failed", __PRETTY_FUNCTION__);
+        return false;
+    }
+
+    if (exitPending()) {
+        ALOGI("*** %s exiting", __PRETTY_FUNCTION__);
+        return false;
+    }
+
+    if (pollFds[0].revents) {
+        handleRetryRequest();
+    }
+
+    return true;
+}
+
+void AAH_TXSender::RetryReceiver::handleRetryRequest() {
+    ALOGV("*** RX %s start", __PRETTY_FUNCTION__);
+
+    RetryPacket request;
+    struct sockaddr requestSrcAddr;
+    socklen_t requestSrcAddrLen = sizeof(requestSrcAddr);
+
+    ssize_t result = recvfrom(mSender->mSocket, &request, sizeof(request), 0,
+                              &requestSrcAddr, &requestSrcAddrLen);
+    if (result == -1) {
+        ALOGE("%s recvfrom failed, errno=%d", __PRETTY_FUNCTION__, errno);
+        return;
+    }
+
+    if (static_cast<size_t>(result) < sizeof(RetryPacket)) {
+        ALOGW("%s short packet received", __PRETTY_FUNCTION__);
+        return;
+    }
+
+    uint32_t host_request_id = ntohl(request.id);
+    if ((host_request_id != kRetryRequestID) &&
+        (host_request_id != kFastStartRequestID)) {
+        ALOGW("%s received retry request with bogus ID (%08x)",
+                __PRETTY_FUNCTION__, host_request_id);
+        return;
+    }
+
+    Endpoint endpoint(request.endpointIP, ntohs(request.endpointPort));
+
+    Mutex::Autolock lock(mSender->mEndpointLock);
+
+    EndpointState* eps = mSender->mEndpointMap.valueFor(endpoint);
+
+    if (eps == NULL || eps->retry.isEmpty()) {
+        // we have no retry buffer or an empty retry buffer for this endpoint,
+        // so NAK the entire request
+        RetryPacket nak = request;
+        nak.id = htonl(kRetryNakID);
+        result = sendto(mSender->mSocket, &nak, sizeof(nak), 0,
+                        &requestSrcAddr, requestSrcAddrLen);
+        if (result == -1) {
+            ALOGW("%s sendto failed", __PRETTY_FUNCTION__);
+        }
+        return;
+    }
+
+    RetryBuffer& retry = eps->retry;
+
+    uint16_t startSeq = ntohs(request.seqStart);
+    uint16_t endSeq = ntohs(request.seqEnd);
+
+    uint16_t retryFirstSeq = retry[0]->getSeqNumber();
+    uint16_t retryLastSeq = retry[retry.size() - 1]->getSeqNumber();
+
+    // If this is a fast start, then force the start of the retry to match the
+    // start of the retransmit ring buffer (unless the end of the retransmit
+    // ring buffer is already past the point of fast start)
+    if ((host_request_id == kFastStartRequestID) &&
+        !((startSeq - retryFirstSeq) & 0x8000)) {
+        startSeq = retryFirstSeq;
+    }
+
+    int startIndex;
+    if (withinIntervalWithRollover(startSeq, retryFirstSeq, retryLastSeq)) {
+        startIndex = static_cast<uint16_t>(startSeq - retryFirstSeq);
+    } else {
+        startIndex = -1;
+    }
+
+    int endIndex;
+    if (withinIntervalWithRollover(endSeq, retryFirstSeq, retryLastSeq)) {
+        endIndex = static_cast<uint16_t>(endSeq - retryFirstSeq);
+    } else {
+        endIndex = -1;
+    }
+
+    if (startIndex == -1 && endIndex == -1) {
+        // no part of the request range is found in the retry buffer
+        RetryPacket nak = request;
+        nak.id = htonl(kRetryNakID);
+        result = sendto(mSender->mSocket, &nak, sizeof(nak), 0,
+                        &requestSrcAddr, requestSrcAddrLen);
+        if (result == -1) {
+            ALOGW("%s sendto failed", __PRETTY_FUNCTION__);
+        }
+        return;
+    }
+
+    if (startIndex == -1) {
+        // NAK a subrange at the front of the request range
+        RetryPacket nak = request;
+        nak.id = htonl(kRetryNakID);
+        nak.seqEnd = htons(retryFirstSeq - 1);
+        result = sendto(mSender->mSocket, &nak, sizeof(nak), 0,
+                        &requestSrcAddr, requestSrcAddrLen);
+        if (result == -1) {
+            ALOGW("%s sendto failed", __PRETTY_FUNCTION__);
+        }
+
+        startIndex = 0;
+    } else if (endIndex == -1) {
+        // NAK a subrange at the back of the request range
+        RetryPacket nak = request;
+        nak.id = htonl(kRetryNakID);
+        nak.seqStart = htons(retryLastSeq + 1);
+        result = sendto(mSender->mSocket, &nak, sizeof(nak), 0,
+                        &requestSrcAddr, requestSrcAddrLen);
+        if (result == -1) {
+            ALOGW("%s sendto failed", __PRETTY_FUNCTION__);
+        }
+
+        endIndex = retry.size() - 1;
+    }
+
+    // send the retry packets
+    for (int i = startIndex; i <= endIndex; i++) {
+        const sp<TRTPPacket>& replyPacket = retry[i];
+
+        result = sendto(mSender->mSocket,
+                        replyPacket->getPacket(),
+                        replyPacket->getPacketLen(),
+                        0,
+                        &requestSrcAddr,
+                        requestSrcAddrLen);
+
+        if (result == -1) {
+            ALOGW("%s sendto failed", __PRETTY_FUNCTION__);
+        }
+    }
+}
+
+// Endpoint
+
+AAH_TXSender::Endpoint::Endpoint()
+        : addr(0)
+        , port(0) { }
+
+AAH_TXSender::Endpoint::Endpoint(uint32_t a, uint16_t p)
+        : addr(a)
+        , port(p) {}
+
+bool AAH_TXSender::Endpoint::operator<(const Endpoint& other) const {
+    return ((addr < other.addr) ||
+            (addr == other.addr && port < other.port));
+}
+
+// EndpointState
+
+AAH_TXSender::EndpointState::EndpointState(uint32_t _epoch)
+    : retry(kRetryBufferCapacity)
+    , playerRefCount(1)
+    , trtpSeqNumber(0)
+    , nextProgramID(0)
+    , epoch(_epoch) { }
+
+// CircularBuffer
+
+template <typename T>
+CircularBuffer<T>::CircularBuffer(size_t capacity)
+        : mCapacity(capacity)
+        , mHead(0)
+        , mTail(0)
+        , mFillCount(0) {
+    mBuffer = new T[capacity];
+}
+
+template <typename T>
+CircularBuffer<T>::~CircularBuffer() {
+    delete [] mBuffer;
+}
+
+template <typename T>
+void CircularBuffer<T>::push_back(const T& item) {
+    if (this->isFull()) {
+        this->pop_front();
+    }
+    mBuffer[mHead] = item;
+    mHead = (mHead + 1) % mCapacity;
+    mFillCount++;
+}
+
+template <typename T>
+void CircularBuffer<T>::pop_front() {
+    CHECK(!isEmpty());
+    mBuffer[mTail] = T();
+    mTail = (mTail + 1) % mCapacity;
+    mFillCount--;
+}
+
+template <typename T>
+size_t CircularBuffer<T>::size() const {
+    return mFillCount;
+}
+
+template <typename T>
+bool CircularBuffer<T>::isFull() const {
+    return (mFillCount == mCapacity);
+}
+
+template <typename T>
+bool CircularBuffer<T>::isEmpty() const {
+    return (mFillCount == 0);
+}
+
+template <typename T>
+const T& CircularBuffer<T>::itemAt(size_t index) const {
+    CHECK(index < mFillCount);
+    return mBuffer[(mTail + index) % mCapacity];
+}
+
+template <typename T>
+const T& CircularBuffer<T>::operator[](size_t index) const {
+    return itemAt(index);
+}
+
+}  // namespace android
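Note: the retry handling above hinges on 16-bit sequence arithmetic. withinIntervalWithRollover() decides whether a requested sequence number still lies inside the retransmit ring buffer even after the sequence space wraps, and the fast-start path uses the 0x8000 sign-bit test to ask whether the requested start is at or ahead of the oldest buffered packet. A small standalone check of both follows; the predicate is reproduced verbatim and the sample numbers are arbitrary.

    #include <cassert>
    #include <cstdint>

    // Reproduced from aah_tx_sender.cpp: true if val lies in [start, end],
    // treating the interval as wrapping when start > end.
    template <typename T>
    static inline bool withinIntervalWithRollover(T val, T start, T end) {
        return ((start <= end && val >= start && val <= end) ||
                (start > end && (val >= start || val <= end)));
    }

    int main() {
        // Plain interval [100, 200].
        assert( withinIntervalWithRollover<uint16_t>(150, 100, 200));
        assert(!withinIntervalWithRollover<uint16_t>(250, 100, 200));

        // Wrapped interval [65500, 20] straddles the uint16_t rollover.
        assert( withinIntervalWithRollover<uint16_t>(65530, 65500, 20));
        assert( withinIntervalWithRollover<uint16_t>(5,     65500, 20));
        assert(!withinIntervalWithRollover<uint16_t>(30000, 65500, 20));

        // Fast-start sign-bit test: is startSeq at or ahead of the oldest
        // packet in the ring buffer, modulo 2^16?
        uint16_t startSeq = 10, retryFirstSeq = 65530;
        assert(!((uint16_t)(startSeq - retryFirstSeq) & 0x8000));
        return 0;
    }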
diff --git a/media/libaah_rtp/aah_tx_sender.h b/media/libaah_rtp/aah_tx_sender.h
new file mode 100644
index 0000000..74206c4
--- /dev/null
+++ b/media/libaah_rtp/aah_tx_sender.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __AAH_TX_SENDER_H__
+#define __AAH_TX_SENDER_H__
+
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AHandlerReflector.h>
+#include <utils/RefBase.h>
+#include <utils/threads.h>
+
+#include "aah_tx_packet.h"
+#include "pipe_event.h"
+
+namespace android {
+
+template <typename T> class CircularBuffer {
+  public:
+    CircularBuffer(size_t capacity);
+    ~CircularBuffer();
+    void push_back(const T& item);
+    void pop_front();
+    size_t size() const;
+    bool isFull() const;
+    bool isEmpty() const;
+    const T& itemAt(size_t index) const;
+    const T& operator[](size_t index) const;
+
+  private:
+    T* mBuffer;
+    size_t mCapacity;
+    size_t mHead;
+    size_t mTail;
+    size_t mFillCount;
+
+    DISALLOW_EVIL_CONSTRUCTORS(CircularBuffer);
+};
+
+class AAH_TXSender : public virtual RefBase {
+  public:
+    ~AAH_TXSender();
+
+    static sp<AAH_TXSender> GetInstance();
+
+    ALooper::handler_id handlerID() { return mReflector->id(); }
+
+    // an IP address and port
+    struct Endpoint {
+        Endpoint();
+        Endpoint(uint32_t a, uint16_t p);
+        bool operator<(const Endpoint& other) const;
+
+        uint32_t addr;
+        uint16_t port;
+    };
+
+    uint16_t registerEndpoint(const Endpoint& endpoint);
+    void unregisterEndpoint(const Endpoint& endpoint);
+
+    enum {
+        kWhatSendPacket,
+        kWhatTrimRetryBuffers,
+        kWhatSendHeartbeats,
+    };
+
+    // fields for SendPacket messages
+    static const char* kSendPacketIPAddr;
+    static const char* kSendPacketPort;
+    static const char* kSendPacketTRTPPacket;
+
+  private:
+    AAH_TXSender();
+
+    static Mutex sLock;
+    static wp<AAH_TXSender> sInstance;
+    static uint32_t sNextEpoch;
+    static bool sNextEpochValid;
+
+    static uint32_t getNextEpoch();
+
+    typedef CircularBuffer<sp<TRTPPacket> > RetryBuffer;
+
+    // state maintained on a per-endpoint basis
+    struct EndpointState {
+        EndpointState(uint32_t epoch);
+        RetryBuffer retry;
+        int playerRefCount;
+        uint16_t trtpSeqNumber;
+        uint16_t nextProgramID;
+        uint32_t epoch;
+    };
+
+    friend class AHandlerReflector<AAH_TXSender>;
+    void onMessageReceived(const sp<AMessage>& msg);
+    void onSendPacket(const sp<AMessage>& msg);
+    void doSendPacket_l(const sp<TRTPPacket>& packet,
+                        const Endpoint& endpoint);
+    void trimRetryBuffers();
+    void sendHeartbeats();
+    bool shouldSendHeartbeats_l();
+
+    sp<ALooper> mLooper;
+    sp<AHandlerReflector<AAH_TXSender> > mReflector;
+
+    int mSocket;
+    nsecs_t mLastSentPacketTime;
+
+    DefaultKeyedVector<Endpoint, EndpointState*> mEndpointMap;
+    Mutex mEndpointLock;
+
+    static const int kRetryTrimIntervalUs;
+    static const int kHeartbeatIntervalUs;
+    static const int kRetryBufferCapacity;
+    static const nsecs_t kHeartbeatTimeout;
+
+    class RetryReceiver : public Thread {
+      private:
+        friend class AAH_TXSender;
+
+        RetryReceiver(AAH_TXSender* sender);
+        virtual ~RetryReceiver();
+        virtual bool threadLoop();
+        void handleRetryRequest();
+
+        static const int kMaxReceiverPacketLen;
+        static const uint32_t kRetryRequestID;
+        static const uint32_t kFastStartRequestID;
+        static const uint32_t kRetryNakID;
+
+        AAH_TXSender* mSender;
+        PipeEvent mWakeupEvent;
+    };
+
+    sp<RetryReceiver> mRetryReceiver;
+
+    DISALLOW_EVIL_CONSTRUCTORS(AAH_TXSender);
+};
+
+struct RetryPacket {
+    uint32_t id;
+    uint32_t endpointIP;
+    uint16_t endpointPort;
+    uint16_t seqStart;
+    uint16_t seqEnd;
+} __attribute__((packed));
+
+}  // namespace android
+
+#endif  // __AAH_TX_SENDER_H__
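Note: for illustration, this is roughly what a receiver-side retry request for the RetryPacket wire format above would look like. The receiver's actual code is not part of this change, so buildRetryRequest() is a hypothetical helper; it converts the same fields that handleRetryRequest() converts back with ntohl()/ntohs(), and it assumes the endpoint address is already in network byte order, matching how Endpoint::addr is fed straight into sin_addr by doSendPacket_l().

    #include <arpa/inet.h>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Same wire layout as the RetryPacket declared above.
    struct RetryPacket {
        uint32_t id;
        uint32_t endpointIP;
        uint16_t endpointPort;
        uint16_t seqStart;
        uint16_t seqEnd;
    } __attribute__((packed));

    static const uint32_t kRetryRequestID = 'Treq';  // mirrors RetryReceiver::kRetryRequestID

    // Hypothetical helper: build a request to retransmit [seqStart, seqEnd].
    // endpointAddrNetOrder is assumed to already be in network byte order.
    static RetryPacket buildRetryRequest(uint32_t endpointAddrNetOrder,
                                         uint16_t endpointPort,
                                         uint16_t seqStart, uint16_t seqEnd) {
        RetryPacket req;
        memset(&req, 0, sizeof(req));
        req.id           = htonl(kRetryRequestID);
        req.endpointIP   = endpointAddrNetOrder;
        req.endpointPort = htons(endpointPort);
        req.seqStart     = htons(seqStart);
        req.seqEnd       = htons(seqEnd);
        return req;
    }

    int main() {
        RetryPacket req = buildRetryRequest(inet_addr("10.0.0.17"), 9000, 100, 140);
        printf("retry request: %zu bytes, id=0x%08x on the wire\n",
               sizeof(req), req.id);
        return 0;
    }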
diff --git a/media/libaah_rtp/pipe_event.cpp b/media/libaah_rtp/pipe_event.cpp
new file mode 100644
index 0000000..b8e6960
--- /dev/null
+++ b/media/libaah_rtp/pipe_event.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "LibAAH_RTP"
+#include <utils/Log.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <unistd.h>
+
+#include "pipe_event.h"
+
+namespace android {
+
+PipeEvent::PipeEvent() {
+    pipe_[0] = -1;
+    pipe_[1] = -1;
+
+    // Create the pipe.
+    if (pipe(pipe_) >= 0) {
+        // Set non-blocking mode on the read side of the pipe so we can
+        // easily drain it whenever we wakeup.
+        fcntl(pipe_[0], F_SETFL, O_NONBLOCK);
+    } else {
+        ALOGE("Failed to create pipe event %d %d %d",
+              pipe_[0], pipe_[1], errno);
+        pipe_[0] = -1;
+        pipe_[1] = -1;
+    }
+}
+
+PipeEvent::~PipeEvent() {
+    if (pipe_[0] >= 0) {
+        close(pipe_[0]);
+    }
+
+    if (pipe_[1] >= 0) {
+        close(pipe_[1]);
+    }
+}
+
+void PipeEvent::clearPendingEvents() {
+    char drain_buffer[16];
+    while (read(pipe_[0], drain_buffer, sizeof(drain_buffer)) > 0) {
+        // No body.
+    }
+}
+
+bool PipeEvent::wait(int timeout) {
+    struct pollfd wait_fd;
+
+    wait_fd.fd = getWakeupHandle();
+    wait_fd.events = POLLIN;
+    wait_fd.revents = 0;
+
+    int res = poll(&wait_fd, 1, timeout);
+
+    if (res < 0) {
+        ALOGE("Wait error in PipeEvent; sleeping to prevent overload!");
+        usleep(1000);
+    }
+
+    return (res > 0);
+}
+
+void PipeEvent::setEvent() {
+    char foo = 'q';
+    write(pipe_[1], &foo, 1);
+}
+
+}  // namespace android
+
diff --git a/media/libaah_rtp/pipe_event.h b/media/libaah_rtp/pipe_event.h
new file mode 100644
index 0000000..e53b0fd
--- /dev/null
+++ b/media/libaah_rtp/pipe_event.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __PIPE_EVENT_H__
+#define __PIPE_EVENT_H__
+
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+class PipeEvent {
+  public:
+    PipeEvent();
+    ~PipeEvent();
+
+    bool initCheck() const {
+        return ((pipe_[0] >= 0) && (pipe_[1] >= 0));
+    }
+
+    int getWakeupHandle() const { return pipe_[0]; }
+
+    // Block until the event fires; returns true if the event fired and false
+    // if the wait timed out.  Timeout is expressed in milliseconds; negative
+    // values mean wait forever.
+    bool wait(int timeout = -1);
+
+    void clearPendingEvents();
+    void setEvent();
+
+  private:
+    int pipe_[2];
+
+    DISALLOW_EVIL_CONSTRUCTORS(PipeEvent);
+};
+
+}  // namespace android
+
+#endif  // __PIPE_EVENT_H__
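Note: PipeEvent is a self-pipe wakeup. The read end is handed to poll() alongside the real sockets (as RetryReceiver::threadLoop() does above), and setEvent() writes a single byte from any thread to make poll() return. A minimal standalone sketch of the same pattern using plain pipe(2)/poll(2), with no Android dependencies:

    #include <fcntl.h>
    #include <poll.h>
    #include <pthread.h>
    #include <unistd.h>
    #include <cstdio>

    static int wakeup_pipe[2];

    // Another thread "fires the event" by writing one byte to the pipe.
    static void* firer(void*) {
        sleep(1);
        char c = 'q';
        (void) write(wakeup_pipe[1], &c, 1);
        return NULL;
    }

    int main() {
        if (pipe(wakeup_pipe) < 0) return 1;
        fcntl(wakeup_pipe[0], F_SETFL, O_NONBLOCK);  // so draining never blocks

        pthread_t t;
        pthread_create(&t, NULL, firer, NULL);

        // Wait on the read end; a real loop would also list its sockets here.
        struct pollfd pfd = { wakeup_pipe[0], POLLIN, 0 };
        int res = poll(&pfd, 1, -1);
        if (res > 0 && (pfd.revents & POLLIN)) {
            char drain[16];
            while (read(wakeup_pipe[0], drain, sizeof(drain)) > 0) { }
            printf("woken up\n");
        }

        pthread_join(t, NULL);
        close(wakeup_pipe[0]);
        close(wakeup_pipe[1]);
        return 0;
    }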
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index f9f997f..19b7e32 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -202,7 +202,7 @@
 status_t AudioEffect::setEnabled(bool enabled)
 {
     if (mStatus != NO_ERROR) {
-        return INVALID_OPERATION;
+        return (mStatus == ALREADY_EXISTS) ? INVALID_OPERATION : mStatus;
     }
 
     status_t status = NO_ERROR;
@@ -231,7 +231,7 @@
 {
     if (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS) {
         ALOGV("command() bad status %d", mStatus);
-        return INVALID_OPERATION;
+        return mStatus;
     }
 
     if (cmdCode == EFFECT_CMD_ENABLE || cmdCode == EFFECT_CMD_DISABLE) {
@@ -263,7 +263,7 @@
 status_t AudioEffect::setParameter(effect_param_t *param)
 {
     if (mStatus != NO_ERROR) {
-        return INVALID_OPERATION;
+        return (mStatus == ALREADY_EXISTS) ? INVALID_OPERATION : mStatus;
     }
 
     if (param == NULL || param->psize == 0 || param->vsize == 0) {
@@ -281,7 +281,7 @@
 status_t AudioEffect::setParameterDeferred(effect_param_t *param)
 {
     if (mStatus != NO_ERROR) {
-        return INVALID_OPERATION;
+        return (mStatus == ALREADY_EXISTS) ? INVALID_OPERATION : mStatus;
     }
 
     if (param == NULL || param->psize == 0 || param->vsize == 0) {
@@ -307,7 +307,7 @@
 status_t AudioEffect::setParameterCommit()
 {
     if (mStatus != NO_ERROR) {
-        return INVALID_OPERATION;
+        return (mStatus == ALREADY_EXISTS) ? INVALID_OPERATION : mStatus;
     }
 
     Mutex::Autolock _l(mCblk->lock);
@@ -321,7 +321,7 @@
 status_t AudioEffect::getParameter(effect_param_t *param)
 {
     if (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS) {
-        return INVALID_OPERATION;
+        return mStatus;
     }
 
     if (param == NULL || param->psize == 0 || param->vsize == 0) {
@@ -341,7 +341,7 @@
 void AudioEffect::binderDied()
 {
     ALOGW("IEffect died");
-    mStatus = NO_INIT;
+    mStatus = DEAD_OBJECT;
     if (mCbf != NULL) {
         status_t status = DEAD_OBJECT;
         mCbf(EVENT_ERROR, mUserData, &status);
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index b74b3e3..a4068ff 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -304,10 +304,25 @@
     if (mActive == 0) {
         mActive = 1;
 
+        pid_t tid;
+        if (t != 0) {
+            mReadyToRun = WOULD_BLOCK;
+            t->run("ClientRecordThread", ANDROID_PRIORITY_AUDIO);
+            tid = t->getTid();  // pid_t is unknown until run()
+            ALOGV("getTid=%d", tid);
+            if (tid == -1) {
+                tid = 0;
+            }
+            // thread blocks in readyToRun()
+        } else {
+            tid = 0;    // not gettid()
+        }
+
         cblk->lock.lock();
         if (!(cblk->flags & CBLK_INVALID_MSK)) {
             cblk->lock.unlock();
-            ret = mAudioRecord->start();
+            ALOGV("mAudioRecord->start(tid=%d)", tid);
+            ret = mAudioRecord->start(tid);
             cblk->lock.lock();
             if (ret == DEAD_OBJECT) {
                 android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
@@ -322,7 +337,9 @@
             cblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
             cblk->waitTimeMs = 0;
             if (t != 0) {
-                t->run("ClientRecordThread", ANDROID_PRIORITY_AUDIO);
+                // thread unblocks in readyToRun() and returns NO_ERROR
+                mReadyToRun = NO_ERROR;
+                mCondition.signal();
             } else {
                 mPreviousPriority = getpriority(PRIO_PROCESS, 0);
                 mPreviousSchedulingGroup = androidGetThreadSchedulingGroup(0);
@@ -330,6 +347,9 @@
             }
         } else {
             mActive = 0;
+            // thread unblocks in readyToRun() and returns NO_INIT
+            mReadyToRun = NO_INIT;
+            mCondition.signal();
         }
     }
 
@@ -522,7 +542,7 @@
                     ALOGW(   "obtainBuffer timed out (is the CPU pegged?) "
                             "user=%08x, server=%08x", cblk->user, cblk->server);
                     cblk->lock.unlock();
-                    result = mAudioRecord->start();
+                    result = mAudioRecord->start(0);    // callback thread hasn't changed
                     cblk->lock.lock();
                     if (result == DEAD_OBJECT) {
                         android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
@@ -760,7 +780,7 @@
         result = openRecord_l(cblk->sampleRate, mFormat, mChannelMask,
                 mFrameCount, mFlags, getInput_l());
         if (result == NO_ERROR) {
-            result = mAudioRecord->start();
+            result = mAudioRecord->start(0);    // callback thread hasn't changed
         }
         if (result != NO_ERROR) {
             mActive = false;
@@ -811,6 +831,15 @@
     return mReceiver.processAudioBuffer(this);
 }
 
+status_t AudioRecord::ClientRecordThread::readyToRun()
+{
+    AutoMutex _l(mReceiver.mLock);  // named lock: a bare temporary would unlock immediately
+    while (mReceiver.mReadyToRun == WOULD_BLOCK) {
+        mReceiver.mCondition.wait(mReceiver.mLock);
+    }
+    return mReceiver.mReadyToRun;
+}
+
 // -------------------------------------------------------------------------
 
 }; // namespace android
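Note: the AudioRecord::start() change above parks the freshly launched ClientRecordThread in readyToRun() until start() knows whether IAudioRecord::start(tid) succeeded, using mReadyToRun as a gate that moves from WOULD_BLOCK to NO_ERROR or NO_INIT under mLock/mCondition. A minimal standalone sketch of that handshake, with std::thread and std::condition_variable standing in for the Android Thread and Condition classes:

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    // Gate states, mirroring the WOULD_BLOCK / NO_ERROR / NO_INIT statuses above.
    enum Status { WOULD_BLOCK, NO_ERROR, NO_INIT };

    static std::mutex gLock;
    static std::condition_variable gCond;
    static Status gReadyToRun = WOULD_BLOCK;

    // Worker-side readyToRun(): block until the starter publishes a verdict.
    static Status readyToRun() {
        std::unique_lock<std::mutex> lock(gLock);
        gCond.wait(lock, [] { return gReadyToRun != WOULD_BLOCK; });
        return gReadyToRun;
    }

    int main() {
        std::thread worker([] {
            Status s = readyToRun();
            printf("worker %s\n", s == NO_ERROR ? "runs" : "exits");
        });

        // Starter side: attempt the real start, then unblock the worker with
        // the outcome (NO_ERROR on success, NO_INIT on failure).
        bool startSucceeded = true;  // placeholder for mAudioRecord->start(tid)
        {
            std::lock_guard<std::mutex> lock(gLock);
            gReadyToRun = startSucceeded ? NO_ERROR : NO_INIT;
        }
        gCond.notify_one();

        worker.join();
        return 0;
    }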
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 087d7b2..74c97ed 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -1,4 +1,4 @@
-/* frameworks/base/media/libmedia/AudioTrack.cpp
+/*
 **
 ** Copyright 2007, The Android Open Source Project
 **
@@ -80,7 +80,9 @@
 
 AudioTrack::AudioTrack()
     : mStatus(NO_INIT),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
+      mIsTimed(false),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
 {
 }
 
@@ -96,7 +98,9 @@
         int notificationFrames,
         int sessionId)
     : mStatus(NO_INIT),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
+      mIsTimed(false),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames,
@@ -134,7 +138,9 @@
         int notificationFrames,
         int sessionId)
     : mStatus(NO_INIT),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
+      mIsTimed(false),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
+      mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             0, flags, cbf, user, notificationFrames,
@@ -362,18 +368,26 @@
         cblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
         cblk->waitTimeMs = 0;
         android_atomic_and(~CBLK_DISABLED_ON, &cblk->flags);
+        pid_t tid;
         if (t != 0) {
             t->run("AudioTrackThread", ANDROID_PRIORITY_AUDIO);
+            tid = t->getTid();  // pid_t is unknown until run()
+            ALOGV("getTid=%d", tid);
+            if (tid == -1) {
+                tid = 0;
+            }
         } else {
             mPreviousPriority = getpriority(PRIO_PROCESS, 0);
             mPreviousSchedulingGroup = androidGetThreadSchedulingGroup(0);
             androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
+            tid = 0;    // not gettid()
         }
 
         ALOGV("start %p before lock cblk %p", this, mCblk);
         if (!(cblk->flags & CBLK_INVALID_MSK)) {
             cblk->lock.unlock();
-            status = mAudioTrack->start();
+            ALOGV("mAudioTrack->start(tid=%d)", tid);
+            status = mAudioTrack->start(tid);
             cblk->lock.lock();
             if (status == DEAD_OBJECT) {
                 android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
@@ -532,6 +546,10 @@
 {
     int afSamplingRate;
 
+    if (mIsTimed) {
+        return INVALID_OPERATION;
+    }
+
     if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
         return NO_INIT;
     }
@@ -545,6 +563,10 @@
 
 uint32_t AudioTrack::getSampleRate() const
 {
+    if (mIsTimed) {
+        return INVALID_OPERATION;
+    }
+
     AutoMutex lock(mLock);
     return mCblk->sampleRate;
 }
@@ -570,6 +592,10 @@
         return NO_ERROR;
     }
 
+    if (mIsTimed) {
+        return INVALID_OPERATION;
+    }
+
     if (loopStart >= loopEnd ||
         loopEnd - loopStart > cblk->frameCount ||
         cblk->server > loopStart) {
@@ -591,26 +617,6 @@
     return NO_ERROR;
 }
 
-status_t AudioTrack::getLoop(uint32_t *loopStart, uint32_t *loopEnd, int *loopCount) const
-{
-    AutoMutex lock(mLock);
-    if (loopStart != NULL) {
-        *loopStart = mCblk->loopStart;
-    }
-    if (loopEnd != NULL) {
-        *loopEnd = mCblk->loopEnd;
-    }
-    if (loopCount != NULL) {
-        if (mCblk->loopCount < 0) {
-            *loopCount = -1;
-        } else {
-            *loopCount = mCblk->loopCount;
-        }
-    }
-
-    return NO_ERROR;
-}
-
 status_t AudioTrack::setMarkerPosition(uint32_t marker)
 {
     if (mCbf == NULL) return INVALID_OPERATION;
@@ -653,6 +659,8 @@
 
 status_t AudioTrack::setPosition(uint32_t position)
 {
+    if (mIsTimed) return INVALID_OPERATION;
+
     AutoMutex lock(mLock);
 
     if (!stopped_l()) return INVALID_OPERATION;
@@ -784,7 +792,7 @@
                 }
             }
         } else {
-            // Ensure that buffer alignment matches channelcount
+            // Ensure that buffer alignment matches channelCount
             int channelCount = popcount(channelMask);
             if (((uint32_t)sharedBuffer->pointer() & (channelCount | 1)) != 0) {
                 ALOGE("Invalid buffer alignement: address %p, channelCount %d", sharedBuffer->pointer(), channelCount);
@@ -803,6 +811,7 @@
                                                       ((uint16_t)flags) << 16,
                                                       sharedBuffer,
                                                       output,
+                                                      mIsTimed,
                                                       &mSessionId,
                                                       &status);
 
@@ -895,7 +904,7 @@
                                 "user=%08x, server=%08x", this, cblk->user, cblk->server);
                         //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140)
                         cblk->lock.unlock();
-                        result = mAudioTrack->start();
+                        result = mAudioTrack->start(0); // callback thread hasn't changed
                         cblk->lock.lock();
                         if (result == DEAD_OBJECT) {
                             android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
@@ -927,7 +936,7 @@
     if (mActive && (cblk->flags & CBLK_DISABLED_MSK)) {
         android_atomic_and(~CBLK_DISABLED_ON, &cblk->flags);
         ALOGW("obtainBuffer() track %p disabled, restarting", this);
-        mAudioTrack->start();
+        mAudioTrack->start(0);  // callback thread hasn't changed
     }
 
     cblk->waitTimeMs = 0;
@@ -969,9 +978,11 @@
 {
 
     if (mSharedBuffer != 0) return INVALID_OPERATION;
+    if (mIsTimed) return INVALID_OPERATION;
 
     if (ssize_t(userSize) < 0) {
-        // sanity-check. user is most-likely passing an error code.
+        // Sanity-check: user is most likely passing an error code, and it would
+        // make the return value ambiguous (actualSize vs error).
         ALOGE("AudioTrack::write(buffer=%p, size=%u (%d)",
                 buffer, userSize, userSize);
         return BAD_VALUE;
@@ -994,8 +1005,6 @@
     do {
         audioBuffer.frameCount = userSize/frameSz;
 
-        // Calling obtainBuffer() with a negative wait count causes
-        // an (almost) infinite wait time.
         status_t err = obtainBuffer(&audioBuffer, -1);
         if (err < 0) {
             // out of buffers, return #bytes written
@@ -1026,6 +1035,59 @@
 
 // -------------------------------------------------------------------------
 
+TimedAudioTrack::TimedAudioTrack() {
+    mIsTimed = true;
+}
+
+status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
+{
+    status_t result = UNKNOWN_ERROR;
+
+    // If the track is not invalid already, try to allocate a buffer.  If the
+    // allocation fails, indicating that the server is dead, flag the track as
+    // invalid so we can attempt to restore it in just a bit.
+    if (!(mCblk->flags & CBLK_INVALID_MSK)) {
+        result = mAudioTrack->allocateTimedBuffer(size, buffer);
+        if (result == DEAD_OBJECT) {
+            android_atomic_or(CBLK_INVALID_ON, &mCblk->flags);
+        }
+    }
+
+    // If the track is invalid at this point, attempt to restore it and try
+    // the allocation one more time.
+    if (mCblk->flags & CBLK_INVALID_MSK) {
+        mCblk->lock.lock();
+        result = restoreTrack_l(mCblk, false);
+        mCblk->lock.unlock();
+
+        if (result == OK)
+            result = mAudioTrack->allocateTimedBuffer(size, buffer);
+    }
+
+    return result;
+}
+
+status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
+                                           int64_t pts)
+{
+    // restart track if it was disabled by audioflinger due to previous underrun
+    if (mActive && (mCblk->flags & CBLK_DISABLED_MSK)) {
+        android_atomic_and(~CBLK_DISABLED_ON, &mCblk->flags);
+        ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
+        mAudioTrack->start(0);
+    }
+
+    return mAudioTrack->queueTimedBuffer(buffer, pts);
+}
+
+status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
+                                                TargetTimeline target)
+{
+    return mAudioTrack->setMediaTimeTransform(xform, target);
+}
+
+// -------------------------------------------------------------------------
+
 bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
 {
     Buffer audioBuffer;
@@ -1085,6 +1147,9 @@
         frames = mRemainingFrames;
     }
 
+    // See description of waitCount parameter at declaration of obtainBuffer().
+    // The logic below prevents us from getting stuck in obtainBuffer(),
+    // unable to handle timed events (position, markers, loops).
     int32_t waitCount = -1;
     if (mUpdatePeriod || (!mMarkerReached && mMarkerPosition) || mLoopCount) {
         waitCount = 1;
@@ -1094,9 +1159,6 @@
 
         audioBuffer.frameCount = frames;
 
-        // Calling obtainBuffer() with a wait count of 1
-        // limits wait time to WAIT_PERIOD_MS. This prevents from being
-        // stuck here not being able to handle timed events (position, markers, loops).
         status_t err = obtainBuffer(&audioBuffer, waitCount);
         if (err < NO_ERROR) {
             if (err != TIMED_OUT) {
@@ -1218,7 +1280,7 @@
                 }
             }
             if (mActive) {
-                result = mAudioTrack->start();
+                result = mAudioTrack->start(0); // callback thread hasn't changed
                 ALOGW_IF(result != NO_ERROR, "restoreTrack_l() start() failed status %d", result);
             }
             if (fromStart && result == NO_ERROR) {
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 4507e5d..ebadbfa 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -90,6 +90,7 @@
                                 uint32_t flags,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_io_handle_t output,
+                                bool isTimed,
                                 int *sessionId,
                                 status_t *status)
     {
@@ -105,6 +106,7 @@
         data.writeInt32(flags);
         data.writeStrongBinder(sharedBuffer->asBinder());
         data.writeInt32((int32_t) output);
+        data.writeInt32(isTimed);
         int lSessionId = 0;
         if (sessionId != NULL) {
             lSessionId = *sessionId;
@@ -689,11 +691,12 @@
             uint32_t flags = data.readInt32();
             sp<IMemory> buffer = interface_cast<IMemory>(data.readStrongBinder());
             audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
+            bool isTimed = data.readInt32();
             int sessionId = data.readInt32();
             status_t status;
             sp<IAudioTrack> track = createTrack(pid,
                     (audio_stream_type_t) streamType, sampleRate, format,
-                    channelCount, bufferCount, flags, buffer, output, &sessionId, &status);
+                    channelCount, bufferCount, flags, buffer, output, isTimed, &sessionId, &status);
             reply->writeInt32(sessionId);
             reply->writeInt32(status);
             reply->writeStrongBinder(track->asBinder());
diff --git a/media/libmedia/IAudioRecord.cpp b/media/libmedia/IAudioRecord.cpp
index 8c7a960..6b473c9 100644
--- a/media/libmedia/IAudioRecord.cpp
+++ b/media/libmedia/IAudioRecord.cpp
@@ -42,10 +42,11 @@
     {
     }
     
-    virtual status_t start()
+    virtual status_t start(pid_t tid)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor());
+        data.writeInt32(tid);
         status_t status = remote()->transact(START, data, &reply);
         if (status == NO_ERROR) {
             status = reply.readInt32();
@@ -90,7 +91,7 @@
         } break;
         case START: {
             CHECK_INTERFACE(IAudioRecord, data, reply);
-            reply->writeInt32(start());
+            reply->writeInt32(start(data.readInt32()));
             return NO_ERROR;
         } break;
         case STOP: {
diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp
index e618619..28ebbbf 100644
--- a/media/libmedia/IAudioTrack.cpp
+++ b/media/libmedia/IAudioTrack.cpp
@@ -1,4 +1,4 @@
-/* //device/extlibs/pv/android/IAudioTrack.cpp
+/*
 **
 ** Copyright 2007, The Android Open Source Project
 **
@@ -35,7 +35,10 @@
     FLUSH,
     MUTE,
     PAUSE,
-    ATTACH_AUX_EFFECT
+    ATTACH_AUX_EFFECT,
+    ALLOCATE_TIMED_BUFFER,
+    QUEUE_TIMED_BUFFER,
+    SET_MEDIA_TIME_TRANSFORM,
 };
 
 class BpAudioTrack : public BpInterface<IAudioTrack>
@@ -58,10 +61,11 @@
         return cblk;
     }
 
-    virtual status_t start()
+    virtual status_t start(pid_t tid)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeInt32(tid);
         status_t status = remote()->transact(START, data, &reply);
         if (status == NO_ERROR) {
             status = reply.readInt32();
@@ -113,6 +117,52 @@
         }
         return status;
     }
+
+    virtual status_t allocateTimedBuffer(size_t size, sp<IMemory>* buffer) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeInt32(size);
+        status_t status = remote()->transact(ALLOCATE_TIMED_BUFFER,
+                                             data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+            if (status == NO_ERROR) {
+                *buffer = interface_cast<IMemory>(reply.readStrongBinder());
+            }
+        }
+        return status;
+    }
+
+    virtual status_t queueTimedBuffer(const sp<IMemory>& buffer,
+                                      int64_t pts) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeStrongBinder(buffer->asBinder());
+        data.writeInt64(pts);
+        status_t status = remote()->transact(QUEUE_TIMED_BUFFER,
+                                             data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+        }
+        return status;
+    }
+
+    virtual status_t setMediaTimeTransform(const LinearTransform& xform,
+                                           int target) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
+        data.writeInt64(xform.a_zero);
+        data.writeInt64(xform.b_zero);
+        data.writeInt32(xform.a_to_b_numer);
+        data.writeInt32(xform.a_to_b_denom);
+        data.writeInt32(target);
+        status_t status = remote()->transact(SET_MEDIA_TIME_TRANSFORM,
+                                             data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+        }
+        return status;
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioTrack, "android.media.IAudioTrack");
@@ -130,7 +180,7 @@
         } break;
         case START: {
             CHECK_INTERFACE(IAudioTrack, data, reply);
-            reply->writeInt32(start());
+            reply->writeInt32(start(data.readInt32()));
             return NO_ERROR;
         } break;
         case STOP: {
@@ -158,10 +208,38 @@
             reply->writeInt32(attachAuxEffect(data.readInt32()));
             return NO_ERROR;
         } break;
+        case ALLOCATE_TIMED_BUFFER: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            sp<IMemory> buffer;
+            status_t status = allocateTimedBuffer(data.readInt32(), &buffer);
+            reply->writeInt32(status);
+            if (status == NO_ERROR) {
+                reply->writeStrongBinder(buffer->asBinder());
+            }
+            return NO_ERROR;
+        } break;
+        case QUEUE_TIMED_BUFFER: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            sp<IMemory> buffer = interface_cast<IMemory>(
+                data.readStrongBinder());
+            int64_t pts = data.readInt64();
+            reply->writeInt32(queueTimedBuffer(buffer, pts));
+            return NO_ERROR;
+        } break;
+        case SET_MEDIA_TIME_TRANSFORM: {
+            CHECK_INTERFACE(IAudioTrack, data, reply);
+            LinearTransform xform;
+            xform.a_zero = data.readInt64();
+            xform.b_zero = data.readInt64();
+            xform.a_to_b_numer = data.readInt32();
+            xform.a_to_b_denom = data.readInt32();
+            int target = data.readInt32();
+            reply->writeInt32(setMediaTimeTransform(xform, target));
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
 }
 
 }; // namespace android
-
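A minimal sketch of how a client might exercise the three new IAudioTrack calls defined above; the track handle, payload, and size/timestamp variables are placeholders and not part of this patch, while the method signatures and LinearTransform fields are:

    // Hedged illustration only: "track" is an already-obtained sp<IAudioTrack>,
    // "timelineTarget" is one of the TimedAudioTrack::TargetTimeline values
    // (LOCAL_TIME or COMMON_TIME) accepted further down in TrackHandle.
    LinearTransform xform;
    xform.a_zero = 0;            // media-time origin
    xform.b_zero = 0;            // target-timeline origin
    xform.a_to_b_numer = 1;      // identity rate, purely illustrative
    xform.a_to_b_denom = 1;
    track->setMediaTimeTransform(xform, timelineTarget);

    sp<IMemory> buf;
    if (track->allocateTimedBuffer(payloadBytes, &buf) == NO_ERROR) {
        memcpy(buf->pointer(), payload, payloadBytes);   // copy PCM into shared memory
        track->queueTimedBuffer(buf, presentationTimeMediaUnits);
    }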
diff --git a/media/libmedia/IEffect.cpp b/media/libmedia/IEffect.cpp
index d469e28..5d40cc8 100644
--- a/media/libmedia/IEffect.cpp
+++ b/media/libmedia/IEffect.cpp
@@ -83,8 +83,15 @@
             size = *pReplySize;
         }
         data.writeInt32(size);
-        remote()->transact(COMMAND, data, &reply);
-        status_t status = reply.readInt32();
+
+        status_t status = remote()->transact(COMMAND, data, &reply);
+        if (status != NO_ERROR) {
+            if (pReplySize != NULL)
+                *pReplySize = 0;
+            return status;
+        }
+
+        status = reply.readInt32();
         size = reply.readInt32();
         if (size != 0 && pReplyData != NULL && pReplySize != NULL) {
             reply.read(pReplyData, size);
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 6cb10aa..54eb98a 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -791,7 +791,7 @@
 //        generators, instantiates output audio track.
 //
 //    Input:
-//        streamType:        Type of stream used for tone playback (enum AudioTrack::stream_type)
+//        streamType:        Type of stream used for tone playback
 //        volume:            volume applied to tone (0.0 to 1.0)
 //
 //    Output:
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index 13b64e9..70f8c0c 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -168,7 +168,7 @@
         uint32_t replySize = mCaptureSize;
         status = command(VISUALIZER_CMD_CAPTURE, 0, NULL, &replySize, waveform);
         ALOGV("getWaveForm() command returned %d", status);
-        if (replySize == 0) {
+        if ((status == NO_ERROR) && (replySize == 0)) {
             status = NOT_ENOUGH_DATA;
         }
     } else {
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index f1c47dd..250425b 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -1,4 +1,4 @@
-/* mediaplayer.cpp
+/*
 **
 ** Copyright 2006, The Android Open Source Project
 **
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index a3e2517..e521648 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -29,7 +29,8 @@
 	libstagefright_omx    			\
 	libstagefright_foundation       \
 	libgui                          \
-	libdl
+	libdl                           \
+	libaah_rtp
 
 LOCAL_STATIC_LIBRARIES := \
         libstagefright_nuplayer                 \
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 4df7f3d..764eddc 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -70,6 +70,11 @@
 
 #include <OMX.h>
 
+namespace android {
+sp<MediaPlayerBase> createAAH_TXPlayer();
+sp<MediaPlayerBase> createAAH_RXPlayer();
+}
+
 namespace {
 using android::media::Metadata;
 using android::status_t;
@@ -593,6 +598,14 @@
         return NU_PLAYER;
     }
 
+    if (!strncasecmp("aahRX://", url, 8)) {
+        return AAH_RX_PLAYER;
+    }
+
+    if (!strncasecmp("aahTX://", url, 8)) {
+        return AAH_TX_PLAYER;
+    }
+
     // use MidiFile for MIDI extensions
     int lenURL = strlen(url);
     for (int i = 0; i < NELEM(FILE_EXTS); ++i) {
@@ -629,6 +642,14 @@
             ALOGV("Create Test Player stub");
             p = new TestPlayerStub();
             break;
+        case AAH_RX_PLAYER:
+            ALOGV(" create A@H RX Player");
+            p = createAAH_RXPlayer();
+            break;
+        case AAH_TX_PLAYER:
+            ALOGV(" create A@H TX Player");
+            p = createAAH_TXPlayer();
+            break;
         default:
             ALOGE("Unknown player type: %d", playerType);
             return NULL;
@@ -1031,9 +1052,21 @@
 status_t MediaPlayerService::Client::setVolume(float leftVolume, float rightVolume)
 {
     ALOGV("[%d] setVolume(%f, %f)", mConnId, leftVolume, rightVolume);
-    // TODO: for hardware output, call player instead
-    Mutex::Autolock l(mLock);
-    if (mAudioOutput != 0) mAudioOutput->setVolume(leftVolume, rightVolume);
+
+    // for hardware output, call player instead
+    sp<MediaPlayerBase> p = getPlayer();
+    {
+      Mutex::Autolock l(mLock);
+      if (p != 0 && p->hardwareOutput()) {
+          MediaPlayerHWInterface* hwp =
+                  reinterpret_cast<MediaPlayerHWInterface*>(p.get());
+          return hwp->setVolume(leftVolume, rightVolume);
+      } else {
+          if (mAudioOutput != 0) mAudioOutput->setVolume(leftVolume, rightVolume);
+          return NO_ERROR;
+      }
+    }
+
     return NO_ERROR;
 }
 
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 483e5ab..3f9ba47 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -93,13 +93,7 @@
 
 # The following was shamelessly copied from external/webkit/Android.mk and
 # currently must follow the same logic to determine how webkit was built and
-# if it's safe to link against libchromium.net
-
-# V8 also requires an ARMv7 CPU, and since we must use jsc, we cannot
-# use the Chrome http stack either.
-ifneq ($(strip $(ARCH_ARM_HAVE_ARMV7A)),true)
-  USE_ALT_HTTP := true
-endif
+# if it's safe to link against libchromium_net
 
 # See if the user has specified a stack they want to use
 HTTP_STACK = $(HTTP)
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 157405a..22fa752 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -7,6 +7,7 @@
     AudioMixer.cpp.arm          \
     AudioResampler.cpp.arm      \
     AudioPolicyService.cpp      \
+    AudioBufferProvider.cpp     \
     ServiceUtilities.cpp
 #   AudioResamplerSinc.cpp.arm
 #   AudioResamplerCubic.cpp.arm
@@ -17,6 +18,7 @@
 
 LOCAL_SHARED_LIBRARIES := \
     libaudioutils \
+    libcommon_time_client \
     libcutils \
     libutils \
     libbinder \
diff --git a/services/audioflinger/AudioBufferProvider.cpp b/services/audioflinger/AudioBufferProvider.cpp
new file mode 100644
index 0000000..678fd58
--- /dev/null
+++ b/services/audioflinger/AudioBufferProvider.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#undef __STRICT_ANSI__
+#define __STDINT_LIMITS
+#define __STDC_LIMIT_MACROS
+#include <stdint.h>
+
+#include "AudioBufferProvider.h"
+
+namespace android {
+
+const int64_t AudioBufferProvider::kInvalidPTS = INT64_MAX;
+
+}; // namespace android
diff --git a/services/audioflinger/AudioBufferProvider.h b/services/audioflinger/AudioBufferProvider.h
index 81c5c39..62ad6bd 100644
--- a/services/audioflinger/AudioBufferProvider.h
+++ b/services/audioflinger/AudioBufferProvider.h
@@ -38,8 +38,15 @@
     };
 
     virtual ~AudioBufferProvider() {}
-    
-    virtual status_t getNextBuffer(Buffer* buffer) = 0;
+
+    // value representing an invalid presentation timestamp
+    static const int64_t kInvalidPTS;
+
+    // pts is the local time when the next sample yielded by getNextBuffer
+    // will be rendered.
+    // Pass kInvalidPTS if the PTS is unknown or not applicable.
+    virtual status_t getNextBuffer(Buffer* buffer, int64_t pts) = 0;
+
     virtual void releaseBuffer(Buffer* buffer) = 0;
 };
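Callers that have no presentation timestamp simply pass kInvalidPTS through the widened getNextBuffer() interface, as the mixer and record paths in AudioFlinger.cpp below do. A hedged fragment for illustration (the provider pointer and framesWanted count are placeholders):

    AudioBufferProvider::Buffer buf;
    buf.frameCount = framesWanted;      // number of frames the caller would like
    if (provider->getNextBuffer(&buf, AudioBufferProvider::kInvalidPTS) == NO_ERROR) {
        // ... consume buf.raw for buf.frameCount frames ...
        provider->releaseBuffer(&buf);  // hand the buffer back when done
    }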
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 5c964b2..2e2834c 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1,4 +1,4 @@
-/* //device/include/server/AudioFlinger/AudioFlinger.cpp
+/*
 **
 ** Copyright 2007, The Android Open Source Project
 **
@@ -61,6 +61,9 @@
 #include <powermanager/PowerManager.h>
 // #define DEBUG_CPU_USAGE 10  // log statistics every n wall clock seconds
 
+#include <common_time/cc_helper.h>
+#include <common_time/local_clock.h>
+
 // ----------------------------------------------------------------------------
 
 
@@ -69,7 +72,6 @@
 static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
 static const char kHardwareLockedString[] = "Hardware lock is taken\n";
 
-//static const nsecs_t kStandbyTimeInNsecs = seconds(3);
 static const float MAX_GAIN = 4096.0f;
 static const uint32_t MAX_GAIN_INT = 0x1000;
 
@@ -99,6 +101,7 @@
 // maximum divider applied to the active sleep time in the mixer thread loop
 static const uint32_t kMaxThreadSleepTimeShift = 2;
 
+nsecs_t AudioFlinger::mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
 
 // ----------------------------------------------------------------------------
 
@@ -147,11 +150,14 @@
 
 AudioFlinger::AudioFlinger()
     : BnAudioFlinger(),
-        mPrimaryHardwareDev(NULL),
-        mHardwareStatus(AUDIO_HW_IDLE), // see also onFirstRef()
-        mMasterVolume(1.0f), mMasterMute(false), mNextUniqueId(1),
-        mMode(AUDIO_MODE_INVALID),
-        mBtNrecIsOff(false)
+      mPrimaryHardwareDev(NULL),
+      mHardwareStatus(AUDIO_HW_IDLE), // see also onFirstRef()
+      mMasterVolume(1.0f),
+      mMasterVolumeSupportLvl(MVS_NONE),
+      mMasterMute(false),
+      mNextUniqueId(1),
+      mMode(AUDIO_MODE_INVALID),
+      mBtNrecIsOff(false)
 {
 }
 
@@ -162,6 +168,18 @@
     Mutex::Autolock _l(mLock);
 
     /* TODO: move all this work into an Init() function */
+    char val_str[PROPERTY_VALUE_MAX] = { 0 };
+    if (property_get("ro.audio.flinger_standbytime_ms", val_str, NULL) >= 0) {
+        uint32_t int_val;
+        if (1 == sscanf(val_str, "%u", &int_val)) {
+            mStandbyTimeInNsecs = milliseconds(int_val);
+            ALOGI("Using %u mSec as standby time.", int_val);
+        } else {
+            mStandbyTimeInNsecs = kDefaultStandbyTimeInNsecs;
+            ALOGI("Using default %u mSec as standby time.",
+                    (uint32_t)(mStandbyTimeInNsecs / 1000000));
+        }
+    }
 
     for (size_t i = 0; i < ARRAY_SIZE(audio_interfaces); i++) {
         const hw_module_t *mod;
@@ -193,6 +211,32 @@
 
     AutoMutex lock(mHardwareLock);
 
+    // Determine the level of master volume support the primary audio HAL has,
+    // and set the initial master volume at the same time.
+    float initialVolume = 1.0;
+    mMasterVolumeSupportLvl = MVS_NONE;
+    if (0 == mPrimaryHardwareDev->init_check(mPrimaryHardwareDev)) {
+        audio_hw_device_t *dev = mPrimaryHardwareDev;
+
+        mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
+        if ((NULL != dev->get_master_volume) &&
+            (NO_ERROR == dev->get_master_volume(dev, &initialVolume))) {
+            mMasterVolumeSupportLvl = MVS_FULL;
+        } else {
+            mMasterVolumeSupportLvl = MVS_SETONLY;
+            initialVolume = 1.0;
+        }
+
+        mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+        if ((NULL == dev->set_master_volume) ||
+            (NO_ERROR != dev->set_master_volume(dev, initialVolume))) {
+            mMasterVolumeSupportLvl = MVS_NONE;
+        }
+        mHardwareStatus = AUDIO_HW_INIT;
+    }
+
+    // Set the mode for each audio HAL, and try to set the initial volume (if
+    // supported) for all of the non-primary audio HALs.
     for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
         audio_hw_device_t *dev = mAudioHwDevs[i];
 
@@ -203,11 +247,22 @@
             mMode = AUDIO_MODE_NORMAL;  // assigned multiple times with same value
             mHardwareStatus = AUDIO_HW_SET_MODE;
             dev->set_mode(dev, mMode);
-            mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
-            dev->set_master_volume(dev, 1.0f);
-            mHardwareStatus = AUDIO_HW_IDLE;
+
+            if ((dev != mPrimaryHardwareDev) &&
+                (NULL != dev->set_master_volume)) {
+                mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+                dev->set_master_volume(dev, initialVolume);
+            }
+
+            mHardwareStatus = AUDIO_HW_INIT;
         }
     }
+
+    mMasterVolumeSW = (MVS_NONE == mMasterVolumeSupportLvl)
+                    ? initialVolume
+                    : 1.0;
+    mMasterVolume   = initialVolume;
+    mHardwareStatus = AUDIO_HW_IDLE;
 }
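In short, the probe above leaves mMasterVolumeSupportLvl at MVS_FULL when the primary HAL implements both get_master_volume and set_master_volume, at MVS_SETONLY when only the setter works, and at MVS_NONE when even the setter is missing or fails; in the MVS_NONE case the requested master volume is carried in mMasterVolumeSW and applied by the software mixer instead (see setMasterVolume below).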
 
 AudioFlinger::~AudioFlinger()
@@ -273,7 +328,10 @@
     String8 result;
     hardware_call_state hardwareStatus = mHardwareStatus;
 
-    snprintf(buffer, SIZE, "Hardware status: %d\n", hardwareStatus);
+    snprintf(buffer, SIZE, "Hardware status: %d\n"
+                           "Standby Time mSec: %u\n",
+                            hardwareStatus,
+                            (uint32_t)(mStandbyTimeInNsecs / 1000000));
     result.append(buffer);
     write(fd, result.string(), result.size());
     return NO_ERROR;
@@ -377,6 +435,7 @@
         uint32_t flags,
         const sp<IMemory>& sharedBuffer,
         audio_io_handle_t output,
+        bool isTimed,
         int *sessionId,
         status_t *status)
 {
@@ -435,7 +494,7 @@
         ALOGV("createTrack() lSessionId: %d", lSessionId);
 
         track = thread->createTrack_l(client, streamType, sampleRate, format,
-                channelMask, frameCount, sharedBuffer, lSessionId, &lStatus);
+                channelMask, frameCount, sharedBuffer, lSessionId, isTimed, &lStatus);
 
         // move effect chain to this output thread if an effect on same session was waiting
         // for a track to be created
@@ -528,20 +587,29 @@
         return PERMISSION_DENIED;
     }
 
+    float swmv = value;
+
     // when hw supports master volume, don't scale in sw mixer
-    { // scope for the lock
-        AutoMutex lock(mHardwareLock);
-        mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
-        if (mPrimaryHardwareDev->set_master_volume(mPrimaryHardwareDev, value) == NO_ERROR) {
-            value = 1.0f;
+    if (MVS_NONE != mMasterVolumeSupportLvl) {
+        for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+            AutoMutex lock(mHardwareLock);
+            audio_hw_device_t *dev = mAudioHwDevs[i];
+
+            mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
+            if (NULL != dev->set_master_volume) {
+                dev->set_master_volume(dev, value);
+            }
+            mHardwareStatus = AUDIO_HW_IDLE;
         }
-        mHardwareStatus = AUDIO_HW_IDLE;
+
+        swmv = 1.0;
     }
 
     Mutex::Autolock _l(mLock);
-    mMasterVolume = value;
-    for (uint32_t i = 0; i < mPlaybackThreads.size(); i++)
-       mPlaybackThreads.valueAt(i)->setMasterVolume(value);
+    mMasterVolume   = value;
+    mMasterVolumeSW = swmv;
+    for (size_t i = 0; i < mPlaybackThreads.size(); i++)
+       mPlaybackThreads.valueAt(i)->setMasterVolume(swmv);
 
     return NO_ERROR;
 }
@@ -572,7 +640,7 @@
     if (NO_ERROR == ret) {
         Mutex::Autolock _l(mLock);
         mMode = mode;
-        for (uint32_t i = 0; i < mPlaybackThreads.size(); i++)
+        for (size_t i = 0; i < mPlaybackThreads.size(); i++)
            mPlaybackThreads.valueAt(i)->setMode(mode);
     }
 
@@ -621,8 +689,9 @@
     }
 
     Mutex::Autolock _l(mLock);
+    // This is an optimization, so PlaybackThread doesn't have to look at the one from AudioFlinger
     mMasterMute = muted;
-    for (uint32_t i = 0; i < mPlaybackThreads.size(); i++)
+    for (size_t i = 0; i < mPlaybackThreads.size(); i++)
        mPlaybackThreads.valueAt(i)->setMasterMute(muted);
 
     return NO_ERROR;
@@ -634,12 +703,36 @@
     return masterVolume_l();
 }
 
+float AudioFlinger::masterVolumeSW() const
+{
+    Mutex::Autolock _l(mLock);
+    return masterVolumeSW_l();
+}
+
 bool AudioFlinger::masterMute() const
 {
     Mutex::Autolock _l(mLock);
     return masterMute_l();
 }
 
+float AudioFlinger::masterVolume_l() const
+{
+    if (MVS_FULL == mMasterVolumeSupportLvl) {
+        float ret_val;
+        AutoMutex lock(mHardwareLock);
+
+        mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
+        assert(NULL != mPrimaryHardwareDev);
+        assert(NULL != mPrimaryHardwareDev->get_master_volume);
+
+        mPrimaryHardwareDev->get_master_volume(mPrimaryHardwareDev, &ret_val);
+        mHardwareStatus = AUDIO_HW_IDLE;
+        return ret_val;
+    }
+
+    return mMasterVolume;
+}
+
 status_t AudioFlinger::setStreamVolume(audio_stream_type_t stream, float value,
         audio_io_handle_t output)
 {
@@ -665,7 +758,7 @@
     mStreamTypes[stream].volume = value;
 
     if (thread == NULL) {
-        for (uint32_t i = 0; i < mPlaybackThreads.size(); i++) {
+        for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
            mPlaybackThreads.valueAt(i)->setStreamVolume(stream, value);
         }
     } else {
@@ -711,7 +804,7 @@
         }
         volume = thread->streamVolume(stream);
     } else {
-        volume = mStreamTypes[stream].volume;
+        volume = streamVolume_l(stream);
     }
 
     return volume;
@@ -723,7 +816,8 @@
         return true;
     }
 
-    return mStreamTypes[stream].mute;
+    AutoMutex lock(mLock);
+    return streamMute_l(stream);
 }
 
 status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
@@ -812,7 +906,7 @@
         for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
             audio_hw_device_t *dev = mAudioHwDevs[i];
             char *s = dev->get_parameters(dev, keys.string());
-            out_s8 += String8(s);
+            out_s8 += String8(s ? s : "");
             free(s);
         }
         return out_s8;
@@ -928,7 +1022,7 @@
 {
     Mutex::Autolock _l(mLock);
 
-    int index = mNotificationClients.indexOfKey(pid);
+    ssize_t index = mNotificationClients.indexOfKey(pid);
     if (index >= 0) {
         sp <NotificationClient> client = mNotificationClients.valueFor(pid);
         ALOGV("removeNotificationClient() %p, pid %d", client.get(), pid);
@@ -936,9 +1030,9 @@
     }
 
     ALOGV("%d died, releasing its sessions", pid);
-    int num = mAudioSessionRefs.size();
+    size_t num = mAudioSessionRefs.size();
     bool removed = false;
-    for (int i = 0; i< num; i++) {
+    for (size_t i = 0; i< num; ) {
         AudioSessionRef *ref = mAudioSessionRefs.itemAt(i);
         ALOGV(" pid %d @ %d", ref->pid, i);
         if (ref->pid == pid) {
@@ -946,8 +1040,9 @@
             mAudioSessionRefs.removeAt(i);
             delete ref;
             removed = true;
-            i--;
             num--;
+        } else {
+            i++;
         }
     }
     if (removed) {
@@ -1238,7 +1333,7 @@
 
 void AudioFlinger::ThreadBase::checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain)
 {
-    int index = mSuspendedSessions.indexOfKey(chain->sessionId());
+    ssize_t index = mSuspendedSessions.indexOfKey(chain->sessionId());
     if (index < 0) {
         return;
     }
@@ -1264,7 +1359,7 @@
                                                          bool suspend,
                                                          int sessionId)
 {
-    int index = mSuspendedSessions.indexOfKey(sessionId);
+    ssize_t index = mSuspendedSessions.indexOfKey(sessionId);
 
     KeyedVector <int, sp<SuspendedSessionDesc> > sessionEffects;
 
@@ -1364,7 +1459,7 @@
         mOutput(output),
         // Assumes constructor is called by AudioFlinger with it's mLock held,
         // but it would be safer to explicitly pass initial masterVolume as parameter
-        mMasterVolume(audioFlinger->masterVolume_l()),
+        mMasterVolume(audioFlinger->masterVolumeSW_l()),
         mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false)
 {
     snprintf(mName, kNameLength, "AudioOut_%d", id);
@@ -1375,11 +1470,13 @@
     // There is no AUDIO_STREAM_MIN, and ++ operator does not compile
     for (audio_stream_type_t stream = (audio_stream_type_t) 0; stream < AUDIO_STREAM_CNT;
             stream = (audio_stream_type_t) (stream + 1)) {
-        mStreamTypes[stream].volume = mAudioFlinger->streamVolumeInternal(stream);
-        mStreamTypes[stream].mute = mAudioFlinger->streamMute(stream);
+        mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
+        mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
         // initialized by stream_type_t default constructor
         // mStreamTypes[stream].valid = true;
     }
+    // mStreamTypes[AUDIO_STREAM_CNT] exists but isn't explicitly initialized here,
+    // because mAudioFlinger doesn't have one to copy from
 }
 
 AudioFlinger::PlaybackThread::~PlaybackThread()
@@ -1480,6 +1577,7 @@
         int frameCount,
         const sp<IMemory>& sharedBuffer,
         int sessionId,
+        bool isTimed,
         status_t *status)
 {
     sp<Track> track;
@@ -1530,9 +1628,14 @@
             }
         }
 
-        track = new Track(this, client, streamType, sampleRate, format,
-                channelMask, frameCount, sharedBuffer, sessionId);
-        if (track->getCblk() == NULL || track->name() < 0) {
+        if (!isTimed) {
+            track = new Track(this, client, streamType, sampleRate, format,
+                    channelMask, frameCount, sharedBuffer, sessionId);
+        } else {
+            track = TimedTrack::create(this, client, streamType, sampleRate, format,
+                    channelMask, frameCount, sharedBuffer, sessionId);
+        }
+        if (track == NULL || track->getCblk() == NULL || track->name() < 0) {
             lStatus = NO_MEMORY;
             goto Exit;
         }
@@ -1573,40 +1676,36 @@
     }
 }
 
-status_t AudioFlinger::PlaybackThread::setMasterVolume(float value)
+void AudioFlinger::PlaybackThread::setMasterVolume(float value)
 {
+    Mutex::Autolock _l(mLock);
     mMasterVolume = value;
-    return NO_ERROR;
 }
 
-status_t AudioFlinger::PlaybackThread::setMasterMute(bool muted)
+void AudioFlinger::PlaybackThread::setMasterMute(bool muted)
 {
-    mMasterMute = muted;
-    return NO_ERROR;
+    Mutex::Autolock _l(mLock);
+    setMasterMute_l(muted);
 }
 
-status_t AudioFlinger::PlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
+void AudioFlinger::PlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
 {
+    Mutex::Autolock _l(mLock);
     mStreamTypes[stream].volume = value;
-    return NO_ERROR;
 }
 
-status_t AudioFlinger::PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
+void AudioFlinger::PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
 {
+    Mutex::Autolock _l(mLock);
     mStreamTypes[stream].mute = muted;
-    return NO_ERROR;
 }
 
 float AudioFlinger::PlaybackThread::streamVolume(audio_stream_type_t stream) const
 {
+    Mutex::Autolock _l(mLock);
     return mStreamTypes[stream].volume;
 }
 
-bool AudioFlinger::PlaybackThread::streamMute(audio_stream_type_t stream) const
-{
-    return mStreamTypes[stream].mute;
-}
-
 // addTrack_l() must be called with ThreadBase::mLock held
 status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
 {
@@ -1936,11 +2035,11 @@
                         property_get("ro.audio.silent", value, "0");
                         if (atoi(value)) {
                             ALOGD("Silence is golden");
-                            setMasterMute(true);
+                            setMasterMute_l(true);
                         }
                     }
 
-                    standbyTime = systemTime() + kStandbyTimeInNsecs;
+                    standbyTime = systemTime() + mStandbyTimeInNsecs;
                     sleepTime = idleSleepTime;
                     sleepTimeShift = 0;
                     continue;
@@ -1956,8 +2055,21 @@
         }
 
         if (CC_LIKELY(mixerStatus == MIXER_TRACKS_READY)) {
+            // obtain the presentation timestamp of the next output buffer
+            int64_t pts;
+            status_t status = INVALID_OPERATION;
+
+            if (NULL != mOutput->stream->get_next_write_timestamp) {
+                status = mOutput->stream->get_next_write_timestamp(
+                        mOutput->stream, &pts);
+            }
+
+            if (status != NO_ERROR) {
+                pts = AudioBufferProvider::kInvalidPTS;
+            }
+
             // mix buffers...
-            mAudioMixer->process();
+            mAudioMixer->process(pts);
             // increase sleep time progressively when application underrun condition clears.
             // Only increase sleep time if the mixer is ready for two consecutive times to avoid
             // that a steady state of alternating ready/not ready conditions keeps the sleep time
@@ -1966,7 +2078,7 @@
                 sleepTimeShift--;
             }
             sleepTime = 0;
-            standbyTime = systemTime() + kStandbyTimeInNsecs;
+            standbyTime = systemTime() + mStandbyTimeInNsecs;
             //TODO: delay standby when effects have a tail
         } else {
             // If no tracks are ready, sleep once for the duration of an output
@@ -2113,7 +2225,7 @@
                 ALOG_ASSERT(minFrames <= cblk->frameCount);
             }
         }
-        if ((cblk->framesReady() >= minFrames) && track->isReady() &&
+        if ((track->framesReady() >= minFrames) && track->isReady() &&
                 !track->isPaused() && !track->isTerminated())
         {
             //ALOGV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server, this);
@@ -2183,7 +2295,7 @@
 
                 uint16_t sendLevel = cblk->getSendLevel_U4_12();
                 // send level comes from shared memory and so may be corrupt
-                if (sendLevel >= MAX_GAIN_INT) {
+                if (sendLevel > MAX_GAIN_INT) {
                     ALOGV("Track send level out of range: %04X", sendLevel);
                     sendLevel = MAX_GAIN_INT;
                 }
@@ -2204,25 +2316,21 @@
             }
 
             // Convert volumes from 8.24 to 4.12 format
-            int16_t left, right, aux;
             // This additional clamping is needed in case chain->setVolume_l() overshot
-            uint32_t v_clamped = (vl + (1 << 11)) >> 12;
-            if (v_clamped > MAX_GAIN_INT) v_clamped = MAX_GAIN_INT;
-            left = int16_t(v_clamped);
-            v_clamped = (vr + (1 << 11)) >> 12;
-            if (v_clamped > MAX_GAIN_INT) v_clamped = MAX_GAIN_INT;
-            right = int16_t(v_clamped);
+            vl = (vl + (1 << 11)) >> 12;
+            if (vl > MAX_GAIN_INT) vl = MAX_GAIN_INT;
+            vr = (vr + (1 << 11)) >> 12;
+            if (vr > MAX_GAIN_INT) vr = MAX_GAIN_INT;
 
-            if (va > MAX_GAIN_INT) va = MAX_GAIN_INT;
-            aux = int16_t(va);
+            if (va > MAX_GAIN_INT) va = MAX_GAIN_INT;   // va is uint32_t, so no need to check for negative values
 
             // XXX: these things DON'T need to be done each time
             mAudioMixer->setBufferProvider(name, track);
             mAudioMixer->enable(name);
 
-            mAudioMixer->setParameter(name, param, AudioMixer::VOLUME0, (void *)left);
-            mAudioMixer->setParameter(name, param, AudioMixer::VOLUME1, (void *)right);
-            mAudioMixer->setParameter(name, param, AudioMixer::AUXLEVEL, (void *)aux);
+            mAudioMixer->setParameter(name, param, AudioMixer::VOLUME0, (void *)vl);
+            mAudioMixer->setParameter(name, param, AudioMixer::VOLUME1, (void *)vr);
+            mAudioMixer->setParameter(name, param, AudioMixer::AUXLEVEL, (void *)va);
             mAudioMixer->setParameter(
                 name,
                 AudioMixer::TRACK,
@@ -2576,7 +2684,6 @@
     sp<Track> trackToRemove;
     sp<Track> activeTrack;
     nsecs_t standbyTime = systemTime();
-    int8_t *curBuf;
     size_t mixBufferSize = mFrameCount*mFrameSize;
     uint32_t activeSleepTime = activeSleepTimeUs();
     uint32_t idleSleepTime = idleSleepTimeUs();
@@ -2637,7 +2744,7 @@
                         property_get("ro.audio.silent", value, "0");
                         if (atoi(value)) {
                             ALOGD("Silence is golden");
-                            setMasterMute(true);
+                            setMasterMute_l(true);
                         }
                     }
 
@@ -2780,11 +2887,12 @@
         if (CC_LIKELY(mixerStatus == MIXER_TRACKS_READY)) {
             AudioBufferProvider::Buffer buffer;
             size_t frameCount = mFrameCount;
-            curBuf = (int8_t *)mMixBuffer;
+            int8_t *curBuf = (int8_t *)mMixBuffer;
             // output audio to hardware
             while (frameCount) {
                 buffer.frameCount = frameCount;
-                activeTrack->getNextBuffer(&buffer);
+                activeTrack->getNextBuffer(&buffer,
+                                           AudioBufferProvider::kInvalidPTS);
                 if (CC_UNLIKELY(buffer.raw == NULL)) {
                     memset(curBuf, 0, frameCount * mFrameSize);
                     break;
@@ -3033,11 +3141,11 @@
                         property_get("ro.audio.silent", value, "0");
                         if (atoi(value)) {
                             ALOGD("Silence is golden");
-                            setMasterMute(true);
+                            setMasterMute_l(true);
                         }
                     }
 
-                    standbyTime = systemTime() + kStandbyTimeInNsecs;
+                    standbyTime = systemTime() + mStandbyTimeInNsecs;
                     sleepTime = idleSleepTime;
                     continue;
                 }
@@ -3054,7 +3162,7 @@
         if (CC_LIKELY(mixerStatus == MIXER_TRACKS_READY)) {
             // mix buffers...
             if (outputsReady(outputTracks)) {
-                mAudioMixer->process();
+                mAudioMixer->process(AudioBufferProvider::kInvalidPTS);
             } else {
                 memset(mMixBuffer, 0, mixBufferSize);
             }
@@ -3091,7 +3199,7 @@
             // enable changes in effect chain
             unlockEffectChains(effectChains);
 
-            standbyTime = systemTime() + kStandbyTimeInNsecs;
+            standbyTime = systemTime() + mStandbyTimeInNsecs;
             for (size_t i = 0; i < outputTracks.size(); i++) {
                 outputTracks[i]->write(mMixBuffer, writeFrames);
             }
@@ -3121,6 +3229,7 @@
 
 void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread)
 {
+    // FIXME explain this formula
     int frameCount = (3 * mFrameCount * mSampleRate) / thread->sampleRate();
     OutputTrack *outputTrack = new OutputTrack((ThreadBase *)thread,
                                             this,
@@ -3140,7 +3249,7 @@
 {
     Mutex::Autolock _l(mLock);
     for (size_t i = 0; i < mOutputTracks.size(); i++) {
-        if (mOutputTracks[i]->thread() == (ThreadBase *)thread) {
+        if (mOutputTracks[i]->thread() == thread) {
             mOutputTracks[i]->destroy();
             mOutputTracks.removeAt(i);
             updateWaitTime();
@@ -3392,7 +3501,7 @@
 {
     // NOTE: destroyTrack_l() can remove a strong reference to this Track
     // by removing it from mTracks vector, so there is a risk that this Tracks's
-    // desctructor is called. As the destructor needs to lock mLock,
+    // destructor is called. As the destructor needs to lock mLock,
     // we must acquire a strong reference on this Track before locking mLock
     // here so that the destructor is called only when exiting this function.
     // On the other hand, as long as Track::destroy() is only called by
@@ -3441,7 +3550,8 @@
             (int)mAuxBuffer);
 }
 
-status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
+    AudioBufferProvider::Buffer* buffer, int64_t pts)
 {
      audio_track_cblk_t* cblk = this->cblk();
      uint32_t framesReady;
@@ -3482,10 +3592,14 @@
      return NOT_ENOUGH_DATA;
 }
 
+uint32_t AudioFlinger::PlaybackThread::Track::framesReady() const {
+    return mCblk->framesReady();
+}
+
 bool AudioFlinger::PlaybackThread::Track::isReady() const {
     if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) return true;
 
-    if (mCblk->framesReady() >= mCblk->frameCount ||
+    if (framesReady() >= mCblk->frameCount ||
             (mCblk->flags & CBLK_FORCEREADY_MSK)) {
         mFillingUpStatus = FS_FILLED;
         android_atomic_and(~CBLK_FORCEREADY_MSK, &mCblk->flags);
@@ -3494,11 +3608,11 @@
     return false;
 }
 
-status_t AudioFlinger::PlaybackThread::Track::start()
+status_t AudioFlinger::PlaybackThread::Track::start(pid_t tid)
 {
     status_t status = NO_ERROR;
-    ALOGV("start(%d), calling pid %d session %d",
-            mName, IPCThreadState::self()->getCallingPid(), mSessionId);
+    ALOGV("start(%d), calling pid %d session %d tid %d",
+            mName, IPCThreadState::self()->getCallingPid(), mSessionId, tid);
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
@@ -3642,6 +3756,406 @@
     mAuxBuffer = buffer;
 }
 
+// timed audio tracks
+
+sp<AudioFlinger::PlaybackThread::TimedTrack>
+AudioFlinger::PlaybackThread::TimedTrack::create(
+            const wp<ThreadBase>& thread,
+            const sp<Client>& client,
+            audio_stream_type_t streamType,
+            uint32_t sampleRate,
+            audio_format_t format,
+            uint32_t channelMask,
+            int frameCount,
+            const sp<IMemory>& sharedBuffer,
+            int sessionId) {
+    if (!client->reserveTimedTrack())
+        return NULL;
+
+    sp<TimedTrack> track = new TimedTrack(
+        thread, client, streamType, sampleRate, format, channelMask, frameCount,
+        sharedBuffer, sessionId);
+
+    if (track == NULL) {
+        client->releaseTimedTrack();
+        return NULL;
+    }
+
+    return track;
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
+            const wp<ThreadBase>& thread,
+            const sp<Client>& client,
+            audio_stream_type_t streamType,
+            uint32_t sampleRate,
+            audio_format_t format,
+            uint32_t channelMask,
+            int frameCount,
+            const sp<IMemory>& sharedBuffer,
+            int sessionId)
+    : Track(thread, client, streamType, sampleRate, format, channelMask,
+            frameCount, sharedBuffer, sessionId),
+      mTimedSilenceBuffer(NULL),
+      mTimedSilenceBufferSize(0),
+      mTimedAudioOutputOnTime(false),
+      mMediaTimeTransformValid(false)
+{
+    LocalClock lc;
+    mLocalTimeFreq = lc.getLocalFreq();
+
+    mLocalTimeToSampleTransform.a_zero = 0;
+    mLocalTimeToSampleTransform.b_zero = 0;
+    mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
+    mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
+    LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
+                            &mLocalTimeToSampleTransform.a_to_b_denom);
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
+    mClient->releaseTimedTrack();
+    delete [] mTimedSilenceBuffer;
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
+    size_t size, sp<IMemory>* buffer) {
+
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    trimTimedBufferQueue_l();
+
+    // lazily initialize the shared memory heap for timed buffers
+    if (mTimedMemoryDealer == NULL) {
+        const int kTimedBufferHeapSize = 512 << 10;
+
+        mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
+                                              "AudioFlingerTimed");
+        if (mTimedMemoryDealer == NULL)
+            return NO_MEMORY;
+    }
+
+    sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
+    if (newBuffer == NULL) {
+        newBuffer = mTimedMemoryDealer->allocate(size);
+        if (newBuffer == NULL)
+            return NO_MEMORY;
+    }
+
+    *buffer = newBuffer;
+    return NO_ERROR;
+}
+
+// caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
+    int64_t mediaTimeNow;
+    {
+        Mutex::Autolock mttLock(mMediaTimeTransformLock);
+        if (!mMediaTimeTransformValid)
+            return;
+
+        int64_t targetTimeNow;
+        status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
+            ? mCCHelper.getCommonTime(&targetTimeNow)
+            : mCCHelper.getLocalTime(&targetTimeNow);
+
+        if (OK != res)
+            return;
+
+        if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
+                                                    &mediaTimeNow)) {
+            return;
+        }
+    }
+
+    size_t trimIndex;
+    for (trimIndex = 0; trimIndex < mTimedBufferQueue.size(); trimIndex++) {
+        if (mTimedBufferQueue[trimIndex].pts() > mediaTimeNow)
+            break;
+    }
+
+    if (trimIndex) {
+        mTimedBufferQueue.removeItemsAt(0, trimIndex);
+    }
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
+    const sp<IMemory>& buffer, int64_t pts) {
+
+    {
+        Mutex::Autolock mttLock(mMediaTimeTransformLock);
+        if (!mMediaTimeTransformValid)
+            return INVALID_OPERATION;
+    }
+
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    mTimedBufferQueue.add(TimedBuffer(buffer, pts));
+
+    return NO_ERROR;
+}
+
+status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
+    const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
+
+    ALOGV("%s az=%lld bz=%lld n=%d d=%u tgt=%d", __PRETTY_FUNCTION__,
+         xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
+         target);
+
+    if (!(target == TimedAudioTrack::LOCAL_TIME ||
+          target == TimedAudioTrack::COMMON_TIME)) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mMediaTimeTransformLock);
+    mMediaTimeTransform = xform;
+    mMediaTimeTransformTarget = target;
+    mMediaTimeTransformValid = true;
+
+    return NO_ERROR;
+}
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+// implementation of getNextBuffer for tracks whose buffers have timestamps
+status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
+    AudioBufferProvider::Buffer* buffer, int64_t pts)
+{
+    if (pts == AudioBufferProvider::kInvalidPTS) {
+        buffer->raw = 0;
+        buffer->frameCount = 0;
+        return INVALID_OPERATION;
+    }
+
+    // get ahold of the output stream that these samples will be written to
+    sp<ThreadBase> thread = mThread.promote();
+    if (thread == NULL) {
+        buffer->raw = 0;
+        buffer->frameCount = 0;
+        return INVALID_OPERATION;
+    }
+    PlaybackThread* playbackThread = static_cast<PlaybackThread*>(thread.get());
+
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    while (true) {
+
+        // if we have no timed buffers, then fail
+        if (mTimedBufferQueue.isEmpty()) {
+            buffer->raw = 0;
+            buffer->frameCount = 0;
+            return NOT_ENOUGH_DATA;
+        }
+
+        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
+
+        // calculate the PTS of the head of the timed buffer queue expressed in
+        // local time
+        int64_t headLocalPTS;
+        {
+            Mutex::Autolock mttLock(mMediaTimeTransformLock);
+
+            assert(mMediaTimeTransformValid);
+
+            if (mMediaTimeTransform.a_to_b_denom == 0) {
+                // the transform represents a pause, so yield silence
+                timedYieldSilence(buffer->frameCount, buffer);
+                return NO_ERROR;
+            }
+
+            int64_t transformedPTS;
+            if (!mMediaTimeTransform.doForwardTransform(head.pts(),
+                                                        &transformedPTS)) {
+                // the transform failed.  This shouldn't happen, but if it does
+                // then just drop this buffer
+                ALOGW("timedGetNextBuffer transform failed");
+                buffer->raw = 0;
+                buffer->frameCount = 0;
+                mTimedBufferQueue.removeAt(0);
+                return NO_ERROR;
+            }
+
+            if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
+                if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
+                                                          &headLocalPTS)) {
+                    buffer->raw = 0;
+                    buffer->frameCount = 0;
+                    return INVALID_OPERATION;
+                }
+            } else {
+                headLocalPTS = transformedPTS;
+            }
+        }
+
+        // adjust the head buffer's PTS to reflect the portion of the head buffer
+        // that has already been consumed
+        int64_t effectivePTS = headLocalPTS +
+                ((head.position() / mCblk->frameSize) * mLocalTimeFreq / sampleRate());
+
+        // Calculate the delta in samples between the head of the input buffer
+        // queue and the start of the next output buffer that will be written.
+        // If the transformation fails because of over or underflow, it means
+        // that the sample's position in the output stream is so far out of
+        // whack that it should just be dropped.
+        int64_t sampleDelta;
+        if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
+            ALOGV("*** head buffer is too far from PTS: dropped buffer");
+            mTimedBufferQueue.removeAt(0);
+            continue;
+        }
+        if (!mLocalTimeToSampleTransform.doForwardTransform(
+                (effectivePTS - pts) << 32, &sampleDelta)) {
+            ALOGV("*** too late during sample rate transform: dropped buffer");
+            mTimedBufferQueue.removeAt(0);
+            continue;
+        }
+
+        ALOGV("*** %s head.pts=%lld head.pos=%d pts=%lld sampleDelta=[%d.%08x]",
+             __PRETTY_FUNCTION__, head.pts(), head.position(), pts,
+             static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1) + (sampleDelta >> 32)),
+             static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
+
+        // if the delta between the ideal placement for the next input sample and
+        // the current output position is within this threshold, then we will
+        // concatenate the next input samples to the previous output
+        const int64_t kSampleContinuityThreshold =
+                (static_cast<int64_t>(sampleRate()) << 32) / 10;
+
+        // if this is the first buffer of audio that we're emitting from this track
+        // then it should be almost exactly on time.
+        const int64_t kSampleStartupThreshold = 1LL << 32;
+
+        if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
+            (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
+            // the next input is close enough to being on time, so concatenate it
+            // with the last output
+            timedYieldSamples(buffer);
+
+            ALOGV("*** on time: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
+            return NO_ERROR;
+        } else if (sampleDelta > 0) {
+            // the gap between the current output position and the proper start of
+            // the next input sample is too big, so fill it with silence
+            uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
+
+            timedYieldSilence(framesUntilNextInput, buffer);
+            ALOGV("*** silence: frameCount=%u", buffer->frameCount);
+            return NO_ERROR;
+        } else {
+            // the next input sample is late
+            uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
+            size_t onTimeSamplePosition =
+                    head.position() + lateFrames * mCblk->frameSize;
+
+            if (onTimeSamplePosition > head.buffer()->size()) {
+                // all the remaining samples in the head are too late, so
+                // drop it and move on
+                ALOGV("*** too late: dropped buffer");
+                mTimedBufferQueue.removeAt(0);
+                continue;
+            } else {
+                // skip over the late samples
+                head.setPosition(onTimeSamplePosition);
+
+                // yield the available samples
+                timedYieldSamples(buffer);
+
+                ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
+                return NO_ERROR;
+            }
+        }
+    }
+}
+
+// Yield samples from the timed buffer queue head up to the given output
+// buffer's capacity.
+//
+// Caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples(
+    AudioBufferProvider::Buffer* buffer) {
+
+    const TimedBuffer& head = mTimedBufferQueue[0];
+
+    buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
+                   head.position());
+
+    uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
+                                 mCblk->frameSize);
+    size_t framesRequested = buffer->frameCount;
+    buffer->frameCount = min(framesLeftInHead, framesRequested);
+
+    mTimedAudioOutputOnTime = true;
+}
+
+// Yield samples of silence up to the given output buffer's capacity
+//
+// Caller must hold mTimedBufferQueueLock
+void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence(
+    uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
+
+    // lazily allocate a buffer filled with silence
+    if (mTimedSilenceBufferSize < numFrames * mCblk->frameSize) {
+        delete [] mTimedSilenceBuffer;
+        mTimedSilenceBufferSize = numFrames * mCblk->frameSize;
+        mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
+        memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
+    }
+
+    buffer->raw = mTimedSilenceBuffer;
+    size_t framesRequested = buffer->frameCount;
+    buffer->frameCount = min(numFrames, framesRequested);
+
+    mTimedAudioOutputOnTime = false;
+}
+
+void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
+    AudioBufferProvider::Buffer* buffer) {
+
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    // If the buffer which was just released is part of the buffer at the head
+    // of the queue, be sure to update the amount of the buffer which has been
+    // consumed.  If the buffer being returned is not part of the head of the
+    // queue, it's either because the buffer is part of the silence buffer, or
+    // because the head of the timed queue was trimmed after the mixer called
+    // getNextBuffer but before the mixer called releaseBuffer.
+    if ((buffer->raw != mTimedSilenceBuffer) && mTimedBufferQueue.size()) {
+        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
+
+        void* start = head.buffer()->pointer();
+        void* end   = head.buffer()->pointer() + head.buffer()->size();
+
+        if ((buffer->raw >= start) && (buffer->raw <= end)) {
+            head.setPosition(head.position() +
+                    (buffer->frameCount * mCblk->frameSize));
+            if (static_cast<size_t>(head.position()) >= head.buffer()->size()) {
+                mTimedBufferQueue.removeAt(0);
+            }
+        }
+    }
+
+    buffer->raw = 0;
+    buffer->frameCount = 0;
+}
+
+uint32_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
+    Mutex::Autolock _l(mTimedBufferQueueLock);
+
+    uint32_t frames = 0;
+    for (size_t i = 0; i < mTimedBufferQueue.size(); i++) {
+        const TimedBuffer& tb = mTimedBufferQueue[i];
+        frames += (tb.buffer()->size() - tb.position())  / mCblk->frameSize;
+    }
+
+    return frames;
+}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
+        : mPTS(0), mPosition(0) {}
+
+AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
+    const sp<IMemory>& buffer, int64_t pts)
+        : mBuffer(buffer), mPTS(pts), mPosition(0) {}
+
 // ----------------------------------------------------------------------------
 
 // RecordTrack constructor must be called with AudioFlinger::mLock held
@@ -3678,7 +4192,7 @@
     }
 }
 
-status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
 {
     audio_track_cblk_t* cblk = this->cblk();
     uint32_t framesAvail;
@@ -3717,12 +4231,12 @@
     return NOT_ENOUGH_DATA;
 }
 
-status_t AudioFlinger::RecordThread::RecordTrack::start()
+status_t AudioFlinger::RecordThread::RecordTrack::start(pid_t tid)
 {
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         RecordThread *recordThread = (RecordThread *)thread.get();
-        return recordThread->start(this);
+        return recordThread->start(this, tid);
     } else {
         return BAD_VALUE;
     }
@@ -3789,9 +4303,9 @@
     clearBufferQueue();
 }
 
-status_t AudioFlinger::PlaybackThread::OutputTrack::start()
+status_t AudioFlinger::PlaybackThread::OutputTrack::start(pid_t tid)
 {
-    status_t status = Track::start();
+    status_t status = Track::start(tid);
     if (status != NO_ERROR) {
         return status;
     }
@@ -3821,7 +4335,7 @@
     uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
 
     if (!mActive && frames != 0) {
-        start();
+        start(0);
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
             MixerThread *mixerThread = (MixerThread *)thread.get();
@@ -3983,10 +4497,9 @@
 void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
 {
     size_t size = mBufferQueue.size();
-    Buffer *pBuffer;
 
     for (size_t i = 0; i < size; i++) {
-        pBuffer = mBufferQueue.itemAt(i);
+        Buffer *pBuffer = mBufferQueue.itemAt(i);
         delete [] pBuffer->mBuffer;
         delete pBuffer;
     }
@@ -3998,8 +4511,10 @@
 AudioFlinger::Client::Client(const sp<AudioFlinger>& audioFlinger, pid_t pid)
     :   RefBase(),
         mAudioFlinger(audioFlinger),
+        // FIXME should be a "k" constant not hard-coded, in .h or ro. property, see 4 lines below
         mMemoryDealer(new MemoryDealer(1024*1024, "AudioFlinger::Client")),
-        mPid(pid)
+        mPid(pid),
+        mTimedTrackCount(0)
 {
     // 1 MB of address space is good for 32 tracks, 8 buffers each, 4 KB/buffer
 }
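The sizing comment above checks out: 32 tracks × 8 buffers × 4 KB per buffer = 1024 KB = 1 MB, matching the MemoryDealer size passed in the constructor.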
@@ -4015,6 +4530,31 @@
     return mMemoryDealer;
 }
 
+// Reserve one of the limited slots for a timed audio track associated
+// with this client
+bool AudioFlinger::Client::reserveTimedTrack()
+{
+    const int kMaxTimedTracksPerClient = 4;
+
+    Mutex::Autolock _l(mTimedTrackLock);
+
+    if (mTimedTrackCount >= kMaxTimedTracksPerClient) {
+        ALOGW("can not create timed track - pid %d has exceeded the limit",
+             mPid);
+        return false;
+    }
+
+    mTimedTrackCount++;
+    return true;
+}
+
+// Release a slot for a timed audio track
+void AudioFlinger::Client::releaseTimedTrack()
+{
+    Mutex::Autolock _l(mTimedTrackLock);
+    mTimedTrackCount--;
+}
+
 // ----------------------------------------------------------------------------
 
 AudioFlinger::NotificationClient::NotificationClient(const sp<AudioFlinger>& audioFlinger,
@@ -4031,9 +4571,7 @@
 void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who)
 {
     sp<NotificationClient> keep(this);
-    {
-        mAudioFlinger->removeNotificationClient(mPid);
-    }
+    mAudioFlinger->removeNotificationClient(mPid);
 }
 
 // ----------------------------------------------------------------------------
@@ -4056,8 +4594,8 @@
     return mTrack->getCblk();
 }
 
-status_t AudioFlinger::TrackHandle::start() {
-    return mTrack->start();
+status_t AudioFlinger::TrackHandle::start(pid_t tid) {
+    return mTrack->start(tid);
 }
 
 void AudioFlinger::TrackHandle::stop() {
@@ -4081,6 +4619,38 @@
     return mTrack->attachAuxEffect(EffectId);
 }
 
+status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
+                                                         sp<IMemory>* buffer) {
+    if (!mTrack->isTimedTrack())
+        return INVALID_OPERATION;
+
+    PlaybackThread::TimedTrack* tt =
+            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+    return tt->allocateTimedBuffer(size, buffer);
+}
+
+status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
+                                                     int64_t pts) {
+    if (!mTrack->isTimedTrack())
+        return INVALID_OPERATION;
+
+    PlaybackThread::TimedTrack* tt =
+            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+    return tt->queueTimedBuffer(buffer, pts);
+}
+
+status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
+    const LinearTransform& xform, int target) {
+
+    if (!mTrack->isTimedTrack())
+        return INVALID_OPERATION;
+
+    PlaybackThread::TimedTrack* tt =
+            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
+    return tt->setMediaTimeTransform(
+        xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
+}
+
 status_t AudioFlinger::TrackHandle::onTransact(
     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
 {
@@ -4179,9 +4749,9 @@
     return mRecordTrack->getCblk();
 }
 
-status_t AudioFlinger::RecordHandle::start() {
+status_t AudioFlinger::RecordHandle::start(pid_t tid) {
     ALOGV("RecordHandle::start()");
-    return mRecordTrack->start();
+    return mRecordTrack->start(tid);
 }
 
 void AudioFlinger::RecordHandle::stop() {
@@ -4310,7 +4880,8 @@
             }
 
             buffer.frameCount = mFrameCount;
-            if (CC_LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
+            if (CC_LIKELY(mActiveTrack->getNextBuffer(
+                    &buffer, AudioBufferProvider::kInvalidPTS) == NO_ERROR)) {
                 size_t framesOut = buffer.frameCount;
                 if (mResampler == NULL) {
                     // no resampling
@@ -4473,9 +5044,9 @@
     return track;
 }
 
-status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack)
+status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack, pid_t tid)
 {
-    ALOGV("RecordThread::start");
+    ALOGV("RecordThread::start tid=%d", tid);
     sp <ThreadBase> strongMe = this;
     status_t status = NO_ERROR;
     {
@@ -4588,7 +5159,7 @@
     return NO_ERROR;
 }
 
-status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer)
+status_t AudioFlinger::RecordThread::getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts)
 {
     size_t framesReq = buffer->frameCount;
     size_t framesReady = mFrameCount - mRsmpInIndex;
@@ -4659,7 +5230,7 @@
         }
         if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
             // do not accept frame count changes if tracks are open as the track buffer
-            // size depends on frame count and correct behavior would not be garantied
+            // size depends on frame count and correct behavior would not be guaranteed
             // if frame count is changed after track creation
             if (mActiveTrack != 0) {
                 status = INVALID_OPERATION;
@@ -4976,8 +5547,7 @@
                 }
             }
         }
-        void *param2 = NULL;
-        audioConfigChanged_l(AudioSystem::OUTPUT_CLOSED, output, param2);
+        audioConfigChanged_l(AudioSystem::OUTPUT_CLOSED, output, NULL);
         mPlaybackThreads.removeItem(output);
     }
     thread->exit();
@@ -5121,8 +5691,7 @@
         }
 
         ALOGV("closeInput() %d", input);
-        void *param2 = NULL;
-        audioConfigChanged_l(AudioSystem::INPUT_CLOSED, input, param2);
+        audioConfigChanged_l(AudioSystem::INPUT_CLOSED, input, NULL);
         mRecordThreads.removeItem(input);
     }
     thread->exit();
@@ -5154,8 +5723,7 @@
 
     for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
         PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
-        if (thread != dstThread &&
-            thread->type() != ThreadBase::DIRECT) {
+        if (thread != dstThread && thread->type() != ThreadBase::DIRECT) {
             MixerThread *srcThread = (MixerThread *)thread;
             srcThread->setStreamValid(stream, false);
             srcThread->invalidateTracks(stream);
@@ -5176,8 +5744,8 @@
     Mutex::Autolock _l(mLock);
     pid_t caller = IPCThreadState::self()->getCallingPid();
     ALOGV("acquiring %d from %d", audioSession, caller);
-    int num = mAudioSessionRefs.size();
-    for (int i = 0; i< num; i++) {
+    size_t num = mAudioSessionRefs.size();
+    for (size_t i = 0; i < num; i++) {
         AudioSessionRef *ref = mAudioSessionRefs.editItemAt(i);
         if (ref->sessionid == audioSession && ref->pid == caller) {
             ref->cnt++;
@@ -5194,8 +5762,8 @@
     Mutex::Autolock _l(mLock);
     pid_t caller = IPCThreadState::self()->getCallingPid();
     ALOGV("releasing %d from %d", audioSession, caller);
-    int num = mAudioSessionRefs.size();
-    for (int i = 0; i< num; i++) {
+    size_t num = mAudioSessionRefs.size();
+    for (size_t i = 0; i < num; i++) {
         AudioSessionRef *ref = mAudioSessionRefs.itemAt(i);
         if (ref->sessionid == audioSession && ref->pid == caller) {
             ref->cnt--;
@@ -5278,33 +5846,20 @@
 // checkPlaybackThread_l() must be called with AudioFlinger::mLock held
 AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(audio_io_handle_t output) const
 {
-    PlaybackThread *thread = NULL;
-    if (mPlaybackThreads.indexOfKey(output) >= 0) {
-        thread = (PlaybackThread *)mPlaybackThreads.valueFor(output).get();
-    }
-    return thread;
+    return mPlaybackThreads.valueFor(output).get();
 }
 
 // checkMixerThread_l() must be called with AudioFlinger::mLock held
 AudioFlinger::MixerThread *AudioFlinger::checkMixerThread_l(audio_io_handle_t output) const
 {
     PlaybackThread *thread = checkPlaybackThread_l(output);
-    if (thread != NULL) {
-        if (thread->type() == ThreadBase::DIRECT) {
-            thread = NULL;
-        }
-    }
-    return (MixerThread *)thread;
+    return thread != NULL && thread->type() != ThreadBase::DIRECT ? (MixerThread *) thread : NULL;
 }
 
 // checkRecordThread_l() must be called with AudioFlinger::mLock held
 AudioFlinger::RecordThread *AudioFlinger::checkRecordThread_l(audio_io_handle_t input) const
 {
-    RecordThread *thread = NULL;
-    if (mRecordThreads.indexOfKey(input) >= 0) {
-        thread = (RecordThread *)mRecordThreads.valueFor(input).get();
-    }
-    return thread;
+    return mRecordThreads.valueFor(input).get();
 }
 
 uint32_t AudioFlinger::nextUniqueId()
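The three check*Thread_l() helpers can drop the explicit indexOfKey() test because DefaultKeyedVector is constructed with a default value, here an empty sp<>, and valueFor() returns that default when the key is absent. Callers therefore still receive NULL for an unknown handle; a small illustration of the behaviour being relied on:

    // behaviour relied on above (sketch)
    DefaultKeyedVector< audio_io_handle_t, sp<PlaybackThread> > threads;
    sp<PlaybackThread> t = threads.valueFor(42);   // key not present
    // t == 0 here, the same result the removed indexOfKey() branch produced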
@@ -5667,10 +6222,7 @@
         goto Exit;
     }
     // Only Pre processor effects are allowed on input threads and only on input threads
-    if ((mType == RECORD &&
-            (desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC) ||
-            (mType != RECORD &&
-                    (desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
+    if ((mType == RECORD) != ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
         ALOGW("createEffect_l() effect %s (flags %08x) created on wrong thread type %d",
                 desc->name, desc->flags, mType);
         lStatus = BAD_VALUE;
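The rewritten guard is an exclusive-or of two booleans: a pre-processing effect is accepted only on a RECORD thread, and a RECORD thread accepts only pre-processing effects. Spelling out the equivalence with the original two-clause test:

    bool onRecordThread  = (mType == RECORD);
    bool isPreProcessing = ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC);
    // old: (onRecordThread && !isPreProcessing) || (!onRecordThread && isPreProcessing)
    // new:  onRecordThread != isPreProcessing
    // both reject exactly the two mismatched combinations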
@@ -6107,7 +6659,6 @@
     status_t status;
 
     Mutex::Autolock _l(mLock);
-    // First handle in mHandles has highest priority and controls the effect module
     int priority = handle->priority();
     size_t size = mHandles.size();
     sp<EffectHandle> h;
@@ -7177,12 +7728,12 @@
         // Reject insertion if an effect with EFFECT_FLAG_INSERT_EXCLUSIVE is
         // already present
 
-        int size = (int)mEffects.size();
-        int idx_insert = size;
-        int idx_insert_first = -1;
-        int idx_insert_last = -1;
+        size_t size = mEffects.size();
+        size_t idx_insert = size;
+        ssize_t idx_insert_first = -1;
+        ssize_t idx_insert_last = -1;
 
-        for (int i = 0; i < size; i++) {
+        for (size_t i = 0; i < size; i++) {
             effect_descriptor_t d = mEffects[i]->desc();
             uint32_t iMode = d.flags & EFFECT_FLAG_TYPE_MASK;
             uint32_t iPref = d.flags & EFFECT_FLAG_INSERT_MASK;
@@ -7251,11 +7802,10 @@
 size_t AudioFlinger::EffectChain::removeEffect_l(const sp<EffectModule>& effect)
 {
     Mutex::Autolock _l(mLock);
-    int size = (int)mEffects.size();
-    int i;
+    size_t size = mEffects.size();
     uint32_t type = effect->desc().flags & EFFECT_FLAG_TYPE_MASK;
 
-    for (i = 0; i < size; i++) {
+    for (size_t i = 0; i < size; i++) {
         if (effect == mEffects[i]) {
             // calling stop here will remove pre-processing effect from the audio HAL.
             // This is safe as we hold the EffectChain mutex which guarantees that we are not in
@@ -7402,7 +7952,7 @@
     sp<SuspendedEffectDesc> desc;
     // use effect type UUID timelow as key as there is no real risk of identical
     // timeLow fields among effect type UUIDs.
-    int index = mSuspendedEffects.indexOfKey(type->timeLow);
+    ssize_t index = mSuspendedEffects.indexOfKey(type->timeLow);
     if (suspend) {
         if (index >= 0) {
             desc = mSuspendedEffects.valueAt(index);
@@ -7452,7 +8002,7 @@
 {
     sp<SuspendedEffectDesc> desc;
 
-    int index = mSuspendedEffects.indexOfKey((int)kKeyForSuspendAll);
+    ssize_t index = mSuspendedEffects.indexOfKey((int)kKeyForSuspendAll);
     if (suspend) {
         if (index >= 0) {
             desc = mSuspendedEffects.valueAt(index);
@@ -7534,7 +8084,7 @@
 void AudioFlinger::EffectChain::checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
                                                             bool enabled)
 {
-    int index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow);
+    ssize_t index = mSuspendedEffects.indexOfKey(effect->desc().type.timeLow);
     if (enabled) {
         if (index < 0) {
             // if the effect is not suspend check if all effects are suspended
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index fdcd916..50712cf 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -1,4 +1,4 @@
-/* //device/include/server/AudioFlinger/AudioFlinger.h
+/*
 **
 ** Copyright 2007, The Android Open Source Project
 **
@@ -22,11 +22,14 @@
 #include <sys/types.h>
 #include <limits.h>
 
+#include <common_time/cc_helper.h>
+
 #include <media/IAudioFlinger.h>
 #include <media/IAudioFlingerClient.h>
 #include <media/IAudioTrack.h>
 #include <media/IAudioRecord.h>
 #include <media/AudioSystem.h>
+#include <media/AudioTrack.h>
 
 #include <utils/Atomic.h>
 #include <utils/Errors.h>
@@ -55,7 +58,7 @@
 
 // ----------------------------------------------------------------------------
 
-static const nsecs_t kStandbyTimeInNsecs = seconds(3);
+static const nsecs_t kDefaultStandbyTimeInNsecs = seconds(3);
 
 class AudioFlinger :
     public BinderService<AudioFlinger>,
@@ -78,6 +81,7 @@
                                 uint32_t flags,
                                 const sp<IMemory>& sharedBuffer,
                                 audio_io_handle_t output,
+                                bool isTimed,
                                 int *sessionId,
                                 status_t *status);
 
@@ -102,6 +106,7 @@
     virtual     status_t    setMasterMute(bool muted);
 
     virtual     float       masterVolume() const;
+    virtual     float       masterVolumeSW() const;
     virtual     bool        masterMute() const;
 
     virtual     status_t    setStreamVolume(audio_stream_type_t stream, float value,
@@ -206,6 +211,8 @@
     audio_hw_device_t*      findSuitableHwDev_l(uint32_t devices);
     void                    purgeStaleEffects_l();
 
+    static nsecs_t          mStandbyTimeInNsecs;
+
     // Internal dump utilites.
     status_t dumpPermissionDenial(int fd, const Vector<String16>& args);
     status_t dumpClients(int fd, const Vector<String16>& args);
@@ -220,12 +227,18 @@
         pid_t               pid() const { return mPid; }
         sp<AudioFlinger>    audioFlinger() const { return mAudioFlinger; }
 
+        bool reserveTimedTrack();
+        void releaseTimedTrack();
+
     private:
                             Client(const Client&);
                             Client& operator = (const Client&);
         const sp<AudioFlinger> mAudioFlinger;
         const sp<MemoryDealer> mMemoryDealer;
         const pid_t         mPid;
+
+        Mutex               mTimedTrackLock;
+        int                 mTimedTrackCount;
     };
 
     // --- Notification Client ---
@@ -290,6 +303,8 @@
             enum track_state {
                 IDLE,
                 TERMINATED,
+                // These are order-sensitive; do not change order without reviewing the impact.
+                // In particular there are assumptions about > STOPPED.
                 STOPPED,
                 RESUMING,
                 ACTIVE,
@@ -314,7 +329,7 @@
                                         int sessionId);
             virtual             ~TrackBase();
 
-            virtual status_t    start() = 0;
+            virtual status_t    start(pid_t tid) = 0;
             virtual void        stop() = 0;
                     sp<IMemory> getCblk() const { return mCblkMemory; }
                     audio_track_cblk_t* cblk() const { return mCblk; }
@@ -331,7 +346,9 @@
                                 TrackBase(const TrackBase&);
                                 TrackBase& operator = (const TrackBase&);
 
-            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer) = 0;
+            virtual status_t getNextBuffer(
+                AudioBufferProvider::Buffer* buffer,
+                int64_t pts) = 0;
             virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
 
             audio_format_t format() const {
@@ -586,7 +603,7 @@
             virtual             ~Track();
 
                     void        dump(char* buffer, size_t size);
-            virtual status_t    start();
+            virtual status_t    start(pid_t tid);
             virtual void        stop();
                     void        pause();
 
@@ -607,7 +624,6 @@
                     int16_t     *mainBuffer() const { return mMainBuffer; }
                     int         auxEffectId() const { return mAuxEffectId; }
 
-
         protected:
             friend class ThreadBase;
             friend class TrackHandle;
@@ -618,7 +634,11 @@
                                 Track(const Track&);
                                 Track& operator = (const Track&);
 
-            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
+            virtual status_t getNextBuffer(
+                AudioBufferProvider::Buffer* buffer,
+                int64_t pts);
+            virtual uint32_t framesReady() const;
+
             bool isMuted() const { return mMute; }
             bool isPausing() const {
                 return mState == PAUSING;
@@ -634,6 +654,8 @@
                 return (mStreamType == AUDIO_STREAM_CNT);
             }
 
+            virtual bool isTimedTrack() const { return false; }
+
             // we don't really need a lock for these
             volatile bool       mMute;
             // FILLED state is used for suppressing volume ramp at begin of playing
@@ -650,6 +672,79 @@
             bool                mHasVolumeController;
         };  // end of Track
 
+        class TimedTrack : public Track {
+          public:
+            static sp<TimedTrack> create(const wp<ThreadBase>& thread,
+                                         const sp<Client>& client,
+                                         audio_stream_type_t streamType,
+                                         uint32_t sampleRate,
+                                         audio_format_t format,
+                                         uint32_t channelMask,
+                                         int frameCount,
+                                         const sp<IMemory>& sharedBuffer,
+                                         int sessionId);
+            ~TimedTrack();
+
+            class TimedBuffer {
+              public:
+                TimedBuffer();
+                TimedBuffer(const sp<IMemory>& buffer, int64_t pts);
+                const sp<IMemory>& buffer() const { return mBuffer; }
+                int64_t pts() const { return mPTS; }
+                int position() const { return mPosition; }
+                void setPosition(int pos) { mPosition = pos; }
+              private:
+                sp<IMemory> mBuffer;
+                int64_t mPTS;
+                int mPosition;
+            };
+
+            virtual bool isTimedTrack() const { return true; }
+
+            virtual uint32_t framesReady() const;
+
+            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
+                                           int64_t pts);
+            virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
+            void timedYieldSamples(AudioBufferProvider::Buffer* buffer);
+            void timedYieldSilence(uint32_t numFrames,
+                                   AudioBufferProvider::Buffer* buffer);
+
+            status_t    allocateTimedBuffer(size_t size,
+                                            sp<IMemory>* buffer);
+            status_t    queueTimedBuffer(const sp<IMemory>& buffer,
+                                         int64_t pts);
+            status_t    setMediaTimeTransform(const LinearTransform& xform,
+                                              TimedAudioTrack::TargetTimeline target);
+            void        trimTimedBufferQueue_l();
+
+          private:
+            TimedTrack(const wp<ThreadBase>& thread,
+                       const sp<Client>& client,
+                       audio_stream_type_t streamType,
+                       uint32_t sampleRate,
+                       audio_format_t format,
+                       uint32_t channelMask,
+                       int frameCount,
+                       const sp<IMemory>& sharedBuffer,
+                       int sessionId);
+
+            uint64_t            mLocalTimeFreq;
+            LinearTransform     mLocalTimeToSampleTransform;
+            sp<MemoryDealer>    mTimedMemoryDealer;
+            Vector<TimedBuffer> mTimedBufferQueue;
+            uint8_t*            mTimedSilenceBuffer;
+            uint32_t            mTimedSilenceBufferSize;
+            mutable Mutex       mTimedBufferQueueLock;
+            bool                mTimedAudioOutputOnTime;
+            CCHelper            mCCHelper;
+
+            Mutex               mMediaTimeTransformLock;
+            LinearTransform     mMediaTimeTransform;
+            bool                mMediaTimeTransformValid;
+            TimedAudioTrack::TargetTimeline mMediaTimeTransformTarget;
+        };
+
 
         // playback track
         class OutputTrack : public Track {
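On the mixer side a TimedTrack pairs its TimedBuffer queue with the media-time transform: when asked for data at a given output PTS it converts the head buffer's media-time PTS into local time and either hands out samples or pads with silence until the buffer is due, trimming stale entries through trimTimedBufferQueue_l(). The real bodies live in AudioFlinger.cpp, outside this hunk; the following is only a rough sketch, with toLocalTime() and framesBetween() as illustrative stand-ins:

    // sketch only; toLocalTime() and framesBetween() are assumptions, not methods from the patch
    status_t getNextBuffer_sketch(AudioBufferProvider::Buffer* out, int64_t outputPTS) {
        Mutex::Autolock lock(mTimedBufferQueueLock);
        if (mTimedBufferQueue.isEmpty()) {
            out->frameCount = 0;
            return NOT_ENOUGH_DATA;
        }
        // media time -> local time, via mMediaTimeTransform and the common clock (CCHelper)
        int64_t headLocalPTS = toLocalTime(mTimedBufferQueue[0].pts());
        if (headLocalPTS > outputPTS) {
            // not due yet: emit silence up to the buffer's start time
            timedYieldSilence(framesBetween(outputPTS, headLocalPTS), out);
        } else {
            // due (or late): hand out its samples; stale data is trimmed elsewhere
            timedYieldSamples(out);
        }
        return NO_ERROR;
    }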
@@ -668,10 +763,10 @@
                                         int frameCount);
             virtual             ~OutputTrack();
 
-            virtual status_t    start();
+            virtual status_t    start(pid_t tid);
             virtual void        stop();
                     bool        write(int16_t* data, uint32_t frames);
-                    bool        bufferQueueEmpty() const { return (mBufferQueue.size() == 0) ? true : false; }
+                    bool        bufferQueueEmpty() const { return mBufferQueue.size() == 0; }
                     bool        isActive() const { return mActive; }
             const wp<ThreadBase>& thread() const { return mThread; }
 
@@ -707,17 +802,13 @@
 
         virtual     uint32_t    latency() const;
 
-        virtual     status_t    setMasterVolume(float value);
-        virtual     status_t    setMasterMute(bool muted);
+                    void        setMasterVolume(float value);
+                    void        setMasterMute(bool muted);
 
-        virtual     float       masterVolume() const { return mMasterVolume; }
-        virtual     bool        masterMute() const { return mMasterMute; }
+                    void        setStreamVolume(audio_stream_type_t stream, float value);
+                    void        setStreamMute(audio_stream_type_t stream, bool muted);
 
-        virtual     status_t    setStreamVolume(audio_stream_type_t stream, float value);
-        virtual     status_t    setStreamMute(audio_stream_type_t stream, bool muted);
-
-        virtual     float       streamVolume(audio_stream_type_t stream) const;
-        virtual     bool        streamMute(audio_stream_type_t stream) const;
+                    float       streamVolume(audio_stream_type_t stream) const;
 
                     sp<Track>   createTrack_l(
                                     const sp<AudioFlinger::Client>& client,
@@ -728,6 +819,7 @@
                                     int frameCount,
                                     const sp<IMemory>& sharedBuffer,
                                     int sessionId,
+                                    bool isTimed,
                                     status_t *status);
 
                     AudioStreamOut* getOutput() const;
@@ -760,7 +852,11 @@
         int                             mSuspended;
         int                             mBytesWritten;
     private:
+        // mMasterMute is in both PlaybackThread and in AudioFlinger.  When a
+        // PlaybackThread needs to find out if master-muted, it checks its local
+        // copy rather than the one in AudioFlinger.  This optimization saves a lock.
         bool                            mMasterMute;
+                    void        setMasterMute_l(bool muted) { mMasterMute = muted; }
     protected:
         SortedVector< wp<Track> >       mActiveTracks;
 
@@ -853,6 +949,8 @@
     private:
         void applyVolume(uint16_t leftVol, uint16_t rightVol, bool ramp);
 
+        // volumes last sent to audio HAL with stream->set_volume()
+        // FIXME use standard representation and names
         float mLeftVolFloat;
         float mRightVolFloat;
         uint16_t mLeftVolShort;
@@ -884,7 +982,11 @@
               PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const;
               MixerThread *checkMixerThread_l(audio_io_handle_t output) const;
               RecordThread *checkRecordThread_l(audio_io_handle_t input) const;
-              float streamVolumeInternal(audio_stream_type_t stream) const
+              // no range check, AudioFlinger::mLock held
+              bool streamMute_l(audio_stream_type_t stream) const
+                                { return mStreamTypes[stream].mute; }
+              // no range check, doesn't check per-thread stream volume, AudioFlinger::mLock held
+              float streamVolume_l(audio_stream_type_t stream) const
                                 { return mStreamTypes[stream].volume; }
               void audioConfigChanged_l(int event, audio_io_handle_t ioHandle, void *param2);
 
@@ -892,25 +994,32 @@
               uint32_t nextUniqueId();
 
               status_t moveEffectChain_l(int sessionId,
-                                     AudioFlinger::PlaybackThread *srcThread,
-                                     AudioFlinger::PlaybackThread *dstThread,
+                                     PlaybackThread *srcThread,
+                                     PlaybackThread *dstThread,
                                      bool reRegister);
               PlaybackThread *primaryPlaybackThread_l();
               uint32_t primaryOutputDevice_l();
 
     friend class AudioBuffer;
 
+    // server side of the client's IAudioTrack
     class TrackHandle : public android::BnAudioTrack {
     public:
                             TrackHandle(const sp<PlaybackThread::Track>& track);
         virtual             ~TrackHandle();
         virtual sp<IMemory> getCblk() const;
-        virtual status_t    start();
+        virtual status_t    start(pid_t tid);
         virtual void        stop();
         virtual void        flush();
         virtual void        mute(bool);
         virtual void        pause();
         virtual status_t    attachAuxEffect(int effectId);
+        virtual status_t    allocateTimedBuffer(size_t size,
+                                                sp<IMemory>* buffer);
+        virtual status_t    queueTimedBuffer(const sp<IMemory>& buffer,
+                                             int64_t pts);
+        virtual status_t    setMediaTimeTransform(const LinearTransform& xform,
+                                                  int target);
         virtual status_t onTransact(
             uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
     private:
@@ -943,7 +1052,7 @@
                                         int sessionId);
             virtual             ~RecordTrack();
 
-            virtual status_t    start();
+            virtual status_t    start(pid_t tid);
             virtual void        stop();
 
                     bool        overflow() { bool tmp = mOverflow; mOverflow = false; return tmp; }
@@ -958,7 +1067,9 @@
                                 RecordTrack(const RecordTrack&);
                                 RecordTrack& operator = (const RecordTrack&);
 
-            virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
+            virtual status_t getNextBuffer(
+                AudioBufferProvider::Buffer* buffer,
+                int64_t pts);
 
             bool                mOverflow;
         };
@@ -988,13 +1099,15 @@
                         status_t *status);
 
                 status_t    start(RecordTrack* recordTrack);
+                status_t    start(RecordTrack* recordTrack, pid_t tid);
                 void        stop(RecordTrack* recordTrack);
                 status_t    dump(int fd, const Vector<String16>& args);
                 AudioStreamIn* getInput() const;
                 AudioStreamIn* clearInput();
                 virtual audio_stream_t* stream();
 
-        virtual status_t    getNextBuffer(AudioBufferProvider::Buffer* buffer);
+        virtual status_t    getNextBuffer(AudioBufferProvider::Buffer* buffer,
+                                          int64_t pts);
         virtual void        releaseBuffer(AudioBufferProvider::Buffer* buffer);
         virtual bool        checkForNewParameters_l();
         virtual String8     getParameters(const String8& keys);
@@ -1023,12 +1136,13 @@
                 ssize_t                             mBytesRead;
     };
 
+    // server side of the client's IAudioRecord
     class RecordHandle : public android::BnAudioRecord {
     public:
         RecordHandle(const sp<RecordThread::RecordTrack>& recordTrack);
         virtual             ~RecordHandle();
         virtual sp<IMemory> getCblk() const;
-        virtual status_t    start();
+        virtual status_t    start(pid_t tid);
         virtual void        stop();
         virtual status_t onTransact(
             uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
@@ -1151,6 +1265,7 @@
         status_t            mStatus;    // initialization status
         effect_state        mState;     // current activation state
         Vector< wp<EffectHandle> > mHandles;    // list of client handles
+                    // First handle in mHandles has highest priority and controls the effect module
         uint32_t mMaxDisableWaitCnt;    // maximum grace period before forcing an effect off after
                                         // sending disable command.
         uint32_t mDisableWaitCnt;       // current process() calls count during disable period.
@@ -1376,6 +1491,7 @@
             hwDev(dev), stream(in) {}
     };
 
+    // for mAudioSessionRefs only
     struct AudioSessionRef {
         // FIXME rename parameter names when fields get "m" prefix
         AudioSessionRef(int sessionid_, pid_t pid_) :
@@ -1388,6 +1504,28 @@
     friend class RecordThread;
     friend class PlaybackThread;
 
+    enum master_volume_support {
+        // MVS_NONE:
+        // Audio HAL has no support for master volume, either setting or
+        // getting.  All master volume control must be implemented in SW by the
+        // AudioFlinger mixing core.
+        MVS_NONE,
+
+        // MVS_SETONLY:
+        // Audio HAL has support for setting master volume, but not for getting
+        // master volume (original HAL design did not include a getter).
+        // AudioFlinger needs to keep track of the last set master volume in
+        // addition to needing to set an initial, default, master volume at HAL
+        // load time.
+        MVS_SETONLY,
+
+        // MVS_FULL:
+        // Audio HAL has support both for setting and getting master volume.
+        // AudioFlinger should send all set and get master volume requests
+        // directly to the HAL.
+        MVS_FULL,
+    };
+
     mutable     Mutex                               mLock;
 
                 DefaultKeyedVector< pid_t, wp<Client> >     mClients;   // see ~Client()
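Given the three support levels, the masterVolume_l() declared further down (now without an inline body) has to pick between the HAL's own value and the software copy AudioFlinger keeps. A minimal sketch of that dispatch, assuming the HAL header's optional get_master_volume() hook and written as a free function purely for illustration:

    // sketch: how the support level could steer the read; the real body is in AudioFlinger.cpp
    float masterVolume_l_sketch(master_volume_support lvl, audio_hw_device_t* dev, float cached) {
        float v;
        if (lvl == MVS_FULL && dev->get_master_volume != NULL &&
                dev->get_master_volume(dev, &v) == 0) {
            return v;                 // HAL supports reading master volume directly
        }
        return cached;                // MVS_NONE / MVS_SETONLY: use the value AudioFlinger tracks
    }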
@@ -1416,6 +1554,7 @@
         AUDIO_SET_VOICE_VOLUME,
         AUDIO_SET_PARAMETER,
         AUDIO_HW_GET_INPUT_BUFFER_SIZE,
+        AUDIO_HW_GET_MASTER_VOLUME,
     };
 
     mutable     hardware_call_state                 mHardwareStatus;    // for dump only
@@ -1426,18 +1565,22 @@
 
                 // both are protected by mLock
                 float                               mMasterVolume;
+                float                               mMasterVolumeSW;
+                master_volume_support               mMasterVolumeSupportLvl;
                 bool                                mMasterMute;
 
                 DefaultKeyedVector< audio_io_handle_t, sp<RecordThread> >    mRecordThreads;
 
                 DefaultKeyedVector< pid_t, sp<NotificationClient> >    mNotificationClients;
-                volatile int32_t                    mNextUniqueId;
+                volatile int32_t                    mNextUniqueId;  // updated by android_atomic_inc
                 audio_mode_t                        mMode;
                 bool                                mBtNrecIsOff;
 
+                // protected by mLock
                 Vector<AudioSessionRef*> mAudioSessionRefs;
 
-                float       masterVolume_l() const  { return mMasterVolume; }
+                float       masterVolume_l() const;
+                float       masterVolumeSW_l() const  { return mMasterVolumeSW; }
                 bool        masterMute_l() const    { return mMasterMute; }
 
 private:
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 191520a..020d62a 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -1,4 +1,4 @@
-/* //device/include/server/AudioFlinger/AudioMixer.cpp
+/*
 **
 ** Copyright 2007, The Android Open Source Project
 **
@@ -33,6 +33,8 @@
 #include <system/audio.h>
 
 #include <audio_utils/primitives.h>
+#include <common_time/local_clock.h>
+#include <common_time/cc_helper.h>
 
 #include "AudioMixer.h"
 
@@ -45,6 +47,9 @@
 {
     // AudioMixer is not yet capable of multi-channel beyond stereo
     assert(2 == MAX_NUM_CHANNELS);
+
+    LocalClock lc;
+
     mState.enabledTracks= 0;
     mState.needsChanged = 0;
     mState.frameCount   = frameCount;
@@ -80,6 +85,7 @@
         t->sampleRate = mSampleRate;
         t->mainBuffer = NULL;
         t->auxBuffer = NULL;
+        t->localTimeFreq = lc.getLocalFreq();
         t++;
     }
 }
@@ -251,6 +257,7 @@
             }
             break;
         case AUXLEVEL:
+            //assert(0 <= valueInt && valueInt <= MAX_GAIN_INT);
             if (track.auxLevel != valueInt) {
                 ALOGV("setParameter(VOLUME, AUXLEVEL: %04x)", valueInt);
                 track.prevAuxLevel = track.auxLevel << 16;
@@ -289,6 +296,7 @@
             if (resampler == NULL) {
                 resampler = AudioResampler::create(
                         format, channelCount, devSampleRate);
+                resampler->setLocalTimeFreq(localTimeFreq);
             }
             return true;
         }
@@ -333,13 +341,13 @@
 
 
 
-void AudioMixer::process()
+void AudioMixer::process(int64_t pts)
 {
-    mState.hook(&mState);
+    mState.hook(&mState, pts);
 }
 
 
-void AudioMixer::process__validate(state_t* state)
+void AudioMixer::process__validate(state_t* state, int64_t pts)
 {
     ALOGW_IF(!state->needsChanged,
         "in process__validate() but nothing's invalid");
@@ -443,7 +451,7 @@
         countActiveTracks, state->enabledTracks,
         all16BitsStereoNoResample, resampling, volumeRamp);
 
-    state->hook(state);
+    state->hook(state, pts);
 
     // Now that the volume ramp has been done, set optimal state and
     // track hooks for subsequent mixer process
@@ -549,7 +557,7 @@
     }
     t->prevVolume[0] = vl;
     t->prevVolume[1] = vr;
-    t->adjustVolumeRamp((aux != NULL));
+    t->adjustVolumeRamp(aux != NULL);
 }
 
 void AudioMixer::volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
@@ -558,7 +566,7 @@
     const int16_t vr = t->volume[1];
 
     if (CC_UNLIKELY(aux != NULL)) {
-        const int16_t va = (int16_t)t->auxLevel;
+        const int16_t va = t->auxLevel;
         do {
             int16_t l = (int16_t)(*temp++ >> 12);
             int16_t r = (int16_t)(*temp++ >> 12);
@@ -757,7 +765,7 @@
 }
 
 // no-op case
-void AudioMixer::process__nop(state_t* state)
+void AudioMixer::process__nop(state_t* state, int64_t pts)
 {
     uint32_t e0 = state->enabledTracks;
     size_t bufSize = state->frameCount * sizeof(int16_t) * MAX_NUM_CHANNELS;
@@ -787,7 +795,9 @@
             size_t outFrames = state->frameCount;
             while (outFrames) {
                 t1.buffer.frameCount = outFrames;
-                t1.bufferProvider->getNextBuffer(&t1.buffer);
+                int64_t outputPTS = calculateOutputPTS(
+                    t1, pts, state->frameCount - outFrames);
+                t1.bufferProvider->getNextBuffer(&t1.buffer, outputPTS);
                 if (t1.buffer.raw == NULL) break;
                 outFrames -= t1.buffer.frameCount;
                 t1.bufferProvider->releaseBuffer(&t1.buffer);
@@ -797,7 +807,7 @@
 }
 
 // generic code without resampling
-void AudioMixer::process__genericNoResampling(state_t* state)
+void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts)
 {
     int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
 
@@ -809,7 +819,7 @@
         e0 &= ~(1<<i);
         track_t& t = state->tracks[i];
         t.buffer.frameCount = state->frameCount;
-        t.bufferProvider->getNextBuffer(&t.buffer);
+        t.bufferProvider->getNextBuffer(&t.buffer, pts);
         t.frameCount = t.buffer.frameCount;
         t.in = t.buffer.raw;
         // t.in == NULL can happen if the track was flushed just after having
@@ -853,7 +863,7 @@
                 while (outFrames) {
                     size_t inFrames = (t.frameCount > outFrames)?outFrames:t.frameCount;
                     if (inFrames) {
-                        (t.hook)(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames, state->resampleTemp, aux);
+                        t.hook(&t, outTemp + (BLOCKSIZE-outFrames)*MAX_NUM_CHANNELS, inFrames, state->resampleTemp, aux);
                         t.frameCount -= inFrames;
                         outFrames -= inFrames;
                         if (CC_UNLIKELY(aux != NULL)) {
@@ -863,7 +873,9 @@
                     if (t.frameCount == 0 && outFrames) {
                         t.bufferProvider->releaseBuffer(&t.buffer);
                         t.buffer.frameCount = (state->frameCount - numFrames) - (BLOCKSIZE - outFrames);
-                        t.bufferProvider->getNextBuffer(&t.buffer);
+                        int64_t outputPTS = calculateOutputPTS(
+                            t, pts, numFrames + (BLOCKSIZE - outFrames));
+                        t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
                         t.in = t.buffer.raw;
                         if (t.in == NULL) {
                             enabledTracks &= ~(1<<i);
@@ -892,7 +904,7 @@
 
 
 // generic code with resampling
-void AudioMixer::process__genericResampling(state_t* state)
+void AudioMixer::process__genericResampling(state_t* state, int64_t pts)
 {
     // this const just means that local variable outTemp doesn't change
     int32_t* const outTemp = state->outputTemp;
@@ -932,14 +944,16 @@
             // acquire/release the buffers because it's done by
             // the resampler.
             if ((t.needs & NEEDS_RESAMPLE__MASK) == NEEDS_RESAMPLE_ENABLED) {
-                (t.hook)(&t, outTemp, numFrames, state->resampleTemp, aux);
+                t.resampler->setPTS(pts);
+                t.hook(&t, outTemp, numFrames, state->resampleTemp, aux);
             } else {
 
                 size_t outFrames = 0;
 
                 while (outFrames < numFrames) {
                     t.buffer.frameCount = numFrames - outFrames;
-                    t.bufferProvider->getNextBuffer(&t.buffer);
+                    int64_t outputPTS = calculateOutputPTS(t, pts, outFrames);
+                    t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
                     t.in = t.buffer.raw;
                     // t.in == NULL can happen if the track was flushed just after having
                     // been enabled for mixing.
@@ -948,7 +962,7 @@
                     if (CC_UNLIKELY(aux != NULL)) {
                         aux += outFrames;
                     }
-                    (t.hook)(&t, outTemp + outFrames*MAX_NUM_CHANNELS, t.buffer.frameCount, state->resampleTemp, aux);
+                    t.hook(&t, outTemp + outFrames*MAX_NUM_CHANNELS, t.buffer.frameCount, state->resampleTemp, aux);
                     outFrames += t.buffer.frameCount;
                     t.bufferProvider->releaseBuffer(&t.buffer);
                 }
@@ -959,9 +973,15 @@
 }
 
 // one track, 16 bits stereo without resampling is the most common case
-void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state)
+void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state,
+                                                           int64_t pts)
 {
+    // This method is only called when state->enabledTracks has exactly
+    // one bit set.  The asserts below would verify this, but are commented out
+    // since the whole point of this method is to optimize performance.
+    //assert(0 != state->enabledTracks);
     const int i = 31 - __builtin_clz(state->enabledTracks);
+    //assert((1 << i) == state->enabledTracks);
     const track_t& t = state->tracks[i];
 
     AudioBufferProvider::Buffer& b(t.buffer);
@@ -974,7 +994,8 @@
     const uint32_t vrl = t.volumeRL;
     while (numFrames) {
         b.frameCount = numFrames;
-        t.bufferProvider->getNextBuffer(&b);
+        int64_t outputPTS = calculateOutputPTS(t, pts, out - t.mainBuffer);
+        t.bufferProvider->getNextBuffer(&b, outputPTS);
         const int16_t *in = b.i16;
 
         // in == NULL can happen if the track was flushed just after having
@@ -1018,7 +1039,8 @@
 // 2 tracks is also a common case
 // NEVER used in current implementation of process__validate()
 // only use if the 2 tracks have the same output buffer
-void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state)
+void AudioMixer::process__TwoTracks16BitsStereoNoResampling(state_t* state,
+                                                            int64_t pts)
 {
     int i;
     uint32_t en = state->enabledTracks;
@@ -1052,7 +1074,9 @@
 
         if (frameCount0 == 0) {
             b0.frameCount = numFrames;
-            t0.bufferProvider->getNextBuffer(&b0);
+            int64_t outputPTS = calculateOutputPTS(t0, pts,
+                                                   out - t0.mainBuffer);
+            t0.bufferProvider->getNextBuffer(&b0, outputPTS);
             if (b0.i16 == NULL) {
                 if (buff == NULL) {
                     buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount];
@@ -1066,7 +1090,9 @@
         }
         if (frameCount1 == 0) {
             b1.frameCount = numFrames;
-            t1.bufferProvider->getNextBuffer(&b1);
+            int64_t outputPTS = calculateOutputPTS(t1, pts,
+                                                   out - t0.mainBuffer);
+            t1.bufferProvider->getNextBuffer(&b1, outputPTS);
             if (b1.i16 == NULL) {
                 if (buff == NULL) {
                     buff = new int16_t[MAX_NUM_CHANNELS * state->frameCount];
@@ -1112,5 +1138,14 @@
 }
 #endif
 
+int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS,
+                                       int outputFrameIndex)
+{
+    if (AudioBufferProvider::kInvalidPTS == basePTS)
+        return AudioBufferProvider::kInvalidPTS;
+
+    return basePTS + ((outputFrameIndex * t.localTimeFreq) / t.sampleRate);
+}
+
 // ----------------------------------------------------------------------------
 }; // namespace android
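calculateOutputPTS() extrapolates the presentation time of any output frame from the PTS of frame 0 by converting the frame offset into local-clock ticks, and passes kInvalidPTS through untouched. A worked example with assumed values: a 24 MHz local clock and a 48 kHz track give 24 000 000 / 48 000 = 500 ticks per frame, so frame 128 lands 64 000 ticks after frame 0:

    // worked example; all values are assumptions for illustration only
    const uint64_t localTimeFreq = 24000000;   // 24 MHz local clock
    const uint32_t sampleRate    = 48000;      // 48 kHz track
    const int64_t  basePTS       = 1000000;    // PTS of output frame 0
    int64_t pts128 = basePTS + ((128 * localTimeFreq) / sampleRate);   // 1000000 + 64000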
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index c709686..b210212 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -1,4 +1,4 @@
-/* //device/include/server/AudioFlinger/AudioMixer.h
+/*
 **
 ** Copyright 2007, The Android Open Source Project
 **
@@ -79,7 +79,7 @@
     void        setParameter(int name, int target, int param, void *value);
 
     void        setBufferProvider(int name, AudioBufferProvider* bufferProvider);
-    void        process();
+    void        process(int64_t pts);
 
     uint32_t    trackNames() const { return mTrackNames; }
 
@@ -114,7 +114,6 @@
     struct state_t;
     struct track_t;
 
-    typedef void (*mix_t)(state_t* state);
     typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp, int32_t* aux);
     static const int BLOCKSIZE = 16; // 4 cache lines
 
@@ -128,30 +127,46 @@
 
         int32_t     prevVolume[MAX_NUM_CHANNELS];
 
+        // 16-byte boundary
+
         int32_t     volumeInc[MAX_NUM_CHANNELS];
-        int32_t     auxLevel;
         int32_t     auxInc;
         int32_t     prevAuxLevel;
 
+        // 16-byte boundary
+
+        int16_t     auxLevel;       // 0 <= auxLevel <= MAX_GAIN_INT, but signed for mul performance
         uint16_t    frameCount;
 
-        uint8_t     channelCount : 4;
-        uint8_t     enabled      : 1;
-        uint8_t     reserved0    : 3;
-        uint8_t     format;
-        uint32_t    channelMask;
+        uint8_t     channelCount;   // 1 or 2, redundant with (needs & NEEDS_CHANNEL_COUNT__MASK)
+        uint8_t     format;         // always 16
+        uint16_t    enabled;        // actually bool
+        uint32_t    channelMask;    // currently under-used
 
         AudioBufferProvider*                bufferProvider;
-        mutable AudioBufferProvider::Buffer buffer;
+
+        // 16-byte boundary
+
+        mutable AudioBufferProvider::Buffer buffer; // 8 bytes
 
         hook_t      hook;
         const void* in;             // current location in buffer
 
+        // 16-byte boundary
+
         AudioResampler*     resampler;
         uint32_t            sampleRate;
         int32_t*           mainBuffer;
         int32_t*           auxBuffer;
 
+        // 16-byte boundary
+
+        uint64_t    localTimeFreq;
+
+        int64_t     padding;
+
+        // 16-byte boundary
+
         bool        setResampler(uint32_t sampleRate, uint32_t devSampleRate);
         bool        doesResample() const { return resampler != NULL; }
         void        resetResampler() { if (resampler != NULL) resampler->reset(); }
@@ -165,7 +180,7 @@
         uint32_t        enabledTracks;
         uint32_t        needsChanged;
         size_t          frameCount;
-        mix_t           hook;
+        void            (*hook)(state_t* state, int64_t pts);   // one of process__*, never NULL
         int32_t         *outputTemp;
         int32_t         *resampleTemp;
         int32_t         reserved[2];
@@ -187,14 +202,19 @@
     static void volumeRampStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
     static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux);
 
-    static void process__validate(state_t* state);
-    static void process__nop(state_t* state);
-    static void process__genericNoResampling(state_t* state);
-    static void process__genericResampling(state_t* state);
-    static void process__OneTrack16BitsStereoNoResampling(state_t* state);
+    static void process__validate(state_t* state, int64_t pts);
+    static void process__nop(state_t* state, int64_t pts);
+    static void process__genericNoResampling(state_t* state, int64_t pts);
+    static void process__genericResampling(state_t* state, int64_t pts);
+    static void process__OneTrack16BitsStereoNoResampling(state_t* state,
+                                                          int64_t pts);
 #if 0
-    static void process__TwoTracks16BitsStereoNoResampling(state_t* state);
+    static void process__TwoTracks16BitsStereoNoResampling(state_t* state,
+                                                           int64_t pts);
 #endif
+
+    static int64_t calculateOutputPTS(const track_t& t, int64_t basePTS,
+                                      int outputFrameIndex);
 };
 
 // ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index 21b5811..987b039 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -116,19 +116,7 @@
 
     // release audio pre processing resources
     for (size_t i = 0; i < mInputSources.size(); i++) {
-        InputSourceDesc *source = mInputSources.valueAt(i);
-        Vector <EffectDesc *> effects = source->mEffects;
-        for (size_t j = 0; j < effects.size(); j++) {
-            delete effects[j]->mName;
-            Vector <effect_param_t *> params = effects[j]->mParams;
-            for (size_t k = 0; k < params.size(); k++) {
-                delete params[k];
-            }
-            params.clear();
-            delete effects[j];
-        }
-        effects.clear();
-        delete source;
+        delete mInputSources.valueAt(i);
     }
     mInputSources.clear();
 
@@ -616,8 +604,7 @@
 {
     Vector<sp<AudioEffect> > fxVector = inputDesc->mEffects;
     for (size_t i = 0; i < fxVector.size(); i++) {
-        sp<AudioEffect> fx = fxVector.itemAt(i);
-        fx->setEnabled(enabled);
+        fxVector.itemAt(i)->setEnabled(enabled);
     }
 }
 
@@ -768,7 +755,7 @@
     snprintf(buffer, SIZE, "- Commands:\n");
     result = String8(buffer);
     result.append("   Command Time        Wait pParam\n");
-    for (int i = 0; i < (int)mAudioCommands.size(); i++) {
+    for (size_t i = 0; i < mAudioCommands.size(); i++) {
         mAudioCommands[i]->dump(buffer, SIZE);
         result.append(buffer);
     }
@@ -902,7 +889,7 @@
 // insertCommand_l() must be called with mLock held
 void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *command, int delayMs)
 {
-    ssize_t i;
+    ssize_t i;  // not size_t because i will count down to -1
     Vector <AudioCommand *> removedCommands;
 
     command->mTime = systemTime() + milliseconds(delayMs);
@@ -1243,7 +1230,7 @@
             node = node->next;
             continue;
         }
-        EffectDesc *effect = new EffectDesc(*effects[i]);
+        EffectDesc *effect = new EffectDesc(*effects[i]);   // deep copy
         loadEffectParameters(node, effect->mParams);
         ALOGV("loadInputSource() adding effect %s uuid %08x", effect->mName, effect->mUuid.timeLow);
         source->mEffects.add(effect);
@@ -1294,11 +1281,7 @@
         ALOGW("loadEffect() invalid uuid %s", node->value);
         return NULL;
     }
-    EffectDesc *effect = new EffectDesc();
-    effect->mName = strdup(root->name);
-    memcpy(&effect->mUuid, &uuid, sizeof(effect_uuid_t));
-
-    return effect;
+    return new EffectDesc(root->name, uuid);
 }
 
 status_t AudioPolicyService::loadEffects(cnode *root, Vector <EffectDesc *>& effects)
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index fdaf576..679fd30 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -233,8 +233,33 @@
 
     class EffectDesc {
     public:
-        EffectDesc() {}
-        virtual ~EffectDesc() {}
+        EffectDesc(const char *name, const effect_uuid_t& uuid) :
+                        mName(strdup(name)),
+                        mUuid(uuid) { }
+        EffectDesc(const EffectDesc& orig) :
+                        mName(strdup(orig.mName)),
+                        mUuid(orig.mUuid) {
+                            // deep copy mParams
+                            for (size_t k = 0; k < orig.mParams.size(); k++) {
+                                effect_param_t *origParam = orig.mParams[k];
+                                // psize and vsize are rounded up to an int boundary for allocation
+                                size_t origSize = sizeof(effect_param_t) +
+                                                  ((origParam->psize + 3) & ~3) +
+                                                  ((origParam->vsize + 3) & ~3);
+                                effect_param_t *dupParam = (effect_param_t *) malloc(origSize);
+                                memcpy(dupParam, origParam, origSize);
+                                // This works because the original param buffer was also allocated
+                                // in multiples of 4 bytes; strictly we should memcpy only the
+                                // actual param size, i.e. without rounding up vsize.
+                                mParams.add(dupParam);
+                            }
+                        }
+        /*virtual*/ ~EffectDesc() {
+            free(mName);
+            for (size_t k = 0; k < mParams.size(); k++) {
+                free(mParams[k]);
+            }
+        }
         char *mName;
         effect_uuid_t mUuid;
         Vector <effect_param_t *> mParams;
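The copy constructor has to duplicate each effect_param_t with the same rounded size the original allocation used; psize and vsize are each rounded up to the next multiple of four with the usual (x + 3) & ~3 idiom:

    // rounding used when sizing the duplicated parameter blocks above (sketch)
    static inline size_t roundUp4(size_t x) { return (x + 3) & ~(size_t)3; }
    // e.g. roundUp4(5) == 8 and roundUp4(8) == 8, so
    // origSize == sizeof(effect_param_t) + roundUp4(psize) + roundUp4(vsize)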
@@ -243,7 +268,11 @@
     class InputSourceDesc {
     public:
         InputSourceDesc() {}
-        virtual ~InputSourceDesc() {}
+        /*virtual*/ ~InputSourceDesc() {
+            for (size_t j = 0; j < mEffects.size(); j++) {
+                delete mEffects[j];
+            }
+        }
         Vector <EffectDesc *> mEffects;
     };
 
diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
index 9486b9c..398ba0b 100644
--- a/services/audioflinger/AudioResampler.cpp
+++ b/services/audioflinger/AudioResampler.cpp
@@ -122,7 +122,8 @@
         int32_t sampleRate) :
     mBitDepth(bitDepth), mChannelCount(inChannelCount),
             mSampleRate(sampleRate), mInSampleRate(sampleRate), mInputIndex(0),
-            mPhaseFraction(0) {
+            mPhaseFraction(0), mLocalTimeFreq(0),
+            mPTS(AudioBufferProvider::kInvalidPTS) {
     // sanity check on format
     if ((bitDepth != 16) ||(inChannelCount < 1) || (inChannelCount > 2)) {
         ALOGE("Unsupported sample format, %d bits, %d channels", bitDepth,
@@ -150,6 +151,23 @@
     mVolume[1] = right;
 }
 
+void AudioResampler::setLocalTimeFreq(uint64_t freq) {
+    mLocalTimeFreq = freq;
+}
+
+void AudioResampler::setPTS(int64_t pts) {
+    mPTS = pts;
+}
+
+int64_t AudioResampler::calculateOutputPTS(int outputFrameIndex) {
+
+    if (mPTS == AudioBufferProvider::kInvalidPTS) {
+        return AudioBufferProvider::kInvalidPTS;
+    } else {
+        return mPTS + ((outputFrameIndex * mLocalTimeFreq) / mSampleRate);
+    }
+}
+
 void AudioResampler::reset() {
     mInputIndex = 0;
     mPhaseFraction = 0;
@@ -196,7 +214,8 @@
         // buffer is empty, fetch a new one
         while (mBuffer.frameCount == 0) {
             mBuffer.frameCount = inFrameCount;
-            provider->getNextBuffer(&mBuffer);
+            provider->getNextBuffer(&mBuffer,
+                                    calculateOutputPTS(outputIndex / 2));
             if (mBuffer.raw == NULL) {
                 goto resampleStereo16_exit;
             }
@@ -290,7 +309,8 @@
         // buffer is empty, fetch a new one
         while (mBuffer.frameCount == 0) {
             mBuffer.frameCount = inFrameCount;
-            provider->getNextBuffer(&mBuffer);
+            provider->getNextBuffer(&mBuffer,
+                                    calculateOutputPTS(outputIndex / 2));
             if (mBuffer.raw == NULL) {
                 mInputIndex = inputIndex;
                 mPhaseFraction = phaseFraction;
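Both resampler loops index the interleaved output buffer sample by sample, so outputIndex advances by two for every stereo frame written; dividing by two before calling calculateOutputPTS() converts it back into the output frame index the extrapolation expects:

    // indexing convention relied on by the two calls above (sketch)
    size_t outputIndex = 0;                   // counts interleaved int16_t samples: L, R, L, R, ...
    int outputFrameIndex = outputIndex / 2;   // current output frame, as passed to calculateOutputPTS()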
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index c23016e..9deb796 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -49,6 +49,10 @@
     virtual void init() = 0;
     virtual void setSampleRate(int32_t inSampleRate);
     virtual void setVolume(int16_t left, int16_t right);
+    virtual void setLocalTimeFreq(uint64_t freq);
+
+    // set the PTS of the next buffer output by the resampler
+    virtual void setPTS(int64_t pts);
 
     virtual void resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider) = 0;
@@ -72,6 +76,8 @@
     AudioResampler(const AudioResampler&);
     AudioResampler& operator=(const AudioResampler&);
 
+    int64_t calculateOutputPTS(int outputFrameIndex);
+
     const int32_t mBitDepth;
     const int32_t mChannelCount;
     const int32_t mSampleRate;
@@ -85,6 +91,8 @@
     size_t mInputIndex;
     int32_t mPhaseIncrement;
     uint32_t mPhaseFraction;
+    uint64_t mLocalTimeFreq;
+    int64_t mPTS;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
index c0e760e..18e59e9 100644
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ b/services/audioflinger/AudioResamplerCubic.cpp
@@ -65,7 +65,7 @@
     // fetch first buffer
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
-        provider->getNextBuffer(&mBuffer);
+        provider->getNextBuffer(&mBuffer, mPTS);
         if (mBuffer.raw == NULL)
             return;
         // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
@@ -95,7 +95,8 @@
                 inputIndex = 0;
                 provider->releaseBuffer(&mBuffer);
                 mBuffer.frameCount = inFrameCount;
-                provider->getNextBuffer(&mBuffer);
+                provider->getNextBuffer(&mBuffer,
+                                        calculateOutputPTS(outputIndex / 2));
                 if (mBuffer.raw == NULL)
                     goto save_state;  // ugly, but efficient
                 in = mBuffer.i16;
@@ -130,7 +131,7 @@
     // fetch first buffer
     if (mBuffer.frameCount == 0) {
         mBuffer.frameCount = inFrameCount;
-        provider->getNextBuffer(&mBuffer);
+        provider->getNextBuffer(&mBuffer, mPTS);
         if (mBuffer.raw == NULL)
             return;
         // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
@@ -160,7 +161,8 @@
                 inputIndex = 0;
                 provider->releaseBuffer(&mBuffer);
                 mBuffer.frameCount = inFrameCount;
-                provider->getNextBuffer(&mBuffer);
+                provider->getNextBuffer(&mBuffer,
+                                        calculateOutputPTS(outputIndex / 2));
                 if (mBuffer.raw == NULL)
                     goto save_state;  // ugly, but efficient
                 // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
@@ -181,4 +183,3 @@
 // ----------------------------------------------------------------------------
 }
 ; // namespace android
-
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp
index 7a27b81..d373c08 100644
--- a/services/audioflinger/AudioResamplerSinc.cpp
+++ b/services/audioflinger/AudioResamplerSinc.cpp
@@ -203,7 +203,8 @@
         // buffer is empty, fetch a new one
         while (mBuffer.frameCount == 0) {
             mBuffer.frameCount = inFrameCount;
-            provider->getNextBuffer(&mBuffer);
+            provider->getNextBuffer(&mBuffer,
+                                    calculateOutputPTS(outputIndex / 2));
             if (mBuffer.raw == NULL) {
                 goto resample_exit;
             }
@@ -354,4 +355,3 @@
 
 // ----------------------------------------------------------------------------
 }; // namespace android
-