Merge "Simplify RecordTrack::stop()"
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 58e0deb..da13a7f 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -277,7 +277,7 @@
      * make it active. If set, the callback will start being called.
      * If the track was previously paused, volume is ramped up over the first mix buffer.
      */
-            void        start();
+            status_t        start();
 
     /* Stop a track.
      * In static buffer mode, the track is stopped immediately.
@@ -635,11 +635,12 @@
             void setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount);
             audio_io_handle_t getOutput_l();
 
-            status_t getPosition_l(uint32_t *position);
-
             // FIXME enum is faster than strcmp() for parameter 'from'
             status_t restoreTrack_l(const char *from);
 
+            bool     isOffloaded() const
+                { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
+
     // may be changed if IAudioTrack is re-created
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
@@ -676,7 +677,9 @@
         STATE_ACTIVE,
         STATE_STOPPED,
         STATE_PAUSED,
+        STATE_PAUSED_STOPPING,
         STATE_FLUSHED,
+        STATE_STOPPING,
     }                       mState;
 
     callback_t              mCbf;                   // callback handler for events, or NULL
@@ -694,7 +697,7 @@
     // These are private to processAudioBuffer(), and are not protected by a lock
     uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
     bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
-    int                     mObservedSequence;      // last observed value of mSequence
+    uint32_t                mObservedSequence;      // last observed value of mSequence
 
     sp<IMemory>             mSharedBuffer;
     uint32_t                mLoopPeriod;            // in frames, zero means looping is disabled
@@ -736,6 +739,7 @@
 
     sp<DeathNotifier>       mDeathNotifier;
     uint32_t                mSequence;              // incremented for each new IAudioTrack attempt
+    audio_io_handle_t       mOutput;                // cached output io handle
 };
 
 class TimedAudioTrack : public AudioTrack
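
The hunks above change AudioTrack::start() from void to status_t and add the STATE_STOPPING / STATE_PAUSED_STOPPING states used while an offloaded track drains. A minimal caller-side sketch, assuming only what this header declares; the helper name and logging policy are illustrative, not part of the patch.

    // Hypothetical helper (not in the patch): propagate the new status_t from
    // AudioTrack::start() instead of silently discarding it.
    static status_t startTrack(const sp<AudioTrack>& track) {
        status_t status = track->start();   // returned void before this change
        if (status != NO_ERROR) {
            ALOGE("AudioTrack::start() failed, status %d", status);
        }
        return status;
    }
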
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 61f7dc7..3b151ef 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -111,7 +111,7 @@
                 audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                 const audio_offload_info_t *offloadInfo = NULL) = 0;
 
-        virtual void        start() = 0;
+        virtual status_t    start() = 0;
         virtual ssize_t     write(const void* buffer, size_t size) = 0;
         virtual void        stop() = 0;
         virtual void        flush() = 0;
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index ec9f2df..912a43c 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -38,7 +38,10 @@
 
     enum {
         ALLOW_DEEP_BUFFERING = 0x01,
-        USE_OFFLOAD = 0x02
+        USE_OFFLOAD = 0x02,
+        HAS_VIDEO   = 0x1000,
+        IS_STREAMING = 0x2000
+
     };
 
     AudioPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink,
@@ -56,7 +59,7 @@
     status_t start(bool sourceAlreadyStarted = false);
 
     void pause(bool playPendingSamples = false);
-    void resume();
+    status_t resume();
 
     // Returns the timestamp of the last buffer played (in us).
     int64_t getMediaTimeUs();
@@ -104,11 +107,13 @@
     MediaBuffer *mFirstBuffer;
 
     sp<MediaPlayerBase::AudioSink> mAudioSink;
-    bool mAllowDeepBuffering;       // allow audio deep audio buffers. Helps with low power audio
-                                    // playback but implies high latency
     AwesomePlayer *mObserver;
     int64_t mPinnedTimeUs;
 
+    bool mPlaying;
+    int64_t mStartPosUs;
+    const uint32_t mCreateFlags;
+
     static void AudioCallback(int event, void *user, void *info);
     void AudioCallback(int event, void *info);
 
@@ -126,6 +131,9 @@
     uint32_t getNumFramesPendingPlayout() const;
     int64_t getOutputPlayPositionUs_l() const;
 
+    bool allowDeepBuffering() const { return (mCreateFlags & ALLOW_DEEP_BUFFERING) != 0; }
+    bool useOffload() const { return (mCreateFlags & USE_OFFLOAD) != 0; }
+
     AudioPlayer(const AudioPlayer &);
     AudioPlayer &operator=(const AudioPlayer &);
 };
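
The enum above folds the old mAllowDeepBuffering bool into a creation-flag bitmask read back through the new allowDeepBuffering()/useOffload() helpers. A sketch of how a caller might combine the bits; the decision variables and the call site are assumptions, not shown in this patch.

    // Illustrative only: canOffload, hasVideo and isStreaming are hypothetical.
    uint32_t createFlags = AudioPlayer::ALLOW_DEEP_BUFFERING;
    if (canOffload) {
        createFlags = AudioPlayer::USE_OFFLOAD;
        if (hasVideo) {
            createFlags |= AudioPlayer::HAS_VIDEO;
        }
        if (isStreaming) {
            createFlags |= AudioPlayer::IS_STREAMING;
        }
    }
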
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 0592683..b890180 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -291,6 +291,12 @@
     virtual uint32_t    getUnderrunFrames() const {
         return mCblk->u.mStreaming.mUnderrunFrames;
     }
+
+    bool        clearStreamEndDone();   // and return previous value
+
+    bool        getStreamEndDone() const;
+
+    status_t    waitStreamEndDone(const struct timespec *requested);
 };
 
 class StaticAudioTrackClientProxy : public AudioTrackClientProxy {
@@ -374,8 +380,8 @@
 
 protected:
     size_t      mAvailToClient; // estimated frames available to client prior to releaseBuffer()
-private:
     int32_t     mFlush;         // our copy of cblk->u.mStreaming.mFlush, for streaming output only
+private:
     bool        mDeferWake;     // whether another releaseBuffer() is expected soon
 };
 
@@ -405,6 +411,8 @@
     // should avoid doing a state queue poll from within framesReady().
     // FIXME Change AudioFlinger to not call framesReady() from normal mixer thread.
     virtual void        framesReadyIsCalledByMultipleThreads() { }
+
+    bool     setStreamEndDone();    // and return previous value
 };
 
 class StaticAudioTrackServerProxy : public AudioTrackServerProxy {
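
These proxy methods implement a one-bit end-of-stream handshake over the shared control block: the server sets CBLK_STREAM_END_DONE through setStreamEndDone() once the last frame has been presented, and the client blocks in waitStreamEndDone() or polls getStreamEndDone()/clearStreamEndDone(). A rough client-side sketch, assuming an existing AudioTrackClientProxy and the two-minute timeout used by AudioTrack.cpp further down.

    // Sketch (assumption): wait for end-of-stream on an offloaded track.
    struct timespec timeout;
    timeout.tv_sec = 120;          // WAIT_STREAM_END_TIMEOUT_SEC in AudioTrack.cpp
    timeout.tv_nsec = 0;
    status_t status = proxy->waitStreamEndDone(&timeout);
    if (status == NO_ERROR) {
        proxy->clearStreamEndDone();   // re-arm the flag for the next stream
    }
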
diff --git a/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp b/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
index dc360a5..176f8e9 100755
--- a/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
+++ b/libvideoeditor/lvpp/VideoEditorAudioPlayer.cpp
@@ -149,7 +149,7 @@
     mStarted = false;
 }
 
-void VideoEditorAudioPlayer::resume() {
+status_t VideoEditorAudioPlayer::resume() {
     ALOGV("resume");
 
     AudioMixSettings audioMixSettings;
@@ -180,6 +180,7 @@
     } else {
         mAudioTrack->start();
     }
+    return OK;
 }
 
 status_t VideoEditorAudioPlayer::seekTo(int64_t time_us) {
diff --git a/libvideoeditor/lvpp/VideoEditorAudioPlayer.h b/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
index d2e652d..2caf5e8 100755
--- a/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
+++ b/libvideoeditor/lvpp/VideoEditorAudioPlayer.h
@@ -58,7 +58,7 @@
 
     status_t start(bool sourceAlreadyStarted = false);
     void pause(bool playPendingSamples = false);
-    void resume();
+    status_t resume();
     status_t seekTo(int64_t time_us);
     bool isSeeking();
     bool reachedEOS(status_t *finalStatus);
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.cpp b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
index 3384e34..5aeba4f 100755
--- a/libvideoeditor/lvpp/VideoEditorPlayer.cpp
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.cpp
@@ -468,14 +468,18 @@
     return NO_ERROR;
 }
 
-void VideoEditorPlayer::VeAudioOutput::start() {
+status_t VideoEditorPlayer::VeAudioOutput::start() {
 
     ALOGV("start");
     if (mTrack != 0) {
         mTrack->setVolume(mLeftVolume, mRightVolume);
-        mTrack->start();
-        mTrack->getPosition(&mNumFramesWritten);
+        status_t status = mTrack->start();
+        if (status == NO_ERROR) {
+            mTrack->getPosition(&mNumFramesWritten);
+        }
+        return status;
     }
+    return NO_INIT;
 }
 
 void VideoEditorPlayer::VeAudioOutput::snoopWrite(
diff --git a/libvideoeditor/lvpp/VideoEditorPlayer.h b/libvideoeditor/lvpp/VideoEditorPlayer.h
index 69323c3..ab6d731 100755
--- a/libvideoeditor/lvpp/VideoEditorPlayer.h
+++ b/libvideoeditor/lvpp/VideoEditorPlayer.h
@@ -55,7 +55,7 @@
                 AudioCallback cb, void *cookie, audio_output_flags_t flags,
                 const audio_offload_info_t *offloadInfo);
 
-        virtual void            start();
+        virtual status_t        start();
         virtual ssize_t         write(const void* buffer, size_t size);
         virtual void            stop();
         virtual void            flush();
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 7b6b38d..3653b7f 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -27,7 +27,9 @@
 #include <private/media/AudioTrackShared.h>
 #include <media/IAudioFlinger.h>
 
-#define WAIT_PERIOD_MS          10
+#define WAIT_PERIOD_MS                  10
+#define WAIT_STREAM_END_TIMEOUT_SEC     120
+
 
 namespace android {
 // ---------------------------------------------------------------------------
@@ -141,6 +143,7 @@
         // Otherwise the callback thread will never exit.
         stop();
         if (mAudioTrackThread != 0) {
+            mProxy->interrupt();
             mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
             mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
@@ -224,6 +227,8 @@
         return INVALID_OPERATION;
     }
 
+    mOutput = 0;
+
     // handle default values first.
     if (streamType == AUDIO_STREAM_DEFAULT) {
         streamType = AUDIO_STREAM_MUSIC;
@@ -259,7 +264,12 @@
     }
 
     // force direct flag if format is not linear PCM
-    if (!audio_is_linear_pcm(format)) {
+    // or offload was requested
+    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
+            || !audio_is_linear_pcm(format)) {
+        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
+                    ? "Offload request, forcing to Direct Output"
+                    : "Not linear PCM, forcing to Direct Output");
         flags = (audio_output_flags_t)
                 // FIXME why can't we allow direct AND fast?
                 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
@@ -325,9 +335,14 @@
 
     if (status != NO_ERROR) {
         if (mAudioTrackThread != 0) {
-            mAudioTrackThread->requestExit();
+            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
+            mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
         }
+        // Use of direct and offloaded output streams is ref-counted by the audio
+        // policy manager. getOutput() was called above and opened an output stream,
+        // so we need to release it here.
+        AudioSystem::releaseOutput(output);
         return status;
     }
 
@@ -346,23 +361,29 @@
     mSequence = 1;
     mObservedSequence = mSequence;
     mInUnderrun = false;
+    mOutput = output;
 
     return NO_ERROR;
 }
 
 // -------------------------------------------------------------------------
 
-void AudioTrack::start()
+status_t AudioTrack::start()
 {
     AutoMutex lock(mLock);
+
     if (mState == STATE_ACTIVE) {
-        return;
+        return INVALID_OPERATION;
     }
 
     mInUnderrun = true;
 
     State previousState = mState;
-    mState = STATE_ACTIVE;
+    if (previousState == STATE_PAUSED_STOPPING) {
+        mState = STATE_STOPPING;
+    } else {
+        mState = STATE_ACTIVE;
+    }
     if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
         // reset current position as seen by client to 0
         mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
@@ -372,7 +393,11 @@
 
     sp<AudioTrackThread> t = mAudioTrackThread;
     if (t != 0) {
-        t->resume();
+        if (previousState == STATE_STOPPING) {
+            mProxy->interrupt();
+        } else {
+            t->resume();
+        }
     } else {
         mPreviousPriority = getpriority(PRIO_PROCESS, 0);
         get_sched_policy(0, &mPreviousSchedulingGroup);
@@ -394,14 +419,16 @@
         ALOGE("start() status %d", status);
         mState = previousState;
         if (t != 0) {
-            t->pause();
+            if (previousState != STATE_STOPPING) {
+                t->pause();
+            }
         } else {
             setpriority(PRIO_PROCESS, 0, mPreviousPriority);
             set_sched_policy(0, mPreviousSchedulingGroup);
         }
     }
 
-    // FIXME discarding status
+    return status;
 }
 
 void AudioTrack::stop()
@@ -412,7 +439,12 @@
         return;
     }
 
-    mState = STATE_STOPPED;
+    if (isOffloaded()) {
+        mState = STATE_STOPPING;
+    } else {
+        mState = STATE_STOPPED;
+    }
+
     mProxy->interrupt();
     mAudioTrack->stop();
     // the playback head position will reset to 0, so if a marker is set, we need
@@ -426,9 +458,12 @@
         flush_l();
     }
 #endif
+
     sp<AudioTrackThread> t = mAudioTrackThread;
     if (t != 0) {
-        t->pause();
+        if (!isOffloaded()) {
+            t->pause();
+        }
     } else {
         setpriority(PRIO_PROCESS, 0, mPreviousPriority);
         set_sched_policy(0, mPreviousSchedulingGroup);
@@ -461,8 +496,12 @@
     mMarkerPosition = 0;
     mMarkerReached = false;
     mUpdatePeriod = 0;
+    mRefreshRemaining = true;
 
     mState = STATE_FLUSHED;
+    if (isOffloaded()) {
+        mProxy->interrupt();
+    }
     mProxy->flush();
     mAudioTrack->flush();
 }
@@ -470,10 +509,13 @@
 void AudioTrack::pause()
 {
     AutoMutex lock(mLock);
-    if (mState != STATE_ACTIVE) {
+    if (mState == STATE_ACTIVE) {
+        mState = STATE_PAUSED;
+    } else if (mState == STATE_STOPPING) {
+        mState = STATE_PAUSED_STOPPING;
+    } else {
         return;
     }
-    mState = STATE_PAUSED;
     mProxy->interrupt();
     mAudioTrack->pause();
 }
@@ -520,7 +562,7 @@
 
 status_t AudioTrack::setSampleRate(uint32_t rate)
 {
-    if (mIsTimed) {
+    if (mIsTimed || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
@@ -552,7 +594,7 @@
 
 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
 {
-    if (mSharedBuffer == 0 || mIsTimed) {
+    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
@@ -586,7 +628,7 @@
 status_t AudioTrack::setMarkerPosition(uint32_t marker)
 {
     // The only purpose of setting marker position is to get a callback
-    if (mCbf == NULL) {
+    if (mCbf == NULL || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
@@ -599,6 +641,9 @@
 
 status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
 {
+    if (isOffloaded()) {
+        return INVALID_OPERATION;
+    }
     if (marker == NULL) {
         return BAD_VALUE;
     }
@@ -612,19 +657,21 @@
 status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
 {
     // The only purpose of setting position update period is to get a callback
-    if (mCbf == NULL) {
+    if (mCbf == NULL || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
     AutoMutex lock(mLock);
     mNewPosition = mProxy->getPosition() + updatePeriod;
     mUpdatePeriod = updatePeriod;
-
     return NO_ERROR;
 }
 
 status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
 {
+    if (isOffloaded()) {
+        return INVALID_OPERATION;
+    }
     if (updatePeriod == NULL) {
         return BAD_VALUE;
     }
@@ -637,7 +684,7 @@
 
 status_t AudioTrack::setPosition(uint32_t position)
 {
-    if (mSharedBuffer == 0 || mIsTimed) {
+    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
         return INVALID_OPERATION;
     }
     if (position > mFrameCount) {
@@ -670,10 +717,19 @@
     }
 
     AutoMutex lock(mLock);
-    // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
-    *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
-            mProxy->getPosition();
+    if (isOffloaded()) {
+        uint32_t dspFrames = 0;
 
+        if (mOutput != 0) {
+            uint32_t halFrames;
+            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
+        }
+        *position = dspFrames;
+    } else {
+        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
+        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
+                mProxy->getPosition();
+    }
     return NO_ERROR;
 }
 
@@ -693,7 +749,7 @@
 
 status_t AudioTrack::reload()
 {
-    if (mSharedBuffer == 0 || mIsTimed) {
+    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
         return INVALID_OPERATION;
     }
 
@@ -713,14 +769,18 @@
 audio_io_handle_t AudioTrack::getOutput()
 {
     AutoMutex lock(mLock);
-    return getOutput_l();
+    return mOutput;
 }
 
 // must be called with mLock held
 audio_io_handle_t AudioTrack::getOutput_l()
 {
-    return AudioSystem::getOutput(mStreamType,
-            mSampleRate, mFormat, mChannelMask, mFlags);
+    if (mOutput) {
+        return mOutput;
+    } else {
+        return AudioSystem::getOutput(mStreamType,
+                                      mSampleRate, mFormat, mChannelMask, mFlags);
+    }
 }
 
 status_t AudioTrack::attachAuxEffect(int effectId)
@@ -791,7 +851,9 @@
             }
             frameCount = afFrameCount;
         }
-
+        if (mNotificationFramesAct != frameCount) {
+            mNotificationFramesAct = frameCount;
+        }
     } else if (sharedBuffer != 0) {
 
         // Ensure that buffer alignment matches channel count
@@ -875,6 +937,10 @@
         }
     }
 
+    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
+    }
+
     sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                       sampleRate,
                                                       // AudioFlinger only sees 16-bit PCM
@@ -937,6 +1003,17 @@
             }
         }
     }
+    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
+            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
+        } else {
+            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
+            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+            mFlags = flags;
+            return NO_INIT;
+        }
+    }
+
     mRefreshRemaining = true;
 
     // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
@@ -1040,6 +1117,9 @@
                 if (newSequence == oldSequence) {
                     status = restoreTrack_l("obtainBuffer");
                     if (status != NO_ERROR) {
+                        buffer.mFrameCount = 0;
+                        buffer.mRaw = NULL;
+                        buffer.mNonContig = 0;
                         break;
                     }
                 }
@@ -1050,6 +1130,14 @@
             proxy = mProxy;
             iMem = mCblkMemory;
 
+            if (mState == STATE_STOPPING) {
+                status = -EINTR;
+                buffer.mFrameCount = 0;
+                buffer.mRaw = NULL;
+                buffer.mNonContig = 0;
+                break;
+            }
+
             // Non-blocking if track is stopped or paused
             if (mState != STATE_ACTIVE) {
                 requested = &ClientProxy::kNonBlocking;
@@ -1255,12 +1343,18 @@
 
     // Check for track invalidation
     if (flags & CBLK_INVALID) {
-        (void) restoreTrack_l("processAudioBuffer");
-        mLock.unlock();
-        // Run again immediately, but with a new IAudioTrack
-        return 0;
+        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
+        // AudioSystem cache. We should not exit here but after calling the callback so
+        // that the upper layers can recreate the track
+        if (!isOffloaded() || (mSequence == mObservedSequence)) {
+            status_t status = restoreTrack_l("processAudioBuffer");
+            mLock.unlock();
+            // Run again immediately, but with a new IAudioTrack
+            return 0;
+        }
     }
 
+    bool waitStreamEnd = mState == STATE_STOPPING;
     bool active = mState == STATE_ACTIVE;
 
     // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
@@ -1314,7 +1408,7 @@
         mRetryOnPartialBuffer = false;
     }
     size_t misalignment = mProxy->getMisalignment();
-    int32_t sequence = mSequence;
+    uint32_t sequence = mSequence;
 
     // These fields don't need to be cached, because they are assigned only by set():
     //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
@@ -1322,6 +1416,38 @@
 
     mLock.unlock();
 
+    if (waitStreamEnd) {
+        AutoMutex lock(mLock);
+
+        sp<AudioTrackClientProxy> proxy = mProxy;
+        sp<IMemory> iMem = mCblkMemory;
+
+        struct timespec timeout;
+        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
+        timeout.tv_nsec = 0;
+
+        mLock.unlock();
+        status_t status = mProxy->waitStreamEndDone(&timeout);
+        mLock.lock();
+        switch (status) {
+        case NO_ERROR:
+        case DEAD_OBJECT:
+        case TIMED_OUT:
+            mLock.unlock();
+            mCbf(EVENT_STREAM_END, mUserData, NULL);
+            mLock.lock();
+            if (mState == STATE_STOPPING) {
+                mState = STATE_STOPPED;
+                if (status != DEAD_OBJECT) {
+                    return NS_INACTIVE;
+                }
+            }
+            return 0;
+        default:
+            return 0;
+        }
+    }
+
     // perform callbacks while unlocked
     if (newUnderrun) {
         mCbf(EVENT_UNDERRUN, mUserData, NULL);
@@ -1343,9 +1469,14 @@
         newPosition += updatePeriod;
         newPosCount--;
     }
+
     if (mObservedSequence != sequence) {
         mObservedSequence = sequence;
         mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
+        // for offloaded tracks, just wait for the upper layers to recreate the track
+        if (isOffloaded()) {
+            return NS_INACTIVE;
+        }
     }
 
     // if inactive, then don't run me again until re-started
@@ -1404,10 +1535,11 @@
                 "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
         requested = &ClientProxy::kNonBlocking;
         size_t avail = audioBuffer.frameCount + nonContig;
-        ALOGV("obtainBuffer(%u) returned %u = %u + %u",
-                mRemainingFrames, avail, audioBuffer.frameCount, nonContig);
+        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
+                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
         if (err != NO_ERROR) {
-            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR) {
+            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
+                    (isOffloaded() && (err == DEAD_OBJECT))) {
                 return 0;
             }
             ALOGE("Error %d obtaining an audio buffer, giving up.", err);
@@ -1500,7 +1632,8 @@
 
 status_t AudioTrack::restoreTrack_l(const char *from)
 {
-    ALOGW("dead IAudioTrack, creating a new one from %s()", from);
+    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
+          isOffloaded() ? "Offloaded" : "PCM", from);
     ++mSequence;
     status_t result;
 
@@ -1508,6 +1641,14 @@
     // output parameters in getOutput_l() and createTrack_l()
     AudioSystem::clearAudioConfigCache();
 
+    if (isOffloaded()) {
+        return DEAD_OBJECT;
+    }
+
+    // force a new output query from the audio policy manager
+    mOutput = 0;
+    audio_io_handle_t output = getOutput_l();
+
     // if the new IAudioTrack is created, createTrack_l() will modify the
     // following member variables: mAudioTrack, mCblkMemory and mCblk.
     // It will also delete the strong references on previous IAudioTrack and IMemory
@@ -1520,7 +1661,7 @@
                            mReqFrameCount,  // so that frame count never goes down
                            mFlags,
                            mSharedBuffer,
-                           getOutput_l(),
+                           output,
                            position /*epoch*/);
 
     if (result == NO_ERROR) {
@@ -1549,6 +1690,10 @@
         }
     }
     if (result != NO_ERROR) {
+        // Use of direct and offloaded output streams is ref-counted by the audio
+        // policy manager. getOutput() was called above and opened an output stream,
+        // so we need to release it here.
+        AudioSystem::releaseOutput(output);
         ALOGW("restoreTrack_l() failed status %d", result);
         mState = STATE_STOPPED;
     }
@@ -1568,7 +1713,11 @@
 
 String8 AudioTrack::getParameters(const String8& keys)
 {
-    return String8::empty();
+    if (mOutput) {
+        return AudioSystem::getParameters(mOutput, keys);
+    } else {
+        return String8::empty();
+    }
 }
 
 status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
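
For offloaded tracks, stop() now parks the track in STATE_STOPPING, processAudioBuffer() waits for the stream-end handshake and delivers EVENT_STREAM_END, and an invalidated IAudioTrack surfaces as EVENT_NEW_IAUDIOTRACK so the upper layer can rebuild the track. A sketch of a client callback reacting to these events; the MyClient type and its methods are assumptions.

    // Sketch (assumption): an AudioTrack callback_t handling the events that
    // this change delivers for offloaded playback.
    static void audioTrackCallback(int event, void* user, void* info) {
        MyClient* client = static_cast<MyClient*>(user);
        switch (event) {
        case AudioTrack::EVENT_MORE_DATA:
            client->fillBuffer(static_cast<AudioTrack::Buffer*>(info));
            break;
        case AudioTrack::EVENT_STREAM_END:
            client->onPlaybackComplete();        // DSP has presented all queued data
            break;
        case AudioTrack::EVENT_NEW_IAUDIOTRACK:
            client->recreateOffloadedTrack();    // tear down and rebuild the track
            break;
        default:
            break;
        }
    }
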
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 55bf175..aa45a2f 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -200,7 +200,7 @@
             ts = &remaining;
             break;
         default:
-            LOG_FATAL("%s timeout=%d", timeout);
+            LOG_FATAL("obtainBuffer() timeout=%d", timeout);
             ts = NULL;
             break;
         }
@@ -259,8 +259,9 @@
         requested = &kNonBlocking;
     }
     if (measure) {
-        ALOGV("requested %d.%03d elapsed %d.%03d", requested->tv_sec, requested->tv_nsec / 1000000,
-                total.tv_sec, total.tv_nsec / 1000000);
+        ALOGV("requested %ld.%03ld elapsed %ld.%03ld",
+              requested->tv_sec, requested->tv_nsec / 1000000,
+              total.tv_sec, total.tv_nsec / 1000000);
     }
     return status;
 }
@@ -322,6 +323,119 @@
     mCblk->u.mStreaming.mFlush++;
 }
 
+bool AudioTrackClientProxy::clearStreamEndDone() {
+    return (android_atomic_and(~CBLK_STREAM_END_DONE, &mCblk->flags) & CBLK_STREAM_END_DONE) != 0;
+}
+
+bool AudioTrackClientProxy::getStreamEndDone() const {
+    return (mCblk->flags & CBLK_STREAM_END_DONE) != 0;
+}
+
+status_t AudioTrackClientProxy::waitStreamEndDone(const struct timespec *requested)
+{
+    struct timespec total;          // total elapsed time spent waiting
+    total.tv_sec = 0;
+    total.tv_nsec = 0;
+    audio_track_cblk_t* cblk = mCblk;
+    status_t status;
+    enum {
+        TIMEOUT_ZERO,       // requested == NULL || *requested == 0
+        TIMEOUT_INFINITE,   // *requested == infinity
+        TIMEOUT_FINITE,     // 0 < *requested < infinity
+        TIMEOUT_CONTINUE,   // additional chances after TIMEOUT_FINITE
+    } timeout;
+    if (requested == NULL) {
+        timeout = TIMEOUT_ZERO;
+    } else if (requested->tv_sec == 0 && requested->tv_nsec == 0) {
+        timeout = TIMEOUT_ZERO;
+    } else if (requested->tv_sec == INT_MAX) {
+        timeout = TIMEOUT_INFINITE;
+    } else {
+        timeout = TIMEOUT_FINITE;
+    }
+    for (;;) {
+        int32_t flags = android_atomic_and(~(CBLK_INTERRUPT|CBLK_STREAM_END_DONE), &cblk->flags);
+        // check for track invalidation by server, or server death detection
+        if (flags & CBLK_INVALID) {
+            ALOGV("Track invalidated");
+            status = DEAD_OBJECT;
+            goto end;
+        }
+        if (flags & CBLK_STREAM_END_DONE) {
+            ALOGV("stream end received");
+            status = NO_ERROR;
+            goto end;
+        }
+        // check for waitStreamEndDone() interrupted by client
+        if (flags & CBLK_INTERRUPT) {
+            ALOGV("waitStreamEndDone() interrupted by client");
+            status = -EINTR;
+            goto end;
+        }
+        struct timespec remaining;
+        const struct timespec *ts;
+        switch (timeout) {
+        case TIMEOUT_ZERO:
+            status = WOULD_BLOCK;
+            goto end;
+        case TIMEOUT_INFINITE:
+            ts = NULL;
+            break;
+        case TIMEOUT_FINITE:
+            timeout = TIMEOUT_CONTINUE;
+            if (MAX_SEC == 0) {
+                ts = requested;
+                break;
+            }
+            // fall through
+        case TIMEOUT_CONTINUE:
+            // FIXME we do not retry if requested < 10ms? needs documentation on this state machine
+            if (requested->tv_sec < total.tv_sec ||
+                    (requested->tv_sec == total.tv_sec && requested->tv_nsec <= total.tv_nsec)) {
+                status = TIMED_OUT;
+                goto end;
+            }
+            remaining.tv_sec = requested->tv_sec - total.tv_sec;
+            if ((remaining.tv_nsec = requested->tv_nsec - total.tv_nsec) < 0) {
+                remaining.tv_nsec += 1000000000;
+                remaining.tv_sec--;
+            }
+            if (0 < MAX_SEC && MAX_SEC < remaining.tv_sec) {
+                remaining.tv_sec = MAX_SEC;
+                remaining.tv_nsec = 0;
+            }
+            ts = &remaining;
+            break;
+        default:
+            LOG_FATAL("waitStreamEndDone() timeout=%d", timeout);
+            ts = NULL;
+            break;
+        }
+        int32_t old = android_atomic_and(~CBLK_FUTEX_WAKE, &cblk->mFutex);
+        if (!(old & CBLK_FUTEX_WAKE)) {
+            int ret = __futex_syscall4(&cblk->mFutex,
+                    mClientInServer ? FUTEX_WAIT_PRIVATE : FUTEX_WAIT, old & ~CBLK_FUTEX_WAKE, ts);
+            switch (ret) {
+            case 0:             // normal wakeup by server, or by binderDied()
+            case -EWOULDBLOCK:  // benign race condition with server
+            case -EINTR:        // wait was interrupted by signal or other spurious wakeup
+            case -ETIMEDOUT:    // time-out expired
+                break;
+            default:
+                ALOGE("%s unexpected error %d", __func__, ret);
+                status = -ret;
+                goto end;
+            }
+        }
+    }
+
+end:
+    if (requested == NULL) {
+        requested = &kNonBlocking;
+    }
+    return status;
+}
+
 // ---------------------------------------------------------------------------
 
 StaticAudioTrackClientProxy::StaticAudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers,
@@ -385,13 +501,19 @@
     if (mIsOut) {
         int32_t flush = cblk->u.mStreaming.mFlush;
         rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
+        front = cblk->u.mStreaming.mFront;
         if (flush != mFlush) {
-            front = rear;
             mFlush = flush;
             // effectively obtain then release whatever is in the buffer
             android_atomic_release_store(rear, &cblk->u.mStreaming.mFront);
-        } else {
-            front = cblk->u.mStreaming.mFront;
+            if (front != rear) {
+                int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
+                if (!(old & CBLK_FUTEX_WAKE)) {
+                    (void) __futex_syscall3(&cblk->mFutex,
+                            mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
+                }
+            }
+            front = rear;
         }
     } else {
         front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
@@ -509,6 +631,11 @@
         return 0;
     }
     audio_track_cblk_t* cblk = mCblk;
+
+    int32_t flush = cblk->u.mStreaming.mFlush;
+    if (flush != mFlush) {
+        return mFrameCount;
+    }
     // the acquire might not be necessary since not doing a subsequent read
     int32_t rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
     ssize_t filled = rear - cblk->u.mStreaming.mFront;
@@ -524,6 +651,16 @@
     return filled;
 }
 
+bool AudioTrackServerProxy::setStreamEndDone() {
+    bool old =
+            (android_atomic_or(CBLK_STREAM_END_DONE, &mCblk->flags) & CBLK_STREAM_END_DONE) != 0;
+    if (!old) {
+        (void) __futex_syscall3(&mCblk->mFutex, mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE,
+                1);
+    }
+    return old;
+}
+
 // ---------------------------------------------------------------------------
 
 StaticAudioTrackServerProxy::StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers,
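
waitStreamEndDone() reuses obtainBuffer()'s timeout bookkeeping: each pass subtracts the time already waited from the requested timeout, borrowing a second when the nanosecond field underflows, and clamps the result to MAX_SEC. The borrow in isolation, as plain C++ detached from the proxy classes:

    #include <time.h>

    // remaining = requested - total with nanosecond borrow; returns false when the
    // budget is exhausted (the TIMED_OUT case in the proxy code above).
    static bool remainingTime(const struct timespec& requested,
                              const struct timespec& total,
                              struct timespec* remaining) {
        if (requested.tv_sec < total.tv_sec ||
                (requested.tv_sec == total.tv_sec && requested.tv_nsec <= total.tv_nsec)) {
            return false;
        }
        remaining->tv_sec = requested.tv_sec - total.tv_sec;
        remaining->tv_nsec = requested.tv_nsec - total.tv_nsec;
        if (remaining->tv_nsec < 0) {
            remaining->tv_nsec += 1000000000;
            remaining->tv_sec--;
        }
        return true;
    }
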
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 2e2c0cc..c670936 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -372,7 +372,6 @@
         audio_channel_mask_t channelMask = pChannelMask != NULL ?
                 *pChannelMask : (audio_channel_mask_t)0;
         uint32_t latency = pLatencyMs != NULL ? *pLatencyMs : 0;
-
         data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
         data.writeInt32(module);
         data.writeInt32(devices);
@@ -381,6 +380,12 @@
         data.writeInt32(channelMask);
         data.writeInt32(latency);
         data.writeInt32((int32_t) flags);
+        if (offloadInfo == NULL) {
+            data.writeInt32(0);
+        } else {
+            data.writeInt32(1);
+            data.write(offloadInfo, sizeof(audio_offload_info_t));
+        }
         remote()->transact(OPEN_OUTPUT, data, &reply);
         audio_io_handle_t output = (audio_io_handle_t) reply.readInt32();
         ALOGV("openOutput() returned output, %d", output);
@@ -881,13 +886,19 @@
             audio_channel_mask_t channelMask = (audio_channel_mask_t)data.readInt32();
             uint32_t latency = data.readInt32();
             audio_output_flags_t flags = (audio_output_flags_t) data.readInt32();
+            bool hasOffloadInfo = data.readInt32() != 0;
+            audio_offload_info_t offloadInfo;
+            if (hasOffloadInfo) {
+                data.read(&offloadInfo, sizeof(audio_offload_info_t));
+            }
             audio_io_handle_t output = openOutput(module,
                                                  &devices,
                                                  &samplingRate,
                                                  &format,
                                                  &channelMask,
                                                  &latency,
-                                                 flags);
+                                                 flags,
+                                                 hasOffloadInfo ? &offloadInfo : NULL);
             ALOGV("OPEN_OUTPUT output, %p", output);
             reply->writeInt32((int32_t) output);
             reply->writeInt32(devices);
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 57de58f..4be3c09 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -137,6 +137,12 @@
         data.writeInt32(static_cast <uint32_t>(format));
         data.writeInt32(channelMask);
         data.writeInt32(static_cast <uint32_t>(flags));
+        if (offloadInfo == NULL) {
+            data.writeInt32(0);
+        } else {
+            data.writeInt32(1);
+            data.write(offloadInfo, sizeof(audio_offload_info_t));
+        }
         remote()->transact(GET_OUTPUT, data, &reply);
         return static_cast <audio_io_handle_t> (reply.readInt32());
     }
@@ -379,9 +385,12 @@
 
     virtual bool isOffloadSupported(const audio_offload_info_t& info)
     {
-        // stub function
-        return false;
-    }
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.write(&info, sizeof(audio_offload_info_t));
+        remote()->transact(IS_OFFLOAD_SUPPORTED, data, &reply);
+        return reply.readInt32();
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -450,12 +458,17 @@
             audio_channel_mask_t channelMask = data.readInt32();
             audio_output_flags_t flags =
                     static_cast <audio_output_flags_t>(data.readInt32());
-
+            bool hasOffloadInfo = data.readInt32() != 0;
+            audio_offload_info_t offloadInfo;
+            if (hasOffloadInfo) {
+                data.read(&offloadInfo, sizeof(audio_offload_info_t));
+            }
             audio_io_handle_t output = getOutput(stream,
                                                  samplingRate,
                                                  format,
                                                  channelMask,
-                                                 flags);
+                                                 flags,
+                                                 hasOffloadInfo ? &offloadInfo : NULL);
             reply->writeInt32(static_cast <int>(output));
             return NO_ERROR;
         } break;
@@ -662,6 +675,15 @@
             return status;
         }
 
+        case IS_OFFLOAD_SUPPORTED: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_offload_info_t info;
+            data.read(&info, sizeof(audio_offload_info_t));
+            bool isSupported = isOffloadSupported(info);
+            reply->writeInt32(isSupported);
+            return NO_ERROR;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
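
isOffloadSupported() is now a real binder call (IS_OFFLOAD_SUPPORTED) instead of a stub. A sketch of how a caller might fill the descriptor before querying the policy service; the AudioSystem::isOffloadSupported() wrapper and the concrete field values are assumptions, not part of this hunk.

    // Sketch (assumption): describe a 128 kbps MP3 stream and ask whether the
    // platform can offload it before opening an offloaded sink.
    audio_offload_info_t info = AUDIO_INFO_INITIALIZER;
    info.sample_rate = 44100;
    info.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    info.format = AUDIO_FORMAT_MP3;
    info.stream_type = AUDIO_STREAM_MUSIC;
    info.bit_rate = 128000;
    info.duration_us = 240 * 1000000LL;
    info.has_video = false;
    info.is_streaming = false;
    bool canOffload = AudioSystem::isOffloadSupported(info);
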
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index afde373..8833bd7 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -53,6 +53,8 @@
 #include <media/AudioTrack.h>
 #include <media/MemoryLeakTrackUtil.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/AudioPlayer.h>
+#include <media/stagefright/foundation/ADebug.h>
 
 #include <system/audio.h>
 
@@ -1381,6 +1383,45 @@
     return OK;
 }
 
+status_t MediaPlayerService::AudioOutput::setParameters(const String8& keyValuePairs)
+{
+    if (mTrack == 0) return NO_INIT;
+    return mTrack->setParameters(keyValuePairs);
+}
+
+String8 MediaPlayerService::AudioOutput::getParameters(const String8& keys)
+{
+    if (mTrack == 0) return String8::empty();
+    return mTrack->getParameters(keys);
+}
+
+void MediaPlayerService::AudioOutput::deleteRecycledTrack()
+{
+    ALOGV("deleteRecycledTrack");
+
+    if (mRecycledTrack != 0) {
+
+        if (mCallbackData != NULL) {
+            mCallbackData->setOutput(NULL);
+            mCallbackData->endTrackSwitch();
+        }
+
+        if ((mRecycledTrack->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) {
+            mRecycledTrack->flush();
+        }
+        // An offloaded track isn't flushed because STREAM_END is reported
+        // slightly early to leave time for the gapless track switch. This
+        // means that if we decide not to recycle the track, a small amount
+        // of residual data may still be playing; we leave AudioFlinger to
+        // drain the track.
+
+        mRecycledTrack.clear();
+        delete mCallbackData;
+        mCallbackData = NULL;
+        close();
+    }
+}
+
 status_t MediaPlayerService::AudioOutput::open(
         uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
         audio_format_t format, int bufferCount,
@@ -1397,20 +1438,34 @@
         bufferCount = mMinBufferCount;
 
     }
-    ALOGV("open(%u, %d, 0x%x, %d, %d, %d)", sampleRate, channelCount, channelMask,
-            format, bufferCount, mSessionId);
+    ALOGV("open(%u, %d, 0x%x, 0x%x, %d, %d 0x%x)", sampleRate, channelCount, channelMask,
+                format, bufferCount, mSessionId, flags);
     uint32_t afSampleRate;
     size_t afFrameCount;
     uint32_t frameCount;
 
-    if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR) {
-        return NO_INIT;
-    }
-    if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) != NO_ERROR) {
-        return NO_INIT;
+    // offloading is only supported in callback mode for now.
+    // offloadInfo must be present if offload flag is set
+    if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) &&
+            ((cb == NULL) || (offloadInfo == NULL))) {
+        return BAD_VALUE;
     }
 
-    frameCount = (sampleRate*afFrameCount*bufferCount)/afSampleRate;
+    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+        frameCount = 0; // AudioTrack will get frame count from AudioFlinger
+    } else {
+        uint32_t afSampleRate;
+        size_t afFrameCount;
+
+        if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR) {
+            return NO_INIT;
+        }
+        if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) != NO_ERROR) {
+            return NO_INIT;
+        }
+
+        frameCount = (sampleRate*afFrameCount*bufferCount)/afSampleRate;
+    }
 
     if (channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER) {
         channelMask = audio_channel_out_mask_from_count(channelCount);
@@ -1420,65 +1475,108 @@
         }
     }
 
-    sp<AudioTrack> t;
-    CallbackData *newcbd = NULL;
-    if (mCallback != NULL) {
-        newcbd = new CallbackData(this);
-        t = new AudioTrack(
-                mStreamType,
-                sampleRate,
-                format,
-                channelMask,
-                frameCount,
-                flags,
-                CallbackWrapper,
-                newcbd,
-                0,  // notification frames
-                mSessionId);
-    } else {
-        t = new AudioTrack(
-                mStreamType,
-                sampleRate,
-                format,
-                channelMask,
-                frameCount,
-                flags,
-                NULL,
-                NULL,
-                0,
-                mSessionId);
-    }
-
-    if ((t == 0) || (t->initCheck() != NO_ERROR)) {
-        ALOGE("Unable to create audio track");
-        delete newcbd;
-        return NO_INIT;
-    }
-
+    // Check whether we can recycle the track
+    bool reuse = false;
+    bool bothOffloaded = false;
 
     if (mRecycledTrack != 0) {
-        // check if the existing track can be reused as-is, or if a new track needs to be created.
+        // check whether we are switching between two offloaded tracks
+        bothOffloaded = (flags & mRecycledTrack->getFlags()
+                                & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0;
 
-        bool reuse = true;
+        // check if the existing track can be reused as-is, or if a new track needs to be created.
+        reuse = true;
+
         if ((mCallbackData == NULL && mCallback != NULL) ||
                 (mCallbackData != NULL && mCallback == NULL)) {
             // recycled track uses callbacks but the caller wants to use writes, or vice versa
             ALOGV("can't chain callback and write");
             reuse = false;
         } else if ((mRecycledTrack->getSampleRate() != sampleRate) ||
-                (mRecycledTrack->channelCount() != channelCount) ||
-                (mRecycledTrack->frameCount() != t->frameCount())) {
-            ALOGV("samplerate, channelcount or framecount differ: %d/%d Hz, %d/%d ch, %d/%d frames",
+                (mRecycledTrack->channelCount() != (uint32_t)channelCount) ) {
+            ALOGV("samplerate, channelcount differ: %u/%u Hz, %u/%d ch",
                   mRecycledTrack->getSampleRate(), sampleRate,
-                  mRecycledTrack->channelCount(), channelCount,
-                  mRecycledTrack->frameCount(), t->frameCount());
+                  mRecycledTrack->channelCount(), channelCount);
             reuse = false;
         } else if (flags != mFlags) {
             ALOGV("output flags differ %08x/%08x", flags, mFlags);
             reuse = false;
+        } else if (mRecycledTrack->format() != format) {
+            reuse = false;
         }
+    } else {
+        ALOGV("no track available to recycle");
+    }
+
+    ALOGV_IF(bothOffloaded, "both tracks offloaded");
+
+    // If we can't recycle and both tracks are offloaded
+    // we must close the previous output before opening a new one
+    if (bothOffloaded && !reuse) {
+        ALOGV("both offloaded and not recycling");
+        deleteRecycledTrack();
+    }
+
+    sp<AudioTrack> t;
+    CallbackData *newcbd = NULL;
+
+    // We don't attempt to create a new track if we are recycling an
+    // offloaded track. But if we are recycling a non-offloaded track, or
+    // switching between an offloaded and a non-offloaded track, we create
+    // the new track in advance so that we can read additional stream info.
+
+    if (!(reuse && bothOffloaded)) {
+        ALOGV("creating new AudioTrack");
+
+        if (mCallback != NULL) {
+            newcbd = new CallbackData(this);
+            t = new AudioTrack(
+                    mStreamType,
+                    sampleRate,
+                    format,
+                    channelMask,
+                    frameCount,
+                    flags,
+                    CallbackWrapper,
+                    newcbd,
+                    0,  // notification frames
+                    mSessionId,
+                    AudioTrack::TRANSFER_CALLBACK,
+                    offloadInfo);
+        } else {
+            t = new AudioTrack(
+                    mStreamType,
+                    sampleRate,
+                    format,
+                    channelMask,
+                    frameCount,
+                    flags,
+                    NULL,
+                    NULL,
+                    0,
+                    mSessionId);
+        }
+
+        if ((t == 0) || (t->initCheck() != NO_ERROR)) {
+            ALOGE("Unable to create audio track");
+            delete newcbd;
+            return NO_INIT;
+        }
+    }
+
+    if (reuse) {
+        CHECK(mRecycledTrack != NULL);
+
+        if (!bothOffloaded) {
+            if (mRecycledTrack->frameCount() != t->frameCount()) {
+                ALOGV("framecount differs: %u/%u frames",
+                      mRecycledTrack->frameCount(), t->frameCount());
+                reuse = false;
+            }
+        }
+
         if (reuse) {
-            ALOGV("chaining to next output");
+            ALOGV("chaining to next output and recycling track");
             close();
             mTrack = mRecycledTrack;
             mRecycledTrack.clear();
@@ -1488,19 +1586,16 @@
             delete newcbd;
             return OK;
         }
-
-        // if we're not going to reuse the track, unblock and flush it
-        if (mCallbackData != NULL) {
-            mCallbackData->setOutput(NULL);
-            mCallbackData->endTrackSwitch();
-        }
-        mRecycledTrack->flush();
-        mRecycledTrack.clear();
-        delete mCallbackData;
-        mCallbackData = NULL;
-        close();
     }
 
+    // We're not going to reuse the track: unblock and flush it.
+    // This was already done above if both tracks are offloaded.
+    if (!bothOffloaded) {
+        deleteRecycledTrack();
+    }
+
+    CHECK((t != NULL) && ((mCallback == NULL) || (newcbd != NULL)));
+
     mCallbackData = newcbd;
     ALOGV("setVolume");
     t->setVolume(mLeftVolume, mRightVolume);
@@ -1514,15 +1609,19 @@
     }
     mTrack = t;
 
-    status_t res = t->setSampleRate(mPlaybackRatePermille * mSampleRateHz / 1000);
-    if (res != NO_ERROR) {
-        return res;
+    status_t res = NO_ERROR;
+    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) {
+        res = t->setSampleRate(mPlaybackRatePermille * mSampleRateHz / 1000);
+        if (res == NO_ERROR) {
+            t->setAuxEffectSendLevel(mSendLevel);
+            res = t->attachAuxEffect(mAuxEffectId);
+        }
     }
-    t->setAuxEffectSendLevel(mSendLevel);
-    return t->attachAuxEffect(mAuxEffectId);;
+    ALOGV("open() DONE status %d", res);
+    return res;
 }
 
-void MediaPlayerService::AudioOutput::start()
+status_t MediaPlayerService::AudioOutput::start()
 {
     ALOGV("start");
     if (mCallbackData != NULL) {
@@ -1531,8 +1630,9 @@
     if (mTrack != 0) {
         mTrack->setVolume(mLeftVolume, mRightVolume);
         mTrack->setAuxEffectSendLevel(mSendLevel);
-        mTrack->start();
+        return mTrack->start();
     }
+    return NO_INIT;
 }
 
 void MediaPlayerService::AudioOutput::setNextOutput(const sp<AudioOutput>& nextOutput) {
@@ -1645,10 +1745,6 @@
 void MediaPlayerService::AudioOutput::CallbackWrapper(
         int event, void *cookie, void *info) {
     //ALOGV("callbackwrapper");
-    if (event != AudioTrack::EVENT_MORE_DATA) {
-        return;
-    }
-
     CallbackData *data = (CallbackData*)cookie;
     data->lock();
     AudioOutput *me = data->getOutput();
@@ -1657,23 +1753,46 @@
         // no output set, likely because the track was scheduled to be reused
         // by another player, but the format turned out to be incompatible.
         data->unlock();
-        buffer->size = 0;
+        if (buffer != NULL) {
+            buffer->size = 0;
+        }
         return;
     }
 
-    size_t actualSize = (*me->mCallback)(
-            me, buffer->raw, buffer->size, me->mCallbackCookie,
-            CB_EVENT_FILL_BUFFER);
+    switch (event) {
+    case AudioTrack::EVENT_MORE_DATA: {
+        size_t actualSize = (*me->mCallback)(
+                me, buffer->raw, buffer->size, me->mCallbackCookie,
+                CB_EVENT_FILL_BUFFER);
 
-    if (actualSize == 0 && buffer->size > 0 && me->mNextOutput == NULL) {
-        // We've reached EOS but the audio track is not stopped yet,
-        // keep playing silence.
+        if (actualSize == 0 && buffer->size > 0 && me->mNextOutput == NULL) {
+            // We've reached EOS but the audio track is not stopped yet,
+            // keep playing silence.
 
-        memset(buffer->raw, 0, buffer->size);
-        actualSize = buffer->size;
+            memset(buffer->raw, 0, buffer->size);
+            actualSize = buffer->size;
+        }
+
+        buffer->size = actualSize;
+        } break;
+
+
+    case AudioTrack::EVENT_STREAM_END:
+        ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
+        (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
+                me->mCallbackCookie, CB_EVENT_STREAM_END);
+        break;
+
+    case AudioTrack::EVENT_NEW_IAUDIOTRACK:
+        ALOGV("callbackwrapper: deliver EVENT_TEAR_DOWN");
+        (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
+                me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
+        break;
+
+    default:
+        ALOGE("received unknown event type: %d inside CallbackWrapper!", event);
     }
 
-    buffer->size = actualSize;
     data->unlock();
 }
 
@@ -1803,10 +1922,11 @@
     return NO_ERROR;
 }
 
-void MediaPlayerService::AudioCache::start() {
+status_t MediaPlayerService::AudioCache::start() {
     if (mCallbackThread != NULL) {
         mCallbackThread->run("AudioCache callback");
     }
+    return NO_ERROR;
 }
 
 void MediaPlayerService::AudioCache::stop() {
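
CallbackWrapper now forwards three sink events to the player callback instead of handling only EVENT_MORE_DATA. A sketch of what the receiving AudioSink callback might look like; the MyPlayer type and its handlers are assumptions, and the signature mirrors the call sites in the hunk above.

    // Sketch (assumption): player-side AudioSink callback once all three
    // cb_event_t values are delivered by the wrapper above.
    static size_t sinkCallback(MediaPlayerBase::AudioSink* sink, void* buffer,
                               size_t size, void* cookie,
                               MediaPlayerBase::AudioSink::cb_event_t event) {
        MyPlayer* player = static_cast<MyPlayer*>(cookie);
        switch (event) {
        case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
            return player->fillAudioBuffer(buffer, size);  // bytes written
        case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
            player->onAudioStreamEnd();      // offloaded drain finished
            return 0;
        case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
            player->onAudioTearDown();       // recreate the sink and track
            return 0;
        default:
            return 0;
        }
    }
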
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index f7076cc..7d27944 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -94,7 +94,7 @@
                 audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                 const audio_offload_info_t *offloadInfo = NULL);
 
-        virtual void            start();
+        virtual status_t        start();
         virtual ssize_t         write(const void* buffer, size_t size);
         virtual void            stop();
         virtual void            flush();
@@ -112,11 +112,14 @@
                 void            setNextOutput(const sp<AudioOutput>& nextOutput);
                 void            switchToNextOutput();
         virtual bool            needsTrailingPadding() { return mNextOutput == NULL; }
+        virtual status_t        setParameters(const String8& keyValuePairs);
+        virtual String8         getParameters(const String8& keys);
 
     private:
         static void             setMinBufferCount();
         static void             CallbackWrapper(
                 int event, void *me, void *info);
+                void            deleteRecycledTrack();
 
         sp<AudioTrack>          mTrack;
         sp<AudioTrack>          mRecycledTrack;
@@ -196,7 +199,7 @@
                 audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                 const audio_offload_info_t *offloadInfo = NULL);
 
-        virtual void            start();
+        virtual status_t        start();
         virtual ssize_t         write(const void* buffer, size_t size);
         virtual void            stop();
         virtual void            flush() {}
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 90bf324..1f68b51 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -100,6 +100,7 @@
         libstagefright_mpeg2ts \
         libstagefright_id3 \
         libFLAC \
+        libmedia_helper
 
 LOCAL_SRC_FILES += \
         chromium_http_stub.cpp
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index 61d6746..2418aab 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -17,6 +17,7 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "AudioPlayer"
 #include <utils/Log.h>
+#include <cutils/compiler.h>
 
 #include <binder/IPCThreadState.h>
 #include <media/AudioTrack.h>
@@ -27,6 +28,7 @@
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
 
 #include "include/AwesomePlayer.h"
 
@@ -47,14 +49,17 @@
       mSeeking(false),
       mReachedEOS(false),
       mFinalStatus(OK),
+      mSeekTimeUs(0),
       mStarted(false),
       mIsFirstBuffer(false),
       mFirstBufferResult(OK),
       mFirstBuffer(NULL),
       mAudioSink(audioSink),
-      mAllowDeepBuffering((flags & ALLOW_DEEP_BUFFERING) != 0),
       mObserver(observer),
-      mPinnedTimeUs(-1ll) {
+      mPinnedTimeUs(-1ll),
+      mPlaying(false),
+      mStartPosUs(0),
+      mCreateFlags(flags) {
 }
 
 AudioPlayer::~AudioPlayer() {
@@ -109,7 +114,7 @@
     const char *mime;
     bool success = format->findCString(kKeyMIMEType, &mime);
     CHECK(success);
-    CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
+    CHECK(useOffload() || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
 
     success = format->findInt32(kKeySampleRate, &mSampleRate);
     CHECK(success);
@@ -125,16 +130,74 @@
         channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
     }
 
+    audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
+
+    if (useOffload()) {
+        if (mapMimeToAudioFormat(audioFormat, mime) != OK) {
+            ALOGE("Couldn't map mime type \"%s\" to a valid AudioSystem::audio_format", mime);
+            audioFormat = AUDIO_FORMAT_INVALID;
+        } else {
+            ALOGV("Mime type \"%s\" mapped to audio_format 0x%x", mime, audioFormat);
+        }
+    }
+
+    int avgBitRate = -1;
+    format->findInt32(kKeyBitRate, &avgBitRate);
+
     if (mAudioSink.get() != NULL) {
 
+        uint32_t flags = AUDIO_OUTPUT_FLAG_NONE;
+        audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
+
+        if (allowDeepBuffering()) {
+            flags |= AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+        }
+        if (useOffload()) {
+            flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+
+            int64_t durationUs;
+            if (format->findInt64(kKeyDuration, &durationUs)) {
+                offloadInfo.duration_us = durationUs;
+            } else {
+                offloadInfo.duration_us = -1;
+            }
+
+            offloadInfo.sample_rate = mSampleRate;
+            offloadInfo.channel_mask = channelMask;
+            offloadInfo.format = audioFormat;
+            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
+            offloadInfo.bit_rate = avgBitRate;
+            offloadInfo.has_video = ((mCreateFlags & HAS_VIDEO) != 0);
+            offloadInfo.is_streaming = ((mCreateFlags & IS_STREAMING) != 0);
+        }
+
         status_t err = mAudioSink->open(
-                mSampleRate, numChannels, channelMask, AUDIO_FORMAT_PCM_16_BIT,
+                mSampleRate, numChannels, channelMask, audioFormat,
                 DEFAULT_AUDIOSINK_BUFFERCOUNT,
                 &AudioPlayer::AudioSinkCallback,
                 this,
-                (mAllowDeepBuffering ?
-                            AUDIO_OUTPUT_FLAG_DEEP_BUFFER :
-                            AUDIO_OUTPUT_FLAG_NONE));
+                (audio_output_flags_t)flags,
+                useOffload() ? &offloadInfo : NULL);
+
+        if (err == OK) {
+            mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
+            mFrameSize = mAudioSink->frameSize();
+
+            if (useOffload()) {
+                // If the playback is offloaded to h/w we pass the
+                // HAL some metadata information
+                // We don't want to do this for PCM because it will be going
+                // through the AudioFlinger mixer before reaching the hardware
+                sendMetaDataToHal(mAudioSink, format);
+            }
+
+            err = mAudioSink->start();
+            // do not alter behavior for non-offloaded tracks: ignore start status.
+            if (!useOffload()) {
+                err = OK;
+            }
+        }
+
         if (err != OK) {
             if (mFirstBuffer != NULL) {
                 mFirstBuffer->release();
@@ -148,10 +211,6 @@
             return err;
         }
 
-        mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
-        mFrameSize = mAudioSink->frameSize();
-
-        mAudioSink->start();
     } else {
         // playing to an AudioTrack, set up mask if necessary
         audio_channel_mask_t audioMask = channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER ?
@@ -186,6 +245,7 @@
     }
 
     mStarted = true;
+    mPlaying = true;
     mPinnedTimeUs = -1ll;
 
     return OK;
@@ -212,27 +272,56 @@
 
         mPinnedTimeUs = ALooper::GetNowUs();
     }
+
+    mPlaying = false;
 }
 
-void AudioPlayer::resume() {
+status_t AudioPlayer::resume() {
     CHECK(mStarted);
+    status_t err;
 
     if (mAudioSink.get() != NULL) {
-        mAudioSink->start();
+        err = mAudioSink->start();
     } else {
-        mAudioTrack->start();
+        err = mAudioTrack->start();
     }
+
+    if (err == OK) {
+        mPlaying = true;
+    }
+
+    return err;
 }
 
 void AudioPlayer::reset() {
     CHECK(mStarted);
 
+    ALOGV("reset: mPlaying=%d mReachedEOS=%d useOffload=%d",
+                                mPlaying, mReachedEOS, useOffload());
+
     if (mAudioSink.get() != NULL) {
         mAudioSink->stop();
+        // If we're closing and have reached EOS, we don't want to flush
+        // the track: if it is offloaded there could be a small amount of
+        // residual data in the hardware buffer which we must play out to
+        // give gapless playback.
+        // But if we're resetting while paused, or before we've reached EOS,
+        // this can't be a gapless transition, and an offloaded track could
+        // have a large amount of data queued in the hardware. Flush it so
+        // that a track switch isn't delayed by playing out buffered data
+        // we no longer want.
+        if (!mPlaying || !mReachedEOS) {
+            mAudioSink->flush();
+        }
+
         mAudioSink->close();
     } else {
         mAudioTrack->stop();
 
+        if (!mPlaying || !mReachedEOS) {
+            mAudioTrack->flush();
+        }
+
         mAudioTrack.clear();
     }
 
@@ -256,10 +345,16 @@
     // The following hack is necessary to ensure that the OMX
     // component is completely released by the time we may try
     // to instantiate it again.
-    wp<MediaSource> tmp = mSource;
-    mSource.clear();
-    while (tmp.promote() != NULL) {
-        usleep(1000);
+    // When offloading, the OMX component is not used so this hack
+    // is not needed
+    if (!useOffload()) {
+        wp<MediaSource> tmp = mSource;
+        mSource.clear();
+        while (tmp.promote() != NULL) {
+            usleep(1000);
+        }
+    } else {
+        mSource.clear();
     }
     IPCThreadState::self()->flushCommands();
 
@@ -271,6 +366,8 @@
     mReachedEOS = false;
     mFinalStatus = OK;
     mStarted = false;
+    mPlaying = false;
+    mStartPosUs = 0;
 }
 
 // static
@@ -291,6 +388,15 @@
     return mReachedEOS;
 }
 
+void AudioPlayer::notifyAudioEOS() {
+    ALOGV("AudioPlayer@0x%p notifyAudioEOS", this);
+
+    if (mObserver != NULL) {
+        mObserver->postAudioEOS(0);
+        ALOGV("Notified observer of EOS!");
+    }
+}
+
 status_t AudioPlayer::setPlaybackRatePermille(int32_t ratePermille) {
     if (mAudioSink.get() != NULL) {
         return mAudioSink->setPlaybackRatePermille(ratePermille);
@@ -308,18 +414,40 @@
         MediaPlayerBase::AudioSink::cb_event_t event) {
     AudioPlayer *me = (AudioPlayer *)cookie;
 
-    return me->fillBuffer(buffer, size);
+    switch(event) {
+    case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
+        return me->fillBuffer(buffer, size);
+
+    case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
+        ALOGV("AudioSinkCallback: stream end");
+        me->mReachedEOS = true;
+        me->notifyAudioEOS();
+        break;
+
+    case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
+        ALOGV("AudioSinkCallback: Tear down event");
+        me->mObserver->postAudioTearDown();
+        break;
+    }
+
+    return 0;
 }
 
 void AudioPlayer::AudioCallback(int event, void *info) {
-    if (event != AudioTrack::EVENT_MORE_DATA) {
-        return;
+    switch (event) {
+    case AudioTrack::EVENT_MORE_DATA:
+        {
+        AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
+        size_t numBytesWritten = fillBuffer(buffer->raw, buffer->size);
+        buffer->size = numBytesWritten;
+        }
+        break;
+
+    case AudioTrack::EVENT_STREAM_END:
+        mReachedEOS = true;
+        notifyAudioEOS();
+        break;
     }
-
-    AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
-    size_t numBytesWritten = fillBuffer(buffer->raw, buffer->size);
-
-    buffer->size = numBytesWritten;
 }
 
 uint32_t AudioPlayer::getNumFramesPendingPlayout() const {
@@ -359,6 +487,7 @@
     size_t size_remaining = size;
     while (size_remaining > 0) {
         MediaSource::ReadOptions options;
+        bool refreshSeekTime = false;
 
         {
             Mutex::Autolock autoLock(mLock);
@@ -373,6 +502,7 @@
                 }
 
                 options.setSeekTo(mSeekTimeUs);
+                refreshSeekTime = true;
 
                 if (mInputBuffer != NULL) {
                     mInputBuffer->release();
@@ -405,43 +535,56 @@
             Mutex::Autolock autoLock(mLock);
 
             if (err != OK) {
-                if (mObserver && !mReachedEOS) {
-                    // We don't want to post EOS right away but only
-                    // after all frames have actually been played out.
-
-                    // These are the number of frames submitted to the
-                    // AudioTrack that you haven't heard yet.
-                    uint32_t numFramesPendingPlayout =
-                        getNumFramesPendingPlayout();
-
-                    // These are the number of frames we're going to
-                    // submit to the AudioTrack by returning from this
-                    // callback.
-                    uint32_t numAdditionalFrames = size_done / mFrameSize;
-
-                    numFramesPendingPlayout += numAdditionalFrames;
-
-                    int64_t timeToCompletionUs =
-                        (1000000ll * numFramesPendingPlayout) / mSampleRate;
-
-                    ALOGV("total number of frames played: %lld (%lld us)",
-                            (mNumFramesPlayed + numAdditionalFrames),
-                            1000000ll * (mNumFramesPlayed + numAdditionalFrames)
-                                / mSampleRate);
-
-                    ALOGV("%d frames left to play, %lld us (%.2f secs)",
-                         numFramesPendingPlayout,
-                         timeToCompletionUs, timeToCompletionUs / 1E6);
-
-                    postEOS = true;
-                    if (mAudioSink->needsTrailingPadding()) {
-                        postEOSDelayUs = timeToCompletionUs + mLatencyUs;
+                if (!mReachedEOS) {
+                    if (useOffload()) {
+                        // no more buffers to push - stop() and wait for STREAM_END
+                        // don't set mReachedEOS until stream end received
+                        if (mAudioSink != NULL) {
+                            mAudioSink->stop();
+                        } else {
+                            mAudioTrack->stop();
+                        }
                     } else {
-                        postEOSDelayUs = 0;
+                        if (mObserver) {
+                            // We don't want to post EOS right away but only
+                            // after all frames have actually been played out.
+
+                            // These are the number of frames submitted to the
+                            // AudioTrack that you haven't heard yet.
+                            uint32_t numFramesPendingPlayout =
+                                getNumFramesPendingPlayout();
+
+                            // These are the number of frames we're going to
+                            // submit to the AudioTrack by returning from this
+                            // callback.
+                            uint32_t numAdditionalFrames = size_done / mFrameSize;
+
+                            numFramesPendingPlayout += numAdditionalFrames;
+
+                            int64_t timeToCompletionUs =
+                                (1000000ll * numFramesPendingPlayout) / mSampleRate;
+
+                            ALOGV("total number of frames played: %lld (%lld us)",
+                                    (mNumFramesPlayed + numAdditionalFrames),
+                                    1000000ll * (mNumFramesPlayed + numAdditionalFrames)
+                                        / mSampleRate);
+
+                            ALOGV("%d frames left to play, %lld us (%.2f secs)",
+                                 numFramesPendingPlayout,
+                                 timeToCompletionUs, timeToCompletionUs / 1E6);
+
+                            postEOS = true;
+                            if (mAudioSink->needsTrailingPadding()) {
+                                postEOSDelayUs = timeToCompletionUs + mLatencyUs;
+                            } else {
+                                postEOSDelayUs = 0;
+                            }
+                        }
+
+                        mReachedEOS = true;
                     }
                 }
 
-                mReachedEOS = true;
                 mFinalStatus = err;
                 break;
             }
@@ -452,17 +595,34 @@
                 mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
             }
 
-            CHECK(mInputBuffer->meta_data()->findInt64(
+            if (mInputBuffer->range_length() != 0) {
+                CHECK(mInputBuffer->meta_data()->findInt64(
                         kKeyTime, &mPositionTimeMediaUs));
+            }
 
-            mPositionTimeRealUs =
-                ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
-                    / mSampleRate;
+            // Need to adjust mStartPosUs for offload decoding since the
+            // parser might not be able to seek to the exact time requested.
+            if (refreshSeekTime && useOffload()) {
+                if (postSeekComplete) {
+                    ALOGV("fillBuffer is going to post SEEK_COMPLETE");
+                    mObserver->postAudioSeekComplete();
+                    postSeekComplete = false;
+                }
 
-            ALOGV("buffer->size() = %d, "
-                 "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
-                 mInputBuffer->range_length(),
-                 mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
+                mStartPosUs = mPositionTimeMediaUs;
+                ALOGV("adjust seek time to: %.2f", mStartPosUs / 1E6);
+            }
+
+            if (!useOffload()) {
+                mPositionTimeRealUs =
+                    ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
+                        / mSampleRate;
+                ALOGV("buffer->size() = %d, "
+                     "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
+                     mInputBuffer->range_length(),
+                     mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
+            }
+
         }
 
         if (mInputBuffer->range_length() == 0) {
@@ -488,6 +648,13 @@
         size_remaining -= copy;
     }
 
+    if (useOffload()) {
+        // We must ask the hardware what it has played
+        mPositionTimeRealUs = getOutputPlayPositionUs_l();
+        ALOGV("mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
+             mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
+    }
+
     {
         Mutex::Autolock autoLock(mLock);
         mNumFramesPlayed += size_done / mFrameSize;
@@ -536,9 +703,36 @@
     return result + diffUs;
 }
 
+int64_t AudioPlayer::getOutputPlayPositionUs_l() const
+{
+    uint32_t playedSamples = 0;
+    if (mAudioSink != NULL) {
+        mAudioSink->getPosition(&playedSamples);
+    } else {
+        mAudioTrack->getPosition(&playedSamples);
+    }
+
+    const int64_t playedUs = (static_cast<int64_t>(playedSamples) * 1000000) / mSampleRate;
+
+    // HAL position is relative to the first buffer we sent at mStartPosUs
+    const int64_t renderedDuration = mStartPosUs + playedUs;
+    ALOGV("getOutputPlayPositionUs_l %lld", renderedDuration);
+    return renderedDuration;
+}
+
 int64_t AudioPlayer::getMediaTimeUs() {
     Mutex::Autolock autoLock(mLock);
 
+    if (useOffload()) {
+        if (mSeeking) {
+            return mSeekTimeUs;
+        }
+        mPositionTimeRealUs = getOutputPlayPositionUs_l();
+        ALOGV("getMediaTimeUs getOutputPlayPositionUs_l() mPositionTimeRealUs %lld",
+              mPositionTimeRealUs);
+        return mPositionTimeRealUs;
+    }
+
     if (mPositionTimeMediaUs < 0 || mPositionTimeRealUs < 0) {
         if (mSeeking) {
             return mSeekTimeUs;
@@ -547,6 +741,11 @@
         return 0;
     }
 
+    if (useOffload()) {
+        mPositionTimeRealUs = getOutputPlayPositionUs_l();
+        return mPositionTimeRealUs;
+    }
+
     int64_t realTimeOffset = getRealTimeUsLocked() - mPositionTimeRealUs;
     if (realTimeOffset < 0) {
         realTimeOffset = 0;
@@ -568,19 +767,34 @@
 status_t AudioPlayer::seekTo(int64_t time_us) {
     Mutex::Autolock autoLock(mLock);
 
+    ALOGV("seekTo( %lld )", time_us);
+
     mSeeking = true;
     mPositionTimeRealUs = mPositionTimeMediaUs = -1;
     mReachedEOS = false;
     mSeekTimeUs = time_us;
+    mStartPosUs = time_us;
 
     // Flush resets the number of played frames
     mNumFramesPlayed = 0;
     mNumFramesPlayedSysTimeUs = ALooper::GetNowUs();
 
     if (mAudioSink != NULL) {
+        if (mPlaying) {
+            mAudioSink->pause();
+        }
         mAudioSink->flush();
+        if (mPlaying) {
+            mAudioSink->start();
+        }
     } else {
+        if (mPlaying) {
+            mAudioTrack->pause();
+        }
         mAudioTrack->flush();
+        if (mPlaying) {
+            mAudioTrack->start();
+        }
     }
 
     return OK;
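
For reference, the offload position bookkeeping added in getOutputPlayPositionUs_l() reduces to the arithmetic below. This is a standalone sketch with illustrative names (renderedPositionUs, playedFrames), not code from the patch; like the patch, it assumes the sink reports frames played since the last flush.

    #include <cstdint>

    // Standalone sketch (not patch code): the sink reports frames played since
    // the last flush, so the absolute media position is that count converted to
    // microseconds plus the media time of the first buffer queued after the
    // seek/flush (mStartPosUs in the patch, startPosUs here).
    static int64_t renderedPositionUs(uint32_t playedFrames, uint32_t sampleRate,
                                      int64_t startPosUs) {
        const int64_t playedUs =
                (static_cast<int64_t>(playedFrames) * 1000000LL) / sampleRate;
        return startPosUs + playedUs;
    }

    // Example: 44100 frames played at 44.1 kHz after seeking to 5 s -> 6000000 us.
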
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index b505518..3e70dd7 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -47,6 +47,7 @@
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/Utils.h>
 
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
@@ -65,6 +66,11 @@
 static const size_t kLowWaterMarkBytes = 40000;
 static const size_t kHighWaterMarkBytes = 200000;
 
+// maximum time in paused state when offloading audio decompression. When elapsed, the AudioPlayer
+// is destroyed to allow the audio DSP to power down.
+static int64_t kOffloadPauseMaxUs = 60000000ll;
+
+
 struct AwesomeEvent : public TimedEventQueue::Event {
     AwesomeEvent(
             AwesomePlayer *player,
@@ -194,7 +200,9 @@
       mVideoBuffer(NULL),
       mDecryptHandle(NULL),
       mLastVideoTimeUs(-1),
-      mTextDriver(NULL) {
+      mTextDriver(NULL),
+      mOffloadAudio(false),
+      mAudioTearDown(false) {
     CHECK_EQ(mClient.connect(), (status_t)OK);
 
     DataSource::RegisterDefaultSniffers();
@@ -213,6 +221,10 @@
 
     mAudioStatusEventPending = false;
 
+    mAudioTearDownEvent = new AwesomeEvent(this,
+                              &AwesomePlayer::onAudioTearDownEvent);
+    mAudioTearDownEventPending = false;
+
     reset();
 }
 
@@ -232,6 +244,11 @@
     mQueue.cancelEvent(mVideoLagEvent->eventID());
     mVideoLagEventPending = false;
 
+    if (mOffloadAudio) {
+        mQueue.cancelEvent(mAudioTearDownEvent->eventID());
+        mAudioTearDownEventPending = false;
+    }
+
     if (!keepNotifications) {
         mQueue.cancelEvent(mStreamDoneEvent->eventID());
         mStreamDoneEventPending = false;
@@ -518,7 +535,7 @@
     mVideoTrack.clear();
     mExtractor.clear();
 
-    // Shutdown audio first, so that the respone to the reset request
+    // Shutdown audio first, so that the response to the reset request
     // appears to happen instantaneously as far as the user is concerned
     // If we did this later, audio would continue playing while we
     // shutdown the video-related resources and the player appear to
@@ -531,6 +548,7 @@
         mAudioSource->stop();
     }
     mAudioSource.clear();
+    mOmxSource.clear();
 
     mTimeSource = NULL;
 
@@ -586,7 +604,7 @@
 }
 
 void AwesomePlayer::notifyListener_l(int msg, int ext1, int ext2) {
-    if (mListener != NULL) {
+    if ((mListener != NULL) && !mAudioTearDown) {
         sp<MediaPlayerBase> listener = mListener.promote();
 
         if (listener != NULL) {
@@ -842,6 +860,13 @@
 
         pause_l(true /* at eos */);
 
+        // If audio hasn't posted MEDIA_SEEK_COMPLETE yet, notify the
+        // observer immediately so the seek state is persisted.
+        if (mWatchForAudioSeekComplete) {
+            notifyListener_l(MEDIA_SEEK_COMPLETE);
+            mWatchForAudioSeekComplete = false;
+        }
+
         modifyFlags(AT_EOS, SET);
     }
 }
@@ -883,41 +908,42 @@
 
     if (mAudioSource != NULL) {
         if (mAudioPlayer == NULL) {
-            if (mAudioSink != NULL) {
-                bool allowDeepBuffering;
-                int64_t cachedDurationUs;
-                bool eos;
-                if (mVideoSource == NULL
-                        && (mDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US ||
-                        (getCachedDuration_l(&cachedDurationUs, &eos) &&
-                        cachedDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US))) {
-                    allowDeepBuffering = true;
-                } else {
-                    allowDeepBuffering = false;
-                }
-
-                mAudioPlayer = new AudioPlayer(mAudioSink, allowDeepBuffering, this);
-                mAudioPlayer->setSource(mAudioSource);
-
-                mTimeSource = mAudioPlayer;
-
-                // If there was a seek request before we ever started,
-                // honor the request now.
-                // Make sure to do this before starting the audio player
-                // to avoid a race condition.
-                seekAudioIfNecessary_l();
-            }
+            createAudioPlayer_l();
         }
 
         CHECK(!(mFlags & AUDIO_RUNNING));
 
         if (mVideoSource == NULL) {
+
             // We don't want to post an error notification at this point,
             // the error returned from MediaPlayer::start() will suffice.
 
             status_t err = startAudioPlayer_l(
                     false /* sendErrorNotification */);
 
+            if ((err != OK) && mOffloadAudio) {
+                ALOGI("play_l() cannot create offload output, falling back to sw decode");
+                delete mAudioPlayer;
+                mAudioPlayer = NULL;
+                // if the player was started it will take care of stopping the source when destroyed
+                if (!(mFlags & AUDIOPLAYER_STARTED)) {
+                    mAudioSource->stop();
+                }
+                modifyFlags((AUDIO_RUNNING | AUDIOPLAYER_STARTED), CLEAR);
+                mOffloadAudio = false;
+                mAudioSource = mOmxSource;
+                if (mAudioSource != NULL) {
+                    err = mAudioSource->start();
+
+                    if (err != OK) {
+                        mAudioSource.clear();
+                    } else {
+                        createAudioPlayer_l();
+                        err = startAudioPlayer_l(false);
+                    }
+                }
+            }
+
             if (err != OK) {
                 delete mAudioPlayer;
                 mAudioPlayer = NULL;
@@ -966,19 +992,58 @@
     return OK;
 }
 
+void AwesomePlayer::createAudioPlayer_l()
+{
+    uint32_t flags = 0;
+    int64_t cachedDurationUs;
+    bool eos;
+
+    if (mOffloadAudio) {
+        flags |= AudioPlayer::USE_OFFLOAD;
+    } else if (mVideoSource == NULL
+            && (mDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US ||
+            (getCachedDuration_l(&cachedDurationUs, &eos) &&
+            cachedDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US))) {
+        flags |= AudioPlayer::ALLOW_DEEP_BUFFERING;
+    }
+    if (isStreamingHTTP()) {
+        flags |= AudioPlayer::IS_STREAMING;
+    }
+    if (mVideoSource != NULL) {
+        flags |= AudioPlayer::HAS_VIDEO;
+    }
+
+    mAudioPlayer = new AudioPlayer(mAudioSink, flags, this);
+    mAudioPlayer->setSource(mAudioSource);
+
+    mTimeSource = mAudioPlayer;
+
+    // If there was a seek request before we ever started,
+    // honor the request now.
+    // Make sure to do this before starting the audio player
+    // to avoid a race condition.
+    seekAudioIfNecessary_l();
+}
+
 status_t AwesomePlayer::startAudioPlayer_l(bool sendErrorNotification) {
     CHECK(!(mFlags & AUDIO_RUNNING));
+    status_t err = OK;
 
     if (mAudioSource == NULL || mAudioPlayer == NULL) {
         return OK;
     }
 
+    if (mOffloadAudio) {
+        mQueue.cancelEvent(mAudioTearDownEvent->eventID());
+        mAudioTearDownEventPending = false;
+    }
+
     if (!(mFlags & AUDIOPLAYER_STARTED)) {
         bool wasSeeking = mAudioPlayer->isSeeking();
 
         // We've already started the MediaSource in order to enable
         // the prefetcher to read its data.
-        status_t err = mAudioPlayer->start(
+        err = mAudioPlayer->start(
                 true /* sourceAlreadyStarted */);
 
         if (err != OK) {
@@ -998,14 +1063,16 @@
             postAudioSeekComplete();
         }
     } else {
-        mAudioPlayer->resume();
+        err = mAudioPlayer->resume();
     }
 
-    modifyFlags(AUDIO_RUNNING, SET);
+    if (err == OK) {
+        modifyFlags(AUDIO_RUNNING, SET);
 
-    mWatchForAudioEOS = true;
+        mWatchForAudioEOS = true;
+    }
 
-    return OK;
+    return err;
 }
 
 void AwesomePlayer::notifyVideoSize_l() {
@@ -1137,15 +1204,14 @@
     cancelPlayerEvents(true /* keepNotifications */);
 
     if (mAudioPlayer != NULL && (mFlags & AUDIO_RUNNING)) {
-        if (at_eos) {
-            // If we played the audio stream to completion we
-            // want to make sure that all samples remaining in the audio
-            // track's queue are played out.
-            mAudioPlayer->pause(true /* playPendingSamples */);
-        } else {
-            mAudioPlayer->pause();
+        // If we played the audio stream to completion we
+        // want to make sure that all samples remaining in the audio
+        // track's queue are played out.
+        mAudioPlayer->pause(at_eos /* playPendingSamples */);
+        // send us a reminder to tear down the AudioPlayer if paused for too long.
+        if (mOffloadAudio) {
+            postAudioTearDownEvent(kOffloadPauseMaxUs);
         }
-
         modifyFlags(AUDIO_RUNNING, CLEAR);
     }
 
@@ -1290,7 +1356,6 @@
     } else {
         *positionUs = 0;
     }
-
     return OK;
 }
 
@@ -1385,14 +1450,29 @@
 
     const char *mime;
     CHECK(meta->findCString(kKeyMIMEType, &mime));
+    // Check whether there is a hardware codec for this stream
+    // This doesn't guarantee that the hardware has a free stream
+    // but it avoids us attempting to open (and re-open) an offload
+    // stream to hardware that doesn't have the necessary codec
+    mOffloadAudio = canOffloadStream(meta, (mVideoSource != NULL), isStreamingHTTP());
 
     if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
+        ALOGV("createAudioPlayer: bypass OMX (raw)");
         mAudioSource = mAudioTrack;
     } else {
-        mAudioSource = OMXCodec::Create(
+        // If offloading, we still create an OMX decoder as a fallback
+        // but we don't start it
+        mOmxSource = OMXCodec::Create(
                 mClient.interface(), mAudioTrack->getFormat(),
                 false, // createEncoder
                 mAudioTrack);
+
+        if (mOffloadAudio) {
+            ALOGV("createAudioPlayer: bypass OMX (offload)");
+            mAudioSource = mAudioTrack;
+        } else {
+            mAudioSource = mOmxSource;
+        }
     }
 
     if (mAudioSource != NULL) {
@@ -1408,6 +1488,7 @@
 
         if (err != OK) {
             mAudioSource.clear();
+            mOmxSource.clear();
             return err;
         }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) {
@@ -1885,6 +1966,15 @@
     mQueue.postEventWithDelay(mCheckAudioStatusEvent, delayUs);
 }
 
+void AwesomePlayer::postAudioTearDownEvent(int64_t delayUs) {
+    Mutex::Autolock autoLock(mAudioLock);
+    if (mAudioTearDownEventPending) {
+        return;
+    }
+    mAudioTearDownEventPending = true;
+    mQueue.postEventWithDelay(mAudioTearDownEvent, delayUs);
+}
+
 void AwesomePlayer::onCheckAudioStatus() {
     {
         Mutex::Autolock autoLock(mAudioLock);
@@ -2200,7 +2290,10 @@
 
 void AwesomePlayer::onPrepareAsyncEvent() {
     Mutex::Autolock autoLock(mLock);
+    beginPrepareAsync_l();
+}
 
+void AwesomePlayer::beginPrepareAsync_l() {
     if (mFlags & PREPARE_CANCELLED) {
         ALOGI("prepare was cancelled before doing anything");
         abortPrepare(UNKNOWN_ERROR);
@@ -2273,6 +2366,10 @@
     postCheckAudioStatusEvent(0);
 }
 
+void AwesomePlayer::postAudioTearDown() {
+    postAudioTearDownEvent(0);
+}
+
 status_t AwesomePlayer::setParameter(int key, const Parcel &request) {
     switch (key) {
         case KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS:
@@ -2404,6 +2501,7 @@
         mAudioSource->stop();
     }
     mAudioSource.clear();
+    mOmxSource.clear();
 
     mTimeSource = NULL;
 
@@ -2660,4 +2758,66 @@
     }
 }
 
+void AwesomePlayer::onAudioTearDownEvent() {
+
+    Mutex::Autolock autoLock(mLock);
+    if (!mAudioTearDownEventPending) {
+        return;
+    }
+    mAudioTearDownEventPending = false;
+
+    ALOGV("onAudioTearDownEvent");
+
+    // stream info is cleared by reset_l() so copy what we need
+    const bool wasPlaying = (mFlags & PLAYING);
+    KeyedVector<String8, String8> uriHeaders(mUriHeaders);
+    sp<DataSource> fileSource(mFileSource);
+
+    mStatsLock.lock();
+    String8 uri(mStats.mURI);
+    mStatsLock.unlock();
+
+    // get current position so we can start recreated stream from here
+    int64_t position = 0;
+    getPosition(&position);
+
+    // Reset and recreate
+    reset_l();
+    mFlags |= PREPARING;
+
+    status_t err;
+
+    if (fileSource != NULL) {
+        mFileSource = fileSource;
+        err = setDataSource_l(fileSource);
+    } else {
+        err = setDataSource_l(uri, &uriHeaders);
+    }
+
+    if (err != OK) {
+        // This will force beginPrepareAsync_l() to notify
+        // a MEDIA_ERROR to the client and abort the prepare
+        mFlags |= PREPARE_CANCELLED;
+    }
+
+    mAudioTearDown = true;
+    mIsAsyncPrepare = true;
+
+    // Call prepare for the host decoding
+    beginPrepareAsync_l();
+
+    if (mPrepareResult == OK) {
+        if (mExtractorFlags & MediaExtractor::CAN_SEEK) {
+            seekTo_l(position);
+        }
+
+        if (wasPlaying) {
+            modifyFlags(CACHE_UNDERRUN, CLEAR);
+            play_l();
+        }
+    }
+
+    mAudioTearDown = false;
+}
+
 }  // namespace android
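
The flag selection in createAudioPlayer_l() can be summarised as below. This is a sketch only, not AwesomePlayer code: the enum values mirror the AudioPlayer create flags this patch introduces, the cached-duration check used for streaming sources is omitted for brevity, and the deep-buffer threshold is passed in rather than hard-coded since AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US is defined elsewhere.

    #include <cstdint>

    // Flag values mirror the enum the patch adds to AudioPlayer.h.
    enum {
        kAllowDeepBuffering = 0x01,
        kUseOffload         = 0x02,
        kHasVideo           = 0x1000,
        kIsStreaming        = 0x2000,
    };

    static uint32_t audioPlayerFlags(bool offload, bool hasVideo, bool streaming,
                                     int64_t durationUs, int64_t deepBufferMinUs) {
        uint32_t flags = 0;
        if (offload) {
            flags |= kUseOffload;            // compressed data goes straight to the DSP
        } else if (!hasVideo && durationUs > deepBufferMinUs) {
            flags |= kAllowDeepBuffering;    // long audio-only clips favour power savings
        }
        if (streaming) flags |= kIsStreaming;
        if (hasVideo)  flags |= kHasVideo;
        return flags;
    }
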
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index e9789d3..4db8e80 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -26,7 +26,12 @@
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/AudioSystem.h>
+#include <media/MediaPlayerInterface.h>
+#include <hardware/audio.h>
 #include <media/stagefright/Utils.h>
+#include <media/AudioParameter.h>
 
 namespace android {
 
@@ -474,20 +479,128 @@
 status_t sendMetaDataToHal(sp<MediaPlayerBase::AudioSink>& sink,
                            const sp<MetaData>& meta)
 {
-    // stub
+    int32_t sampleRate = 0;
+    int32_t bitRate = 0;
+    int32_t channelMask = 0;
+    int32_t delaySamples = 0;
+    int32_t paddingSamples = 0;
+
+    AudioParameter param = AudioParameter();
+
+    if (meta->findInt32(kKeySampleRate, &sampleRate)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_SAMPLE_RATE), sampleRate);
+    }
+    if (meta->findInt32(kKeyChannelMask, &channelMask)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_NUM_CHANNEL), channelMask);
+    }
+    if (meta->findInt32(kKeyBitRate, &bitRate)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_AVG_BIT_RATE), bitRate);
+    }
+    if (meta->findInt32(kKeyEncoderDelay, &delaySamples)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_DELAY_SAMPLES), delaySamples);
+    }
+    if (meta->findInt32(kKeyEncoderPadding, &paddingSamples)) {
+        param.addInt(String8(AUDIO_OFFLOAD_CODEC_PADDING_SAMPLES), paddingSamples);
+    }
+
+    ALOGV("sendMetaDataToHal: bitRate %d, sampleRate %d, chanMask %d, "
+          "delaySample %d, paddingSample %d", bitRate, sampleRate,
+          channelMask, delaySamples, paddingSamples);
+
+    sink->setParameters(param.toString());
     return OK;
 }
 
-status_t mapMimeToAudioFormat(audio_format_t& format, const char* mime)
+struct mime_conv_t {
+    const char* mime;
+    audio_format_t format;
+};
+
+static const struct mime_conv_t mimeLookup[] = {
+    { MEDIA_MIMETYPE_AUDIO_MPEG,        AUDIO_FORMAT_MP3 },
+    { MEDIA_MIMETYPE_AUDIO_RAW,         AUDIO_FORMAT_PCM_16_BIT },
+    { MEDIA_MIMETYPE_AUDIO_AMR_NB,      AUDIO_FORMAT_AMR_NB },
+    { MEDIA_MIMETYPE_AUDIO_AMR_WB,      AUDIO_FORMAT_AMR_WB },
+    { MEDIA_MIMETYPE_AUDIO_AAC,         AUDIO_FORMAT_AAC },
+    { MEDIA_MIMETYPE_AUDIO_VORBIS,      AUDIO_FORMAT_VORBIS },
+    { 0, AUDIO_FORMAT_INVALID }
+};
+
+status_t mapMimeToAudioFormat(audio_format_t& format, const char* mime)
 {
-    // stub
+    const struct mime_conv_t* p = &mimeLookup[0];
+    while (p->mime != NULL) {
+        if (0 == strcasecmp(mime, p->mime)) {
+            format = p->format;
+            return OK;
+        }
+        ++p;
+    }
+
     return BAD_VALUE;
 }
 
 bool canOffloadStream(const sp<MetaData>& meta, bool hasVideo, bool isStreaming)
 {
-    // stub
-    return false;
+    const char *mime;
+    CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+    audio_offload_info_t info = AUDIO_INFO_INITIALIZER;
+
+    info.format = AUDIO_FORMAT_INVALID;
+    if (mapMimeToAudioFormat(info.format, mime) != OK) {
+        ALOGE("Couldn't map mime type \"%s\" to a valid AudioSystem::audio_format!", mime);
+        return false;
+    } else {
+        ALOGV("Mime type \"%s\" mapped to audio_format %d", mime, info.format);
+    }
+
+    if (AUDIO_FORMAT_INVALID == info.format) {
+        // can't offload if we don't know what the source format is
+        ALOGE("mime type \"%s\" not a known audio format", mime);
+        return false;
+    }
+
+    int32_t srate = -1;
+    if (!meta->findInt32(kKeySampleRate, &srate)) {
+        ALOGV("track of type '%s' does not publish sample rate", mime);
+    }
+    info.sample_rate = srate;
+
+    int32_t cmask = 0;
+    if (!meta->findInt32(kKeyChannelMask, &cmask)) {
+        ALOGV("track of type '%s' does not publish channel mask", mime);
+
+        // Try a channel count instead
+        int32_t channelCount;
+        if (!meta->findInt32(kKeyChannelCount, &channelCount)) {
+            ALOGV("track of type '%s' does not publish channel count", mime);
+        } else {
+            cmask = audio_channel_out_mask_from_count(channelCount);
+        }
+    }
+    info.channel_mask = cmask;
+
+    int64_t duration = 0;
+    if (!meta->findInt64(kKeyDuration, &duration)) {
+        ALOGV("track of type '%s' does not publish duration", mime);
+    }
+    info.duration_us = duration;
+
+    int32_t brate = -1;
+    if (!meta->findInt32(kKeyBitRate, &brate)) {
+        ALOGV("track of type '%s' does not publish bitrate", mime);
+    }
+    info.bit_rate = brate;
+
+
+    info.stream_type = AUDIO_STREAM_MUSIC;
+    info.has_video = hasVideo;
+    info.is_streaming = isStreaming;
+
+    // Check if offload is possible for given format, stream type, sample rate,
+    // bit rate, duration, video and streaming
+    return AudioSystem::isOffloadSupported(info);
 }
 
 }  // namespace android
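
The mapMimeToAudioFormat() implementation above relies on a sentinel-terminated lookup table scanned with strcasecmp(). The same pattern in isolation is shown below; the entries and format values are placeholders, not the real MIME / audio_format_t pairs.

    #include <strings.h>   // strcasecmp

    struct MimeEntry { const char *mime; int format; };   // format values are placeholders

    static const MimeEntry kLookup[] = {
        { "audio/mpeg", 1 },
        { "audio/raw",  2 },
        { nullptr,      0 },   // sentinel: terminates the scan
    };

    // Returns true and sets *format on a case-insensitive match, mirroring the
    // OK / BAD_VALUE contract of mapMimeToAudioFormat().
    static bool lookupFormat(const char *mime, int *format) {
        for (const MimeEntry *p = kLookup; p->mime != nullptr; ++p) {
            if (strcasecmp(mime, p->mime) == 0) {
                *format = p->format;
                return true;
            }
        }
        return false;
    }
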
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 0d17d65..d3c74e2 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -226,7 +226,7 @@
     void postStreamDoneEvent_l(status_t status);
     void postCheckAudioStatusEvent(int64_t delayUs);
     void postVideoLagEvent_l();
-    void postAudioTearDownEvent();
+    void postAudioTearDownEvent(int64_t delayUs);
 
     status_t play_l();
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 99e077c..712772b 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1423,13 +1423,15 @@
     audio_stream_out_t *outStream = NULL;
     AudioHwDevice *outHwDev;
 
-    ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %d, Channels %x, flags %x",
+    ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
               module,
               (pDevices != NULL) ? *pDevices : 0,
               config.sample_rate,
               config.format,
               config.channel_mask,
               flags);
+    ALOGV("openOutput(), offloadInfo %p version 0x%04x",
+          offloadInfo, offloadInfo == NULL ? -1 : offloadInfo->version);
 
     if (pDevices == NULL || *pDevices == 0) {
         return 0;
@@ -1454,7 +1456,7 @@
                                           &outStream);
 
     mHardwareStatus = AUDIO_HW_IDLE;
-    ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, "
+    ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %#08x, "
             "Channels %x, status %d",
             outStream,
             config.sample_rate,
@@ -1463,9 +1465,12 @@
             status);
 
     if (status == NO_ERROR && outStream != NULL) {
-        AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream);
+        AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream, flags);
 
-        if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) ||
+        if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+            thread = new OffloadThread(this, output, id, *pDevices);
+            ALOGV("openOutput() created offload output: ID %d thread %p", id, thread);
+        } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) ||
             (config.format != AUDIO_FORMAT_PCM_16_BIT) ||
             (config.channel_mask != AUDIO_CHANNEL_OUT_STEREO)) {
             thread = new DirectOutputThread(this, output, id, *pDevices);
@@ -1555,11 +1560,28 @@
                     DuplicatingThread *dupThread =
                             (DuplicatingThread *)mPlaybackThreads.valueAt(i).get();
                     dupThread->removeOutputTrack((MixerThread *)thread.get());
+
+                }
+            }
+        }
+
+
+        mPlaybackThreads.removeItem(output);
+        // save all effects to the default thread
+        if (mPlaybackThreads.size()) {
+            PlaybackThread *dstThread = checkPlaybackThread_l(mPlaybackThreads.keyAt(0));
+            if (dstThread != NULL) {
+                // audioflinger lock is held here so the acquisition order of thread locks does not
+                // matter
+                Mutex::Autolock _dl(dstThread->mLock);
+                Mutex::Autolock _sl(thread->mLock);
+                Vector< sp<EffectChain> > effectChains = thread->getEffectChains_l();
+                for (size_t i = 0; i < effectChains.size(); i ++) {
+                    moveEffectChain_l(effectChains[i]->sessionId(), thread.get(), dstThread, true);
                 }
             }
         }
         audioConfigChanged_l(AudioSystem::OUTPUT_CLOSED, output, NULL);
-        mPlaybackThreads.removeItem(output);
     }
     thread->exit();
     // The thread entity (active unit of execution) is no longer running here,
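
openOutput() now dispatches to three playback-thread types. Stripped of the AudioFlinger plumbing, the decision reduces to the sketch below; the boolean parameters are stand-ins for the real AUDIO_OUTPUT_FLAG_* and config checks, and this is not AudioFlinger code.

    // Sketch of the thread-type choice, not AudioFlinger code.
    enum ThreadKind { MIXER, DIRECT, OFFLOAD };

    static ThreadKind pickPlaybackThread(bool compressOffloadFlag, bool directFlag,
                                         bool isPcm16Stereo) {
        if (compressOffloadFlag) {
            return OFFLOAD;                 // compressed stream handled by a dedicated thread
        }
        if (directFlag || !isPcm16Stereo) {
            return DIRECT;                  // bypass the mixer for non-mixable formats
        }
        return MIXER;                       // normal mixed PCM path
    }
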
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index f31619b..262d194 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -365,7 +365,9 @@
     class PlaybackThread;
     class MixerThread;
     class DirectOutputThread;
+    class OffloadThread;
     class DuplicatingThread;
+    class AsyncCallbackThread;
     class Track;
     class RecordTrack;
     class EffectModule;
@@ -432,6 +434,7 @@
         void                stop_nonvirtual();
     };
 
+
               PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const;
               MixerThread *checkMixerThread_l(audio_io_handle_t output) const;
               RecordThread *checkRecordThread_l(audio_io_handle_t input) const;
@@ -498,11 +501,12 @@
     struct AudioStreamOut {
         AudioHwDevice* const audioHwDev;
         audio_stream_out_t* const stream;
+        audio_output_flags_t flags;
 
         audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); }
 
-        AudioStreamOut(AudioHwDevice *dev, audio_stream_out_t *out) :
-            audioHwDev(dev), stream(out) {}
+        AudioStreamOut(AudioHwDevice *dev, audio_stream_out_t *out, audio_output_flags_t flags) :
+            audioHwDev(dev), stream(out), flags(flags) {}
     };
 
     struct AudioStreamIn {
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index fa1e405..900b411 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -70,10 +70,11 @@
     Mutex::Autolock _l(mLock);
 
     // start tone playback thread
-    mTonePlaybackThread = new AudioCommandThread(String8(""));
+    mTonePlaybackThread = new AudioCommandThread(String8("ApmTone"), this);
     // start audio commands thread
-    mAudioCommandThread = new AudioCommandThread(String8("ApmCommand"));
-
+    mAudioCommandThread = new AudioCommandThread(String8("ApmAudio"), this);
+    // start output activity command thread
+    mOutputCommandThread = new AudioCommandThread(String8("ApmOutput"), this);
     /* instantiate the audio policy manager */
     rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
     if (rc)
@@ -256,6 +257,15 @@
         return NO_INIT;
     }
     ALOGV("stopOutput()");
+    mOutputCommandThread->stopOutputCommand(output, stream, session);
+    return NO_ERROR;
+}
+
+status_t  AudioPolicyService::doStopOutput(audio_io_handle_t output,
+                                      audio_stream_type_t stream,
+                                      int session)
+{
+    ALOGV("doStopOutput from tid %d", gettid());
     Mutex::Autolock _l(mLock);
     return mpAudioPolicy->stop_output(mpAudioPolicy, output, stream, session);
 }
@@ -266,6 +276,12 @@
         return;
     }
     ALOGV("releaseOutput()");
+    mOutputCommandThread->releaseOutputCommand(output);
+}
+
+void AudioPolicyService::doReleaseOutput(audio_io_handle_t output)
+{
+    ALOGV("doReleaseOutput from tid %d", gettid());
     Mutex::Autolock _l(mLock);
     mpAudioPolicy->release_output(mpAudioPolicy, output);
 }
@@ -641,8 +657,9 @@
 
 // -----------  AudioPolicyService::AudioCommandThread implementation ----------
 
-AudioPolicyService::AudioCommandThread::AudioCommandThread(String8 name)
-    : Thread(false), mName(name)
+AudioPolicyService::AudioCommandThread::AudioCommandThread(String8 name,
+                                                           const wp<AudioPolicyService>& service)
+    : Thread(false), mName(name), mService(service)
 {
     mpToneGenerator = NULL;
 }
@@ -650,7 +667,7 @@
 
 AudioPolicyService::AudioCommandThread::~AudioCommandThread()
 {
-    if (mName != "" && !mAudioCommands.isEmpty()) {
+    if (!mAudioCommands.isEmpty()) {
         release_wake_lock(mName.string());
     }
     mAudioCommands.clear();
@@ -659,11 +676,7 @@
 
 void AudioPolicyService::AudioCommandThread::onFirstRef()
 {
-    if (mName != "") {
-        run(mName.string(), ANDROID_PRIORITY_AUDIO);
-    } else {
-        run("AudioCommand", ANDROID_PRIORITY_AUDIO);
-    }
+    run(mName.string(), ANDROID_PRIORITY_AUDIO);
 }
 
 bool AudioPolicyService::AudioCommandThread::threadLoop()
@@ -738,6 +751,32 @@
                     }
                     delete data;
                     }break;
+                case STOP_OUTPUT: {
+                    StopOutputData *data = (StopOutputData *)command->mParam;
+                    ALOGV("AudioCommandThread() processing stop output %d",
+                            data->mIO);
+                    sp<AudioPolicyService> svc = mService.promote();
+                    if (svc == 0) {
+                        break;
+                    }
+                    mLock.unlock();
+                    svc->doStopOutput(data->mIO, data->mStream, data->mSession);
+                    mLock.lock();
+                    delete data;
+                    }break;
+                case RELEASE_OUTPUT: {
+                    ReleaseOutputData *data = (ReleaseOutputData *)command->mParam;
+                    ALOGV("AudioCommandThread() processing release output %d",
+                            data->mIO);
+                    sp<AudioPolicyService> svc = mService.promote();
+                    if (svc == 0) {
+                        break;
+                    }
+                    mLock.unlock();
+                    svc->doReleaseOutput(data->mIO);
+                    mLock.lock();
+                    delete data;
+                    }break;
                 default:
                     ALOGW("AudioCommandThread() unknown command %d", command->mCommand);
                 }
@@ -749,7 +788,7 @@
             }
         }
         // release delayed commands wake lock
-        if (mName != "" && mAudioCommands.isEmpty()) {
+        if (mAudioCommands.isEmpty()) {
             release_wake_lock(mName.string());
         }
         ALOGV("AudioCommandThread() going to sleep");
@@ -893,17 +932,46 @@
     return status;
 }
 
+void AudioPolicyService::AudioCommandThread::stopOutputCommand(audio_io_handle_t output,
+                                                               audio_stream_type_t stream,
+                                                               int session)
+{
+    AudioCommand *command = new AudioCommand();
+    command->mCommand = STOP_OUTPUT;
+    StopOutputData *data = new StopOutputData();
+    data->mIO = output;
+    data->mStream = stream;
+    data->mSession = session;
+    command->mParam = (void *)data;
+    Mutex::Autolock _l(mLock);
+    insertCommand_l(command);
+    ALOGV("AudioCommandThread() adding stop output %d", output);
+    mWaitWorkCV.signal();
+}
+
+void AudioPolicyService::AudioCommandThread::releaseOutputCommand(audio_io_handle_t output)
+{
+    AudioCommand *command = new AudioCommand();
+    command->mCommand = RELEASE_OUTPUT;
+    ReleaseOutputData *data = new ReleaseOutputData();
+    data->mIO = output;
+    command->mParam = (void *)data;
+    Mutex::Autolock _l(mLock);
+    insertCommand_l(command);
+    ALOGV("AudioCommandThread() adding release output %d", output);
+    mWaitWorkCV.signal();
+}
+
 // insertCommand_l() must be called with mLock held
 void AudioPolicyService::AudioCommandThread::insertCommand_l(AudioCommand *command, int delayMs)
 {
     ssize_t i;  // not size_t because i will count down to -1
     Vector <AudioCommand *> removedCommands;
-
     nsecs_t time = 0;
     command->mTime = systemTime() + milliseconds(delayMs);
 
     // acquire wake lock to make sure delayed commands are processed
-    if (mName != "" && mAudioCommands.isEmpty()) {
+    if (mAudioCommands.isEmpty()) {
         acquire_wake_lock(PARTIAL_WAKE_LOCK, mName.string());
     }
 
@@ -1060,7 +1128,17 @@
 
 bool AudioPolicyService::isOffloadSupported(const audio_offload_info_t& info)
 {
-    return false;   // stub function
+    if (mpAudioPolicy == NULL) {
+        ALOGV("mpAudioPolicy == NULL");
+        return false;
+    }
+
+    if (mpAudioPolicy->is_offload_supported == NULL) {
+        ALOGV("HAL does not implement is_offload_supported");
+        return false;
+    }
+
+    return mpAudioPolicy->is_offload_supported(mpAudioPolicy, &info);
 }
 
 // ----------------------------------------------------------------------------
@@ -1404,7 +1482,7 @@
         return 0;
     }
     return af->openOutput(module, pDevices, pSamplingRate, pFormat, pChannelMask,
-                          pLatencyMs, flags);
+                          pLatencyMs, flags, offloadInfo);
 }
 
 static audio_io_handle_t aps_open_dup_output(void *service,
diff --git a/services/audioflinger/AudioPolicyService.h b/services/audioflinger/AudioPolicyService.h
index e723c47..ae053a9 100644
--- a/services/audioflinger/AudioPolicyService.h
+++ b/services/audioflinger/AudioPolicyService.h
@@ -139,6 +139,11 @@
     virtual status_t setVoiceVolume(float volume, int delayMs = 0);
     virtual bool isOffloadSupported(const audio_offload_info_t &config);
 
+            status_t doStopOutput(audio_io_handle_t output,
+                                  audio_stream_type_t stream,
+                                  int session = 0);
+            void doReleaseOutput(audio_io_handle_t output);
+
 private:
                         AudioPolicyService() ANDROID_API;
     virtual             ~AudioPolicyService();
@@ -161,10 +166,12 @@
             STOP_TONE,
             SET_VOLUME,
             SET_PARAMETERS,
-            SET_VOICE_VOLUME
+            SET_VOICE_VOLUME,
+            STOP_OUTPUT,
+            RELEASE_OUTPUT
         };
 
-        AudioCommandThread (String8 name);
+        AudioCommandThread (String8 name, const wp<AudioPolicyService>& service);
         virtual             ~AudioCommandThread();
 
                     status_t    dump(int fd);
@@ -182,6 +189,11 @@
                     status_t    parametersCommand(audio_io_handle_t ioHandle,
                                             const char *keyValuePairs, int delayMs = 0);
                     status_t    voiceVolumeCommand(float volume, int delayMs = 0);
+                    void        stopOutputCommand(audio_io_handle_t output,
+                                                  audio_stream_type_t stream,
+                                                  int session);
+                    void        releaseOutputCommand(audio_io_handle_t output);
+
                     void        insertCommand_l(AudioCommand *command, int delayMs = 0);
 
     private:
@@ -226,12 +238,25 @@
             float mVolume;
         };
 
+        class StopOutputData {
+        public:
+            audio_io_handle_t mIO;
+            audio_stream_type_t mStream;
+            int mSession;
+        };
+
+        class ReleaseOutputData {
+        public:
+            audio_io_handle_t mIO;
+        };
+
         Mutex   mLock;
         Condition mWaitWorkCV;
         Vector <AudioCommand *> mAudioCommands; // list of pending commands
         ToneGenerator *mpToneGenerator;     // the tone generator
         AudioCommand mLastCommand;          // last processed command (used by dump)
        String8 mName;                      // string used by wake lock for delayed commands
+        wp<AudioPolicyService> mService;
     };
 
     class EffectDesc {
@@ -316,6 +341,7 @@
                             // device connection state  or routing
     sp<AudioCommandThread> mAudioCommandThread;     // audio commands thread
     sp<AudioCommandThread> mTonePlaybackThread;     // tone playback thread
+    sp<AudioCommandThread> mOutputCommandThread;    // process stop and release output
     struct audio_policy_device *mpAudioPolicyDev;
     struct audio_policy *mpAudioPolicy;
     KeyedVector< audio_source_t, InputSourceDesc* > mInputSources;
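
The stopOutput()/releaseOutput() changes above follow one pattern: the binder call queues a command and returns, and a dedicated AudioCommandThread later calls back into the service with the queue lock dropped. A generic, self-contained illustration of that pattern follows (standard C++ only, not the AOSP classes).

    #include <condition_variable>
    #include <deque>
    #include <functional>
    #include <mutex>

    // Generic illustration of the deferred-command pattern, not the AOSP classes.
    class CommandQueue {
    public:
        void post(std::function<void()> work) {           // called on the binder thread
            std::lock_guard<std::mutex> lg(mLock);
            mQueue.push_back(std::move(work));
            mCond.notify_one();
        }
        void runOne() {                                   // body of the worker thread loop
            std::unique_lock<std::mutex> ul(mLock);
            mCond.wait(ul, [this] { return !mQueue.empty(); });
            std::function<void()> work = std::move(mQueue.front());
            mQueue.pop_front();
            ul.unlock();     // execute outside the queue lock, mirroring the
            work();          // mLock.unlock()/lock() bracket in threadLoop() above
        }
    private:
        std::mutex mLock;
        std::condition_variable mCond;
        std::deque<std::function<void()>> mQueue;
    };
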
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 1c7a64b..d5a21a7 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -95,16 +95,7 @@
 {
     ALOGV("Destructor %p", this);
     if (mEffectInterface != NULL) {
-        if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
-                (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
-            sp<ThreadBase> thread = mThread.promote();
-            if (thread != 0) {
-                audio_stream_t *stream = thread->stream();
-                if (stream != NULL) {
-                    stream->remove_audio_effect(stream, mEffectInterface);
-                }
-            }
-        }
+        remove_effect_from_hal_l();
         // release effect engine
         EffectRelease(mEffectInterface);
     }
@@ -488,7 +479,7 @@
     if (mStatus != NO_ERROR) {
         return mStatus;
     }
-    status_t cmdStatus;
+    status_t cmdStatus = NO_ERROR;
     uint32_t size = sizeof(status_t);
     status_t status = (*mEffectInterface)->command(mEffectInterface,
                                                    EFFECT_CMD_DISABLE,
@@ -496,12 +487,19 @@
                                                    NULL,
                                                    &size,
                                                    &cmdStatus);
-    if (status == 0) {
+    if (status == NO_ERROR) {
         status = cmdStatus;
     }
-    if (status == 0 &&
-            ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
-             (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC)) {
+    if (status == NO_ERROR) {
+        status = remove_effect_from_hal_l();
+    }
+    return status;
+}
+
+status_t AudioFlinger::EffectModule::remove_effect_from_hal_l()
+{
+    if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC ||
+             (mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
             audio_stream_t *stream = thread->stream();
@@ -510,7 +508,7 @@
             }
         }
     }
-    return status;
+    return NO_ERROR;
 }
 
 status_t AudioFlinger::EffectModule::command(uint32_t cmdCode,
@@ -595,6 +593,17 @@
                 h->setEnabled(enabled);
             }
         }
+//EL_FIXME not sure why this is needed?
+//        sp<ThreadBase> thread = mThread.promote();
+//        if (thread == 0) {
+//            return NO_ERROR;
+//        }
+//
+//        if ((thread->type() == ThreadBase::OFFLOAD) && (enabled)) {
+//            PlaybackThread *p = (PlaybackThread *)thread.get();
+//            ALOGV("setEnabled: Offload, invalidate tracks");
+//            p->invalidateTracks(AUDIO_STREAM_MUSIC);
+//        }
     }
     return NO_ERROR;
 }
@@ -1218,9 +1227,7 @@
 // Must be called with EffectChain::mLock locked
 void AudioFlinger::EffectChain::clearInputBuffer_l(sp<ThreadBase> thread)
 {
-    size_t numSamples = thread->frameCount() * thread->channelCount();
-    memset(mInBuffer, 0, numSamples * sizeof(int16_t));
-
+    memset(mInBuffer, 0, thread->frameCount() * thread->frameSize());
 }
 
 // Must be called with EffectChain::mLock locked
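
The clearInputBuffer_l() change above matters because an offload thread's samples are not necessarily 16-bit PCM, so frameCount * channelCount * sizeof(int16_t) can compute the wrong byte count. A trivial illustration of the corrected sizing (an illustrative helper, not AudioFlinger code):

    #include <cstddef>
    #include <cstring>

    // frameSize already accounts for both channel count and sample width,
    // so this stays correct for non-PCM16 (e.g. offloaded) streams.
    static void clearAudioBuffer(void *buf, size_t frameCount, size_t frameSize) {
        std::memset(buf, 0, frameCount * frameSize);
    }
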
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 91303ee..0b7fb83 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -126,6 +126,7 @@
 
     status_t start_l();
     status_t stop_l();
+    status_t remove_effect_from_hal_l();
 
 mutable Mutex               mLock;      // mutex for process, commands and handles list protection
     wp<ThreadBase>      mThread;    // parent thread
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 5350e2c..ad9f4f2 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -45,6 +45,8 @@
 #define MIN_WARMUP_CYCLES          2    // minimum number of loop cycles to wait for warmup
 #define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup
 
+#define FCC_2                       2   // fixed channel count assumption
+
 namespace android {
 
 // Fast mixer thread
@@ -225,7 +227,7 @@
                 } else {
                     format = outputSink->format();
                     sampleRate = Format_sampleRate(format);
-                    ALOG_ASSERT(Format_channelCount(format) == 2);
+                    ALOG_ASSERT(Format_channelCount(format) == FCC_2);
                 }
                 dumpState->mSampleRate = sampleRate;
             }
@@ -241,7 +243,7 @@
                     //       implementation; it would be better to have normal mixer allocate for us
                     //       to avoid blocking here and to prevent possible priority inversion
                     mixer = new AudioMixer(frameCount, sampleRate, FastMixerState::kMaxFastTracks);
-                    mixBuffer = new short[frameCount * 2];
+                    mixBuffer = new short[frameCount * FCC_2];
                     periodNs = (frameCount * 1000000000LL) / sampleRate;    // 1.00
                     underrunNs = (frameCount * 1750000000LL) / sampleRate;  // 1.75
                     overrunNs = (frameCount * 500000000LL) / sampleRate;    // 0.50
@@ -438,7 +440,7 @@
         //bool didFullWrite = false;    // dumpsys could display a count of partial writes
         if ((command & FastMixerState::WRITE) && (outputSink != NULL) && (mixBuffer != NULL)) {
             if (mixBufferState == UNDEFINED) {
-                memset(mixBuffer, 0, frameCount * 2 * sizeof(short));
+                memset(mixBuffer, 0, frameCount * FCC_2 * sizeof(short));
                 mixBufferState = ZEROED;
             }
             if (teeSink != NULL) {
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index b1286d3..8b7433c 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -51,6 +51,8 @@
             audio_stream_type_t streamType() const {
                 return mStreamType;
             }
+            bool        isOffloaded() const { return (mFlags & IAudioFlinger::TRACK_OFFLOAD) != 0; }
+            status_t    setParameters(const String8& keyValuePairs);
             status_t    attachAuxEffect(int EffectId);
             void        setAuxBuffer(int EffectId, int32_t *buffer);
             int32_t     *auxBuffer() const { return mAuxBuffer; }
@@ -68,6 +70,7 @@
     friend class PlaybackThread;
     friend class MixerThread;
     friend class DirectOutputThread;
+    friend class OffloadThread;
 
                         Track(const Track&);
                         Track& operator = (const Track&);
@@ -142,6 +145,7 @@
                                         // barrier, but is read/written atomically
     bool                mIsInvalid; // non-resettable latch, set by invalidate()
     AudioTrackServerProxy*  mAudioTrackServerProxy;
+    bool                mResumeToStopping; // track was paused in stopping state.
 };  // end of Track
 
 class TimedTrack : public Track {
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index e2e023a..07f815e 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -932,13 +932,18 @@
                                              audio_devices_t device,
                                              type_t type)
     :   ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
-        mMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
+        mAllocMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
         // mStreamTypes[] initialized in constructor body
         mOutput(output),
         mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
         mMixerStatus(MIXER_IDLE),
         mMixerStatusIgnoringFastTracks(MIXER_IDLE),
         standbyDelay(AudioFlinger::mStandbyTimeInNsecs),
+        mBytesRemaining(0),
+        mCurrentWriteLength(0),
+        mUseAsyncWrite(false),
+        mWriteBlocked(false),
+        mDraining(false),
         mScreenState(AudioFlinger::mScreenState),
         // index 0 is reserved for normal mixer's submix
         mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1)
@@ -981,7 +986,7 @@
 AudioFlinger::PlaybackThread::~PlaybackThread()
 {
     mAudioFlinger->unregisterWriter(mNBLogWriter);
-    delete [] mMixBuffer;
+    delete [] mAllocMixBuffer;
 }
 
 void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args)
@@ -1187,7 +1192,22 @@
                 goto Exit;
             }
         }
+    } else if (mType == OFFLOAD) {
+        if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
+            ALOGE("createTrack_l() Bad parameter: sampleRate %u, format %d, channelMask 0x%08x "
+                    "for output %p with format %d",
+                    sampleRate, format, channelMask, mOutput, mFormat);
+            lStatus = BAD_VALUE;
+            goto Exit;
+        }
     } else {
+        if ((format & AUDIO_FORMAT_MAIN_MASK) != AUDIO_FORMAT_PCM) {
+            ALOGE("createTrack_l() Bad parameter: format %d "
+                    "for output %p with format %d",
+                    format, mOutput, mFormat);
+            lStatus = BAD_VALUE;
+            goto Exit;
+        }
         // Resampler implementation limits input sampling rate to 2 x output sampling rate.
         if (sampleRate > mSampleRate*2) {
             ALOGE("Sample rate out of range: %u mSampleRate %u", sampleRate, mSampleRate);
@@ -1233,6 +1253,7 @@
             lStatus = NO_MEMORY;
             goto Exit;
         }
+
         mTracks.add(track);
 
         sp<EffectChain> chain = getEffectChain_l(sessionId);
@@ -1307,12 +1328,14 @@
 {
     Mutex::Autolock _l(mLock);
     mStreamTypes[stream].volume = value;
+    signal_l();
 }
 
 void AudioFlinger::PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
 {
     Mutex::Autolock _l(mLock);
     mStreamTypes[stream].mute = muted;
+    signal_l();
 }
 
 float AudioFlinger::PlaybackThread::streamVolume(audio_stream_type_t stream) const
@@ -1332,6 +1355,30 @@
         // the track is newly added, make sure it fills up all its
         // buffers before playing. This is to ensure the client will
         // effectively get the latency it requested.
+        if (!track->isOutputTrack()) {
+            TrackBase::track_state state = track->mState;
+            mLock.unlock();
+            status = AudioSystem::startOutput(mId, track->streamType(), track->sessionId());
+            mLock.lock();
+            // abort if the track was stopped or paused while we released the lock
+            if (state != track->mState) {
+                if (status == NO_ERROR) {
+                    mLock.unlock();
+                    AudioSystem::stopOutput(mId, track->streamType(), track->sessionId());
+                    mLock.lock();
+                }
+                return INVALID_OPERATION;
+            }
+            // abort if start is rejected by audio policy manager
+            if (status != NO_ERROR) {
+                return PERMISSION_DENIED;
+            }
+#ifdef ADD_BATTERY_DATA
+            // to track the speaker usage
+            addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
+#endif
+        }
+
         track->mFillingUpStatus = track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
         track->mResetDone = false;
         track->mPresentationCompleteFrames = 0;
@@ -1352,14 +1399,19 @@
     return status;
 }
 
-// destroyTrack_l() must be called with ThreadBase::mLock held
-void AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
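+// destroyTrack_l() must be called with ThreadBase::mLock held.
+// Returns true if the track was still in the active list when it was destroyed.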
+bool AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
 {
-    track->mState = TrackBase::TERMINATED;
+    track->terminate();
     // active tracks are removed by threadLoop()
-    if (mActiveTracks.indexOf(track) < 0) {
+    bool trackActive = (mActiveTracks.indexOf(track) >= 0);
+    track->mState = TrackBase::STOPPED;
+    if (!trackActive) {
         removeTrack_l(track);
+    } else if (track->isFastTrack() || track->isOffloaded()) {
+        track->mState = TrackBase::STOPPING_1;
     }
+
+    return trackActive;
 }
 
 void AudioFlinger::PlaybackThread::removeTrack_l(const sp<Track>& track)
@@ -1383,6 +1435,16 @@
     }
 }
 
+void AudioFlinger::PlaybackThread::signal_l()
+{
+    // The thread could be blocked waiting for an async callback, so signal it
+    // to handle state changes immediately.
+    // If threadLoop() is currently running unlocked, a signal on mWaitWorkCV would
+    // be lost, so we also set a flag (mSignalPending) to prevent it from blocking
+    // on mWaitWorkCV.
+    mSignalPending = true;
+    mWaitWorkCV.signal();
+}
+
 String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys)
 {
     String8 out_s8 = String8("");
@@ -1428,12 +1490,78 @@
     mAudioFlinger->audioConfigChanged_l(event, mId, param2);
 }
 
+void AudioFlinger::PlaybackThread::writeCallback()
+{
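+    // called from asyncCallback() when the HAL reports STREAM_CBK_EVENT_WRITE_READY:
+    // clear the blocked state through the callback thread so threadLoop() resumes writing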
+    ALOG_ASSERT(mCallbackThread != 0);
+    mCallbackThread->setWriteBlocked(false);
+}
+
+void AudioFlinger::PlaybackThread::drainCallback()
+{
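+    // called from asyncCallback() when the HAL reports STREAM_CBK_EVENT_DRAIN_READY,
+    // i.e. a previously requested drain has completed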
+    ALOG_ASSERT(mCallbackThread != 0);
+    mCallbackThread->setDraining(false);
+}
+
+void AudioFlinger::PlaybackThread::setWriteBlocked(bool value)
+{
+    Mutex::Autolock _l(mLock);
+    mWriteBlocked = value;
+    if (!value) {
+        mWaitWorkCV.signal();
+    }
+}
+
+void AudioFlinger::PlaybackThread::setDraining(bool value)
+{
+    Mutex::Autolock _l(mLock);
+    mDraining = value;
+    if (!value) {
+        mWaitWorkCV.signal();
+    }
+}
+
+// static
+int AudioFlinger::PlaybackThread::asyncCallback(stream_callback_event_t event,
+                                                void *param,
+                                                void *cookie)
+{
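+    // HAL callback for non-blocking output streams; 'cookie' is the PlaybackThread
+    // that registered itself with set_callback() in readOutputParameters()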
+    AudioFlinger::PlaybackThread *me = (AudioFlinger::PlaybackThread *)cookie;
+    ALOGV("asyncCallback() event %d", event);
+    switch (event) {
+    case STREAM_CBK_EVENT_WRITE_READY:
+        me->writeCallback();
+        break;
+    case STREAM_CBK_EVENT_DRAIN_READY:
+        me->drainCallback();
+        break;
+    default:
+        ALOGW("asyncCallback() unknown event %d", event);
+        break;
+    }
+    return 0;
+}
+
 void AudioFlinger::PlaybackThread::readOutputParameters()
 {
+    // unfortunately we have no way of recovering from errors here, hence the LOG_FATAL
     mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common);
     mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common);
+    if (!audio_is_output_channel(mChannelMask)) {
+        LOG_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
+    }
+    if ((mType == MIXER || mType == DUPLICATING) && mChannelMask != AUDIO_CHANNEL_OUT_STEREO) {
+        LOG_FATAL("HAL channel mask %#x not supported for mixed output; "
+                "must be AUDIO_CHANNEL_OUT_STEREO", mChannelMask);
+    }
     mChannelCount = (uint16_t)popcount(mChannelMask);
     mFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
+    if (!audio_is_valid_format(mFormat)) {
+        LOG_FATAL("HAL format %d not valid for output", mFormat);
+    }
+    if ((mType == MIXER || mType == DUPLICATING) && mFormat != AUDIO_FORMAT_PCM_16_BIT) {
+        LOG_FATAL("HAL format %d not supported for mixed output; must be AUDIO_FORMAT_PCM_16_BIT",
+                mFormat);
+    }
     mFrameSize = audio_stream_frame_size(&mOutput->stream->common);
     mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize;
     if (mFrameCount & 15) {
@@ -1441,6 +1569,14 @@
                 mFrameCount);
     }
 
+    if ((mOutput->flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING) &&
+            (mOutput->stream->set_callback != NULL)) {
+        if (mOutput->stream->set_callback(mOutput->stream,
+                                      AudioFlinger::PlaybackThread::asyncCallback, this) == 0) {
+            mUseAsyncWrite = true;
+        }
+    }
+
     // Calculate size of normal mix buffer relative to the HAL output buffer size
     double multiplier = 1.0;
     if (mType == MIXER && (kUseFastMixer == FastMixer_Static ||
@@ -1483,9 +1619,11 @@
     ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount,
             mNormalFrameCount);
 
-    delete[] mMixBuffer;
-    mMixBuffer = new int16_t[mNormalFrameCount * mChannelCount];
-    memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
+    delete[] mAllocMixBuffer;
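+    // over-allocate so mMixBuffer can be rounded up to a frame size boundary (at least
+    // int16_t aligned); direct and offload outputs may use frame sizes other than
+    // 16-bit stereo PCM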
+    size_t align = (mFrameSize < sizeof(int16_t)) ? sizeof(int16_t) : mFrameSize;
+    mAllocMixBuffer = new int8_t[mNormalFrameCount * mFrameSize + align - 1];
+    mMixBuffer = (int16_t *) ((((size_t)mAllocMixBuffer + align - 1) / align) * align);
+    memset(mMixBuffer, 0, mNormalFrameCount * mFrameSize);
 
     // force reconfiguration of effect chains and engines to take new buffer size and audio
     // parameters into account
@@ -1622,13 +1760,18 @@
     if (CC_UNLIKELY(count)) {
         for (size_t i = 0 ; i < count ; i++) {
             const sp<Track>& track = tracksToRemove.itemAt(i);
-            if ((track->sharedBuffer() != 0) &&
-                    (track->mState == TrackBase::ACTIVE || track->mState == TrackBase::RESUMING)) {
+            if (!track->isOutputTrack()) {
                 AudioSystem::stopOutput(mId, track->streamType(), track->sessionId());
+#ifdef ADD_BATTERY_DATA
+                // to track the speaker usage
+                addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
+#endif
+                if (track->isTerminated()) {
+                    AudioSystem::releaseOutput(mId);
+                }
             }
         }
     }
-
 }
 
 void AudioFlinger::PlaybackThread::checkSilentMode_l()
@@ -1649,17 +1792,18 @@
 }
 
 // shared by MIXER and DIRECT, overridden by DUPLICATING
-void AudioFlinger::PlaybackThread::threadLoop_write()
+ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
 {
     // FIXME rewrite to reduce number of system calls
     mLastWriteTime = systemTime();
     mInWrite = true;
-    int bytesWritten;
+    ssize_t bytesWritten;
 
     // If an NBAIO sink is present, use it to write the normal mixer's submix
     if (mNormalSink != 0) {
 #define mBitShift 2 // FIXME
-        size_t count = mixBufferSize >> mBitShift;
+        size_t count = mBytesRemaining >> mBitShift;
+        size_t offset = (mCurrentWriteLength - mBytesRemaining) >> 1;
         ATRACE_BEGIN("write");
         // update the setpoint when AudioFlinger::mScreenState changes
         uint32_t screenState = AudioFlinger::mScreenState;
@@ -1671,7 +1815,7 @@
                         (pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
             }
         }
-        ssize_t framesWritten = mNormalSink->write(mMixBuffer, count);
+        ssize_t framesWritten = mNormalSink->write(mMixBuffer + offset, count);
         ATRACE_END();
         if (framesWritten > 0) {
             bytesWritten = framesWritten << mBitShift;
@@ -1680,15 +1824,48 @@
         }
     // otherwise use the HAL / AudioStreamOut directly
     } else {
-        // Direct output thread.
-        bytesWritten = (int)mOutput->stream->write(mOutput->stream, mMixBuffer, mixBufferSize);
+        // Direct output and offload threads
+        size_t offset = (mCurrentWriteLength - mBytesRemaining) / sizeof(int16_t);
+        if (mUseAsyncWrite) {
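+            // mark the write as blocked before calling into the HAL so that a callback
+            // arriving while write() is still in progress is not missed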
+            mWriteBlocked = true;
+            ALOG_ASSERT(mCallbackThread != 0);
+            mCallbackThread->setWriteBlocked(true);
+        }
+        bytesWritten = mOutput->stream->write(mOutput->stream,
+                                                   mMixBuffer + offset, mBytesRemaining);
+        if (mUseAsyncWrite &&
+                ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
+            // do not wait for the async callback in case of error or full write
+            mWriteBlocked = false;
+            ALOG_ASSERT(mCallbackThread != 0);
+            mCallbackThread->setWriteBlocked(false);
+        }
     }
 
-    if (bytesWritten > 0) {
-        mBytesWritten += mixBufferSize;
-    }
     mNumWrites++;
     mInWrite = false;
+
+    return bytesWritten;
+}
+
+void AudioFlinger::PlaybackThread::threadLoop_drain()
+{
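+    // ask the HAL to drain its internal buffers; AUDIO_DRAIN_EARLY_NOTIFY is used when
+    // only the current track is draining (MIXER_DRAIN_TRACK), presumably so the callback
+    // arrives early enough to begin the next track without a gap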
+    if (mOutput->stream->drain) {
+        ALOGV("draining %s", (mMixerStatus == MIXER_DRAIN_TRACK) ? "early" : "full");
+        if (mUseAsyncWrite) {
+            mDraining = true;
+            ALOG_ASSERT(mCallbackThread != 0);
+            mCallbackThread->setDraining(true);
+        }
+        mOutput->stream->drain(mOutput->stream,
+            (mMixerStatus == MIXER_DRAIN_TRACK) ? AUDIO_DRAIN_EARLY_NOTIFY
+                                                : AUDIO_DRAIN_ALL);
+    }
+}
+
+void AudioFlinger::PlaybackThread::threadLoop_exit()
+{
+    // Default implementation has nothing to do
 }
 
 /*
@@ -1929,10 +2106,29 @@
 
             saveOutputTracks();
 
-            // put audio hardware into standby after short delay
-            if (CC_UNLIKELY((!mActiveTracks.size() && systemTime() > standbyTime) ||
-                        isSuspended())) {
-                if (!mStandby) {
+            if (mSignalPending) {
+                // A signal was raised while we were unlocked
+                mSignalPending = false;
+            } else if (waitingAsyncCallback_l()) {
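+                // an offloaded write or drain is still pending in the HAL: release the
+                // wake lock and sleep until the async callback or a signal_l() wakes us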
+                if (exitPending()) {
+                    break;
+                }
+                releaseWakeLock_l();
+                ALOGV("wait async completion");
+                mWaitWorkCV.wait(mLock);
+                ALOGV("async completion/wake");
+                acquireWakeLock_l();
+                if (exitPending()) {
+                    break;
+                }
+                if (!mActiveTracks.size() && (systemTime() > standbyTime)) {
+                    continue;
+                }
+                sleepTime = 0;
+            } else if ((!mActiveTracks.size() && systemTime() > standbyTime) ||
+                                   isSuspended()) {
+                // put audio hardware into standby after short delay
+                if (shouldStandby_l()) {
 
                     threadLoop_standby();
 
@@ -1959,7 +2155,7 @@
                     mMixerStatus = MIXER_IDLE;
                     mMixerStatusIgnoringFastTracks = MIXER_IDLE;
                     mBytesWritten = 0;
-
+                    mBytesRemaining = 0;
                     checkSilentMode_l();
 
                     standbyTime = systemTime() + standbyDelay;
@@ -1981,50 +2177,73 @@
             lockEffectChains_l(effectChains);
         }
 
-        if (CC_LIKELY(mMixerStatus == MIXER_TRACKS_READY)) {
-            threadLoop_mix();
-        } else {
-            threadLoop_sleepTime();
-        }
+        if (mBytesRemaining == 0) {
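+            // only prepare new data once the previous buffer has been entirely written,
+            // possibly over several partial non-blocking writes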
+            mCurrentWriteLength = 0;
+            if (mMixerStatus == MIXER_TRACKS_READY) {
+                // threadLoop_mix() sets mCurrentWriteLength
+                threadLoop_mix();
+            } else if ((mMixerStatus != MIXER_DRAIN_TRACK)
+                        && (mMixerStatus != MIXER_DRAIN_ALL)) {
+                // threadLoop_sleepTime sets sleepTime to 0 if data
+                // must be written to HAL
+                threadLoop_sleepTime();
+                if (sleepTime == 0) {
+                    mCurrentWriteLength = mixBufferSize;
+                }
+            }
+            mBytesRemaining = mCurrentWriteLength;
+            if (isSuspended()) {
+                sleepTime = suspendSleepTimeUs();
+                // simulate write to HAL when suspended
+                mBytesWritten += mixBufferSize;
+                mBytesRemaining = 0;
+            }
 
-        if (isSuspended()) {
-            sleepTime = suspendSleepTimeUs();
-            mBytesWritten += mixBufferSize;
-        }
-
-        // only process effects if we're going to write
-        if (sleepTime == 0) {
-            for (size_t i = 0; i < effectChains.size(); i ++) {
-                effectChains[i]->process_l();
+            // only process effects if we're going to write
+            if (sleepTime == 0) {
+                for (size_t i = 0; i < effectChains.size(); i ++) {
+                    effectChains[i]->process_l();
+                }
             }
         }
 
         // enable changes in effect chain
         unlockEffectChains(effectChains);
 
-        // sleepTime == 0 means we must write to audio hardware
-        if (sleepTime == 0) {
-
-            threadLoop_write();
-
-if (mType == MIXER) {
-            // write blocked detection
-            nsecs_t now = systemTime();
-            nsecs_t delta = now - mLastWriteTime;
-            if (!mStandby && delta > maxPeriod) {
-                mNumDelayedWrites++;
-                if ((now - lastWarning) > kWarningThrottleNs) {
-                    ATRACE_NAME("underrun");
-                    ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
-                            ns2ms(delta), mNumDelayedWrites, this);
-                    lastWarning = now;
+        if (!waitingAsyncCallback()) {
+            // sleepTime == 0 means we must write to audio hardware
+            if (sleepTime == 0) {
+                if (mBytesRemaining) {
+                    ssize_t ret = threadLoop_write();
+                    if (ret < 0) {
+                        mBytesRemaining = 0;
+                    } else {
+                        mBytesWritten += ret;
+                        mBytesRemaining -= ret;
+                    }
+                } else if ((mMixerStatus == MIXER_DRAIN_TRACK) ||
+                        (mMixerStatus == MIXER_DRAIN_ALL)) {
+                    threadLoop_drain();
                 }
-            }
+if (mType == MIXER) {
+                // write blocked detection
+                nsecs_t now = systemTime();
+                nsecs_t delta = now - mLastWriteTime;
+                if (!mStandby && delta > maxPeriod) {
+                    mNumDelayedWrites++;
+                    if ((now - lastWarning) > kWarningThrottleNs) {
+                        ATRACE_NAME("underrun");
+                        ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
+                                ns2ms(delta), mNumDelayedWrites, this);
+                        lastWarning = now;
+                    }
+                }
 }
 
-            mStandby = false;
-        } else {
-            usleep(sleepTime);
+                mStandby = false;
+            } else {
+                usleep(sleepTime);
+            }
         }
 
         // Finally let go of removed track(s), without the lock held
@@ -2046,8 +2265,10 @@
         // is now local to this block, but will keep it for now (at least until merge done).
     }
 
+    threadLoop_exit();
+
     // for DuplicatingThread, standby mode is handled by the outputTracks, otherwise ...
-    if (mType == MIXER || mType == DIRECT) {
+    if (mType == MIXER || mType == DIRECT || mType == OFFLOAD) {
         // put output stream into standby mode
         if (!mStandby) {
             mOutput->stream->common.standby(&mOutput->stream->common);
@@ -2060,6 +2281,28 @@
     return false;
 }
 
+// removeTracks_l() must be called with ThreadBase::mLock held
+void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove)
+{
+    size_t count = tracksToRemove.size();
+    if (CC_UNLIKELY(count)) {
+        for (size_t i=0 ; i<count ; i++) {
+            const sp<Track>& track = tracksToRemove.itemAt(i);
+            mActiveTracks.remove(track);
+            ALOGV("removeTracks_l removing track on session %d", track->sessionId());
+            sp<EffectChain> chain = getEffectChain_l(track->sessionId());
+            if (chain != 0) {
+                ALOGV("stopping track on chain %p for session Id: %d", chain.get(),
+                        track->sessionId());
+                chain->decActiveTrackCnt();
+            }
+            if (track->isTerminated()) {
+                removeTrack_l(track);
+            }
+        }
+    }
+}
 
 // ----------------------------------------------------------------------------
 
@@ -2264,7 +2507,7 @@
     PlaybackThread::threadLoop_removeTracks(tracksToRemove);
 }
 
-void AudioFlinger::MixerThread::threadLoop_write()
+ssize_t AudioFlinger::MixerThread::threadLoop_write()
 {
     // FIXME we should only do one push per cycle; confirm this is true
     // Start the fast mixer if it's not already running
@@ -2296,7 +2539,7 @@
             sq->end(false /*didModify*/);
         }
     }
-    PlaybackThread::threadLoop_write();
+    return PlaybackThread::threadLoop_write();
 }
 
 void AudioFlinger::MixerThread::threadLoop_standby()
@@ -2328,11 +2571,40 @@
     PlaybackThread::threadLoop_standby();
 }
 
+// Empty implementation for standard mixer
+// Overridden for offloaded playback
+void AudioFlinger::PlaybackThread::flushOutput_l()
+{
+}
+
+bool AudioFlinger::PlaybackThread::waitingAsyncCallback_l()
+{
+    return false;
+}
+
+bool AudioFlinger::PlaybackThread::shouldStandby_l()
+{
+    return !mStandby;
+}
+
+bool AudioFlinger::PlaybackThread::waitingAsyncCallback()
+{
+    Mutex::Autolock _l(mLock);
+    return waitingAsyncCallback_l();
+}
+
 // shared by MIXER and DIRECT, overridden by DUPLICATING
 void AudioFlinger::PlaybackThread::threadLoop_standby()
 {
     ALOGV("Audio hardware entering standby, mixer %p, suspend count %d", this, mSuspended);
     mOutput->stream->common.standby(&mOutput->stream->common);
+    if (mUseAsyncWrite != 0) {
+        mWriteBlocked = false;
+        mDraining = false;
+        ALOG_ASSERT(mCallbackThread != 0);
+        mCallbackThread->setWriteBlocked(false);
+        mCallbackThread->setDraining(false);
+    }
 }
 
 void AudioFlinger::MixerThread::threadLoop_mix()
@@ -2353,6 +2625,7 @@
 
     // mix buffers...
     mAudioMixer->process(pts);
+    mCurrentWriteLength = mixBufferSize;
     // increase sleep time progressively when application underrun condition clears.
     // Only increase sleep time if the mixer is ready for two consecutive times to avoid
     // that a steady state of alternating ready/not ready conditions keeps the sleep time
@@ -2480,7 +2753,7 @@
             switch (track->mState) {
             case TrackBase::STOPPING_1:
                 // track stays active in STOPPING_1 state until first underrun
-                if (recentUnderruns > 0) {
+                if (recentUnderruns > 0 || track->isTerminated()) {
                     track->mState = TrackBase::STOPPING_2;
                 }
                 break;
@@ -2522,7 +2795,6 @@
                 // fall through
             case TrackBase::STOPPING_2:
             case TrackBase::PAUSED:
-            case TrackBase::TERMINATED:
             case TrackBase::STOPPED:
             case TrackBase::FLUSHED:   // flush() while active
                 // Check for presentation complete if track is inactive
@@ -2634,8 +2906,7 @@
         if ((framesReady >= minFrames) && track->isReady() &&
                 !track->isPaused() && !track->isTerminated())
         {
-            ALOGVV("track %d u=%08x, s=%08x [OK] on thread %p", name, cblk->user, cblk->server,
-                    this);
+            ALOGVV("track %d s=%08x [OK] on thread %p", name, cblk->server, this);
 
             mixedTracks++;
 
@@ -2709,6 +2980,7 @@
                 }
                 va = (uint32_t)(v * sendLevel);
             }
+
             // Delegate volume control to effect in track effect chain if needed
             if (chain != 0 && chain->setVolume_l(&vl, &vr)) {
                 // Do not ramp volume if volume is controlled by effect
@@ -2800,8 +3072,7 @@
                 chain->clearInputBuffer();
             }
 
-            ALOGVV("track %d u=%08x, s=%08x [NOT READY] on thread %p", name, cblk->user,
-                    cblk->server, this);
+            ALOGVV("track %d s=%08x [NOT READY] on thread %p", name, cblk->server, this);
             if ((track->sharedBuffer() != 0) || track->isTerminated() ||
                     track->isStopped() || track->isPaused()) {
                 // We have consumed all the buffers of this track.
@@ -2887,30 +3158,13 @@
     }
 
     // remove all the tracks that need to be...
-    count = tracksToRemove->size();
-    if (CC_UNLIKELY(count)) {
-        for (size_t i=0 ; i<count ; i++) {
-            const sp<Track>& track = tracksToRemove->itemAt(i);
-            mActiveTracks.remove(track);
-            if (track->mainBuffer() != mMixBuffer) {
-                chain = getEffectChain_l(track->sessionId());
-                if (chain != 0) {
-                    ALOGV("stopping track on chain %p for session Id: %d", chain.get(),
-                            track->sessionId());
-                    chain->decActiveTrackCnt();
-                }
-            }
-            if (track->isTerminated()) {
-                removeTrack_l(track);
-            }
-        }
-    }
+    removeTracks_l(*tracksToRemove);
 
     // mix buffer must be cleared if all tracks are connected to an
     // effect chain as in this case the mixer will not write to
     // mix buffer and track effects will accumulate into it
-    if ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
-            (mixedTracks == 0 && fastTracks > 0)) {
+    if ((mBytesRemaining == 0) && ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
+            (mixedTracks == 0 && fastTracks > 0))) {
         // FIXME as a performance optimization, should remember previous zero status
         memset(mMixBuffer, 0, mNormalFrameCount * mChannelCount * sizeof(int16_t));
     }
@@ -3142,10 +3396,63 @@
 {
 }
 
+AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
+        AudioStreamOut* output, audio_io_handle_t id, uint32_t device,
+        ThreadBase::type_t type)
+    :   PlaybackThread(audioFlinger, output, id, device, type)
+        // mLeftVolFloat, mRightVolFloat
+{
+}
+
 AudioFlinger::DirectOutputThread::~DirectOutputThread()
 {
 }
 
+void AudioFlinger::DirectOutputThread::processVolume_l(Track *track, bool lastTrack)
+{
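+    // compute this track's volume from the master, stream type and client gains; for the
+    // last (most recently started) active track, delegate to the effect chain if present,
+    // then pass the remaining gain to the HAL if it implements set_volume()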
+    audio_track_cblk_t* cblk = track->cblk();
+    float left, right;
+
+    if (mMasterMute || mStreamTypes[track->streamType()].mute) {
+        left = right = 0;
+    } else {
+        float typeVolume = mStreamTypes[track->streamType()].volume;
+        float v = mMasterVolume * typeVolume;
+        AudioTrackServerProxy *proxy = track->mAudioTrackServerProxy;
+        uint32_t vlr = proxy->getVolumeLR();
+        float v_clamped = v * (vlr & 0xFFFF);
+        if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
+        left = v_clamped/MAX_GAIN;
+        v_clamped = v * (vlr >> 16);
+        if (v_clamped > MAX_GAIN) v_clamped = MAX_GAIN;
+        right = v_clamped/MAX_GAIN;
+    }
+
+    if (lastTrack) {
+        if (left != mLeftVolFloat || right != mRightVolFloat) {
+            mLeftVolFloat = left;
+            mRightVolFloat = right;
+
+            // Convert volumes from float to 8.24
+            uint32_t vl = (uint32_t)(left * (1 << 24));
+            uint32_t vr = (uint32_t)(right * (1 << 24));
+
+            // Delegate volume control to effect in track effect chain if needed
+            // only one effect chain can be present on DirectOutputThread, so if
+            // there is one, the track is connected to it
+            if (!mEffectChains.isEmpty()) {
+                mEffectChains[0]->setVolume_l(&vl, &vr);
+                left = (float)vl / (1 << 24);
+                right = (float)vr / (1 << 24);
+            }
+            if (mOutput->stream->set_volume) {
+                mOutput->stream->set_volume(mOutput->stream, left, right);
+            }
+        }
+    }
+}
+
+
 AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prepareTracks_l(
     Vector< sp<Track> > *tracksToRemove
 )
@@ -3172,6 +3479,12 @@
         } else {
             minFrames = 1;
         }
+        // Only consider last track started for volume and mixer state control.
+        // This is the last entry in mActiveTracks unless a track underruns.
+        // As we only care about the transition phase between two tracks on a
+        // direct output, it is not a problem to ignore the underrun case.
+        bool last = (i == (count - 1));
+
         if ((track->framesReady() >= minFrames) && track->isReady() &&
                 !track->isPaused() && !track->isTerminated())
         {
@@ -3186,52 +3499,8 @@
             }
 
             // compute volume for this track
-            float left, right;
-            if (mMasterMute || track->isPausing() || mStreamTypes[track->streamType()].mute) {
-                left = right = 0;
-                if (track->isPausing()) {
-                    track->setPaused();
-                }
-            } else {
-                float typeVolume = mStreamTypes[track->streamType()].volume;
-                float v = mMasterVolume * typeVolume;
-                uint32_t vlr = track->mAudioTrackServerProxy->getVolumeLR();
-                float v_clamped = v * (vlr & 0xFFFF);
-                if (v_clamped > MAX_GAIN) {
-                    v_clamped = MAX_GAIN;
-                }
-                left = v_clamped/MAX_GAIN;
-                v_clamped = v * (vlr >> 16);
-                if (v_clamped > MAX_GAIN) {
-                    v_clamped = MAX_GAIN;
-                }
-                right = v_clamped/MAX_GAIN;
-            }
-            // Only consider last track started for volume and mixer state control.
-            // This is the last entry in mActiveTracks unless a track underruns.
-            // As we only care about the transition phase between two tracks on a
-            // direct output, it is not a problem to ignore the underrun case.
-            if (i == (count - 1)) {
-                if (left != mLeftVolFloat || right != mRightVolFloat) {
-                    mLeftVolFloat = left;
-                    mRightVolFloat = right;
-
-                    // Convert volumes from float to 8.24
-                    uint32_t vl = (uint32_t)(left * (1 << 24));
-                    uint32_t vr = (uint32_t)(right * (1 << 24));
-
-                    // Delegate volume control to effect in track effect chain if needed
-                    // only one effect chain can be present on DirectOutputThread, so if
-                    // there is one, the track is connected to it
-                    if (!mEffectChains.isEmpty()) {
-                        // Do not ramp volume if volume is controlled by effect
-                        mEffectChains[0]->setVolume_l(&vl, &vr);
-                        left = (float)vl / (1 << 24);
-                        right = (float)vr / (1 << 24);
-                    }
-                    mOutput->stream->set_volume(mOutput->stream, left, right);
-                }
-
+            processVolume_l(track, last);
+            if (last) {
                 // reset retry count
                 track->mRetryCount = kMaxTrackRetriesDirect;
                 mActiveTrack = t;
@@ -3265,7 +3534,7 @@
                 if (--(track->mRetryCount) <= 0) {
                     ALOGV("BUFFER TIMEOUT: remove(%d) from active list", track->name());
                     tracksToRemove->add(track);
-                } else if (i == (count -1)){
+                } else if (last) {
                     mixerStatus = MIXER_TRACKS_ENABLED;
                 }
             }
@@ -3273,21 +3542,7 @@
     }
 
     // remove all the tracks that need to be...
-    count = tracksToRemove->size();
-    if (CC_UNLIKELY(count)) {
-        for (size_t i = 0 ; i < count ; i++) {
-            const sp<Track>& track = tracksToRemove->itemAt(i);
-            mActiveTracks.remove(track);
-            if (!mEffectChains.isEmpty()) {
-                ALOGV("stopping track on chain %p for session Id: %d", mEffectChains[0].get(),
-                      track->sessionId());
-                mEffectChains[0]->decActiveTrackCnt();
-            }
-            if (track->isTerminated()) {
-                removeTrack_l(track);
-            }
-        }
-    }
+    removeTracks_l(*tracksToRemove);
 
     return mixerStatus;
 }
@@ -3310,10 +3565,10 @@
         curBuf += buffer.frameCount * mFrameSize;
         mActiveTrack->releaseBuffer(&buffer);
     }
+    mCurrentWriteLength = curBuf - (int8_t *)mMixBuffer;
     sleepTime = 0;
     standbyTime = systemTime() + standbyDelay;
     mActiveTrack.clear();
-
 }
 
 void AudioFlinger::DirectOutputThread::threadLoop_sleepTime()
@@ -3434,6 +3689,307 @@
 
 // ----------------------------------------------------------------------------
 
+AudioFlinger::AsyncCallbackThread::AsyncCallbackThread(
+        const sp<AudioFlinger::OffloadThread>& offloadThread)
+    :   Thread(false /*canCallJava*/),
+        mOffloadThread(offloadThread),
+        mWriteBlocked(false),
+        mDraining(false)
+{
+}
+
+AudioFlinger::AsyncCallbackThread::~AsyncCallbackThread()
+{
+}
+
+void AudioFlinger::AsyncCallbackThread::onFirstRef()
+{
+    run("Offload Cbk", ANDROID_PRIORITY_URGENT_AUDIO);
+}
+
+bool AudioFlinger::AsyncCallbackThread::threadLoop()
+{
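+    // relay loop: the HAL callback only updates flags on this thread (setWriteBlocked()/
+    // setDraining()), and the cleared state is forwarded to the OffloadThread from here,
+    // presumably so the HAL callback never has to block on the playback thread's mutex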
+    while (!exitPending()) {
+        bool writeBlocked;
+        bool draining;
+
+        {
+            Mutex::Autolock _l(mLock);
+            mWaitWorkCV.wait(mLock);
+            if (exitPending()) {
+                break;
+            }
+            writeBlocked = mWriteBlocked;
+            draining = mDraining;
+            ALOGV("AsyncCallbackThread mWriteBlocked %d mDraining %d", mWriteBlocked, mDraining);
+        }
+        {
+            sp<AudioFlinger::OffloadThread> offloadThread = mOffloadThread.promote();
+            if (offloadThread != 0) {
+                if (writeBlocked == false) {
+                    offloadThread->setWriteBlocked(false);
+                }
+                if (draining == false) {
+                    offloadThread->setDraining(false);
+                }
+            }
+        }
+    }
+    return false;
+}
+
+void AudioFlinger::AsyncCallbackThread::exit()
+{
+    ALOGV("AsyncCallbackThread::exit");
+    Mutex::Autolock _l(mLock);
+    requestExit();
+    mWaitWorkCV.broadcast();
+}
+
+void AudioFlinger::AsyncCallbackThread::setWriteBlocked(bool value)
+{
+    Mutex::Autolock _l(mLock);
+    mWriteBlocked = value;
+    if (!value) {
+        mWaitWorkCV.signal();
+    }
+}
+
+void AudioFlinger::AsyncCallbackThread::setDraining(bool value)
+{
+    Mutex::Autolock _l(mLock);
+    mDraining = value;
+    if (!value) {
+        mWaitWorkCV.signal();
+    }
+}
+
+
+// ----------------------------------------------------------------------------
+AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
+        AudioStreamOut* output, audio_io_handle_t id, uint32_t device)
+    :   DirectOutputThread(audioFlinger, output, id, device, OFFLOAD),
+        mHwPaused(false),
+        mPausedBytesRemaining(0)
+{
+    mCallbackThread = new AudioFlinger::AsyncCallbackThread(this);
+}
+
+AudioFlinger::OffloadThread::~OffloadThread()
+{
+    mPreviousTrack.clear();
+}
+
+void AudioFlinger::OffloadThread::threadLoop_exit()
+{
+    if (mFlushPending || mHwPaused) {
+        // If a flush is pending or track was paused, just discard buffered data
+        flushHw_l();
+    } else {
+        mMixerStatus = MIXER_DRAIN_ALL;
+        threadLoop_drain();
+    }
+    mCallbackThread->exit();
+    PlaybackThread::threadLoop_exit();
+}
+
+AudioFlinger::PlaybackThread::mixer_state AudioFlinger::OffloadThread::prepareTracks_l(
+    Vector< sp<Track> > *tracksToRemove
+)
+{
+    ALOGV("OffloadThread::prepareTracks_l");
+    size_t count = mActiveTracks.size();
+
+    mixer_state mixerStatus = MIXER_IDLE;
+    if (mFlushPending) {
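+        // a flush requested through flushOutput_l() is deferred to here, presumably so
+        // the HAL flush is serialized with writes on the playback thread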
+        flushHw_l();
+        mFlushPending = false;
+    }
+    // find out which tracks need to be processed
+    for (size_t i = 0; i < count; i++) {
+        sp<Track> t = mActiveTracks[i].promote();
+        // The track died recently
+        if (t == 0) {
+            continue;
+        }
+        Track* const track = t.get();
+        audio_track_cblk_t* cblk = track->cblk();
+        if (mPreviousTrack != NULL) {
+            if (t != mPreviousTrack) {
+                // Flush any data still being written from the last track
+                mBytesRemaining = 0;
+                if (mPausedBytesRemaining) {
+                    // The last track was paused, so we also need to flush the saved
+                    // mix buffer state and invalidate the track so that it will
+                    // re-submit the unwritten data when it is next resumed
+                    mPausedBytesRemaining = 0;
+                    // Invalidate is a bit drastic - would be more efficient
+                    // to have a flag to tell client that some of the
+                    // previously written data was lost
+                    mPreviousTrack->invalidate();
+                }
+            }
+        }
+        mPreviousTrack = t;
+        bool last = (i == (count - 1));
+        if (track->isPausing()) {
+            track->setPaused();
+            if (last) {
+                if (!mHwPaused) {
+                    mOutput->stream->pause(mOutput->stream);
+                    mHwPaused = true;
+                }
+                // If we were part way through writing the mixbuffer to
+                // the HAL we must save this until we resume
+                // BUG - this will be wrong if a different track is made active,
+                // in that case we want to discard the pending data in the
+                // mixbuffer and tell the client to present it again when the
+                // track is resumed
+                mPausedWriteLength = mCurrentWriteLength;
+                mPausedBytesRemaining = mBytesRemaining;
+                mBytesRemaining = 0;    // stop writing
+            }
+            tracksToRemove->add(track);
+        } else if (track->framesReady() && track->isReady() &&
+                !track->isPaused() && !track->isTerminated()) {
+            ALOGVV("OffloadThread: track %d s=%08x [OK]", track->name(), cblk->server);
+            if (track->mFillingUpStatus == Track::FS_FILLED) {
+                track->mFillingUpStatus = Track::FS_ACTIVE;
+                mLeftVolFloat = mRightVolFloat = 0;
+                if (track->mState == TrackBase::RESUMING) {
+                    if (CC_UNLIKELY(mPausedBytesRemaining)) {
+                        // Need to continue write that was interrupted
+                        mCurrentWriteLength = mPausedWriteLength;
+                        mBytesRemaining = mPausedBytesRemaining;
+                        mPausedBytesRemaining = 0;
+                    }
+                    track->mState = TrackBase::ACTIVE;
+                }
+            }
+
+            if (last) {
+                if (mHwPaused) {
+                    mOutput->stream->resume(mOutput->stream);
+                    mHwPaused = false;
+                    // threadLoop_mix() will handle the case that we need to
+                    // resume an interrupted write
+                }
+                // reset retry count
+                track->mRetryCount = kMaxTrackRetriesOffload;
+                mActiveTrack = t;
+                mixerStatus = MIXER_TRACKS_READY;
+            }
+        } else {
+            ALOGVV("OffloadThread: track %d s=%08x [NOT READY]", track->name(), cblk->server);
+            if (track->isStopping_1()) {
+                // The hardware buffer can hold a large amount of audio, so we must
+                // wait for all of the current track's data to drain before we say
+                // that the track is stopped.
+                if (mBytesRemaining == 0) {
+                    // Only start draining when all data in mixbuffer
+                    // has been written
+                    ALOGV("OffloadThread: underrun and STOPPING_1 -> draining, STOPPING_2");
+                    track->mState = TrackBase::STOPPING_2; // so presentation completes after drain
+                    sleepTime = 0;
+                    standbyTime = systemTime() + standbyDelay;
+                    if (last) {
+                        mixerStatus = MIXER_DRAIN_TRACK;
+                        if (mHwPaused) {
+                            // It is possible to move from PAUSED to STOPPING_1 without
+                            // a resume so we must ensure hardware is running
+                            mOutput->stream->resume(mOutput->stream);
+                            mHwPaused = false;
+                        }
+                    }
+                }
+            } else if (track->isStopping_2()) {
+                // Drain has completed, signal presentation complete
+                if (!mDraining || !last) {
+                    track->mState = TrackBase::STOPPED;
+                    size_t audioHALFrames =
+                            (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
+                    size_t framesWritten =
+                            mBytesWritten / audio_stream_frame_size(&mOutput->stream->common);
+                    track->presentationComplete(framesWritten, audioHALFrames);
+                    track->reset();
+                    tracksToRemove->add(track);
+                }
+            } else {
+                // No buffers for this track. Give it a few chances to
+                // fill a buffer, then remove it from active list.
+                if (--(track->mRetryCount) <= 0) {
+                    ALOGV("OffloadThread: BUFFER TIMEOUT: remove(%d) from active list",
+                          track->name());
+                    tracksToRemove->add(track);
+                } else if (last) {
+                    mixerStatus = MIXER_TRACKS_ENABLED;
+                }
+            }
+        }
+        // compute volume for this track
+        processVolume_l(track, last);
+    }
+    // remove all the tracks that need to be...
+    removeTracks_l(*tracksToRemove);
+
+    return mixerStatus;
+}
+
+void AudioFlinger::OffloadThread::flushOutput_l()
+{
+    mFlushPending = true;
+}
+
+// must be called with thread mutex locked
+bool AudioFlinger::OffloadThread::waitingAsyncCallback_l()
+{
+    ALOGV("waitingAsyncCallback_l mWriteBlocked %d mDraining %d", mWriteBlocked, mDraining);
+    if (mUseAsyncWrite && (mWriteBlocked || mDraining)) {
+        return true;
+    }
+    return false;
+}
+
+// must be called with thread mutex locked
+bool AudioFlinger::OffloadThread::shouldStandby_l()
+{
+    bool trackPaused = false;
+
+    // do not put the HAL in standby when paused. AwesomePlayer clears the offloaded
+    // AudioTrack after a timeout and we will enter standby then.
+    if (mTracks.size() > 0) {
+        trackPaused = mTracks[mTracks.size() - 1]->isPaused();
+    }
+
+    return !mStandby && !trackPaused;
+}
+
+
+bool AudioFlinger::OffloadThread::waitingAsyncCallback()
+{
+    Mutex::Autolock _l(mLock);
+    return waitingAsyncCallback_l();
+}
+
+void AudioFlinger::OffloadThread::flushHw_l()
+{
+    mOutput->stream->flush(mOutput->stream);
+    // Flush anything still waiting in the mixbuffer
+    mCurrentWriteLength = 0;
+    mBytesRemaining = 0;
+    mPausedWriteLength = 0;
+    mPausedBytesRemaining = 0;
+    if (mUseAsyncWrite) {
+        mWriteBlocked = false;
+        mDraining = false;
+        ALOG_ASSERT(mCallbackThread != 0);
+        mCallbackThread->setWriteBlocked(false);
+        mCallbackThread->setDraining(false);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
 AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
         AudioFlinger::MixerThread* mainThread, audio_io_handle_t id)
     :   MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->outDevice(),
@@ -3460,6 +4016,7 @@
     }
     sleepTime = 0;
     writeFrames = mNormalFrameCount;
+    mCurrentWriteLength = mixBufferSize;
     standbyTime = systemTime() + standbyDelay;
 }
 
@@ -3483,12 +4040,12 @@
     }
 }
 
-void AudioFlinger::DuplicatingThread::threadLoop_write()
+ssize_t AudioFlinger::DuplicatingThread::threadLoop_write()
 {
     for (size_t i = 0; i < outputTracks.size(); i++) {
         outputTracks[i]->write(mMixBuffer, writeFrames);
     }
-    mBytesWritten += mixBufferSize;
+    return (ssize_t)mixBufferSize;
 }
 
 void AudioFlinger::DuplicatingThread::threadLoop_standby()
@@ -3682,7 +4239,10 @@
                 continue;
             }
             if (mActiveTrack != 0) {
-                if (mActiveTrack->mState == TrackBase::PAUSING) {
+                if (mActiveTrack->isTerminated()) {
+                    removeTrack_l(mActiveTrack);
+                    mActiveTrack.clear();
+                } else if (mActiveTrack->mState == TrackBase::PAUSING) {
                     standby();
                     mActiveTrack.clear();
                     mStartStopCond.broadcast();
@@ -3701,9 +4261,6 @@
                         mStartStopCond.broadcast();
                     }
                     mStandby = false;
-                } else if (mActiveTrack->mState == TrackBase::TERMINATED) {
-                    removeTrack_l(mActiveTrack);
-                    mActiveTrack.clear();
                 }
             }
             lockEffectChains_l(effectChains);
@@ -4084,7 +4641,8 @@
 // destroyTrack_l() must be called with ThreadBase::mLock held
 void AudioFlinger::RecordThread::destroyTrack_l(const sp<RecordTrack>& track)
 {
-    track->mState = TrackBase::TERMINATED;
+    track->terminate();
+    track->mState = TrackBase::STOPPED;
     // active tracks are removed by threadLoop()
     if (mActiveTrack != track) {
         removeTrack_l(track);
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 4c969d8..c5818ae 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -28,7 +28,8 @@
         MIXER,              // Thread class is MixerThread
         DIRECT,             // Thread class is DirectOutputThread
         DUPLICATING,        // Thread class is DuplicatingThread
-        RECORD              // Thread class is RecordThread
+        RECORD,             // Thread class is RecordThread
+        OFFLOAD             // Thread class is OffloadThread
     };
 
     ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
@@ -129,6 +130,7 @@
                 size_t      frameCount() const { return mNormalFrameCount; }
                 // Returns the HAL's frame count, i.e. the fast mixer buffer size.
                 size_t      frameCountHAL() const { return mFrameCount; }
+                size_t      frameSize() const { return mFrameSize; }
 
     // Should be "virtual status_t requestExitAndWait()" and override same
     // method in Thread, but Thread::requestExitAndWait() is not yet virtual.
@@ -184,6 +186,8 @@
                 void lockEffectChains_l(Vector< sp<EffectChain> >& effectChains);
                 // unlock effect chains after process
                 void unlockEffectChains(const Vector< sp<EffectChain> >& effectChains);
+                // get a copy of mEffectChains vector
+                Vector< sp<EffectChain> > getEffectChains_l() const { return mEffectChains; }
                 // set audio mode to all effect chains
                 void setMode(audio_mode_t mode);
                 // get effect module with corresponding ID on specified audio session
@@ -329,11 +333,19 @@
     enum mixer_state {
         MIXER_IDLE,             // no active tracks
         MIXER_TRACKS_ENABLED,   // at least one active track, but no track has any data ready
-        MIXER_TRACKS_READY      // at least one active track, and at least one track has data
+        MIXER_TRACKS_READY,     // at least one active track, and at least one track has data
+        MIXER_DRAIN_TRACK,      // drain currently playing track
+        MIXER_DRAIN_ALL,        // fully drain the hardware
         // standby mode does not have an enum value
         // suspend by audio policy manager is orthogonal to mixer state
     };
 
+    // retry count before removing an active track in case of underrun on offloaded thread:
+    // we need to make sure that the AudioTrack client has enough time to send large buffers.
+    // FIXME may be more appropriate if expressed in time units. Need to revise how underrun
+    // is handled for offloaded tracks.
+    static const int8_t kMaxTrackRetriesOffload = 20;
+
     PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
                    audio_io_handle_t id, audio_devices_t device, type_t type);
     virtual             ~PlaybackThread();
@@ -351,8 +363,10 @@
     // Code snippets that were lifted up out of threadLoop()
     virtual     void        threadLoop_mix() = 0;
     virtual     void        threadLoop_sleepTime() = 0;
-    virtual     void        threadLoop_write();
+    virtual     ssize_t     threadLoop_write();
+    virtual     void        threadLoop_drain();
     virtual     void        threadLoop_standby();
+    virtual     void        threadLoop_exit();
     virtual     void        threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove);
 
                 // prepareTracks_l reads and writes mActiveTracks, and returns
@@ -360,6 +374,19 @@
                 // is responsible for clearing or destroying this Vector later on, when it
                 // is safe to do so. That will drop the final ref count and destroy the tracks.
     virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove) = 0;
+                void        removeTracks_l(const Vector< sp<Track> >& tracksToRemove);
+
+                void        writeCallback();
+                void        setWriteBlocked(bool value);
+                void        drainCallback();
+                void        setDraining(bool value);
+
+    static      int         asyncCallback(stream_callback_event_t event, void *param, void *cookie);
+
+    virtual     bool        waitingAsyncCallback();
+    virtual     bool        waitingAsyncCallback_l();
+    virtual     bool        shouldStandby_l();
+
 
     // ThreadBase virtuals
     virtual     void        preExit();
@@ -436,7 +463,8 @@
 
 
 protected:
-    int16_t*                        mMixBuffer;
+    int16_t*                        mMixBuffer;         // frame size aligned mix buffer
+    int8_t*                         mAllocMixBuffer;    // mixer buffer allocation address
 
     // suspend count, > 0 means suspended.  While suspended, the thread continues to pull from
     // tracks and mix, but doesn't write to HAL.  A2DP and SCO HAL implementations can't handle
@@ -489,8 +517,9 @@
     PlaybackThread& operator = (const PlaybackThread&);
 
     status_t    addTrack_l(const sp<Track>& track);
-    void        destroyTrack_l(const sp<Track>& track);
+    bool        destroyTrack_l(const sp<Track>& track);
     void        removeTrack_l(const sp<Track>& track);
+    void        signal_l();
 
     void        readOutputParameters();
 
@@ -538,6 +567,14 @@
     // DUPLICATING only
     uint32_t                        writeFrames;
 
+    size_t                          mBytesRemaining;
+    size_t                          mCurrentWriteLength;
+    bool                            mUseAsyncWrite;
+    bool                            mWriteBlocked;
+    bool                            mDraining;
+    bool                            mSignalPending;
+    sp<AsyncCallbackThread>         mCallbackThread;
+
 private:
     // The HAL output sink is treated as non-blocking, but current implementation is blocking
     sp<NBAIO_Sink>          mOutputSink;
@@ -561,7 +598,7 @@
 protected:
                 // accessed by both binder threads and within threadLoop(), lock on mutex needed
                 unsigned    mFastTrackAvailMask;    // bit i set if fast track [i] is available
-
+    virtual     void        flushOutput_l();
 };
 
 class MixerThread : public PlaybackThread {
@@ -587,7 +624,7 @@
     virtual     void        cacheParameters_l();
 
     // threadLoop snippets
-    virtual     void        threadLoop_write();
+    virtual     ssize_t     threadLoop_write();
     virtual     void        threadLoop_standby();
     virtual     void        threadLoop_mix();
     virtual     void        threadLoop_sleepTime();
@@ -644,17 +681,73 @@
     virtual     void        threadLoop_mix();
     virtual     void        threadLoop_sleepTime();
 
-private:
     // volumes last sent to audio HAL with stream->set_volume()
     float mLeftVolFloat;
     float mRightVolFloat;
 
+    DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
+                        audio_io_handle_t id, uint32_t device, ThreadBase::type_t type);
+    void processVolume_l(Track *track, bool lastTrack);
+
     // prepareTracks_l() tells threadLoop_mix() the name of the single active track
     sp<Track>               mActiveTrack;
 public:
     virtual     bool        hasFastMixer() const { return false; }
 };
 
+class OffloadThread : public DirectOutputThread {
+public:
+
+    OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
+                        audio_io_handle_t id, uint32_t device);
+    virtual                 ~OffloadThread();
+
+protected:
+    // threadLoop snippets
+    virtual     mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
+    virtual     void        threadLoop_exit();
+    virtual     void        flushOutput_l();
+
+    virtual     bool        waitingAsyncCallback();
+    virtual     bool        waitingAsyncCallback_l();
+    virtual     bool        shouldStandby_l();
+
+private:
+                void        flushHw_l();
+
+private:
+    bool        mHwPaused;
+    bool        mFlushPending;
+    size_t      mPausedWriteLength;     // length in bytes of write interrupted by pause
+    size_t      mPausedBytesRemaining;  // bytes still waiting in mixbuffer after resume
+    sp<Track>   mPreviousTrack;         // used to detect track switch
+};
+
+class AsyncCallbackThread : public Thread {
+public:
+
+    AsyncCallbackThread(const sp<OffloadThread>& offloadThread);
+
+    virtual             ~AsyncCallbackThread();
+
+    // Thread virtuals
+    virtual bool        threadLoop();
+
+    // RefBase
+    virtual void        onFirstRef();
+
+            void        exit();
+            void        setWriteBlocked(bool value);
+            void        setDraining(bool value);
+
+private:
+    wp<OffloadThread>   mOffloadThread;
+    bool                mWriteBlocked;
+    bool                mDraining;
+    Condition           mWaitWorkCV;
+    Mutex               mLock;
+};
+
 class DuplicatingThread : public MixerThread {
 public:
     DuplicatingThread(const sp<AudioFlinger>& audioFlinger, MixerThread* mainThread,
@@ -674,7 +767,7 @@
     // threadLoop snippets
     virtual     void        threadLoop_mix();
     virtual     void        threadLoop_sleepTime();
-    virtual     void        threadLoop_write();
+    virtual     ssize_t     threadLoop_write();
     virtual     void        threadLoop_standby();
     virtual     void        cacheParameters_l();
 
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index f0c32b2..a243563 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -25,10 +25,10 @@
 public:
     enum track_state {
         IDLE,
-        TERMINATED,
         FLUSHED,
         STOPPED,
-        // next 2 states are currently used for fast tracks only
+        // next 2 states are currently used for fast tracks
+        // and offloaded tracks only
         STOPPING_1,     // waiting for first underrun
         STOPPING_2,     // waiting for presentation complete
         RESUMING,
@@ -89,7 +89,7 @@
         return (mState == STOPPED || mState == FLUSHED);
     }
 
-    // for fast tracks only
+    // for fast tracks and offloaded tracks only
     bool isStopping() const {
         return mState == STOPPING_1 || mState == STOPPING_2;
     }
@@ -101,7 +101,11 @@
     }
 
     bool isTerminated() const {
-        return mState == TERMINATED;
+        return mTerminated;
+    }
+
+    void terminate() {
+        mTerminated = true;
     }
 
     bool step();    // mStepCount is an implicit input
@@ -141,4 +145,5 @@
     const int           mId;
     sp<NBAIO_Sink>      mTeeSink;
     sp<NBAIO_Source>    mTeeSource;
+    bool                mTerminated;
 };
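// Editor's sketch (not part of this patch): the effect of moving TERMINATED out
// of track_state and into the mTerminated flag above. Termination becomes
// orthogonal to the lifecycle state, so a terminated track still remembers
// whether it was, for example, paused or stopping. Names other than
// terminate()/isTerminated() are hypothetical.
#include <cassert>

enum SketchState { SKETCH_IDLE, SKETCH_ACTIVE, SKETCH_PAUSED };

struct SketchTrack {
    SketchState mState = SKETCH_IDLE;
    bool        mTerminated = false;

    void terminate()          { mTerminated = true; }   // does not clobber mState
    bool isTerminated() const { return mTerminated; }
};

int main() {
    SketchTrack t;
    t.mState = SKETCH_PAUSED;
    t.terminate();
    assert(t.isTerminated() && t.mState == SKETCH_PAUSED);  // state preserved
    return 0;
}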
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 1df333f..52518ae 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -89,7 +89,8 @@
         mSessionId(sessionId),
         mIsOut(isOut),
         mServerProxy(NULL),
-        mId(android_atomic_inc(&nextTrackId))
+        mId(android_atomic_inc(&nextTrackId)),
+        mTerminated(false)
 {
     // client == 0 implies sharedBuffer == 0
     ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
@@ -247,7 +248,7 @@
 }
 
 status_t AudioFlinger::TrackHandle::setParameters(const String8& keyValuePairs) {
-    return INVALID_OPERATION;   // stub function
+    return mTrack->setParameters(keyValuePairs);
 }
 
 status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
@@ -323,7 +324,8 @@
     mUnderrunCount(0),
     mCachedVolume(1.0),
     mIsInvalid(false),
-    mAudioTrackServerProxy(NULL)
+    mAudioTrackServerProxy(NULL),
+    mResumeToStopping(false)
 {
     if (mCblk != NULL) {
         if (sharedBuffer == 0) {
@@ -381,27 +383,19 @@
     { // scope for mLock
         sp<ThreadBase> thread = mThread.promote();
         if (thread != 0) {
-            if (!isOutputTrack()) {
-                if (mState == ACTIVE || mState == RESUMING) {
-                    AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
-
-#ifdef ADD_BATTERY_DATA
-                    // to track the speaker usage
-                    addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
-#endif
-                }
-                AudioSystem::releaseOutput(thread->id());
-            }
             Mutex::Autolock _l(thread->mLock);
             PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-            playbackThread->destroyTrack_l(this);
+            bool wasActive = playbackThread->destroyTrack_l(this);
+            if (!isOutputTrack() && !wasActive) {
+                AudioSystem::releaseOutput(thread->id());
+            }
         }
     }
 }
 
 /*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
 {
-    result.append("   Name Client Type Fmt Chn mask   Session StpCnt fCount S F SRate  "
+    result.append("   Name Client Type Fmt        Chn mask   Session StpCnt fCount S F SRate  "
                   "L dB  R dB    Server    Main buf    Aux Buf  Flags Underruns\n");
 }
 
@@ -415,40 +409,41 @@
     }
     track_state state = mState;
     char stateChar;
-    switch (state) {
-    case IDLE:
-        stateChar = 'I';
-        break;
-    case TERMINATED:
+    if (isTerminated()) {
         stateChar = 'T';
-        break;
-    case STOPPING_1:
-        stateChar = 's';
-        break;
-    case STOPPING_2:
-        stateChar = '5';
-        break;
-    case STOPPED:
-        stateChar = 'S';
-        break;
-    case RESUMING:
-        stateChar = 'R';
-        break;
-    case ACTIVE:
-        stateChar = 'A';
-        break;
-    case PAUSING:
-        stateChar = 'p';
-        break;
-    case PAUSED:
-        stateChar = 'P';
-        break;
-    case FLUSHED:
-        stateChar = 'F';
-        break;
-    default:
-        stateChar = '?';
-        break;
+    } else {
+        switch (state) {
+        case IDLE:
+            stateChar = 'I';
+            break;
+        case STOPPING_1:
+            stateChar = 's';
+            break;
+        case STOPPING_2:
+            stateChar = '5';
+            break;
+        case STOPPED:
+            stateChar = 'S';
+            break;
+        case RESUMING:
+            stateChar = 'R';
+            break;
+        case ACTIVE:
+            stateChar = 'A';
+            break;
+        case PAUSING:
+            stateChar = 'p';
+            break;
+        case PAUSED:
+            stateChar = 'P';
+            break;
+        case FLUSHED:
+            stateChar = 'F';
+            break;
+        default:
+            stateChar = '?';
+            break;
+        }
     }
     char nowInUnderrun;
     switch (mObservedUnderruns.mBitFields.mMostRecent) {
@@ -465,7 +460,7 @@
         nowInUnderrun = '?';
         break;
     }
-    snprintf(&buffer[7], size-7, " %6d %4u %3u 0x%08x %7u %6u %6u %1c %1d %5u %5.2g %5.2g  "
+    snprintf(&buffer[7], size-7, " %6d %4u 0x%08x 0x%08x %7u %6u %6u %1c %1d %5u %5.2g %5.2g  "
             "0x%08x 0x%08x 0x%08x %#5x %9u%c\n",
             (mClient == 0) ? getpid_cached : mClient->pid(),
             mStreamType,
@@ -550,32 +545,33 @@
         track_state state = mState;
         // here the track could be either new, or restarted
         // in both cases "unstop" the track
+
         if (state == PAUSED) {
-            mState = TrackBase::RESUMING;
-            ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
+            if (mResumeToStopping) {
+                // the track was stopping when it was paused; resume the drain (STOPPING_1) rather than playback
+                mState = TrackBase::STOPPING_1;
+                ALOGV("PAUSED => STOPPING_1 (%d) on thread %p", mName, this);
+            } else {
+                mState = TrackBase::RESUMING;
+                ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
+            }
         } else {
             mState = TrackBase::ACTIVE;
             ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
         }
 
-        if (!isOutputTrack() && state != ACTIVE && state != RESUMING) {
-            thread->mLock.unlock();
-            status = AudioSystem::startOutput(thread->id(), mStreamType, mSessionId);
-            thread->mLock.lock();
-
-#ifdef ADD_BATTERY_DATA
-            // to track the speaker usage
-            if (status == NO_ERROR) {
-                addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
-            }
-#endif
-        }
-        if (status == NO_ERROR) {
-            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-            playbackThread->addTrack_l(this);
-        } else {
-            mState = state;
+        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+        status = playbackThread->addTrack_l(this);
+        if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
             triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
+            //  restore previous state if start was rejected by policy manager
+            if (status == PERMISSION_DENIED) {
+                mState = state;
+            }
+        }
+        // track was already in the active list, not a problem
+        if (status == ALREADY_EXISTS) {
+            status = NO_ERROR;
         }
     } else {
         status = BAD_VALUE;
@@ -596,26 +592,18 @@
             if (playbackThread->mActiveTracks.indexOf(this) < 0) {
                 reset();
                 mState = STOPPED;
-            } else if (!isFastTrack()) {
+            } else if (!isFastTrack() && !isOffloaded()) {
                 mState = STOPPED;
             } else {
-                // prepareTracks_l() will set state to STOPPING_2 after next underrun,
-                // and then to STOPPED and reset() when presentation is complete
+                // For a fast track prepareTracks_l() will set the state to STOPPING_2
+                // after the next underrun, then to STOPPED and reset() when
+                // presentation is complete. For an offloaded track this starts a drain;
+                // the state moves to STOPPING_2 when the drain completes, then STOPPED.
                 mState = STOPPING_1;
             }
             ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
                     playbackThread);
         }
-        if (!isOutputTrack() && (state == ACTIVE || state == RESUMING)) {
-            thread->mLock.unlock();
-            AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
-            thread->mLock.lock();
-
-#ifdef ADD_BATTERY_DATA
-            // to track the speaker usage
-            addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
-#endif
-        }
     }
 }
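// Editor's sketch (not part of this patch): the two-phase stop described in
// the comment above, reduced to a tiny state machine. For a fast track the
// first phase ends at the next underrun; for an offloaded track it ends when
// the HAL drain completes. Everything here is a hypothetical simplification
// of what prepareTracks_l() does.
enum StopPhase { PLAYING, STOPPING_PHASE_1, STOPPING_PHASE_2, STOPPED_DONE };

struct StopSketch {
    StopPhase phase = PLAYING;

    void requestStop()        { if (phase == PLAYING) phase = STOPPING_PHASE_1; }
    void onUnderrunOrDrain()  { if (phase == STOPPING_PHASE_1) phase = STOPPING_PHASE_2; }
    void onPresentationDone() { if (phase == STOPPING_PHASE_2) phase = STOPPED_DONE; }
};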
 
@@ -625,19 +613,27 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
-        if (mState == ACTIVE || mState == RESUMING) {
+        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+        switch (mState) {
+        case STOPPING_1:
+        case STOPPING_2:
+            if (!isOffloaded()) {
+                /* nothing to do if track is not offloaded */
+                break;
+            }
+
+            // The offloaded track was draining; we need to carry on draining when resumed
+            mResumeToStopping = true;
+            // fall through...
+        case ACTIVE:
+        case RESUMING:
             mState = PAUSING;
             ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
-            if (!isOutputTrack()) {
-                thread->mLock.unlock();
-                AudioSystem::stopOutput(thread->id(), mStreamType, mSessionId);
-                thread->mLock.lock();
+            playbackThread->signal_l();
+            break;
 
-#ifdef ADD_BATTERY_DATA
-                // to track the speaker usage
-                addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
-#endif
-            }
+        default:
+            break;
         }
     }
 }
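// Editor's sketch (not part of this patch): how the mResumeToStopping flag
// lets a pause interrupt a drain without forgetting that the track was
// stopping. On resume the track goes back to draining (STOPPING_1) instead of
// normal playback. Names and the boolean state model are hypothetical.
struct ResumeToStoppingSketch {
    bool stopping = false;          // track was draining (STOPPING_1/2)
    bool paused = false;
    bool resumeToStopping = false;

    void pause() {
        if (stopping) {
            resumeToStopping = true;  // remember to carry on draining later
            stopping = false;         // mirrors STOPPING_x => PAUSING/PAUSED
        }
        paused = true;
    }

    void start() {                    // i.e. resume
        if (paused) {
            paused = false;
            stopping = resumeToStopping;  // resume the drain, not playback
            resumeToStopping = false;
        }
    }
};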
@@ -648,21 +644,52 @@
     sp<ThreadBase> thread = mThread.promote();
     if (thread != 0) {
         Mutex::Autolock _l(thread->mLock);
-        if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED && mState != PAUSED &&
-                mState != PAUSING && mState != IDLE && mState != FLUSHED) {
-            return;
-        }
-        // No point remaining in PAUSED state after a flush => go to
-        // FLUSHED state
-        mState = FLUSHED;
-        // do not reset the track if it is still in the process of being stopped or paused.
-        // this will be done by prepareTracks_l() when the track is stopped.
-        // prepareTracks_l() will see mState == FLUSHED, then
-        // remove from active track list, reset(), and trigger presentation complete
         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
-        if (playbackThread->mActiveTracks.indexOf(this) < 0) {
+
+        if (isOffloaded()) {
+            // If the track is offloaded we allow a flush in any state except
+            // terminated, and keep the track active to avoid problems if the user
+            // is seeking rapidly and the underlying hardware has a significant
+            // delay handling a pause
+            if (isTerminated()) {
+                return;
+            }
+
+            ALOGV("flush: offload flush");
             reset();
+
+            if (mState == STOPPING_1 || mState == STOPPING_2) {
+                ALOGV("flushed in STOPPING_1 or 2 state, change state to ACTIVE");
+                mState = ACTIVE;
+            }
+
+            if (mState == ACTIVE) {
+                ALOGV("flush called in active state, resetting buffer time out retry count");
+                mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
+            }
+
+            mResumeToStopping = false;
+        } else {
+            if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
+                    mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
+                return;
+            }
+            // No point remaining in PAUSED state after a flush => go to
+            // FLUSHED state
+            mState = FLUSHED;
+            // do not reset the track if it is still in the process of being stopped or paused.
+            // this will be done by prepareTracks_l() when the track is stopped.
+            // prepareTracks_l() will see mState == FLUSHED, then
+            // remove from active track list, reset(), and trigger presentation complete
+            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
+                reset();
+            }
         }
+        // Prevent the flush from being lost if the track is flushed and then
+        // resumed before the mixer thread can run. This matters when offloading
+        // because the hardware buffer could hold a large amount of audio
+        playbackThread->flushOutput_l();
+        playbackThread->signal_l();
     }
 }
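// Editor's sketch (not part of this patch): the "deferred flush" idea behind
// flushOutput_l() and OffloadThread::mFlushPending. The request is latched and
// the thread is signalled; the HAL flush itself runs on the playback thread
// the next time it wakes, so a flush issued just before a resume is not
// dropped. All names are hypothetical, using std primitives.
#include <mutex>

class DeferredFlushSketch {
public:
    // Caller side (e.g. a track flush): latch the request; waking the thread
    // (the signal_l() part) is omitted here.
    void requestFlush() {
        std::lock_guard<std::mutex> lock(mLock);
        mFlushPending = true;
    }

    // Playback-thread side: consume the request exactly once per wakeup and
    // perform the actual HAL flush if it was set.
    bool takePendingFlush() {
        std::lock_guard<std::mutex> lock(mLock);
        bool pending = mFlushPending;
        mFlushPending = false;
        return pending;
    }

private:
    std::mutex mLock;
    bool       mFlushPending = false;
};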
 
@@ -682,6 +709,20 @@
     }
 }
 
+status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
+{
+    sp<ThreadBase> thread = mThread.promote();
+    if (thread == 0) {
+        ALOGE("thread is dead");
+        return FAILED_TRANSACTION;
+    } else if ((thread->type() == ThreadBase::DIRECT) ||
+                    (thread->type() == ThreadBase::OFFLOAD)) {
+        return thread->setParameters(keyValuePairs);
+    } else {
+        return PERMISSION_DENIED;
+    }
+}
+
 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
 {
     status_t status = DEAD_OBJECT;
@@ -743,15 +784,23 @@
     // a track is considered presented when the total number of frames written to audio HAL
     // corresponds to the number of frames written when presentationComplete() is called for the
     // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
+    // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
+    // to detect when all frames have been played. In this case framesWritten isn't
+    // useful because it doesn't always reflect whether there is data in the h/w
+    // buffers, particularly if a track has been paused and resumed during draining
+    ALOGV("presentationComplete() mPresentationCompleteFrames %d framesWritten %d",
+                      mPresentationCompleteFrames, framesWritten);
     if (mPresentationCompleteFrames == 0) {
         mPresentationCompleteFrames = framesWritten + audioHalFrames;
         ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
                   mPresentationCompleteFrames, audioHalFrames);
     }
-    if (framesWritten >= mPresentationCompleteFrames) {
+
+    if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
         ALOGV("presentationComplete() session %d complete: framesWritten %d",
                   mSessionId, framesWritten);
         triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
+        mAudioTrackServerProxy->setStreamEndDone();
         return true;
     }
     return false;
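// Editor's sketch (not part of this patch): the arithmetic of the
// non-offloaded path of presentationComplete(). The target is latched on the
// first call as framesWritten + audioHalFrames, and completion is reported
// once framesWritten catches up to it. Values and names are hypothetical.
#include <cassert>
#include <cstddef>

struct PresentationSketch {
    size_t target = 0;   // plays the role of mPresentationCompleteFrames

    bool presentationComplete(size_t framesWritten, size_t audioHalFrames) {
        if (target == 0) {
            target = framesWritten + audioHalFrames;  // latch on first call
        }
        return framesWritten >= target;
    }
};

int main() {
    PresentationSketch s;
    assert(!s.presentationComplete(48000, 1024));  // target latched at 49024
    assert(!s.presentationComplete(48512, 1024));  // HAL buffer not yet drained
    assert(s.presentationComplete(49024, 1024));   // all buffered frames played
    return 0;
}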
@@ -797,7 +846,7 @@
 
 status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
 {
-    if (mState == TERMINATED || mState == PAUSED ||
+    if (isTerminated() || mState == PAUSED ||
             ((framesReady() == 0) && ((mSharedBuffer != 0) ||
                                       (mState == STOPPED)))) {
         ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",