Remove TimedAudioTrack and associated code
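This removes the pts parameter from AudioBufferProvider::getNextBuffer()
and the readPTS parameter from the NBAIO read paths, so providers and
their callers lose the timestamp plumbing. A minimal sketch of an
implementer against the new signature (SilenceBufferProvider is a
hypothetical illustration, not code in this change):

    #include <stdlib.h>
    #include <utils/Errors.h>
    #include <media/AudioBufferProvider.h>

    namespace android {

    // Hypothetical provider: hands out zeroed (silent) buffers through the
    // post-change interface, which no longer takes a presentation timestamp.
    class SilenceBufferProvider : public AudioBufferProvider {
    public:
        virtual status_t getNextBuffer(Buffer* buffer) {
            if (buffer == NULL || buffer->frameCount == 0) {
                return BAD_VALUE;
            }
            // 16-bit stereo frames assumed purely for illustration.
            buffer->raw = calloc(buffer->frameCount, 2 * sizeof(int16_t));
            if (buffer->raw == NULL) {
                buffer->frameCount = 0;
                return NO_MEMORY;
            }
            return NO_ERROR;
        }
        virtual void releaseBuffer(Buffer* buffer) {
            free(buffer->raw);
            buffer->raw = NULL;
            buffer->frameCount = 0;
        }
    };

    }  // namespace android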
Bug: 8278435
Change-Id: I095c1a4888e645e14d93b0b15fbef4524a831ca1
diff --git a/include/media/AudioBufferProvider.h b/include/media/AudioBufferProvider.h
index 7be449c..458d170 100644
--- a/include/media/AudioBufferProvider.h
+++ b/include/media/AudioBufferProvider.h
@@ -40,12 +40,6 @@
virtual ~AudioBufferProvider() {}
- // value representing an invalid presentation timestamp
- static const int64_t kInvalidPTS = 0x7FFFFFFFFFFFFFFFLL; // <stdint.h> is too painful
-
- // pts is the local time when the next sample yielded by getNextBuffer
- // will be rendered.
- // Pass kInvalidPTS if the PTS is unknown or not applicable.
// On entry:
// buffer != NULL
// buffer->raw unused
@@ -59,7 +53,7 @@
// status != NO_ERROR
// buffer->raw NULL
// buffer->frameCount 0
- virtual status_t getNextBuffer(Buffer* buffer, int64_t pts = kInvalidPTS) = 0;
+ virtual status_t getNextBuffer(Buffer* buffer) = 0;
// Release (a portion of) the buffer previously obtained by getNextBuffer().
// It is permissible to call releaseBuffer() multiple times per getNextBuffer().
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 38adb03..a4b8571 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -954,7 +954,6 @@
mutable Mutex mLock;
- bool mIsTimed;
int mPreviousPriority; // before start()
SchedPolicy mPreviousSchedulingGroup;
bool mAwaitBoost; // thread should wait for priority boost before running
@@ -992,29 +991,6 @@
sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
};
-class TimedAudioTrack : public AudioTrack
-{
-public:
- TimedAudioTrack();
-
- /* allocate a shared memory buffer that can be passed to queueTimedBuffer */
- status_t allocateTimedBuffer(size_t size, sp<IMemory>* buffer);
-
- /* queue a buffer obtained via allocateTimedBuffer for playback at the
- given timestamp. PTS units are microseconds on the media time timeline.
- The media time transform (set with setMediaTimeTransform) set by the
- audio producer will handle converting from media time to local time
- (perhaps going through the common time timeline in the case of
- synchronized multiroom audio case) */
- status_t queueTimedBuffer(const sp<IMemory>& buffer, int64_t pts);
-
- /* define a transform between media time and either common time or
- local time */
- enum TargetTimeline {LOCAL_TIME, COMMON_TIME};
- status_t setMediaTimeTransform(const LinearTransform& xform,
- TargetTimeline target);
-};
-
}; // namespace android
#endif // ANDROID_AUDIOTRACK_H
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 5051aff..3b69ecf 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -47,7 +47,8 @@
// or-able bits shared by createTrack and openRecord, but not all combinations make sense
enum {
TRACK_DEFAULT = 0, // client requests a default AudioTrack
- TRACK_TIMED = 1, // client requests a TimedAudioTrack
+ // FIXME: obsolete
+ // TRACK_TIMED= 1, // client requests a TimedAudioTrack
TRACK_FAST = 2, // client requests a fast AudioTrack or AudioRecord
TRACK_OFFLOAD = 4, // client requests offload to hw codec
TRACK_DIRECT = 8, // client requests a direct output
diff --git a/include/media/IAudioTrack.h b/include/media/IAudioTrack.h
index 619ac78..a31cec6 100644
--- a/include/media/IAudioTrack.h
+++ b/include/media/IAudioTrack.h
@@ -24,7 +24,6 @@
#include <utils/Errors.h>
#include <binder/IInterface.h>
#include <binder/IMemory.h>
-#include <utils/LinearTransform.h>
#include <utils/String8.h>
#include <media/AudioTimestamp.h>
@@ -67,24 +66,6 @@
*/
virtual status_t attachAuxEffect(int effectId) = 0;
-
- /* Allocate a shared memory buffer suitable for holding timed audio
- samples */
- virtual status_t allocateTimedBuffer(size_t size,
- sp<IMemory>* buffer) = 0;
-
- /* Queue a buffer obtained via allocateTimedBuffer for playback at the given
- timestamp */
- virtual status_t queueTimedBuffer(const sp<IMemory>& buffer,
- int64_t pts) = 0;
-
- /* Define the linear transform that will be applied to the timestamps
- given to queueTimedBuffer (which are expressed in media time).
- Target specifies whether this transform converts media time to local time
- or Tungsten time. The values for target are defined in AudioTrack.h */
- virtual status_t setMediaTimeTransform(const LinearTransform& xform,
- int target) = 0;
-
/* Send parameters to the audio hardware */
virtual status_t setParameters(const String8& keyValuePairs) = 0;
diff --git a/include/media/nbaio/AudioBufferProviderSource.h b/include/media/nbaio/AudioBufferProviderSource.h
index b16e20a..4747dcf 100644
--- a/include/media/nbaio/AudioBufferProviderSource.h
+++ b/include/media/nbaio/AudioBufferProviderSource.h
@@ -42,9 +42,8 @@
//virtual size_t framesOverrun();
//virtual size_t overruns();
virtual ssize_t availableToRead();
- virtual ssize_t read(void *buffer, size_t count, int64_t readPTS);
- virtual ssize_t readVia(readVia_t via, size_t total, void *user,
- int64_t readPTS, size_t block);
+ virtual ssize_t read(void *buffer, size_t count);
+ virtual ssize_t readVia(readVia_t via, size_t total, void *user, size_t block);
private:
AudioBufferProvider * const mProvider;
diff --git a/include/media/nbaio/AudioStreamInSource.h b/include/media/nbaio/AudioStreamInSource.h
index 5169f1e..eaea63c 100644
--- a/include/media/nbaio/AudioStreamInSource.h
+++ b/include/media/nbaio/AudioStreamInSource.h
@@ -45,7 +45,7 @@
// FIXME Use an audio HAL API to query the buffer filling status when it's available.
virtual ssize_t availableToRead() { return mStreamBufferSizeBytes / mFrameSize; }
- virtual ssize_t read(void *buffer, size_t count, int64_t readPTS);
+ virtual ssize_t read(void *buffer, size_t count);
// NBAIO_Sink end
diff --git a/include/media/nbaio/AudioStreamOutSink.h b/include/media/nbaio/AudioStreamOutSink.h
index 9949b88..0998d45 100644
--- a/include/media/nbaio/AudioStreamOutSink.h
+++ b/include/media/nbaio/AudioStreamOutSink.h
@@ -47,11 +47,6 @@
virtual ssize_t write(const void *buffer, size_t count);
- // AudioStreamOutSink wraps a HAL's output stream. Its
- // getNextWriteTimestamp method is simply a passthru to the HAL's underlying
- // implementation of GNWT (if any)
- virtual status_t getNextWriteTimestamp(int64_t *timestamp);
-
virtual status_t getTimestamp(AudioTimestamp& timestamp);
// NBAIO_Sink end
diff --git a/include/media/nbaio/MonoPipe.h b/include/media/nbaio/MonoPipe.h
index b09b35f..df9cafe 100644
--- a/include/media/nbaio/MonoPipe.h
+++ b/include/media/nbaio/MonoPipe.h
@@ -18,7 +18,6 @@
#define ANDROID_AUDIO_MONO_PIPE_H
#include <time.h>
-#include <utils/LinearTransform.h>
#include "NBAIO.h"
#include <media/SingleStateQueue.h>
@@ -60,20 +59,6 @@
virtual ssize_t write(const void *buffer, size_t count);
//virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block);
- // MonoPipe's implementation of getNextWriteTimestamp works in conjunction
- // with MonoPipeReader. Every time a MonoPipeReader reads from the pipe, it
- // receives a "readPTS" indicating the point in time for which the reader
- // would like to read data. This "last read PTS" is offset by the amt of
- // data the reader is currently mixing and then cached cached along with the
- // updated read pointer. This cached value is the local time for which the
- // reader is going to request data next time it reads data (assuming we are
- // in steady state and operating with no underflows). Writers to the
- // MonoPipe who would like to know when their next write operation will hit
- // the speakers can call getNextWriteTimestamp which will return the value
- // of the last read PTS plus the duration of the amt of data waiting to be
- // read in the MonoPipe.
- virtual status_t getNextWriteTimestamp(int64_t *timestamp);
-
// average number of frames present in the pipe under normal conditions.
// See throttling mechanism in MonoPipe::write()
size_t getAvgFrames() const { return mSetpoint; }
@@ -95,43 +80,21 @@
status_t getTimestamp(AudioTimestamp& timestamp);
private:
- // A pair of methods and a helper variable which allows the reader and the
- // writer to update and observe the values of mFront and mNextRdPTS in an
- // atomic lock-less fashion.
- //
- // :: Important ::
- // Two assumptions must be true in order for this lock-less approach to
- // function properly on all systems. First, there may only be one updater
- // thread in the system. Second, the updater thread must be running at a
- // strictly higher priority than the observer threads. Currently, both of
- // these assumptions are true. The only updater is always a single
- // FastMixer thread (which runs with SCHED_FIFO/RT priority while the only
- // observer is always an AudioFlinger::PlaybackThread running with
- // traditional (non-RT) audio priority.
- void updateFrontAndNRPTS(int32_t newFront, int64_t newNextRdPTS);
- void observeFrontAndNRPTS(int32_t *outFront, int64_t *outNextRdPTS);
- volatile int32_t mUpdateSeq;
-
const size_t mReqFrames; // as requested in constructor, unrounded
const size_t mMaxFrames; // always a power of 2
void * const mBuffer;
// mFront and mRear will never be separated by more than mMaxFrames.
// 32-bit overflow is possible if the pipe is active for a long time, but if that happens it's
// safe because we "&" with (mMaxFrames-1) at end of computations to calculate a buffer index.
- volatile int32_t mFront; // written by the reader with updateFrontAndNRPTS, observed by
- // the writer with observeFrontAndNRPTS
+ volatile int32_t mFront; // written by reader with android_atomic_release_store,
+ // read by writer with android_atomic_acquire_load
volatile int32_t mRear; // written by writer with android_atomic_release_store,
// read by reader with android_atomic_acquire_load
- volatile int64_t mNextRdPTS; // written by the reader with updateFrontAndNRPTS, observed by
- // the writer with observeFrontAndNRPTS
bool mWriteTsValid; // whether mWriteTs is valid
struct timespec mWriteTs; // time that the previous write() completed
size_t mSetpoint; // target value for pipe fill depth
const bool mWriteCanBlock; // whether write() should block if the pipe is full
- int64_t offsetTimestampByAudioFrames(int64_t ts, size_t audFrames);
- LinearTransform mSamplesToLocalTime;
-
bool mIsShutdown; // whether shutdown(true) was called, no barriers are needed
AudioTimestampSingleStateQueue::Shared mTimestampShared;
diff --git a/include/media/nbaio/MonoPipeReader.h b/include/media/nbaio/MonoPipeReader.h
index 78fe867..4a7c3c5 100644
--- a/include/media/nbaio/MonoPipeReader.h
+++ b/include/media/nbaio/MonoPipeReader.h
@@ -47,7 +47,7 @@
virtual ssize_t availableToRead();
- virtual ssize_t read(void *buffer, size_t count, int64_t readPTS);
+ virtual ssize_t read(void *buffer, size_t count);
virtual void onTimestamp(const AudioTimestamp& timestamp);
diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h
index d9bbc8d..2f7e291 100644
--- a/include/media/nbaio/NBAIO.h
+++ b/include/media/nbaio/NBAIO.h
@@ -79,8 +79,7 @@
// Callbacks used by NBAIO_Sink::writeVia() and NBAIO_Source::readVia() below.
typedef ssize_t (*writeVia_t)(void *user, void *buffer, size_t count);
-typedef ssize_t (*readVia_t)(void *user, const void *buffer,
- size_t count, int64_t readPTS);
+typedef ssize_t (*readVia_t)(void *user, const void *buffer, size_t count);
// Check whether an NBAIO_Format is valid
bool Format_isValid(const NBAIO_Format& format);
@@ -210,21 +209,6 @@
// < 0 status_t error occurred prior to the first frame transfer during this callback.
virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block = 0);
- // Get the time (on the LocalTime timeline) at which the first frame of audio of the next write
- // operation to this sink will be eventually rendered by the HAL.
- // Inputs:
- // ts A pointer pointing to the int64_t which will hold the result.
- // Return value:
- // OK Everything went well, *ts holds the time at which the first audio frame of the next
- // write operation will be rendered, or AudioBufferProvider::kInvalidPTS if this sink
- // does not know the answer for some reason. Sinks which eventually lead to a HAL
- // which implements get_next_write_timestamp may return Invalid temporarily if the DMA
- // output of the audio driver has not started yet. Sinks which lead to a HAL which
- // does not implement get_next_write_timestamp, or which don't lead to a HAL at all,
- // will always return kInvalidPTS.
- // <other> Something unexpected happened internally. Check the logs and start debugging.
- virtual status_t getNextWriteTimestamp(int64_t *ts) { return INVALID_OPERATION; }
-
// Returns NO_ERROR if a timestamp is available. The timestamp includes the total number
// of frames presented to an external observer, together with the value of CLOCK_MONOTONIC
// as of this presentation count. The timestamp parameter is undefined if error is returned.
@@ -271,8 +255,6 @@
// Inputs:
// buffer Non-NULL destination buffer owned by consumer.
// count Maximum number of frames to transfer.
- // readPTS The presentation time (on the LocalTime timeline) for which data
- // is being requested, or kInvalidPTS if not known.
// Return value:
// > 0 Number of frames successfully transferred prior to first error.
// = 0 Count was zero.
@@ -282,7 +264,7 @@
// WOULD_BLOCK No frames can be transferred without blocking.
// OVERRUN read() has not been called frequently enough, or with enough frames to keep up.
// One or more frames were lost due to overrun, try again to read more recent data.
- virtual ssize_t read(void *buffer, size_t count, int64_t readPTS) = 0;
+ virtual ssize_t read(void *buffer, size_t count) = 0;
// Transfer data from source using a series of callbacks. More suitable for zero-fill,
// synthesis, and non-contiguous transfers (e.g. circular buffer or readv).
@@ -291,8 +273,6 @@
// total Estimate of the number of frames the consumer desires. This is an estimate,
// and it can consume a different number of frames during the series of callbacks.
// user Arbitrary void * reserved for data consumer.
- // readPTS The presentation time (on the LocalTime timeline) for which data
- // is being requested, or kInvalidPTS if not known.
// block Number of frames per block, that is a suggested value for 'count' in each callback.
// Zero means no preference. This parameter is a hint only, and may be ignored.
// Return value:
@@ -315,8 +295,7 @@
// > 0 Number of frames successfully transferred during this callback prior to first error.
// = 0 Count was zero.
// < 0 status_t error occurred prior to the first frame transfer during this callback.
- virtual ssize_t readVia(readVia_t via, size_t total, void *user,
- int64_t readPTS, size_t block = 0);
+ virtual ssize_t readVia(readVia_t via, size_t total, void *user, size_t block = 0);
// Invoked asynchronously by corresponding sink when a new timestamp is available.
// Default implementation ignores the timestamp.
diff --git a/include/media/nbaio/PipeReader.h b/include/media/nbaio/PipeReader.h
index 350e6ab..398353b 100644
--- a/include/media/nbaio/PipeReader.h
+++ b/include/media/nbaio/PipeReader.h
@@ -45,7 +45,7 @@
virtual ssize_t availableToRead();
- virtual ssize_t read(void *buffer, size_t count, int64_t readPTS);
+ virtual ssize_t read(void *buffer, size_t count);
// NBAIO_Source end
diff --git a/include/media/nbaio/SourceAudioBufferProvider.h b/include/media/nbaio/SourceAudioBufferProvider.h
index daf6bc3..29172e1 100644
--- a/include/media/nbaio/SourceAudioBufferProvider.h
+++ b/include/media/nbaio/SourceAudioBufferProvider.h
@@ -31,7 +31,7 @@
virtual ~SourceAudioBufferProvider();
// AudioBufferProvider interface
- virtual status_t getNextBuffer(Buffer *buffer, int64_t pts);
+ virtual status_t getNextBuffer(Buffer *buffer);
virtual void releaseBuffer(Buffer *buffer);
// ExtendedAudioBufferProvider interface
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index a13d53a..e17e47e 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -163,7 +163,6 @@
AudioTrack::AudioTrack()
: mStatus(NO_INIT),
- mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
@@ -193,7 +192,6 @@
const audio_attributes_t* pAttributes,
bool doNotReconnect)
: mStatus(NO_INIT),
- mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
@@ -223,7 +221,6 @@
const audio_attributes_t* pAttributes,
bool doNotReconnect)
: mStatus(NO_INIT),
- mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
@@ -750,7 +747,7 @@
if (rate == mSampleRate) {
return NO_ERROR;
}
- if (mIsTimed || isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
+ if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
return INVALID_OPERATION;
}
if (mOutput == AUDIO_IO_HANDLE_NONE) {
@@ -777,10 +774,6 @@
uint32_t AudioTrack::getSampleRate() const
{
- if (mIsTimed) {
- return 0;
- }
-
AutoMutex lock(mLock);
// sample rate can be updated during playback by the offloaded decoder so we need to
@@ -800,10 +793,6 @@
uint32_t AudioTrack::getOriginalSampleRate() const
{
- if (mIsTimed) {
- return 0;
- }
-
return mOriginalSampleRate;
}
@@ -813,7 +802,7 @@
if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
return NO_ERROR;
}
- if (mIsTimed || isOffloadedOrDirect_l()) {
+ if (isOffloadedOrDirect_l()) {
return INVALID_OPERATION;
}
if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
@@ -877,7 +866,7 @@
return NO_INIT;
}
// Reject if timed track or compressed audio.
- if (mIsTimed || !audio_is_linear_pcm(mFormat)) {
+ if (!audio_is_linear_pcm(mFormat)) {
return INVALID_OPERATION;
}
// TODO also need to inform the server side (through mAudioTrack) that
@@ -888,7 +877,7 @@
status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
- if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
+ if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
return INVALID_OPERATION;
}
@@ -991,7 +980,7 @@
status_t AudioTrack::setPosition(uint32_t position)
{
- if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
+ if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
return INVALID_OPERATION;
}
if (position > mFrameCount) {
@@ -1056,7 +1045,7 @@
status_t AudioTrack::getBufferPosition(uint32_t *position)
{
- if (mSharedBuffer == 0 || mIsTimed) {
+ if (mSharedBuffer == 0) {
return INVALID_OPERATION;
}
if (position == NULL) {
@@ -1070,7 +1059,7 @@
status_t AudioTrack::reload()
{
- if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
+ if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
return INVALID_OPERATION;
}
@@ -1199,8 +1188,7 @@
mSampleRate = mAfSampleRate;
mOriginalSampleRate = mAfSampleRate;
}
- // Client decides whether the track is TIMED (see below), but can only express a preference
- // for FAST. Server will perform additional tests.
+ // Client can only express a preference for FAST. Server will perform additional tests.
if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
bool useCaseAllowed =
// either of these use cases:
@@ -1284,9 +1272,6 @@
}
IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
- if (mIsTimed) {
- trackFlags |= IAudioFlinger::TRACK_TIMED;
- }
pid_t tid = -1;
if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
@@ -1626,7 +1611,7 @@
ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
{
- if (mTransfer != TRANSFER_SYNC || mIsTimed) {
+ if (mTransfer != TRANSFER_SYNC) {
return INVALID_OPERATION;
}
@@ -1676,73 +1661,6 @@
// -------------------------------------------------------------------------
-TimedAudioTrack::TimedAudioTrack() {
- mIsTimed = true;
-}
-
-status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
-{
- AutoMutex lock(mLock);
- status_t result = UNKNOWN_ERROR;
-
-#if 1
- // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
- // while we are accessing the cblk
- sp<IAudioTrack> audioTrack = mAudioTrack;
- sp<IMemory> iMem = mCblkMemory;
-#endif
-
- // If the track is not invalid already, try to allocate a buffer. alloc
- // fails indicating that the server is dead, flag the track as invalid so
- // we can attempt to restore in just a bit.
- audio_track_cblk_t* cblk = mCblk;
- if (!(cblk->mFlags & CBLK_INVALID)) {
- result = mAudioTrack->allocateTimedBuffer(size, buffer);
- if (result == DEAD_OBJECT) {
- android_atomic_or(CBLK_INVALID, &cblk->mFlags);
- }
- }
-
- // If the track is invalid at this point, attempt to restore it. and try the
- // allocation one more time.
- if (cblk->mFlags & CBLK_INVALID) {
- result = restoreTrack_l("allocateTimedBuffer");
-
- if (result == NO_ERROR) {
- result = mAudioTrack->allocateTimedBuffer(size, buffer);
- }
- }
-
- return result;
-}
-
-status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
- int64_t pts)
-{
- status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
- {
- AutoMutex lock(mLock);
- audio_track_cblk_t* cblk = mCblk;
- // restart track if it was disabled by audioflinger due to previous underrun
- if (buffer->size() != 0 && status == NO_ERROR &&
- (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
- android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
- ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
- // FIXME ignoring status
- mAudioTrack->start();
- }
- }
- return status;
-}
-
-status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
- TargetTimeline target)
-{
- return mAudioTrack->setMediaTimeTransform(xform, target);
-}
-
-// -------------------------------------------------------------------------
-
nsecs_t AudioTrack::processAudioBuffer()
{
// Currently the AudioTrack thread is not created if there are no callbacks.
diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp
index 651cb61..636e3bb 100644
--- a/media/libmedia/IAudioTrack.cpp
+++ b/media/libmedia/IAudioTrack.cpp
@@ -36,9 +36,6 @@
RESERVED, // was MUTE
PAUSE,
ATTACH_AUX_EFFECT,
- ALLOCATE_TIMED_BUFFER,
- QUEUE_TIMED_BUFFER,
- SET_MEDIA_TIME_TRANSFORM,
SET_PARAMETERS,
GET_TIMESTAMP,
SIGNAL,
@@ -115,55 +112,6 @@
return status;
}
- virtual status_t allocateTimedBuffer(size_t size, sp<IMemory>* buffer) {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
- data.writeInt64(size);
- status_t status = remote()->transact(ALLOCATE_TIMED_BUFFER,
- data, &reply);
- if (status == NO_ERROR) {
- status = reply.readInt32();
- if (status == NO_ERROR) {
- *buffer = interface_cast<IMemory>(reply.readStrongBinder());
- if (*buffer != 0 && (*buffer)->pointer() == NULL) {
- (*buffer).clear();
- }
- }
- }
- return status;
- }
-
- virtual status_t queueTimedBuffer(const sp<IMemory>& buffer,
- int64_t pts) {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
- data.writeStrongBinder(IInterface::asBinder(buffer));
- data.writeInt64(pts);
- status_t status = remote()->transact(QUEUE_TIMED_BUFFER,
- data, &reply);
- if (status == NO_ERROR) {
- status = reply.readInt32();
- }
- return status;
- }
-
- virtual status_t setMediaTimeTransform(const LinearTransform& xform,
- int target) {
- Parcel data, reply;
- data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
- data.writeInt64(xform.a_zero);
- data.writeInt64(xform.b_zero);
- data.writeInt32(xform.a_to_b_numer);
- data.writeInt32(xform.a_to_b_denom);
- data.writeInt32(target);
- status_t status = remote()->transact(SET_MEDIA_TIME_TRANSFORM,
- data, &reply);
- if (status == NO_ERROR) {
- status = reply.readInt32();
- }
- return status;
- }
-
virtual status_t setParameters(const String8& keyValuePairs) {
Parcel data, reply;
data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
@@ -235,35 +183,6 @@
reply->writeInt32(attachAuxEffect(data.readInt32()));
return NO_ERROR;
} break;
- case ALLOCATE_TIMED_BUFFER: {
- CHECK_INTERFACE(IAudioTrack, data, reply);
- sp<IMemory> buffer;
- status_t status = allocateTimedBuffer(data.readInt64(), &buffer);
- reply->writeInt32(status);
- if (status == NO_ERROR) {
- reply->writeStrongBinder(IInterface::asBinder(buffer));
- }
- return NO_ERROR;
- } break;
- case QUEUE_TIMED_BUFFER: {
- CHECK_INTERFACE(IAudioTrack, data, reply);
- sp<IMemory> buffer = interface_cast<IMemory>(
- data.readStrongBinder());
- uint64_t pts = data.readInt64();
- reply->writeInt32(queueTimedBuffer(buffer, pts));
- return NO_ERROR;
- } break;
- case SET_MEDIA_TIME_TRANSFORM: {
- CHECK_INTERFACE(IAudioTrack, data, reply);
- LinearTransform xform;
- xform.a_zero = data.readInt64();
- xform.b_zero = data.readInt64();
- xform.a_to_b_numer = data.readInt32();
- xform.a_to_b_denom = data.readInt32();
- int target = data.readInt32();
- reply->writeInt32(setMediaTimeTransform(xform, target));
- return NO_ERROR;
- } break;
case SET_PARAMETERS: {
CHECK_INTERFACE(IAudioTrack, data, reply);
String8 keyValuePairs(data.readString8());
diff --git a/media/libnbaio/Android.mk b/media/libnbaio/Android.mk
index 1353f28..16c5040 100644
--- a/media/libnbaio/Android.mk
+++ b/media/libnbaio/Android.mk
@@ -28,7 +28,6 @@
LOCAL_SHARED_LIBRARIES := \
libaudioutils \
libbinder \
- libcommon_time_client \
libcutils \
libutils \
liblog
diff --git a/media/libnbaio/AudioBufferProviderSource.cpp b/media/libnbaio/AudioBufferProviderSource.cpp
index 551f516..cba8b59 100644
--- a/media/libnbaio/AudioBufferProviderSource.cpp
+++ b/media/libnbaio/AudioBufferProviderSource.cpp
@@ -46,16 +46,14 @@
return mBuffer.raw != NULL ? mBuffer.frameCount - mConsumed : 0;
}
-ssize_t AudioBufferProviderSource::read(void *buffer,
- size_t count,
- int64_t readPTS)
+ssize_t AudioBufferProviderSource::read(void *buffer, size_t count)
{
if (CC_UNLIKELY(!mNegotiated)) {
return NEGOTIATE;
}
if (CC_UNLIKELY(mBuffer.raw == NULL)) {
mBuffer.frameCount = count;
- status_t status = mProvider->getNextBuffer(&mBuffer, readPTS);
+ status_t status = mProvider->getNextBuffer(&mBuffer);
if (status != OK) {
return status == NOT_ENOUGH_DATA ? (ssize_t) WOULD_BLOCK : (ssize_t) status;
}
@@ -81,8 +79,7 @@
return count;
}
-ssize_t AudioBufferProviderSource::readVia(readVia_t via, size_t total, void *user,
- int64_t readPTS, size_t block)
+ssize_t AudioBufferProviderSource::readVia(readVia_t via, size_t total, void *user, size_t block)
{
if (CC_UNLIKELY(!mNegotiated)) {
return NEGOTIATE;
@@ -102,7 +99,7 @@
// 1 <= count <= block
if (CC_UNLIKELY(mBuffer.raw == NULL)) {
mBuffer.frameCount = count;
- status_t status = mProvider->getNextBuffer(&mBuffer, readPTS);
+ status_t status = mProvider->getNextBuffer(&mBuffer);
if (CC_LIKELY(status == OK)) {
ALOG_ASSERT(mBuffer.raw != NULL && mBuffer.frameCount <= count);
// mConsumed is 0 either from constructor or after releaseBuffer()
@@ -120,8 +117,8 @@
count = available;
}
if (CC_LIKELY(count > 0)) {
- char* readTgt = (char *) mBuffer.raw + (mConsumed * mFrameSize);
- ssize_t ret = via(user, readTgt, count, readPTS);
+ ssize_t ret = via(user, (char *) mBuffer.raw + (mConsumed * mFrameSize), count);
+
if (CC_UNLIKELY(ret <= 0)) {
if (CC_LIKELY(accumulator > 0)) {
return accumulator;
diff --git a/media/libnbaio/AudioStreamInSource.cpp b/media/libnbaio/AudioStreamInSource.cpp
index 6aab48a..286e0eb 100644
--- a/media/libnbaio/AudioStreamInSource.cpp
+++ b/media/libnbaio/AudioStreamInSource.cpp
@@ -64,7 +64,7 @@
return mFramesOverrun;
}
-ssize_t AudioStreamInSource::read(void *buffer, size_t count, int64_t readPTS __unused)
+ssize_t AudioStreamInSource::read(void *buffer, size_t count)
{
if (CC_UNLIKELY(!Format_isValid(mFormat))) {
return NEGOTIATE;
diff --git a/media/libnbaio/AudioStreamOutSink.cpp b/media/libnbaio/AudioStreamOutSink.cpp
index 0d5f935..3f4e0bb 100644
--- a/media/libnbaio/AudioStreamOutSink.cpp
+++ b/media/libnbaio/AudioStreamOutSink.cpp
@@ -66,18 +66,6 @@
return ret;
}
-status_t AudioStreamOutSink::getNextWriteTimestamp(int64_t *timestamp) {
- ALOG_ASSERT(timestamp != NULL);
-
- if (NULL == mStream)
- return INVALID_OPERATION;
-
- if (NULL == mStream->get_next_write_timestamp)
- return INVALID_OPERATION;
-
- return mStream->get_next_write_timestamp(mStream, timestamp);
-}
-
status_t AudioStreamOutSink::getTimestamp(AudioTimestamp& timestamp)
{
if (mStream->get_presentation_position == NULL) {
diff --git a/media/libnbaio/MonoPipe.cpp b/media/libnbaio/MonoPipe.cpp
index 129e9ef..aef9834 100644
--- a/media/libnbaio/MonoPipe.cpp
+++ b/media/libnbaio/MonoPipe.cpp
@@ -19,10 +19,8 @@
#define LOG_TAG "MonoPipe"
//#define LOG_NDEBUG 0
-#include <common_time/cc_helper.h>
#include <cutils/atomic.h>
#include <cutils/compiler.h>
-#include <utils/LinearTransform.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <media/AudioBufferProvider.h>
@@ -32,26 +30,8 @@
namespace android {
-static uint64_t cacheN; // output of CCHelper::getLocalFreq()
-static bool cacheValid; // whether cacheN is valid
-static pthread_once_t cacheOnceControl = PTHREAD_ONCE_INIT;
-
-static void cacheOnceInit()
-{
- CCHelper tmpHelper;
- status_t res;
- if (OK != (res = tmpHelper.getLocalFreq(&cacheN))) {
- ALOGE("Failed to fetch local time frequency when constructing a"
- " MonoPipe (res = %d). getNextWriteTimestamp calls will be"
- " non-functional", res);
- return;
- }
- cacheValid = true;
-}
-
MonoPipe::MonoPipe(size_t reqFrames, const NBAIO_Format& format, bool writeCanBlock) :
NBAIO_Sink(format),
- mUpdateSeq(0),
mReqFrames(reqFrames),
mMaxFrames(roundup(reqFrames)),
mBuffer(malloc(mMaxFrames * Format_frameSize(format))),
@@ -66,36 +46,6 @@
mTimestampMutator(&mTimestampShared),
mTimestampObserver(&mTimestampShared)
{
- uint64_t N, D;
-
- mNextRdPTS = AudioBufferProvider::kInvalidPTS;
-
- mSamplesToLocalTime.a_zero = 0;
- mSamplesToLocalTime.b_zero = 0;
- mSamplesToLocalTime.a_to_b_numer = 0;
- mSamplesToLocalTime.a_to_b_denom = 0;
-
- D = Format_sampleRate(format);
-
- (void) pthread_once(&cacheOnceControl, cacheOnceInit);
- if (!cacheValid) {
- // log has already been done
- return;
- }
- N = cacheN;
-
- LinearTransform::reduce(&N, &D);
- static const uint64_t kSignedHiBitsMask = ~(0x7FFFFFFFull);
- static const uint64_t kUnsignedHiBitsMask = ~(0xFFFFFFFFull);
- if ((N & kSignedHiBitsMask) || (D & kUnsignedHiBitsMask)) {
- ALOGE("Cannot reduce sample rate to local clock frequency ratio to fit"
- " in a 32/32 bit rational. (max reduction is 0x%016" PRIx64 "/0x%016" PRIx64
- "). getNextWriteTimestamp calls will be non-functional", N, D);
- return;
- }
-
- mSamplesToLocalTime.a_to_b_numer = static_cast<int32_t>(N);
- mSamplesToLocalTime.a_to_b_denom = static_cast<uint32_t>(D);
}
MonoPipe::~MonoPipe()
@@ -223,104 +173,6 @@
mSetpoint = setpoint;
}
-status_t MonoPipe::getNextWriteTimestamp(int64_t *timestamp)
-{
- int32_t front;
-
- ALOG_ASSERT(NULL != timestamp);
-
- if (0 == mSamplesToLocalTime.a_to_b_denom)
- return UNKNOWN_ERROR;
-
- observeFrontAndNRPTS(&front, timestamp);
-
- if (AudioBufferProvider::kInvalidPTS != *timestamp) {
- // If we have a valid read-pointer and next read timestamp pair, then
- // use the current value of the write pointer to figure out how many
- // frames are in the buffer, and offset the timestamp by that amt. Then
- // next time we write to the MonoPipe, the data will hit the speakers at
- // the next read timestamp plus the current amount of data in the
- // MonoPipe.
- size_t pendingFrames = (mRear - front) & (mMaxFrames - 1);
- *timestamp = offsetTimestampByAudioFrames(*timestamp, pendingFrames);
- }
-
- return OK;
-}
-
-void MonoPipe::updateFrontAndNRPTS(int32_t newFront, int64_t newNextRdPTS)
-{
- // Set the MSB of the update sequence number to indicate that there is a
- // multi-variable update in progress. Use an atomic store with an "acquire"
- // barrier to make sure that the next operations cannot be re-ordered and
- // take place before the change to mUpdateSeq is commited..
- int32_t tmp = mUpdateSeq | 0x80000000;
- android_atomic_acquire_store(tmp, &mUpdateSeq);
-
- // Update mFront and mNextRdPTS
- mFront = newFront;
- mNextRdPTS = newNextRdPTS;
-
- // We are finished with the update. Compute the next sequnce number (which
- // should be the old sequence number, plus one, and with the MSB cleared)
- // and then store it in mUpdateSeq using an atomic store with a "release"
- // barrier so our update operations cannot be re-ordered past the update of
- // the sequence number.
- tmp = (tmp + 1) & 0x7FFFFFFF;
- android_atomic_release_store(tmp, &mUpdateSeq);
-}
-
-void MonoPipe::observeFrontAndNRPTS(int32_t *outFront, int64_t *outNextRdPTS)
-{
- // Perform an atomic observation of mFront and mNextRdPTS. Basically,
- // atomically observe the sequence number, then observer the variables, then
- // atomically observe the sequence number again. If the two observations of
- // the sequence number match, and the update-in-progress bit was not set,
- // then we know we have a successful atomic observation. Otherwise, we loop
- // around and try again.
- //
- // Note, it is very important that the observer be a lower priority thread
- // than the updater. If the updater is lower than the observer, or they are
- // the same priority and running with SCHED_FIFO (implying that quantum
- // based premption is disabled) then we run the risk of deadlock.
- int32_t seqOne, seqTwo;
-
- do {
- seqOne = android_atomic_acquire_load(&mUpdateSeq);
- *outFront = mFront;
- *outNextRdPTS = mNextRdPTS;
- seqTwo = android_atomic_release_load(&mUpdateSeq);
- } while ((seqOne != seqTwo) || (seqOne & 0x80000000));
-}
-
-int64_t MonoPipe::offsetTimestampByAudioFrames(int64_t ts, size_t audFrames)
-{
- if (0 == mSamplesToLocalTime.a_to_b_denom)
- return AudioBufferProvider::kInvalidPTS;
-
- if (ts == AudioBufferProvider::kInvalidPTS)
- return AudioBufferProvider::kInvalidPTS;
-
- int64_t frame_lt_duration;
- if (!mSamplesToLocalTime.doForwardTransform(audFrames,
- &frame_lt_duration)) {
- // This should never fail, but if there is a bug which is causing it
- // to fail, this message would probably end up flooding the logs
- // because the conversion would probably fail forever. Log the
- // error, but then zero out the ratio in the linear transform so
- // that we don't try to do any conversions from now on. This
- // MonoPipe's getNextWriteTimestamp is now broken for good.
- ALOGE("Overflow when attempting to convert %zu audio frames to"
- " duration in local time. getNextWriteTimestamp will fail from"
- " now on.", audFrames);
- mSamplesToLocalTime.a_to_b_numer = 0;
- mSamplesToLocalTime.a_to_b_denom = 0;
- return AudioBufferProvider::kInvalidPTS;
- }
-
- return ts + frame_lt_duration;
-}
-
void MonoPipe::shutdown(bool newState)
{
mIsShutdown = newState;
diff --git a/media/libnbaio/MonoPipeReader.cpp b/media/libnbaio/MonoPipeReader.cpp
index e4d3ed8..7e09544 100644
--- a/media/libnbaio/MonoPipeReader.cpp
+++ b/media/libnbaio/MonoPipeReader.cpp
@@ -43,25 +43,11 @@
return ret;
}
-ssize_t MonoPipeReader::read(void *buffer, size_t count, int64_t readPTS)
+ssize_t MonoPipeReader::read(void *buffer, size_t count)
{
- // Compute the "next read PTS" and cache it. Callers of read pass a read
- // PTS indicating the local time for which they are requesting data along
- // with a count (which is the number of audio frames they are going to
- // ultimately pass to the next stage of the pipeline). Offsetting readPTS
- // by the duration of count will give us the readPTS which will be passed to
- // us next time, assuming they system continues to operate in steady state
- // with no discontinuities. We stash this value so it can be used by the
- // MonoPipe writer to imlement getNextWriteTimestamp.
- int64_t nextReadPTS;
- nextReadPTS = mPipe->offsetTimestampByAudioFrames(readPTS, count);
-
// count == 0 is unlikely and not worth checking for explicitly; will be handled automatically
ssize_t red = availableToRead();
if (CC_UNLIKELY(red <= 0)) {
- // Uh-oh, looks like we are underflowing. Update the next read PTS and
- // get out.
- mPipe->updateFrontAndNRPTS(mPipe->mFront, nextReadPTS);
return red;
}
if (CC_LIKELY((size_t) red > count)) {
@@ -80,7 +66,7 @@
memcpy((char *) buffer + (part1 * mFrameSize), mPipe->mBuffer, part2 * mFrameSize);
}
}
- mPipe->updateFrontAndNRPTS(red + mPipe->mFront, nextReadPTS);
+ android_atomic_release_store(red + mPipe->mFront, &mPipe->mFront);
mFramesRead += red;
}
return red;
diff --git a/media/libnbaio/NBAIO.cpp b/media/libnbaio/NBAIO.cpp
index d641e74..1cb4410 100644
--- a/media/libnbaio/NBAIO.cpp
+++ b/media/libnbaio/NBAIO.cpp
@@ -97,8 +97,7 @@
}
// This is a default implementation; it is expected that subclasses will optimize this.
-ssize_t NBAIO_Source::readVia(readVia_t via, size_t total, void *user,
- int64_t readPTS, size_t block)
+ssize_t NBAIO_Source::readVia(readVia_t via, size_t total, void *user, size_t block)
{
if (!mNegotiated) {
return (ssize_t) NEGOTIATE;
@@ -117,11 +116,11 @@
if (count > block) {
count = block;
}
- ssize_t ret = read(buffer, count, readPTS);
+ ssize_t ret = read(buffer, count);
if (ret > 0) {
ALOG_ASSERT((size_t) ret <= count);
size_t maxRet = ret;
- ret = via(user, buffer, maxRet, readPTS);
+ ret = via(user, buffer, maxRet);
if (ret > 0) {
ALOG_ASSERT((size_t) ret <= maxRet);
accumulator += ret;
diff --git a/media/libnbaio/PipeReader.cpp b/media/libnbaio/PipeReader.cpp
index c8e4953..b096903 100644
--- a/media/libnbaio/PipeReader.cpp
+++ b/media/libnbaio/PipeReader.cpp
@@ -59,7 +59,7 @@
return avail;
}
-ssize_t PipeReader::read(void *buffer, size_t count, int64_t readPTS __unused)
+ssize_t PipeReader::read(void *buffer, size_t count)
{
ssize_t avail = availableToRead();
if (CC_UNLIKELY(avail <= 0)) {
diff --git a/media/libnbaio/SourceAudioBufferProvider.cpp b/media/libnbaio/SourceAudioBufferProvider.cpp
index 04c42c9..dc01c0e 100644
--- a/media/libnbaio/SourceAudioBufferProvider.cpp
+++ b/media/libnbaio/SourceAudioBufferProvider.cpp
@@ -45,7 +45,7 @@
free(mAllocated);
}
-status_t SourceAudioBufferProvider::getNextBuffer(Buffer *buffer, int64_t pts)
+status_t SourceAudioBufferProvider::getNextBuffer(Buffer *buffer)
{
ALOG_ASSERT(buffer != NULL && buffer->frameCount > 0 && mGetCount == 0);
// any leftover data available?
@@ -73,7 +73,7 @@
}
{
// read from source
- ssize_t actual = mSource->read(mAllocated, buffer->frameCount, pts);
+ ssize_t actual = mSource->read(mAllocated, buffer->frameCount);
if (actual > 0) {
ALOG_ASSERT((size_t) actual <= buffer->frameCount);
mOffset = 0;
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 87f9aaa..6e3eb83 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -41,7 +41,6 @@
libaudioresampler \
libaudiospdif \
libaudioutils \
- libcommon_time_client \
libcutils \
libutils \
liblog \
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index d2786b9..f4bd1c4 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -56,8 +56,6 @@
#include <powermanager/PowerManager.h>
-#include <common_time/cc_helper.h>
-
#include <media/IMediaLogService.h>
#include <media/nbaio/Pipe.h>
@@ -1359,8 +1357,7 @@
AudioFlinger::Client::Client(const sp<AudioFlinger>& audioFlinger, pid_t pid)
: RefBase(),
mAudioFlinger(audioFlinger),
- mPid(pid),
- mTimedTrackCount(0)
+ mPid(pid)
{
size_t heapSize = kClientSharedHeapSizeBytes;
// Increase heap size on non low ram devices to limit risk of reconnection failure for
@@ -1382,31 +1379,6 @@
return mMemoryDealer;
}
-// Reserve one of the limited slots for a timed audio track associated
-// with this client
-bool AudioFlinger::Client::reserveTimedTrack()
-{
- const int kMaxTimedTracksPerClient = 4;
-
- Mutex::Autolock _l(mTimedTrackLock);
-
- if (mTimedTrackCount >= kMaxTimedTracksPerClient) {
- ALOGW("can not create timed track - pid %d has exceeded the limit",
- mPid);
- return false;
- }
-
- mTimedTrackCount++;
- return true;
-}
-
-// Release a slot for a timed audio track
-void AudioFlinger::Client::releaseTimedTrack()
-{
- Mutex::Autolock _l(mTimedTrackLock);
- mTimedTrackCount--;
-}
-
// ----------------------------------------------------------------------------
AudioFlinger::NotificationClient::NotificationClient(const sp<AudioFlinger>& audioFlinger,
@@ -2979,8 +2951,7 @@
void *buffer = malloc(TEE_SINK_READ * frameSize);
for (;;) {
size_t count = TEE_SINK_READ;
- ssize_t actual = teeSource->read(buffer, count,
- AudioBufferProvider::kInvalidPTS);
+ ssize_t actual = teeSource->read(buffer, count);
bool wasFirstRead = firstRead;
firstRead = false;
if (actual <= 0) {
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 2571e67..62a3115 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -23,8 +23,6 @@
#include <sys/types.h>
#include <limits.h>
-#include <common_time/cc_helper.h>
-
#include <cutils/compiler.h>
#include <media/IAudioFlinger.h>
@@ -414,18 +412,12 @@
pid_t pid() const { return mPid; }
sp<AudioFlinger> audioFlinger() const { return mAudioFlinger; }
- bool reserveTimedTrack();
- void releaseTimedTrack();
-
private:
Client(const Client&);
Client& operator = (const Client&);
const sp<AudioFlinger> mAudioFlinger;
sp<MemoryDealer> mMemoryDealer;
const pid_t mPid;
-
- Mutex mTimedTrackLock;
- int mTimedTrackCount;
};
// --- Notification Client ---
@@ -496,12 +488,6 @@
virtual void flush();
virtual void pause();
virtual status_t attachAuxEffect(int effectId);
- virtual status_t allocateTimedBuffer(size_t size,
- sp<IMemory>* buffer);
- virtual status_t queueTimedBuffer(const sp<IMemory>& buffer,
- int64_t pts);
- virtual status_t setMediaTimeTransform(const LinearTransform& xform,
- int target);
virtual status_t setParameters(const String8& keyValuePairs);
virtual status_t getTimestamp(AudioTimestamp& timestamp);
virtual void signal(); // signal playback thread for a change in control block
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 8a9a837..aea6b67 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -36,8 +36,6 @@
#include <audio_utils/primitives.h>
#include <audio_utils/format.h>
-#include <common_time/local_clock.h>
-#include <common_time/cc_helper.h>
#include "AudioMixerOps.h"
#include "AudioMixer.h"
@@ -786,7 +784,6 @@
mMixerInFormat,
resamplerChannelCount,
devSampleRate, quality);
- resampler->setLocalTimeFreq(sLocalTimeFreq);
}
return true;
}
@@ -906,13 +903,13 @@
}
-void AudioMixer::process(int64_t pts)
+void AudioMixer::process()
{
- mState.hook(&mState, pts);
+ mState.hook(&mState);
}
-void AudioMixer::process__validate(state_t* state, int64_t pts)
+void AudioMixer::process__validate(state_t* state)
{
ALOGW_IF(!state->needsChanged,
"in process__validate() but nothing's invalid");
@@ -1042,7 +1039,7 @@
countActiveTracks, state->enabledTracks,
all16BitsStereoNoResample, resampling, volumeRamp);
- state->hook(state, pts);
+ state->hook(state);
// Now that the volume ramp has been done, set optimal state and
// track hooks for subsequent mixer process
@@ -1367,7 +1364,7 @@
}
// no-op case
-void AudioMixer::process__nop(state_t* state, int64_t pts)
+void AudioMixer::process__nop(state_t* state)
{
ALOGVV("process__nop\n");
uint32_t e0 = state->enabledTracks;
@@ -1401,9 +1398,7 @@
size_t outFrames = state->frameCount;
while (outFrames) {
t3.buffer.frameCount = outFrames;
- int64_t outputPTS = calculateOutputPTS(
- t3, pts, state->frameCount - outFrames);
- t3.bufferProvider->getNextBuffer(&t3.buffer, outputPTS);
+ t3.bufferProvider->getNextBuffer(&t3.buffer);
if (t3.buffer.raw == NULL) break;
outFrames -= t3.buffer.frameCount;
t3.bufferProvider->releaseBuffer(&t3.buffer);
@@ -1414,7 +1409,7 @@
}
// generic code without resampling
-void AudioMixer::process__genericNoResampling(state_t* state, int64_t pts)
+void AudioMixer::process__genericNoResampling(state_t* state)
{
ALOGVV("process__genericNoResampling\n");
int32_t outTemp[BLOCKSIZE * MAX_NUM_CHANNELS] __attribute__((aligned(32)));
@@ -1427,7 +1422,7 @@
e0 &= ~(1<<i);
track_t& t = state->tracks[i];
t.buffer.frameCount = state->frameCount;
- t.bufferProvider->getNextBuffer(&t.buffer, pts);
+ t.bufferProvider->getNextBuffer(&t.buffer);
t.frameCount = t.buffer.frameCount;
t.in = t.buffer.raw;
}
@@ -1486,9 +1481,7 @@
t.bufferProvider->releaseBuffer(&t.buffer);
t.buffer.frameCount = (state->frameCount - numFrames) -
(BLOCKSIZE - outFrames);
- int64_t outputPTS = calculateOutputPTS(
- t, pts, numFrames + (BLOCKSIZE - outFrames));
- t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
+ t.bufferProvider->getNextBuffer(&t.buffer);
t.in = t.buffer.raw;
if (t.in == NULL) {
enabledTracks &= ~(1<<i);
@@ -1522,7 +1515,7 @@
// generic code with resampling
-void AudioMixer::process__genericResampling(state_t* state, int64_t pts)
+void AudioMixer::process__genericResampling(state_t* state)
{
ALOGVV("process__genericResampling\n");
// this const just means that local variable outTemp doesn't change
@@ -1561,7 +1554,6 @@
// acquire/release the buffers because it's done by
// the resampler.
if (t.needs & NEEDS_RESAMPLE) {
- t.resampler->setPTS(pts);
t.hook(&t, outTemp, numFrames, state->resampleTemp, aux);
} else {
@@ -1569,8 +1561,7 @@
while (outFrames < numFrames) {
t.buffer.frameCount = numFrames - outFrames;
- int64_t outputPTS = calculateOutputPTS(t, pts, outFrames);
- t.bufferProvider->getNextBuffer(&t.buffer, outputPTS);
+ t.bufferProvider->getNextBuffer(&t.buffer);
t.in = t.buffer.raw;
// t.in == NULL can happen if the track was flushed just after having
// been enabled for mixing.
@@ -1592,8 +1583,7 @@
}
// one track, 16 bits stereo without resampling is the most common case
-void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state,
- int64_t pts)
+void AudioMixer::process__OneTrack16BitsStereoNoResampling(state_t* state)
{
ALOGVV("process__OneTrack16BitsStereoNoResampling\n");
// This method is only called when state->enabledTracks has exactly
@@ -1615,8 +1605,7 @@
const uint32_t vrl = t.volumeRL;
while (numFrames) {
b.frameCount = numFrames;
- int64_t outputPTS = calculateOutputPTS(t, pts, out - t.mainBuffer);
- t.bufferProvider->getNextBuffer(&b, outputPTS);
+ t.bufferProvider->getNextBuffer(&b);
const int16_t *in = b.i16;
// in == NULL can happen if the track was flushed just after having
@@ -1677,24 +1666,10 @@
}
}
-int64_t AudioMixer::calculateOutputPTS(const track_t& t, int64_t basePTS,
- int outputFrameIndex)
-{
- if (AudioBufferProvider::kInvalidPTS == basePTS) {
- return AudioBufferProvider::kInvalidPTS;
- }
-
- return basePTS + ((outputFrameIndex * sLocalTimeFreq) / t.sampleRate);
-}
-
-/*static*/ uint64_t AudioMixer::sLocalTimeFreq;
/*static*/ pthread_once_t AudioMixer::sOnceControl = PTHREAD_ONCE_INIT;
/*static*/ void AudioMixer::sInitRoutine()
{
- LocalClock lc;
- sLocalTimeFreq = lc.getLocalFreq(); // for the resampler
-
DownmixerBufferProvider::init(); // for the downmixer
}
@@ -1836,7 +1811,7 @@
* TA: int32_t (Q4.27)
*/
template <int MIXTYPE, typename TO, typename TI, typename TA>
-void AudioMixer::process_NoResampleOneTrack(state_t* state, int64_t pts)
+void AudioMixer::process_NoResampleOneTrack(state_t* state)
{
ALOGVV("process_NoResampleOneTrack\n");
// CLZ is faster than CTZ on ARM, though really not sure if true after 31 - clz.
@@ -1852,8 +1827,7 @@
AudioBufferProvider::Buffer& b(t->buffer);
// get input buffer
b.frameCount = numFrames;
- const int64_t outputPTS = calculateOutputPTS(*t, pts, state->frameCount - numFrames);
- t->bufferProvider->getNextBuffer(&b, outputPTS);
+ t->bufferProvider->getNextBuffer(&b);
const TI *in = reinterpret_cast<TI*>(b.raw);
// in == NULL can happen if the track was flushed just after having
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index 7165c6c..e788ac3 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -126,7 +126,7 @@
void setParameter(int name, int target, int param, void *value);
void setBufferProvider(int name, AudioBufferProvider* bufferProvider);
- void process(int64_t pts);
+ void process();
uint32_t trackNames() const { return mTrackNames; }
@@ -278,7 +278,7 @@
void reconfigureBufferProviders();
};
- typedef void (*process_hook_t)(state_t* state, int64_t pts);
+ typedef void (*process_hook_t)(state_t* state);
// pad to 32-bytes to fill cache line
struct state_t {
@@ -328,17 +328,12 @@
static void volumeStereo(track_t* t, int32_t* out, size_t frameCount, int32_t* temp,
int32_t* aux);
- static void process__validate(state_t* state, int64_t pts);
- static void process__nop(state_t* state, int64_t pts);
- static void process__genericNoResampling(state_t* state, int64_t pts);
- static void process__genericResampling(state_t* state, int64_t pts);
- static void process__OneTrack16BitsStereoNoResampling(state_t* state,
- int64_t pts);
+ static void process__validate(state_t* state);
+ static void process__nop(state_t* state);
+ static void process__genericNoResampling(state_t* state);
+ static void process__genericResampling(state_t* state);
+ static void process__OneTrack16BitsStereoNoResampling(state_t* state);
- static int64_t calculateOutputPTS(const track_t& t, int64_t basePTS,
- int outputFrameIndex);
-
- static uint64_t sLocalTimeFreq;
static pthread_once_t sOnceControl;
static void sInitRoutine();
@@ -359,7 +354,7 @@
// multi-format process hooks
template <int MIXTYPE, typename TO, typename TI, typename TA>
- static void process_NoResampleOneTrack(state_t* state, int64_t pts);
+ static void process_NoResampleOneTrack(state_t* state);
// multi-format track hooks
template <int MIXTYPE, typename TO, typename TI, typename TA>
diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
index e49b7b1..7c20478 100644
--- a/services/audioflinger/AudioResampler.cpp
+++ b/services/audioflinger/AudioResampler.cpp
@@ -261,8 +261,8 @@
int32_t sampleRate, src_quality quality) :
mChannelCount(inChannelCount),
mSampleRate(sampleRate), mInSampleRate(sampleRate), mInputIndex(0),
- mPhaseFraction(0), mLocalTimeFreq(0),
- mPTS(AudioBufferProvider::kInvalidPTS), mQuality(quality) {
+ mPhaseFraction(0),
+ mQuality(quality) {
const int maxChannels = quality < DYN_LOW_QUALITY ? 2 : 8;
if (inChannelCount < 1
@@ -304,23 +304,6 @@
mVolume[1] = u4_12_from_float(clampFloatVol(right));
}
-void AudioResampler::setLocalTimeFreq(uint64_t freq) {
- mLocalTimeFreq = freq;
-}
-
-void AudioResampler::setPTS(int64_t pts) {
- mPTS = pts;
-}
-
-int64_t AudioResampler::calculateOutputPTS(int outputFrameIndex) {
-
- if (mPTS == AudioBufferProvider::kInvalidPTS) {
- return AudioBufferProvider::kInvalidPTS;
- } else {
- return mPTS + ((outputFrameIndex * mLocalTimeFreq) / mSampleRate);
- }
-}
-
void AudioResampler::reset() {
mInputIndex = 0;
mPhaseFraction = 0;
@@ -368,8 +351,7 @@
// buffer is empty, fetch a new one
while (mBuffer.frameCount == 0) {
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer,
- calculateOutputPTS(outputIndex / 2));
+ provider->getNextBuffer(&mBuffer);
if (mBuffer.raw == NULL) {
goto resampleStereo16_exit;
}
@@ -465,8 +447,7 @@
// buffer is empty, fetch a new one
while (mBuffer.frameCount == 0) {
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer,
- calculateOutputPTS(outputIndex / 2));
+ provider->getNextBuffer(&mBuffer);
if (mBuffer.raw == NULL) {
mInputIndex = inputIndex;
mPhaseFraction = phaseFraction;
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index a8e3e6f..c4627e8 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -59,10 +59,6 @@
virtual void init() = 0;
virtual void setSampleRate(int32_t inSampleRate);
virtual void setVolume(float left, float right);
- virtual void setLocalTimeFreq(uint64_t freq);
-
- // set the PTS of the next buffer output by the resampler
- virtual void setPTS(int64_t pts);
// Resample int16_t samples from provider and accumulate into 'out'.
// A mono provider delivers a sequence of samples.
@@ -103,8 +99,6 @@
AudioResampler(const AudioResampler&);
AudioResampler& operator=(const AudioResampler&);
- int64_t calculateOutputPTS(int outputFrameIndex);
-
const int32_t mChannelCount;
const int32_t mSampleRate;
int32_t mInSampleRate;
@@ -117,8 +111,6 @@
size_t mInputIndex;
int32_t mPhaseIncrement;
uint32_t mPhaseFraction;
- uint64_t mLocalTimeFreq;
- int64_t mPTS;
// returns the inFrameCount required to generate outFrameCount frames.
//
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
index 172c2a5..6a324ad 100644
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ b/services/audioflinger/AudioResamplerCubic.cpp
@@ -66,7 +66,7 @@
// fetch first buffer
if (mBuffer.frameCount == 0) {
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer, mPTS);
+ provider->getNextBuffer(&mBuffer);
if (mBuffer.raw == NULL) {
return 0;
}
@@ -97,8 +97,7 @@
inputIndex = 0;
provider->releaseBuffer(&mBuffer);
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer,
- calculateOutputPTS(outputIndex / 2));
+ provider->getNextBuffer(&mBuffer);
if (mBuffer.raw == NULL) {
goto save_state; // ugly, but efficient
}
@@ -135,7 +134,7 @@
// fetch first buffer
if (mBuffer.frameCount == 0) {
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer, mPTS);
+ provider->getNextBuffer(&mBuffer);
if (mBuffer.raw == NULL) {
return 0;
}
@@ -166,8 +165,7 @@
inputIndex = 0;
provider->releaseBuffer(&mBuffer);
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer,
- calculateOutputPTS(outputIndex / 2));
+ provider->getNextBuffer(&mBuffer);
if (mBuffer.raw == NULL) {
goto save_state; // ugly, but efficient
}
diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp
index 6481b85..618b56c 100644
--- a/services/audioflinger/AudioResamplerDyn.cpp
+++ b/services/audioflinger/AudioResamplerDyn.cpp
@@ -527,8 +527,7 @@
// We may not fetch a new buffer if the existing data is sufficient.
while (mBuffer.frameCount == 0 && inFrameCount > 0) {
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer,
- calculateOutputPTS(outputIndex / OUTPUT_CHANNELS));
+ provider->getNextBuffer(&mBuffer);
if (mBuffer.raw == NULL) {
goto resample_exit;
}
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp
index 41730ee..e93c064 100644
--- a/services/audioflinger/AudioResamplerSinc.cpp
+++ b/services/audioflinger/AudioResamplerSinc.cpp
@@ -301,8 +301,7 @@
// buffer is empty, fetch a new one
while (mBuffer.frameCount == 0) {
mBuffer.frameCount = inFrameCount;
- provider->getNextBuffer(&mBuffer,
- calculateOutputPTS(outputIndex / 2));
+ provider->getNextBuffer(&mBuffer);
if (mBuffer.raw == NULL) {
goto resample_exit;
}
diff --git a/services/audioflinger/BufferProviders.cpp b/services/audioflinger/BufferProviders.cpp
index a8be206..2ca2cac 100644
--- a/services/audioflinger/BufferProviders.cpp
+++ b/services/audioflinger/BufferProviders.cpp
@@ -70,13 +70,12 @@
free(mLocalBufferData);
}
-status_t CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
- int64_t pts)
+status_t CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer)
{
- //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
- // this, pBuffer, pBuffer->frameCount, pts);
+ //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu))",
+ // this, pBuffer, pBuffer->frameCount);
if (mLocalBufferFrameCount == 0) {
- status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
+ status_t res = mTrackBufferProvider->getNextBuffer(pBuffer);
if (res == OK) {
copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount);
}
@@ -84,7 +83,7 @@
}
if (mBuffer.frameCount == 0) {
mBuffer.frameCount = pBuffer->frameCount;
- status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
+ status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer);
// At one time an upstream buffer provider had
// res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014.
//
@@ -356,13 +355,13 @@
}
status_t TimestretchBufferProvider::getNextBuffer(
- AudioBufferProvider::Buffer *pBuffer, int64_t pts)
+ AudioBufferProvider::Buffer *pBuffer)
{
- ALOGV("TimestretchBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
- this, pBuffer, pBuffer->frameCount, pts);
+ ALOGV("TimestretchBufferProvider(%p)::getNextBuffer(%p (%zu))",
+ this, pBuffer, pBuffer->frameCount);
// BYPASS
- //return mTrackBufferProvider->getNextBuffer(pBuffer, pts);
+ //return mTrackBufferProvider->getNextBuffer(pBuffer);
// check if previously processed data is sufficient.
if (pBuffer->frameCount <= mRemaining) {
@@ -391,7 +390,7 @@
mBuffer.frameCount = mPlaybackRate.mSpeed == AUDIO_TIMESTRETCH_SPEED_NORMAL
? outputDesired : outputDesired * mPlaybackRate.mSpeed + 1;
- status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
+ status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer);
ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
diff --git a/services/audioflinger/BufferProviders.h b/services/audioflinger/BufferProviders.h
index 4bc895c..abd43c6 100644
--- a/services/audioflinger/BufferProviders.h
+++ b/services/audioflinger/BufferProviders.h
@@ -64,7 +64,7 @@
virtual ~CopyBufferProvider();
// Overrides AudioBufferProvider methods
- virtual status_t getNextBuffer(Buffer *buffer, int64_t pts);
+ virtual status_t getNextBuffer(Buffer *buffer);
virtual void releaseBuffer(Buffer *buffer);
// Overrides PassthruBufferProvider
@@ -156,7 +156,7 @@
virtual ~TimestretchBufferProvider();
// Overrides AudioBufferProvider methods
- virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
+ virtual status_t getNextBuffer(Buffer* buffer);
virtual void releaseBuffer(Buffer* buffer);
// Overrides PassthruBufferProvider
diff --git a/services/audioflinger/FastCapture.cpp b/services/audioflinger/FastCapture.cpp
index 1bba5f6..bb83858 100644
--- a/services/audioflinger/FastCapture.cpp
+++ b/services/audioflinger/FastCapture.cpp
@@ -166,8 +166,7 @@
ALOG_ASSERT(mReadBuffer != NULL);
dumpState->mReadSequence++;
ATRACE_BEGIN("read");
- ssize_t framesRead = mInputSource->read(mReadBuffer, frameCount,
- AudioBufferProvider::kInvalidPTS);
+ ssize_t framesRead = mInputSource->read(mReadBuffer, frameCount);
ATRACE_END();
dumpState->mReadSequence++;
if (framesRead >= 0) {
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 3f99b43..cc9acff 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -402,13 +402,8 @@
ftDump->mFramesReady = framesReady;
}
- int64_t pts;
- if (mOutputSink == NULL || (OK != mOutputSink->getNextWriteTimestamp(&pts))) {
- pts = AudioBufferProvider::kInvalidPTS;
- }
-
// process() is CPU-bound
- mMixer->process(pts);
+ mMixer->process();
mMixerBufferState = MIXED;
} else if (mMixerBufferState == MIXED) {
mMixerBufferState = UNDEFINED;
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 7bc6f0c..1450ca1 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -83,8 +83,7 @@
Track& operator = (const Track&);
// AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
- int64_t pts = kInvalidPTS);
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
// releaseBuffer() not overridden
// ExtendedAudioBufferProvider interface
@@ -158,92 +157,6 @@
}; // end of Track
-class TimedTrack : public Track {
- public:
- static sp<TimedTrack> create(PlaybackThread *thread,
- const sp<Client>& client,
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId,
- int uid);
- virtual ~TimedTrack();
-
- class TimedBuffer {
- public:
- TimedBuffer();
- TimedBuffer(const sp<IMemory>& buffer, int64_t pts);
- const sp<IMemory>& buffer() const { return mBuffer; }
- int64_t pts() const { return mPTS; }
- uint32_t position() const { return mPosition; }
- void setPosition(uint32_t pos) { mPosition = pos; }
- private:
- sp<IMemory> mBuffer;
- int64_t mPTS;
- uint32_t mPosition;
- };
-
- // Mixer facing methods.
- virtual size_t framesReady() const;
-
- // AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
- int64_t pts);
- virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
-
- // Client/App facing methods.
- status_t allocateTimedBuffer(size_t size,
- sp<IMemory>* buffer);
- status_t queueTimedBuffer(const sp<IMemory>& buffer,
- int64_t pts);
- status_t setMediaTimeTransform(const LinearTransform& xform,
- TimedAudioTrack::TargetTimeline target);
-
- private:
- TimedTrack(PlaybackThread *thread,
- const sp<Client>& client,
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId,
- int uid);
-
- void timedYieldSamples_l(AudioBufferProvider::Buffer* buffer);
- void timedYieldSilence_l(uint32_t numFrames,
- AudioBufferProvider::Buffer* buffer);
- void trimTimedBufferQueue_l();
- void trimTimedBufferQueueHead_l(const char* logTag);
- void updateFramesPendingAfterTrim_l(const TimedBuffer& buf,
- const char* logTag);
-
- uint64_t mLocalTimeFreq;
- LinearTransform mLocalTimeToSampleTransform;
- LinearTransform mMediaTimeToSampleTransform;
- sp<MemoryDealer> mTimedMemoryDealer;
-
- Vector<TimedBuffer> mTimedBufferQueue;
- bool mQueueHeadInFlight;
- bool mTrimQueueHeadOnRelease;
- uint32_t mFramesPendingInQueue;
-
- uint8_t* mTimedSilenceBuffer;
- uint32_t mTimedSilenceBufferSize;
- mutable Mutex mTimedBufferQueueLock;
- bool mTimedAudioOutputOnTime;
- CCHelper mCCHelper;
-
- Mutex mMediaTimeTransformLock;
- LinearTransform mMediaTimeTransform;
- bool mMediaTimeTransformValid;
- TimedAudioTrack::TargetTimeline mMediaTimeTransformTarget;
-};
-
// playback track, used by DuplicatingThread
class OutputTrack : public Track {
@@ -303,8 +216,7 @@
virtual ~PatchTrack();
// AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
- int64_t pts);
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
// PatchProxyBufferProvider interface
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 25d6d95..e2014b7 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -61,8 +61,7 @@
RecordTrack& operator = (const RecordTrack&);
// AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
- int64_t pts = kInvalidPTS);
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
// releaseBuffer() not overridden
bool mOverflow; // overflow on most recent attempt to fill client buffer
@@ -99,8 +98,7 @@
virtual ~PatchRecord();
// AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer,
- int64_t pts);
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
// PatchProxyBufferProvider interface
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 0458554..8ae798c 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -53,9 +53,6 @@
#include <powermanager/PowerManager.h>
-#include <common_time/cc_helper.h>
-#include <common_time/local_clock.h>
-
#include "AudioFlinger.h"
#include "AudioMixer.h"
#include "BufferProviders.h"
@@ -1666,13 +1663,9 @@
sp<Track> track;
status_t lStatus;
- bool isTimed = (*flags & IAudioFlinger::TRACK_TIMED) != 0;
-
// client expresses a preference for FAST, but we get the final say
if (*flags & IAudioFlinger::TRACK_FAST) {
if (
- // not timed
- (!isTimed) &&
// either of these use cases:
(
// use case 1: shared buffer with any frame count
@@ -1716,11 +1709,11 @@
ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
frameCount, mFrameCount);
} else {
- ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: isTimed=%d sharedBuffer=%p frameCount=%d "
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%d "
"mFrameCount=%d format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
"sampleRate=%u mSampleRate=%u "
"hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
- isTimed, sharedBuffer.get(), frameCount, mFrameCount, format, mFormat,
+ sharedBuffer.get(), frameCount, mFrameCount, format, mFormat,
audio_is_linear_pcm(format),
channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
*flags &= ~IAudioFlinger::TRACK_FAST;
@@ -1819,17 +1812,10 @@
}
}
- if (!isTimed) {
- track = new Track(this, client, streamType, sampleRate, format,
- channelMask, frameCount, NULL, sharedBuffer,
- sessionId, uid, *flags, TrackBase::TYPE_DEFAULT);
- } else {
- track = TimedTrack::create(this, client, streamType, sampleRate, format,
- channelMask, frameCount, sharedBuffer, sessionId, uid);
- }
+ track = new Track(this, client, streamType, sampleRate, format,
+ channelMask, frameCount, NULL, sharedBuffer,
+ sessionId, uid, *flags, TrackBase::TYPE_DEFAULT);
- // new Track always returns non-NULL,
- // but TimedTrack::create() is a factory that could fail by returning NULL
lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
if (lStatus != NO_ERROR) {
ALOGE("createTrack_l() initCheck failed %d; no control block?", lStatus);
@@ -3640,22 +3626,8 @@
void AudioFlinger::MixerThread::threadLoop_mix()
{
- // obtain the presentation timestamp of the next output buffer
- int64_t pts;
- status_t status = INVALID_OPERATION;
-
- if (mNormalSink != 0) {
- status = mNormalSink->getNextWriteTimestamp(&pts);
- } else {
- status = mOutputSink->getNextWriteTimestamp(&pts);
- }
-
- if (status != NO_ERROR) {
- pts = AudioBufferProvider::kInvalidPTS;
- }
-
// mix buffers...
- mAudioMixer->process(pts);
+ mAudioMixer->process();
mCurrentWriteLength = mSinkBufferSize;
// increase sleep time progressively when application underrun condition clears.
// Only increase sleep time if the mixer is ready for two consecutive times to avoid
@@ -5369,7 +5341,7 @@
{
// mix buffers...
if (outputsReady(outputTracks)) {
- mAudioMixer->process(AudioBufferProvider::kInvalidPTS);
+ mAudioMixer->process();
} else {
if (mMixerBufferValid) {
memset(mMixerBuffer, 0, mMixerBufferSize);
@@ -5909,7 +5881,7 @@
if (mPipeSource != 0) {
size_t framesToRead = mBufferSize / mFrameSize;
framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
- framesToRead, AudioBufferProvider::kInvalidPTS);
+ framesToRead);
if (framesRead == 0) {
// since pipe is non-blocking, simulate blocking input
sleepUs = (framesToRead * 1000000LL) / mSampleRate;
@@ -6531,7 +6503,7 @@
// AudioBufferProvider interface
status_t AudioFlinger::RecordThread::ResamplerBufferProvider::getNextBuffer(
- AudioBufferProvider::Buffer* buffer, int64_t pts __unused)
+ AudioBufferProvider::Buffer* buffer)
{
sp<ThreadBase> threadBase = mRecordTrack->mThread.promote();
if (threadBase == 0) {
@@ -6632,7 +6604,7 @@
AudioBufferProvider::Buffer buffer;
for (size_t i = frames; i > 0; ) {
buffer.frameCount = i;
- status_t status = provider->getNextBuffer(&buffer, 0);
+ status_t status = provider->getNextBuffer(&buffer);
if (status != OK || buffer.frameCount == 0) {
frames -= i; // cannot fill request.
break;
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 8eed50d..fa047fa 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1109,7 +1109,7 @@
virtual void sync(size_t *framesAvailable = NULL, bool *hasOverrun = NULL);
// AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts);
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
private:
RecordTrack * const mRecordTrack;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 98bf96e..26067e3 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -50,7 +50,6 @@
enum track_type {
TYPE_DEFAULT,
- TYPE_TIMED,
TYPE_OUTPUT,
TYPE_PATCH,
};
@@ -83,7 +82,6 @@
sp<IMemory> getBuffers() const { return mBufferMemory; }
void* buffer() const { return mBuffer; }
bool isFastTrack() const { return (mFlags & IAudioFlinger::TRACK_FAST) != 0; }
- bool isTimedTrack() const { return (mType == TYPE_TIMED); }
bool isOutputTrack() const { return (mType == TYPE_OUTPUT); }
bool isPatchTrack() const { return (mType == TYPE_PATCH); }
bool isExternalTrack() const { return !isOutputTrack() && !isPatchTrack(); }
@@ -93,7 +91,7 @@
TrackBase& operator = (const TrackBase&);
// AudioBufferProvider interface
- virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts) = 0;
+ virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer) = 0;
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
// ExtendedAudioBufferProvider interface is only needed for Track,
@@ -132,7 +130,7 @@
}
bool isOut() const { return mIsOut; }
- // true for Track and TimedTrack, false for RecordTrack,
+ // true for Track, false for RecordTrack,
// this could be a track type if needed later
const wp<ThreadBase> mThread;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index c753afd..5830f75 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -27,9 +27,6 @@
#include <private/media/AudioTrackShared.h>
-#include <common_time/cc_helper.h>
-#include <common_time/local_clock.h>
-
#include "AudioMixer.h"
#include "AudioFlinger.h"
#include "ServiceUtilities.h"
@@ -242,7 +239,7 @@
// AudioBufferProvider interface
// getNextBuffer() = 0;
-// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
+// This implementation of releaseBuffer() is used by Track and RecordTrack
void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
{
#ifdef TEE_SINK
@@ -308,43 +305,6 @@
return mTrack->attachAuxEffect(EffectId);
}
-status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
- sp<IMemory>* buffer) {
- if (!mTrack->isTimedTrack())
- return INVALID_OPERATION;
-
- PlaybackThread::TimedTrack* tt =
- reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
- return tt->allocateTimedBuffer(size, buffer);
-}
-
-status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
- int64_t pts) {
- if (!mTrack->isTimedTrack())
- return INVALID_OPERATION;
-
- if (buffer == 0 || buffer->pointer() == NULL) {
- ALOGE("queueTimedBuffer() buffer is 0 or has NULL pointer()");
- return BAD_VALUE;
- }
-
- PlaybackThread::TimedTrack* tt =
- reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
- return tt->queueTimedBuffer(buffer, pts);
-}
-
-status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
- const LinearTransform& xform, int target) {
-
- if (!mTrack->isTimedTrack())
- return INVALID_OPERATION;
-
- PlaybackThread::TimedTrack* tt =
- reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
- return tt->setMediaTimeTransform(
- xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
-}
-
status_t AudioFlinger::TrackHandle::setParameters(const String8& keyValuePairs) {
return mTrack->setParameters(keyValuePairs);
}
@@ -590,7 +550,7 @@
// AudioBufferProvider interface
status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
- AudioBufferProvider::Buffer* buffer, int64_t pts __unused)
+ AudioBufferProvider::Buffer* buffer)
{
ServerProxy::Buffer buf;
size_t desiredFrames = buffer->frameCount;
@@ -1119,526 +1079,6 @@
}
// ----------------------------------------------------------------------------
-sp<AudioFlinger::PlaybackThread::TimedTrack>
-AudioFlinger::PlaybackThread::TimedTrack::create(
- PlaybackThread *thread,
- const sp<Client>& client,
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId,
- int uid)
-{
- if (!client->reserveTimedTrack())
- return 0;
-
- return new TimedTrack(
- thread, client, streamType, sampleRate, format, channelMask, frameCount,
- sharedBuffer, sessionId, uid);
-}
-
-AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
- PlaybackThread *thread,
- const sp<Client>& client,
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t frameCount,
- const sp<IMemory>& sharedBuffer,
- int sessionId,
- int uid)
- : Track(thread, client, streamType, sampleRate, format, channelMask,
- frameCount, (sharedBuffer != 0) ? sharedBuffer->pointer() : NULL, sharedBuffer,
- sessionId, uid, IAudioFlinger::TRACK_TIMED, TYPE_TIMED),
- mQueueHeadInFlight(false),
- mTrimQueueHeadOnRelease(false),
- mFramesPendingInQueue(0),
- mTimedSilenceBuffer(NULL),
- mTimedSilenceBufferSize(0),
- mTimedAudioOutputOnTime(false),
- mMediaTimeTransformValid(false)
-{
- LocalClock lc;
- mLocalTimeFreq = lc.getLocalFreq();
-
- mLocalTimeToSampleTransform.a_zero = 0;
- mLocalTimeToSampleTransform.b_zero = 0;
- mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
- mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
- LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
- &mLocalTimeToSampleTransform.a_to_b_denom);
-
- mMediaTimeToSampleTransform.a_zero = 0;
- mMediaTimeToSampleTransform.b_zero = 0;
- mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
- mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
- LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
- &mMediaTimeToSampleTransform.a_to_b_denom);
-}
-
-AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
- mClient->releaseTimedTrack();
- delete [] mTimedSilenceBuffer;
-}
-
-status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
- size_t size, sp<IMemory>* buffer) {
-
- Mutex::Autolock _l(mTimedBufferQueueLock);
-
- trimTimedBufferQueue_l();
-
- // lazily initialize the shared memory heap for timed buffers
- if (mTimedMemoryDealer == NULL) {
- const int kTimedBufferHeapSize = 512 << 10;
-
- mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
- "AudioFlingerTimed");
- if (mTimedMemoryDealer == NULL) {
- return NO_MEMORY;
- }
- }
-
- sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
- if (newBuffer == 0 || newBuffer->pointer() == NULL) {
- return NO_MEMORY;
- }
-
- *buffer = newBuffer;
- return NO_ERROR;
-}
-
-// caller must hold mTimedBufferQueueLock
-void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
- int64_t mediaTimeNow;
- {
- Mutex::Autolock mttLock(mMediaTimeTransformLock);
- if (!mMediaTimeTransformValid)
- return;
-
- int64_t targetTimeNow;
- status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
- ? mCCHelper.getCommonTime(&targetTimeNow)
- : mCCHelper.getLocalTime(&targetTimeNow);
-
- if (OK != res)
- return;
-
- if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
- &mediaTimeNow)) {
- return;
- }
- }
-
- size_t trimEnd;
- for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
- int64_t bufEnd;
-
- if ((trimEnd + 1) < mTimedBufferQueue.size()) {
- // We have a next buffer. Just use its PTS as the PTS of the frame
- // following the last frame in this buffer. If the stream is sparse
- // (ie, there are deliberate gaps left in the stream which should be
- // filled with silence by the TimedAudioTrack), then this can result
- // in one extra buffer being left un-trimmed when it could have
- // been. In general, this is not typical, and we would rather
- // optimize away the TS calculation below for the more common case
- // where PTSes are contiguous.
- bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
- } else {
- // We have no next buffer. Compute the PTS of the frame following
- // the last frame in this buffer by computing the duration of
- // this frame in media time units and adding it to the PTS of the
- // buffer.
- int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
- / mFrameSize;
-
- if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
- &bufEnd)) {
- ALOGE("Failed to convert frame count of %lld to media time"
- " duration" " (scale factor %d/%u) in %s",
- frameCount,
- mMediaTimeToSampleTransform.a_to_b_numer,
- mMediaTimeToSampleTransform.a_to_b_denom,
- __PRETTY_FUNCTION__);
- break;
- }
- bufEnd += mTimedBufferQueue[trimEnd].pts();
- }
-
- if (bufEnd > mediaTimeNow)
- break;
-
- // Is the buffer we want to use in the middle of a mix operation right
- // now? If so, don't actually trim it. Just wait for the releaseBuffer
- // from the mixer which should be coming back shortly.
- if (!trimEnd && mQueueHeadInFlight) {
- mTrimQueueHeadOnRelease = true;
- }
- }
-
- size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
- if (trimStart < trimEnd) {
- // Update the bookkeeping for framesReady()
- for (size_t i = trimStart; i < trimEnd; ++i) {
- updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim");
- }
-
- // Now actually remove the buffers from the queue.
- mTimedBufferQueue.removeItemsAt(trimStart, trimEnd);
- }
-}
-
-void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l(
- const char* logTag) {
- ALOG_ASSERT(mTimedBufferQueue.size() > 0,
- "%s called (reason \"%s\"), but timed buffer queue has no"
- " elements to trim.", __FUNCTION__, logTag);
-
- updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag);
- mTimedBufferQueue.removeAt(0);
-}
-
-void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
- const TimedBuffer& buf,
- const char* logTag __unused) {
- uint32_t bufBytes = buf.buffer()->size();
- uint32_t consumedAlready = buf.position();
-
- ALOG_ASSERT(consumedAlready <= bufBytes,
- "Bad bookkeeping while updating frames pending. Timed buffer is"
- " only %u bytes long, but claims to have consumed %u"
- " bytes. (update reason: \"%s\")",
- bufBytes, consumedAlready, logTag);
-
- uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize;
- ALOG_ASSERT(mFramesPendingInQueue >= bufFrames,
- "Bad bookkeeping while updating frames pending. Should have at"
- " least %u queued frames, but we think we have only %u. (update"
- " reason: \"%s\")",
- bufFrames, mFramesPendingInQueue, logTag);
-
- mFramesPendingInQueue -= bufFrames;
-}
-
-status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
- const sp<IMemory>& buffer, int64_t pts) {
-
- {
- Mutex::Autolock mttLock(mMediaTimeTransformLock);
- if (!mMediaTimeTransformValid)
- return INVALID_OPERATION;
- }
-
- Mutex::Autolock _l(mTimedBufferQueueLock);
-
- uint32_t bufFrames = buffer->size() / mFrameSize;
- mFramesPendingInQueue += bufFrames;
- mTimedBufferQueue.add(TimedBuffer(buffer, pts));
-
- return NO_ERROR;
-}
-
-status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
- const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
-
- ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
- xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
- target);
-
- if (!(target == TimedAudioTrack::LOCAL_TIME ||
- target == TimedAudioTrack::COMMON_TIME)) {
- return BAD_VALUE;
- }
-
- Mutex::Autolock lock(mMediaTimeTransformLock);
- mMediaTimeTransform = xform;
- mMediaTimeTransformTarget = target;
- mMediaTimeTransformValid = true;
-
- return NO_ERROR;
-}
-
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
-// implementation of getNextBuffer for tracks whose buffers have timestamps
-status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
- AudioBufferProvider::Buffer* buffer, int64_t pts)
-{
- if (pts == AudioBufferProvider::kInvalidPTS) {
- buffer->raw = NULL;
- buffer->frameCount = 0;
- mTimedAudioOutputOnTime = false;
- return INVALID_OPERATION;
- }
-
- Mutex::Autolock _l(mTimedBufferQueueLock);
-
- ALOG_ASSERT(!mQueueHeadInFlight,
- "getNextBuffer called without releaseBuffer!");
-
- while (true) {
-
- // if we have no timed buffers, then fail
- if (mTimedBufferQueue.isEmpty()) {
- buffer->raw = NULL;
- buffer->frameCount = 0;
- return NOT_ENOUGH_DATA;
- }
-
- TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
-
- // calculate the PTS of the head of the timed buffer queue expressed in
- // local time
- int64_t headLocalPTS;
- {
- Mutex::Autolock mttLock(mMediaTimeTransformLock);
-
- ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");
-
- if (mMediaTimeTransform.a_to_b_denom == 0) {
- // the transform represents a pause, so yield silence
- timedYieldSilence_l(buffer->frameCount, buffer);
- return NO_ERROR;
- }
-
- int64_t transformedPTS;
- if (!mMediaTimeTransform.doForwardTransform(head.pts(),
- &transformedPTS)) {
- // the transform failed. this shouldn't happen, but if it does
- // then just drop this buffer
- ALOGW("timedGetNextBuffer transform failed");
- buffer->raw = NULL;
- buffer->frameCount = 0;
- trimTimedBufferQueueHead_l("getNextBuffer; no transform");
- return NO_ERROR;
- }
-
- if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
- if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
- &headLocalPTS)) {
- buffer->raw = NULL;
- buffer->frameCount = 0;
- return INVALID_OPERATION;
- }
- } else {
- headLocalPTS = transformedPTS;
- }
- }
-
- uint32_t sr = sampleRate();
-
- // adjust the head buffer's PTS to reflect the portion of the head buffer
- // that has already been consumed
- int64_t effectivePTS = headLocalPTS +
- ((head.position() / mFrameSize) * mLocalTimeFreq / sr);
-
- // Calculate the delta in samples between the head of the input buffer
- // queue and the start of the next output buffer that will be written.
- // If the transformation fails because of over or underflow, it means
- // that the sample's position in the output stream is so far out of
- // whack that it should just be dropped.
- int64_t sampleDelta;
- if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
- ALOGV("*** head buffer is too far from PTS: dropped buffer");
- trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
- " mix");
- continue;
- }
- if (!mLocalTimeToSampleTransform.doForwardTransform(
- (effectivePTS - pts) << 32, &sampleDelta)) {
- ALOGV("*** too late during sample rate transform: dropped buffer");
- trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
- continue;
- }
-
- ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
- " sampleDelta=[%d.%08x]",
- head.pts(), head.position(), pts,
- static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
- + (sampleDelta >> 32)),
- static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
-
- // if the delta between the ideal placement for the next input sample and
- // the current output position is within this threshold, then we will
- // concatenate the next input samples to the previous output
- const int64_t kSampleContinuityThreshold =
- (static_cast<int64_t>(sr) << 32) / 250;
-
- // if this is the first buffer of audio that we're emitting from this track
- // then it should be almost exactly on time.
- const int64_t kSampleStartupThreshold = 1LL << 32;
-
- if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
- (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
- // the next input is close enough to being on time, so concatenate it
- // with the last output
- timedYieldSamples_l(buffer);
-
- ALOGVV("*** on time: head.pos=%d frameCount=%u",
- head.position(), buffer->frameCount);
- return NO_ERROR;
- }
-
- // Looks like our output is not on time. Reset our on-time status.
- // Next time we mix samples from our input queue, they should be within
- // the StartupThreshold.
- mTimedAudioOutputOnTime = false;
- if (sampleDelta > 0) {
- // the gap between the current output position and the proper start of
- // the next input sample is too big, so fill it with silence
- uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
-
- timedYieldSilence_l(framesUntilNextInput, buffer);
- ALOGV("*** silence: frameCount=%u", buffer->frameCount);
- return NO_ERROR;
- } else {
- // the next input sample is late
- uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
- size_t onTimeSamplePosition =
- head.position() + lateFrames * mFrameSize;
-
- if (onTimeSamplePosition > head.buffer()->size()) {
- // all the remaining samples in the head are too late, so
- // drop it and move on
- ALOGV("*** too late: dropped buffer");
- trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
- continue;
- } else {
- // skip over the late samples
- head.setPosition(onTimeSamplePosition);
-
- // yield the available samples
- timedYieldSamples_l(buffer);
-
- ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
- return NO_ERROR;
- }
- }
- }
-}
-
-// Yield samples from the timed buffer queue head up to the given output
-// buffer's capacity.
-//
-// Caller must hold mTimedBufferQueueLock
-void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
- AudioBufferProvider::Buffer* buffer) {
-
- const TimedBuffer& head = mTimedBufferQueue[0];
-
- buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
- head.position());
-
- uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
- mFrameSize);
- size_t framesRequested = buffer->frameCount;
- buffer->frameCount = min(framesLeftInHead, framesRequested);
-
- mQueueHeadInFlight = true;
- mTimedAudioOutputOnTime = true;
-}
-
-// Yield samples of silence up to the given output buffer's capacity
-//
-// Caller must hold mTimedBufferQueueLock
-void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
- uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
-
- // lazily allocate a buffer filled with silence
- if (mTimedSilenceBufferSize < numFrames * mFrameSize) {
- delete [] mTimedSilenceBuffer;
- mTimedSilenceBufferSize = numFrames * mFrameSize;
- mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
- memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
- }
-
- buffer->raw = mTimedSilenceBuffer;
- size_t framesRequested = buffer->frameCount;
- buffer->frameCount = min(numFrames, framesRequested);
-
- mTimedAudioOutputOnTime = false;
-}
-
-// AudioBufferProvider interface
-void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
- AudioBufferProvider::Buffer* buffer) {
-
- Mutex::Autolock _l(mTimedBufferQueueLock);
-
- // If the buffer which was just released is part of the buffer at the head
- // of the queue, be sure to update the amount of the buffer which has been
- // consumed. If the buffer being returned is not part of the head of the
- // queue, it's either because the buffer is part of the silence buffer, or
- // because the head of the timed queue was trimmed after the mixer called
- // getNextBuffer but before the mixer called releaseBuffer.
- if (buffer->raw == mTimedSilenceBuffer) {
- ALOG_ASSERT(!mQueueHeadInFlight,
- "Queue head in flight during release of silence buffer!");
- goto done;
- }
-
- ALOG_ASSERT(mQueueHeadInFlight,
- "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
- " head in flight.");
-
- if (mTimedBufferQueue.size()) {
- TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
-
- void* start = head.buffer()->pointer();
- void* end = reinterpret_cast<void*>(
- reinterpret_cast<uint8_t*>(head.buffer()->pointer())
- + head.buffer()->size());
-
- ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
- "released buffer not within the head of the timed buffer"
- " queue; qHead = [%p, %p], released buffer = %p",
- start, end, buffer->raw);
-
- head.setPosition(head.position() +
- (buffer->frameCount * mFrameSize));
- mQueueHeadInFlight = false;
-
- ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount,
- "Bad bookkeeping during releaseBuffer! Should have at"
- " least %u queued frames, but we think we have only %u",
- buffer->frameCount, mFramesPendingInQueue);
-
- mFramesPendingInQueue -= buffer->frameCount;
-
- if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
- || mTrimQueueHeadOnRelease) {
- trimTimedBufferQueueHead_l("releaseBuffer");
- mTrimQueueHeadOnRelease = false;
- }
- } else {
- LOG_ALWAYS_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
- " buffers in the timed buffer queue");
- }
-
-done:
- buffer->raw = 0;
- buffer->frameCount = 0;
-}
-
-size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
- Mutex::Autolock _l(mTimedBufferQueueLock);
- return mFramesPendingInQueue;
-}
-
-AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
- : mPTS(0), mPosition(0) {}
-
-AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
- const sp<IMemory>& buffer, int64_t pts)
- : mBuffer(buffer), mPTS(pts), mPosition(0) {}
-
-
-// ----------------------------------------------------------------------------
-
AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
PlaybackThread *playbackThread,
DuplicatingThread *sourceThread,
@@ -1855,7 +1295,7 @@
// AudioBufferProvider interface
status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer(
- AudioBufferProvider::Buffer* buffer, int64_t pts)
+ AudioBufferProvider::Buffer* buffer)
{
ALOG_ASSERT(mPeerProxy != 0, "PatchTrack::getNextBuffer() called without peer proxy");
Proxy::Buffer buf;
@@ -1866,7 +1306,7 @@
if (buf.mFrameCount == 0) {
return WOULD_BLOCK;
}
- status = Track::getNextBuffer(buffer, pts);
+ status = Track::getNextBuffer(buffer);
return status;
}
@@ -2005,8 +1445,7 @@
}
// AudioBufferProvider interface
-status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
- int64_t pts __unused)
+status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
{
ServerProxy::Buffer buf;
buf.mFrameCount = buffer->frameCount;
@@ -2146,7 +1585,7 @@
// AudioBufferProvider interface
status_t AudioFlinger::RecordThread::PatchRecord::getNextBuffer(
- AudioBufferProvider::Buffer* buffer, int64_t pts)
+ AudioBufferProvider::Buffer* buffer)
{
ALOG_ASSERT(mPeerProxy != 0, "PatchRecord::getNextBuffer() called without peer proxy");
Proxy::Buffer buf;
@@ -2158,7 +1597,7 @@
if (buf.mFrameCount == 0) {
return WOULD_BLOCK;
}
- status = RecordTrack::getNextBuffer(buffer, pts);
+ status = RecordTrack::getNextBuffer(buffer);
return status;
}
diff --git a/services/audioflinger/test-resample.cpp b/services/audioflinger/test-resample.cpp
index 7893778..bae3c5b 100644
--- a/services/audioflinger/test-resample.cpp
+++ b/services/audioflinger/test-resample.cpp
@@ -272,9 +272,7 @@
mFrameSize(frameSize),
mNextFrame(0), mUnrel(0), mPvalues(Pvalues), mNextPidx(0) {
}
- virtual status_t getNextBuffer(Buffer* buffer,
- int64_t pts = kInvalidPTS) {
- (void)pts; // suppress warning
+ virtual status_t getNextBuffer(Buffer* buffer) {
size_t requestedFrames = buffer->frameCount;
if (requestedFrames > mNumFrames - mNextFrame) {
buffer->frameCount = mNumFrames - mNextFrame;
diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk
index e152468..6182de0 100644
--- a/services/audioflinger/tests/Android.mk
+++ b/services/audioflinger/tests/Android.mk
@@ -47,7 +47,6 @@
LOCAL_SHARED_LIBRARIES := \
libeffects \
libnbaio \
- libcommon_time_client \
libaudioresampler \
libaudioutils \
libdl \
diff --git a/services/audioflinger/tests/test-mixer.cpp b/services/audioflinger/tests/test-mixer.cpp
index 8da6245..65e22da 100644
--- a/services/audioflinger/tests/test-mixer.cpp
+++ b/services/audioflinger/tests/test-mixer.cpp
@@ -307,7 +307,7 @@
(char *) auxAddr + i * auxFrameSize);
}
}
- mixer->process(AudioBufferProvider::kInvalidPTS);
+ mixer->process();
}
outputFrames = i; // reset output frames to the data actually produced.
diff --git a/services/audioflinger/tests/test_utils.h b/services/audioflinger/tests/test_utils.h
index 3d51cdc..283c768 100644
--- a/services/audioflinger/tests/test_utils.h
+++ b/services/audioflinger/tests/test_utils.h
@@ -112,7 +112,7 @@
mNextIdx = 0;
}
- virtual android::status_t getNextBuffer(Buffer* buffer, int64_t pts __unused = kInvalidPTS)
+ virtual android::status_t getNextBuffer(Buffer* buffer)
{
size_t requestedFrames = buffer->frameCount;
if (requestedFrames > mNumFrames - mNextFrame) {