Merge "AudioTrack: support ENCODING_IEC61937" into nyc-dev
diff --git a/include/media/AudioPolicy.h b/include/media/AudioPolicy.h
index ff07b08..a171493 100644
--- a/include/media/AudioPolicy.h
+++ b/include/media/AudioPolicy.h
@@ -28,11 +28,13 @@
// Keep in sync with AudioMix.java, AudioMixingRule.java, AudioPolicyConfig.java
#define RULE_EXCLUSION_MASK 0x8000
-#define RULE_MATCH_ATTRIBUTE_USAGE 0x1
+#define RULE_MATCH_ATTRIBUTE_USAGE 0x1
#define RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET (0x1 << 1)
-#define RULE_EXCLUDE_ATTRIBUTE_USAGE (RULE_EXCLUSION_MASK|RULE_MATCH_ATTRIBUTE_USAGE)
+#define RULE_MATCH_UID (0x1 << 2)
+#define RULE_EXCLUDE_ATTRIBUTE_USAGE (RULE_EXCLUSION_MASK|RULE_MATCH_ATTRIBUTE_USAGE)
#define RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET \
- (RULE_EXCLUSION_MASK|RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET)
+ (RULE_EXCLUSION_MASK|RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET)
+#define RULE_EXCLUDE_UID (RULE_EXCLUSION_MASK|RULE_MATCH_UID)
#define MIX_TYPE_INVALID -1
#define MIX_TYPE_PLAYERS 0
@@ -53,10 +55,10 @@
#define MAX_MIXES_PER_POLICY 10
#define MAX_CRITERIA_PER_MIX 20
-class AttributeMatchCriterion {
+class AudioMixMatchCriterion {
public:
- AttributeMatchCriterion() {}
- AttributeMatchCriterion(audio_usage_t usage, audio_source_t source, uint32_t rule);
+ AudioMixMatchCriterion() {}
+ AudioMixMatchCriterion(audio_usage_t usage, audio_source_t source, uint32_t rule);
status_t readFromParcel(Parcel *parcel);
status_t writeToParcel(Parcel *parcel) const;
@@ -64,7 +66,8 @@
union {
audio_usage_t mUsage;
audio_source_t mSource;
- } mAttr;
+ uid_t mUid;
+ } mValue;
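+    // which member of mValue is valid is determined by mRule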
uint32_t mRule;
};
@@ -75,7 +78,7 @@
static const uint32_t kCbFlagNotifyActivity = 0x1;
AudioMix() {}
- AudioMix(Vector<AttributeMatchCriterion> criteria, uint32_t mixType, audio_config_t format,
+ AudioMix(Vector<AudioMixMatchCriterion> criteria, uint32_t mixType, audio_config_t format,
uint32_t routeFlags, String8 registrationId, uint32_t flags) :
mCriteria(criteria), mMixType(mixType), mFormat(format),
mRouteFlags(routeFlags), mRegistrationId(registrationId), mCbFlags(flags){}
@@ -83,7 +86,7 @@
status_t readFromParcel(Parcel *parcel);
status_t writeToParcel(Parcel *parcel) const;
- Vector<AttributeMatchCriterion> mCriteria;
+ Vector<AudioMixMatchCriterion> mCriteria;
uint32_t mMixType;
audio_config_t mFormat;
uint32_t mRouteFlags;
diff --git a/include/media/MediaCodecInfo.h b/include/media/MediaCodecInfo.h
index 26dc1ce..48d0407 100644
--- a/include/media/MediaCodecInfo.h
+++ b/include/media/MediaCodecInfo.h
@@ -126,9 +126,8 @@
void addQuirk(const char *name);
status_t addMime(const char *mime);
status_t updateMime(const char *mime);
-
- // after this call |caps| will be owned by MediaCodecInfo, which may modify it
- status_t setCapabilitiesFromCodec(const sp<Capabilities> &caps);
+
+ status_t initializeCapabilities(const sp<Capabilities> &caps);
void addDetail(const AString &key, const AString &value);
void addFeature(const AString &key, int32_t value);
void addFeature(const AString &key, const char *value);
diff --git a/include/media/stagefright/SimpleDecodingSource.h b/include/media/stagefright/SimpleDecodingSource.h
index 6bd82c4..534097b 100644
--- a/include/media/stagefright/SimpleDecodingSource.h
+++ b/include/media/stagefright/SimpleDecodingSource.h
@@ -43,9 +43,11 @@
// case the source will return empty buffers.
// This source cannot be restarted (hence the name "Simple"), all reads are blocking, and
// it does not support secure input or pausing.
+    // If |desiredCodec| is given, only that specific codec is attempted.
static sp<SimpleDecodingSource> Create(
const sp<IMediaSource> &source, uint32_t flags = 0,
- const sp<ANativeWindow> &nativeWindow = NULL);
+ const sp<ANativeWindow> &nativeWindow = NULL,
+ const char *desiredCodec = NULL);
virtual ~SimpleDecodingSource();
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 2270c85..cae5560 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -119,8 +119,11 @@
typedef SingleStateQueue<AudioPlaybackRate> PlaybackRateQueue;
+
typedef SingleStateQueue<ExtendedTimestamp> ExtendedTimestampQueue;
+typedef SingleStateQueue<AudioTimestamp> TimestampQueue;
+
// ----------------------------------------------------------------------------
// Important: do not add any virtual methods, including ~
@@ -175,7 +178,9 @@
uint16_t mPad2; // unused
// server write-only, client read
- ExtendedTimestampQueue::Shared mExtendedTimestampQueue;
+ ExtendedTimestampQueue::Shared mExtendedTimestampQueue; // capture
+ TimestampQueue::Shared mTimestampQueue; // playback
+
public:
volatile int32_t mFlags; // combinations of CBLK_*
@@ -333,7 +338,10 @@
size_t frameSize, bool clientInServer = false)
: ClientProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/,
clientInServer),
- mPlaybackRateMutator(&cblk->mPlaybackRateQueue) { }
+ mPlaybackRateMutator(&cblk->mPlaybackRateQueue),
+ mTimestampObserver(&cblk->mTimestampQueue) {
+ }
+
virtual ~AudioTrackClientProxy() { }
// No barriers on the following operations, so the ordering of loads/stores
@@ -357,6 +365,20 @@
mPlaybackRateMutator.push(playbackRate);
}
+ status_t getTimestamp(AudioTimestamp *timestamp) {
+ if (timestamp == nullptr) {
+ return BAD_VALUE;
+ }
+ (void) mTimestampObserver.poll(mTimestamp);
+        // If no data has been pushed by the server, mTimestamp retains the
+        // all-zero value set by its constructor.
+ if (mTimestamp.mTime.tv_sec == 0 && mTimestamp.mTime.tv_nsec == 0) {
+ return WOULD_BLOCK;
+ }
+ *timestamp = mTimestamp;
+ return OK;
+ }
+
virtual void flush();
virtual uint32_t getUnderrunFrames() const {
@@ -374,6 +396,8 @@
private:
PlaybackRateQueue::Mutator mPlaybackRateMutator;
+ TimestampQueue::Observer mTimestampObserver;
+ AudioTimestamp mTimestamp;
};
class StaticAudioTrackClientProxy : public AudioTrackClientProxy {
@@ -522,7 +546,8 @@
size_t frameSize, bool clientInServer = false, uint32_t sampleRate = 0)
: ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer),
mPlaybackRateObserver(&cblk->mPlaybackRateQueue),
- mUnderrunCount(0), mUnderrunning(false) {
+ mUnderrunCount(0), mUnderrunning(false),
+ mTimestampMutator(&cblk->mTimestampQueue) {
mCblk->mSampleRate = sampleRate;
mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
}
@@ -562,6 +587,11 @@
// Return the playback speed and pitch read atomically. Not multi-thread safe on server side.
AudioPlaybackRate getPlaybackRate();
+ // Expose timestamp to client proxy. Should only be called by a single thread.
+    void setTimestamp(const AudioTimestamp &timestamp) {
+ mTimestampMutator.push(timestamp);
+ }
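+    // (Consumer side: AudioTrackClientProxy::getTimestamp() above polls the
+    // latest value pushed here through the shared-memory single-state queue,
+    // with no blocking and no binder call.)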
+
private:
AudioPlaybackRate mPlaybackRate; // last observed playback rate
PlaybackRateQueue::Observer mPlaybackRateObserver;
@@ -569,6 +599,8 @@
// The server keeps a copy here where it is safe from the client.
uint32_t mUnderrunCount; // echoed to mCblk
bool mUnderrunning; // used to detect edge of underrun
+
+ TimestampQueue::Mutator mTimestampMutator;
};
class StaticAudioTrackServerProxy : public AudioTrackServerProxy {
diff --git a/media/libmedia/AudioPolicy.cpp b/media/libmedia/AudioPolicy.cpp
index 9d07011..ea22b6c 100644
--- a/media/libmedia/AudioPolicy.cpp
+++ b/media/libmedia/AudioPolicy.cpp
@@ -22,37 +22,37 @@
namespace android {
//
-// AttributeMatchCriterion implementation
+// AudioMixMatchCriterion implementation
//
-AttributeMatchCriterion::AttributeMatchCriterion(audio_usage_t usage,
+AudioMixMatchCriterion::AudioMixMatchCriterion(audio_usage_t usage,
audio_source_t source,
uint32_t rule)
: mRule(rule)
{
if (mRule == RULE_MATCH_ATTRIBUTE_USAGE ||
mRule == RULE_EXCLUDE_ATTRIBUTE_USAGE) {
- mAttr.mUsage = usage;
+ mValue.mUsage = usage;
} else {
- mAttr.mSource = source;
+ mValue.mSource = source;
}
}
-status_t AttributeMatchCriterion::readFromParcel(Parcel *parcel)
+status_t AudioMixMatchCriterion::readFromParcel(Parcel *parcel)
{
mRule = parcel->readInt32();
if (mRule == RULE_MATCH_ATTRIBUTE_USAGE ||
mRule == RULE_EXCLUDE_ATTRIBUTE_USAGE) {
- mAttr.mUsage = (audio_usage_t)parcel->readInt32();
+ mValue.mUsage = (audio_usage_t)parcel->readInt32();
} else {
- mAttr.mSource = (audio_source_t)parcel->readInt32();
+ mValue.mSource = (audio_source_t)parcel->readInt32();
}
return NO_ERROR;
}
-status_t AttributeMatchCriterion::writeToParcel(Parcel *parcel) const
+status_t AudioMixMatchCriterion::writeToParcel(Parcel *parcel) const
{
parcel->writeInt32(mRule);
- parcel->writeInt32(mAttr.mUsage);
+ parcel->writeInt32(mValue.mUsage);
return NO_ERROR;
}
@@ -74,7 +74,7 @@
size = MAX_CRITERIA_PER_MIX;
}
for (size_t i = 0; i < size; i++) {
- AttributeMatchCriterion criterion;
+ AudioMixMatchCriterion criterion;
if (criterion.readFromParcel(parcel) == NO_ERROR) {
mCriteria.add(criterion);
}
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index b2a5f14..bd229c8 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -2169,11 +2169,6 @@
// Set false here to cover all the error return cases.
mPreviousTimestampValid = false;
- // FIXME not implemented for fast tracks; should use proxy and SSQ
- if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
- return INVALID_OPERATION;
- }
-
switch (mState) {
case STATE_ACTIVE:
case STATE_PAUSED:
@@ -2203,7 +2198,10 @@
// The presented frame count must always lag behind the consumed frame count.
// To avoid a race, read the presented frames first. This ensures that presented <= consumed.
- status_t status = mAudioTrack->getTimestamp(timestamp);
+
+ // FastTrack timestamps are read through shared memory; otherwise use Binder.
+ status_t status = (mFlags & AUDIO_OUTPUT_FLAG_FAST) ?
+            mProxy->getTimestamp(&timestamp) : mAudioTrack->getTimestamp(timestamp);
if (status != NO_ERROR) {
ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
return status;
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index 8351af6..06abd8d 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -238,12 +238,15 @@
}
}
-status_t MediaCodecInfo::setCapabilitiesFromCodec(const sp<Capabilities> &caps) {
- if (mCurrentCaps != NULL) {
- // keep current capabilities map
- caps->mDetails = mCurrentCaps->mDetails;
- }
- mCurrentCaps = caps;
+status_t MediaCodecInfo::initializeCapabilities(const sp<Capabilities> &caps) {
+ // TRICKY: copy data to mCurrentCaps as it is a reference to
+    // an element of the capabilities map.
+ mCurrentCaps->mColorFormats.clear();
+ mCurrentCaps->mColorFormats.appendVector(caps->mColorFormats);
+ mCurrentCaps->mProfileLevels.clear();
+ mCurrentCaps->mProfileLevels.appendVector(caps->mProfileLevels);
+ mCurrentCaps->mFlags = caps->mFlags;
+ mCurrentCaps->mDetails = caps->mDetails;
return OK;
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index cd3ba53..a049a30 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -72,6 +72,8 @@
// Maximum allowed delay from AudioSink, 1.5 seconds.
static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;
+static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
+
// static
const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
AUDIO_CHANNEL_NONE,
@@ -116,6 +118,7 @@
mVideoRenderingStartGeneration(0),
mAudioRenderingStartGeneration(0),
mRenderingDataDelivered(false),
+ mNextAudioClockUpdateTimeUs(-1),
mLastAudioMediaTimeUs(-1),
mAudioOffloadPauseTimeoutGeneration(0),
mAudioTornDown(false),
@@ -1039,12 +1042,20 @@
}
setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
- AudioTimestamp ts;
- status_t res = mAudioSink->getTimestamp(ts);
+ // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
+ if (mNextAudioClockUpdateTimeUs == -1) {
+ AudioTimestamp ts;
+ if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
+ mNextAudioClockUpdateTimeUs = 0; // start our clock updates
+ }
+ }
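+    // Once updates have started, the anchor is refreshed at most once per
+    // kMinimumAudioClockUpdatePeriodUs (20 ms) below.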
int64_t nowUs = ALooper::GetNowUs();
- if (res == OK) {
- int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
- mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
+ if (mNextAudioClockUpdateTimeUs >= 0) {
+ if (nowUs >= mNextAudioClockUpdateTimeUs) {
+ int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
+ mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
+ mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
+ }
} else {
int64_t unused;
if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
@@ -1478,6 +1489,7 @@
}
mNumFramesWritten = 0;
}
+ mNextAudioClockUpdateTimeUs = -1;
} else {
flushQueue(&mVideoQueue);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 7825f12..1bc9c97 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -179,6 +179,7 @@
int32_t mAudioRenderingStartGeneration;
bool mRenderingDataDelivered;
+ int64_t mNextAudioClockUpdateTimeUs;
// the media timestamp of last audio sample right before EOS.
int64_t mLastAudioMediaTimeUs;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index d39fffa..aab4fea 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -6550,7 +6550,8 @@
mCodec->freeOutputBuffersNotOwnedByComponent();
mCodec->changeState(mCodec->mOutputPortSettingsChangedState);
- } else if (data2 == OMX_IndexConfigCommonOutputCrop) {
+ } else if (data2 == OMX_IndexConfigCommonOutputCrop
+ || data2 == OMX_IndexConfigAndroidIntraRefresh) {
mCodec->mSentFormat = false;
if (mCodec->mTunneled) {
@@ -7095,7 +7096,7 @@
// prefix "flexible" standard ones with the flexible equivalent
OMX_VIDEO_PARAM_PORTFORMATTYPE portFormat;
InitOMXParams(&portFormat);
- param.nPortIndex = isEncoder ? kPortIndexInput : kPortIndexOutput;
+ portFormat.nPortIndex = isEncoder ? kPortIndexInput : kPortIndexOutput;
Vector<uint32_t> supportedColors; // shadow copy to check for duplicates
for (OMX_U32 index = 0; index <= kMaxIndicesToCheck; ++index) {
portFormat.nIndex = index;
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 200796c..0fb5072 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -767,7 +767,7 @@
return UNKNOWN_ERROR;
}
- return mCurrentInfo->setCapabilitiesFromCodec(caps);
+ return mCurrentInfo->initializeCapabilities(caps);
}
status_t MediaCodecList::addQuirk(const char **attrs) {
diff --git a/media/libstagefright/SimpleDecodingSource.cpp b/media/libstagefright/SimpleDecodingSource.cpp
index 04f9a88..1b44a00 100644
--- a/media/libstagefright/SimpleDecodingSource.cpp
+++ b/media/libstagefright/SimpleDecodingSource.cpp
@@ -31,10 +31,12 @@
using namespace android;
const int64_t kTimeoutWaitForOutputUs = 500000; // 0.5 seconds
+const int64_t kTimeoutWaitForInputUs = 5000; // 5 milliseconds
//static
sp<SimpleDecodingSource> SimpleDecodingSource::Create(
- const sp<IMediaSource> &source, uint32_t flags, const sp<ANativeWindow> &nativeWindow) {
+ const sp<IMediaSource> &source, uint32_t flags, const sp<ANativeWindow> &nativeWindow,
+ const char *desiredCodec) {
sp<Surface> surface = static_cast<Surface*>(nativeWindow.get());
const char *mime = NULL;
sp<MetaData> meta = source->getFormat();
@@ -55,6 +57,9 @@
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
const AString &componentName = matchingCodecs[i];
+ if (desiredCodec != NULL && componentName.compare(desiredCodec)) {
+ continue;
+ }
ALOGV("Attempting to allocate codec '%s'", componentName.c_str());
@@ -205,13 +210,14 @@
return ERROR_END_OF_STREAM;
}
- for (int retries = 1; ++retries; ) {
+ for (int retries = 0; ++retries; ) {
// If we fill all available input buffers, we should expect that
// the codec produces at least one output buffer. Also, the codec
// should produce an output buffer in at most 1 second. Retry a
// few times nonetheless.
while (!me->mQueuedInputEOS) {
- res = mCodec->dequeueInputBuffer(&in_ix, 0);
+ // allow some time to get input buffer after flush
+ res = mCodec->dequeueInputBuffer(&in_ix, kTimeoutWaitForInputUs);
if (res == -EAGAIN) {
// no available input buffers
break;
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index cc727f2..e378a62 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -1097,6 +1097,7 @@
mAIRMode = IVE_AIR_MODE_CYCLIC;
mAIRRefreshPeriod = intraRefreshParams->nRefreshPeriod;
}
+ mUpdateFlag |= kUpdateAIRMode;
return OMX_ErrorNone;
}
@@ -1339,6 +1340,11 @@
if (mUpdateFlag & kRequestKeyFrame) {
setFrameType(IV_IDR_FRAME);
}
+ if (mUpdateFlag & kUpdateAIRMode) {
+ setAirParams();
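+        // The event tells the framework the output format changed; ACodec
+        // handles OMX_IndexConfigAndroidIntraRefresh by clearing mSentFormat
+        // (see the ACodec change above) so the format is re-sent.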
+ notify(OMX_EventPortSettingsChanged, kOutputPortIndex,
+ OMX_IndexConfigAndroidIntraRefresh, NULL);
+ }
mUpdateFlag = 0;
}
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
index 3c811d2..232c6e0 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
@@ -143,8 +143,9 @@
};
enum {
- kUpdateBitrate = 1 << 0,
- kRequestKeyFrame = 1 << 1,
+ kUpdateBitrate = 1 << 0,
+ kRequestKeyFrame = 1 << 1,
+ kUpdateAIRMode = 1 << 2,
};
// OMX input buffer's timestamp and flags
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index 81a6d00..b03c769 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -84,6 +84,7 @@
<Limit name="block-size" value="16x16" />
<Limit name="blocks-per-second" range="1-244800" />
<Limit name="bitrate" range="1-12000000" />
+ <Feature name="intra-refresh" />
</MediaCodec>
<MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es">
<!-- profiles and levels: ProfileCore : Level2 -->
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 62a3115..f2f11e3 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -57,6 +57,7 @@
#include "AudioStreamOut.h"
#include "SpdifStreamOut.h"
#include "AudioHwDevice.h"
+#include "LinearMap.h"
#include <powermanager/IPowerManager.h>
diff --git a/services/audioflinger/LinearMap.h b/services/audioflinger/LinearMap.h
new file mode 100644
index 0000000..fca14dd
--- /dev/null
+++ b/services/audioflinger/LinearMap.h
@@ -0,0 +1,366 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_LINEAR_MAP_H
+#define ANDROID_LINEAR_MAP_H
+
+#include <stdint.h>
+
+namespace android {
+
+/*
+A general purpose lookup utility that defines a mapping between X and Y as a
+continuous set of line segments with shared (x, y) end-points.
+The (x, y) points must be added in order, monotonically increasing in both x and y;
+a log warning is emitted if this does not happen (See general usage notes below).
+
+A limited history of (x, y) points is kept for space reasons (See general usage notes).
+
+In AudioFlinger, we use the LinearMap to associate track frames to
+sink frames. When we want to obtain a client track timestamp, we first
+get a timestamp from the sink. The sink timestamp's position (mPosition)
+corresponds to the sink frames written. We use LinearMap to figure out which track frame
+the sink frame corresponds to. This allows us to substitute a track frame for
+the sink frame (keeping the mTime identical) and return that timestamp back to the client.
+
+The method findX() can be used to retrieve an x value from a given y value and is
+used for timestamps; findY() is similarly provided for completeness.
+
+We update the (track frame, sink frame) points in the LinearMap each time we write data
+to the sink by the AudioFlinger PlaybackThread (MixerThread).
+
+
+AudioFlinger Timestamp Notes:
+
+1) Example: Obtaining a track timestamp during playback. In this case, the LinearMap
+looks something like this:
+
+Track Frame Sink Frame
+(track start)
+0 50000 (track starts here, the sink may already be running)
+1000 51000
+2000 52000
+
+When we request a track timestamp, we call the sink getTimestamp() and get for example
+mPosition = 51020. Using the LinearMap, we find we have played to track frame 1020.
+We substitute the sink mPosition of 51020 with the track position 1020,
+and return that timestamp to the app.
+
+2) Example: Obtaining a track timestamp during pause. In this case, the LinearMap
+looks something like this:
+
+Track Frame Sink Frame
+... (some time has gone by)
+15000 30000
+16000 31000
+17000 32000
+(pause here)
+(suppose we call sink getTimestamp() here and get sink mPosition = 31100; that means
+ we have played to track frame 16100. The track timestamp mPosition will
+ continue to advance until the sink timestamp returns a value of mPosition
+ greater than 32000, corresponding to track frame 17000 when the pause was called).
+17000 33000
+17000 34000
+...
+
+3) If the track underruns, it appears as if a pause was called on that track.
+
+4) If there is an underrun in the HAL layer, then it may be possible that
+the sink getTimestamp() will return a value greater than the number of frames written
+(it should always be less). This should be rare, if not impossible by some
+HAL implementations of the sink getTimestamp. In that case, timing is lost
+and we will return the most recent track frame written.
+
+5) When called with no points in the map, findX() returns the start value (default 0).
+This is consistent with starting after a stop() or flush().
+
+6) Resuming after Track standby will be similar to coming out of pause, as the HAL ensures
+framesWritten() and getTimestamp() are contiguous for non-offloaded/direct tracks.
+
+7) LinearMap works for different speeds and sample rates as it uses
+linear interpolation. Since AudioFlinger only updates speed and sample rate
+exactly at the sample points pushed into the LinearMap, the returned values
+from findX() and findY() are accurate regardless of how many speed or sample
+rate changes are made, so long as the coordinate looked up is within the
+sample history.
+
+General usage notes:
+
+1) In order for the LinearMap to work reliably, you cannot look backwards more
+than the size of its circular buffer history, set upon creation (typically 16).
+If you look back further, the position is extrapolated either from a passed in
+extrapolation parameter or from the oldest line segment.
+
+2) Points must monotonically increase in x and y. The increment between adjacent
+points cannot be greater than signed 32 bits. Wraparound in the x, y coordinates is supported,
+since we use differences in our computation.
+
+3) If the frame data is discontinuous (due to stop or flush) call reset() to clear
+the sample counter.
+
+4) If (x, y) are not strictly monotonic increasing, i.e. (x2 > x1) and (y2 > y1),
+then one or both of the inverses y = f(x) or x = g(y) may have multiple solutions.
+In that case, the most recent solution is returned by findX() or findY(). We
+do not warn if (x2 == x1) or (y2 == y1), but we do log a warning if (x2 < x1) or
+(y2 < y1).
+
+5) Due to rounding it is possible x != findX(findY(x)) or y != findY(findX(y))
+even when the inverse exists. Nevertheless, the values should be close.
+
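+A minimal usage sketch (illustrative only; the numbers reuse example 1 above):
+
+    LinearMap<uint32_t> map(16);     // 16-entry history
+    map.push(0, 50000);              // (track frame, sink frame) pairs
+    map.push(1000, 51000);
+    map.push(2000, 52000);
+    uint32_t trackFrame = map.findX(51020);  // sink frame 51020 -> track frame 1020
+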
+*/
+
+template <typename T>
+class LinearMap {
+public:
+ // This enumeration describes the reliability of the findX() or findY() estimation
+ // in descending order.
+ enum FindMethod {
+ FIND_METHOD_INTERPOLATION, // High reliability (errors due to rounding)
+ FIND_METHOD_FORWARD_EXTRAPOLATION, // Reliability based on no future speed changes
+ FIND_METHOD_BACKWARD_EXTRAPOLATION, // Reliability based on prior estimated speed
+ FIND_METHOD_START_VALUE, // No samples in history, using start value
+ };
+
+ LinearMap(size_t size)
+ : mSize(size),
+ mPos(0), // a circular buffer, so could start anywhere. the first sample is at 1.
+ mSamples(0),
+ // mStepValid(false), // only valid if mSamples > 1
+ // mExtrapolateTail(false), // only valid if mSamples > 0
+ mX(new T[size]),
+ mY(new T[size]) { }
+
+ ~LinearMap() {
+ delete[] mX;
+ delete[] mY;
+ }
+
+ // Add a new sample point to the linear map.
+ //
+ // The difference between the new sample and the previous sample
+ // in the x or y coordinate must be less than INT32_MAX for purposes
+ // of the linear interpolation or extrapolation.
+ //
+    // The values should be monotonically increasing (i.e. diff >= 0);
+ // logcat warnings are issued if they are not.
+ __attribute__((no_sanitize("integer")))
+ void push(T x, T y) {
+        // Assumption: we assume x, y are monotonically increasing values,
+ // which (can) wrap in precision no less than 32 bits and have
+ // "step" or differences between adjacent points less than 32 bits.
+
+ if (mSamples > 0) {
+ const bool lastStepValid = mStepValid;
+ int32_t xdiff;
+ int32_t ydiff;
+ // check difference assumption here
+ mStepValid = checkedDiff(&xdiff, x, mX[mPos], "x")
+ & /* bitwise AND to always warn for ydiff, though logical AND is also OK */
+ checkedDiff(&ydiff, y, mY[mPos], "y");
+
+ // Optimization: do not add a new sample if the line segment would
+ // simply extend the previous line segment. This extends the useful
+ // history by removing redundant points.
+ if (mSamples > 1 && mStepValid && lastStepValid) {
+ const size_t prev = previousPosition();
+ const int32_t xdiff2 = x - mX[prev];
+ const int32_t ydiff2 = y - mY[prev];
+
+ // if both current step and previous step are valid (non-negative and
+ // less than INT32_MAX for precision greater than 4 bytes)
+ // then the sum of the two steps is valid when the
+ // int32_t difference is non-negative.
+ if (xdiff2 >= 0 && ydiff2 >= 0
+ && (int64_t)xdiff2 * ydiff == (int64_t)ydiff2 * xdiff) {
+ // ALOGD("reusing sample! (%u, %u) sample depth %zd", x, y, mSamples);
+ mX[mPos] = x;
+ mY[mPos] = y;
+ return;
+ }
+ }
+ }
+ if (++mPos >= mSize) {
+ mPos = 0;
+ }
+ if (mSamples < mSize) {
+ mExtrapolateTail = false;
+ ++mSamples;
+ } else {
+ // we enable extrapolation beyond the oldest sample
+ // if the sample buffers are completely full and we
+ // no longer know the full history.
+ mExtrapolateTail = true;
+ }
+ mX[mPos] = x;
+ mY[mPos] = y;
+ }
+
+ // clear all samples from the circular array
+ void reset() {
+ // no need to reset mPos, we use a circular buffer.
+ // computed values such as mStepValid are set after a subsequent push().
+ mSamples = 0;
+ }
+
+ // returns true if LinearMap contains at least one sample.
+ bool hasData() const {
+ return mSamples != 0;
+ }
+
+ // find the corresponding X point from a Y point.
+ // See findU for details.
+ __attribute__((no_sanitize("integer")))
+ T findX(T y, FindMethod *method = NULL, double extrapolation = 0.0, T startValue = 0) const {
+ return findU(y, mX, mY, method, extrapolation, startValue);
+ }
+
+    // find the corresponding Y point from an X point.
+ // See findU for details.
+ __attribute__((no_sanitize("integer")))
+ T findY(T x, FindMethod *method = NULL, double extrapolation = 0.0, T startValue = 0) const {
+ return findU(x, mY, mX, method, extrapolation, startValue);
+ }
+
+protected:
+
+ // returns false if the diff is out of int32_t bounds or negative.
+ __attribute__((no_sanitize("integer")))
+ static inline bool checkedDiff(int32_t *diff, T x2, T x1, const char *coord) {
+ if (sizeof(T) >= 8) {
+ const int64_t diff64 = x2 - x1;
+ *diff = (int32_t)diff64; // intentionally lose precision
+ if (diff64 > INT32_MAX) {
+ ALOGW("LinearMap: %s overflow diff(%lld) from %llu - %llu exceeds INT32_MAX",
+ coord, (long long)diff64,
+ (unsigned long long)x2, (unsigned long long)x1);
+ return false;
+ } else if (diff64 < 0) {
+ ALOGW("LinearMap: %s negative diff(%lld) from %llu - %llu",
+ coord, (long long)diff64,
+ (unsigned long long)x2, (unsigned long long)x1);
+ return false;
+ }
+ return true;
+ }
+ // for 32 bit integers we cannot detect overflow (it
+ // shows up as a negative difference).
+ *diff = x2 - x1;
+ if (*diff < 0) {
+ ALOGW("LinearMap: %s negative diff(%d) from %u - %u",
+ coord, *diff, (unsigned)x2, (unsigned)x1);
+ return false;
+ }
+ return true;
+ }
+
+ // Returns the previous position in the mSamples array
+ // going backwards back steps.
+ //
+ // Parameters:
+ // back: number of backward steps, cannot be less than zero or greater than mSamples.
+ //
+ __attribute__((no_sanitize("integer")))
+ size_t previousPosition(ssize_t back = 1) const {
+ LOG_ALWAYS_FATAL_IF(back < 0 || (size_t)back > mSamples, "Invalid back(%zd)", back);
+ ssize_t position = mPos - back;
+ if (position < 0) position += mSize;
+ return (size_t)position;
+ }
+
+ // A generic implementation of finding the "other coordinate" with coordinates
+ // (u, v) = (x, y) or (u, v) = (y, x).
+ //
+ // Parameters:
+ // uArray: the u axis samples.
+ // vArray: the v axis samples.
+ // method: [out] how the returned value was computed.
+ // extrapolation: the slope used when extrapolating from the
+ // first sample value or the last sample value in the history.
+ // If mExtrapolateTail is set, the slope of the last line segment
+ // is used if the extrapolation parameter is zero to continue the tail of history.
+ // At this time, we do not use a different value for forward extrapolation from the
+    //          head of history than for backward extrapolation from the tail of history.
+ // TODO: back extrapolation value could be stored along with mX, mY in history.
+ // startValue: used only when there are no samples in history. One can detect
+ // whether there are samples in history by the method hasData().
+ //
+ __attribute__((no_sanitize("integer")))
+ T findU(T v, T *uArray, T *vArray, FindMethod *method,
+ double extrapolation, T startValue) const {
+ if (mSamples == 0) {
+ if (method != NULL) {
+ *method = FIND_METHOD_START_VALUE;
+ }
+ return startValue; // nothing yet
+ }
+ ssize_t previous = 0;
+ int32_t diff = 0;
+ for (ssize_t i = 0; i < (ssize_t)mSamples; ++i) {
+ size_t current = previousPosition(i);
+
+ // Assumption: even though the type "T" may have precision greater
+ // than 32 bits, the difference between adjacent points is limited to 32 bits.
+ diff = v - vArray[current];
+ if (diff >= 0 ||
+ (i == (ssize_t)mSamples - 1 && mExtrapolateTail && extrapolation == 0.0)) {
+ // ALOGD("depth = %zd out of %zd", i, limit);
+ if (i == 0) {
+ if (method != NULL) {
+ *method = FIND_METHOD_FORWARD_EXTRAPOLATION;
+ }
+ return uArray[current] + diff * extrapolation;
+ }
+ // interpolate / extrapolate: For this computation, we
+ // must use differentials here otherwise we have inconsistent
+ // values on modulo wrap. previous is always valid here since
+ // i > 0. we also perform rounding with the assumption
+ // that uStep, vStep, and diff are non-negative.
+ int32_t uStep = uArray[previous] - uArray[current]; // non-negative
+ int32_t vStep = vArray[previous] - vArray[current]; // positive
+ T u = uStep <= 0 || vStep <= 0 ? // we do not permit negative ustep or vstep
+ uArray[current]
+ : ((int64_t)diff * uStep + (vStep >> 1)) / vStep + uArray[current];
+ // ALOGD("u:%u diff:%d uStep:%d vStep:%d u_current:%d",
+ // u, diff, uStep, vStep, uArray[current]);
+ if (method != NULL) {
+ *method = (diff >= 0) ?
+ FIND_METHOD_INTERPOLATION : FIND_METHOD_BACKWARD_EXTRAPOLATION;
+ }
+ return u;
+ }
+ previous = current;
+ }
+ // previous is always valid here.
+ if (method != NULL) {
+ *method = FIND_METHOD_BACKWARD_EXTRAPOLATION;
+ }
+ return uArray[previous] + diff * extrapolation;
+ }
+
+private:
+ const size_t mSize; // Size of mX and mY arrays (history).
+ size_t mPos; // Index in mX and mY of last pushed data;
+ // (incremented after push) [0, mSize - 1].
+ size_t mSamples; // Number of valid samples in the array [0, mSize].
+ bool mStepValid; // Last sample step was valid (non-negative)
+ bool mExtrapolateTail; // extrapolate tail using oldest line segment
+ T * const mX; // History of X values as a circular array.
+ T * const mY; // History of Y values as a circular array.
+};
+
+} // namespace android
+
+#endif // ANDROID_LINEAR_MAP_H
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 1450ca1..fe3cc53 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -89,6 +89,7 @@
// ExtendedAudioBufferProvider interface
virtual size_t framesReady() const;
virtual size_t framesReleased() const;
+    virtual void onTimestamp(const AudioTimestamp &timestamp);
bool isPausing() const { return mState == PAUSING; }
bool isPaused() const { return mState == PAUSED; }
@@ -100,6 +101,8 @@
void flushAck();
bool isResumePending();
void resumeAck();
+ void updateTrackFrameInfo(uint32_t trackFramesReleased, uint32_t sinkFramesWritten,
+ AudioTimestamp *timeStamp = NULL);
sp<IMemory> sharedBuffer() const { return mSharedBuffer; }
@@ -137,6 +140,12 @@
size_t mPresentationCompleteFrames; // number of frames written to the
// audio HAL when this track will be fully rendered
// zero means not monitoring
+
+ // access these three variables only when holding thread lock.
+ LinearMap<uint32_t> mFrameMap; // track frame to server frame mapping
+ bool mSinkTimestampValid; // valid cached timestamp
+ AudioTimestamp mSinkTimestamp;
+
private:
// The following fields are only for fast tracks, and should be in a subclass
int mFastIndex; // index within FastMixerState::mFastTracks[];
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index cfac81d..5aff394 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -1579,9 +1579,7 @@
mScreenState(AudioFlinger::mScreenState),
// index 0 is reserved for normal mixer's submix
mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1),
- mHwSupportsPause(false), mHwPaused(false), mFlushPending(false),
- // mLatchD, mLatchQ,
- mLatchDValid(false), mLatchQValid(false)
+ mHwSupportsPause(false), mHwPaused(false), mFlushPending(false)
{
snprintf(mThreadName, kThreadNameLength, "AudioOut_%X", id);
mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);
@@ -2554,16 +2552,6 @@
} else {
bytesWritten = framesWritten;
}
- mLatchDValid = false;
- status_t status = mNormalSink->getTimestamp(mLatchD.mTimestamp);
- if (status == NO_ERROR) {
- size_t totalFramesWritten = mNormalSink->framesWritten();
- if (totalFramesWritten >= mLatchD.mTimestamp.mPosition) {
- mLatchD.mUnpresentedFrames = totalFramesWritten - mLatchD.mTimestamp.mPosition;
- // mLatchD.mFramesReleased is set immediately before D is clocked into Q
- mLatchDValid = true;
- }
- }
// otherwise use the HAL / AudioStreamOut directly
} else {
// Direct output and offload threads
@@ -2869,21 +2857,47 @@
}
// Gather the framesReleased counters for all active tracks,
- // and latch them atomically with the timestamp.
- // FIXME We're using raw pointers as indices. A unique track ID would be a better index.
- mLatchD.mFramesReleased.clear();
- size_t size = mActiveTracks.size();
- for (size_t i = 0; i < size; i++) {
- sp<Track> t = mActiveTracks[i].promote();
- if (t != 0) {
- mLatchD.mFramesReleased.add(t.get(),
- t->mAudioTrackServerProxy->framesReleased());
+ // and associate with the sink frames written out. We need
+ // this to convert the sink timestamp to the track timestamp.
+ if (mNormalSink != 0) {
+ bool updateTracks = true;
+ bool cacheTimestamp = false;
+ AudioTimestamp timeStamp;
+ // FIXME: Use a 64 bit mNormalSink->framesWritten() counter.
+ // At this time, we must always use cached timestamps even when
+ // going through mPipeSink (which is non-blocking). The reason is that
+ // the track may be removed from the active list for many hours and
+ // the mNormalSink->framesWritten() will wrap making the linear
+ // mapping fail.
+ //
+ // (Also mAudioTrackServerProxy->framesReleased() needs to be
+ // updated to 64 bits for 64 bit frame position.)
+ //
+ if (true /* see comment above, should be: mNormalSink == mOutputSink */) {
+ // If we use a hardware device, we must cache the sink timestamp now.
+ // hardware devices can block timestamp access during data writes.
+ if (mNormalSink->getTimestamp(timeStamp) == NO_ERROR) {
+ cacheTimestamp = true;
+ } else {
+ updateTracks = false;
+ }
}
- }
- if (mLatchDValid) {
- mLatchQ = mLatchD;
- mLatchDValid = false;
- mLatchQValid = true;
+ if (updateTracks) {
+ // sinkFramesWritten for non-offloaded tracks are contiguous
+ // even after standby() is called. This is useful for the track frame
+ // to sink frame mapping.
+ const uint32_t sinkFramesWritten = mNormalSink->framesWritten();
+ const size_t size = mActiveTracks.size();
+ for (size_t i = 0; i < size; ++i) {
+ sp<Track> t = mActiveTracks[i].promote();
+ if (t != 0 && !t->isFastTrack()) {
+ t->updateTrackFrameInfo(
+ t->mAudioTrackServerProxy->framesReleased(),
+ sinkFramesWritten,
+ cacheTimestamp ? &timeStamp : NULL);
+ }
+ }
+ }
}
saveOutputTracks();
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index ad47277..7c92c1c 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -841,19 +841,6 @@
bool mHwSupportsPause;
bool mHwPaused;
bool mFlushPending;
-private:
- // timestamp latch:
- // D input is written by threadLoop_write while mutex is unlocked, and read while locked
- // Q output is written while locked, and read while locked
- struct {
- AudioTimestamp mTimestamp;
- uint32_t mUnpresentedFrames;
- KeyedVector<Track *, uint32_t> mFramesReleased;
- } mLatchD, mLatchQ;
- bool mLatchDValid; // true means mLatchD is valid
- // (except for mFramesReleased which is filled in later),
- // and clock it into latch at next opportunity
- bool mLatchQValid; // true means mLatchQ is valid
};
class MixerThread : public PlaybackThread {
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 5e5920f..1a48e07 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -50,6 +50,10 @@
#define ALOGVV(a...) do { } while(0)
#endif
+// TODO move to a common header (Also shared with AudioTrack.cpp)
+#define NANOS_PER_SECOND 1000000000
+#define TIME_TO_NANOS(time) ((uint64_t)(time).tv_sec * NANOS_PER_SECOND + (time).tv_nsec)
+
namespace android {
// ----------------------------------------------------------------------------
@@ -357,6 +361,9 @@
mAuxBuffer(NULL),
mAuxEffectId(0), mHasVolumeController(false),
mPresentationCompleteFrames(0),
+ mFrameMap(16 /* sink-frame-to-track-frame map memory */),
+ mSinkTimestampValid(false),
+ // mSinkTimestamp
mFastIndex(-1),
mCachedVolume(1.0),
mIsInvalid(false),
@@ -589,6 +596,11 @@
return mAudioTrackServerProxy->framesReleased();
}
+void AudioFlinger::PlaybackThread::Track::onTimestamp(const AudioTimestamp &timestamp)
+{
+ mAudioTrackServerProxy->setTimestamp(timestamp);
+}
+
// Don't call for fast tracks; the framesReady() could result in priority inversion
bool AudioFlinger::PlaybackThread::Track::isReady() const {
if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
@@ -652,6 +664,11 @@
ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
}
+ // states to reset position info for non-offloaded/direct tracks
+ if (!isOffloaded() && !isDirect()
+ && (state == IDLE || state == STOPPED || state == FLUSHED)) {
+ mFrameMap.reset();
+ }
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
if (isFastTrack()) {
// refresh fast track underruns on start because that field is never cleared
@@ -846,7 +863,7 @@
status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
{
- // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant
+ // FastTrack timestamps are read through SSQ
if (isFastTrack()) {
return INVALID_OPERATION;
}
@@ -858,36 +875,31 @@
Mutex::Autolock _l(thread->mLock);
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
- status_t result = INVALID_OPERATION;
- if (!isOffloaded() && !isDirect()) {
- if (!playbackThread->mLatchQValid) {
- return INVALID_OPERATION;
- }
- // FIXME Not accurate under dynamic changes of sample rate and speed.
- // Do not use track's mSampleRate as it is not current for mixer tracks.
- uint32_t sampleRate = mAudioTrackServerProxy->getSampleRate();
- AudioPlaybackRate playbackRate = mAudioTrackServerProxy->getPlaybackRate();
- uint32_t unpresentedFrames = ((double) playbackThread->mLatchQ.mUnpresentedFrames *
- sampleRate * playbackRate.mSpeed)/ playbackThread->mSampleRate;
- // FIXME Since we're using a raw pointer as the key, it is theoretically possible
- // for a brand new track to share the same address as a recently destroyed
- // track, and thus for us to get the frames released of the wrong track.
- // It is unlikely that we would be able to call getTimestamp() so quickly
- // right after creating a new track. Nevertheless, the index here should
- // be changed to something that is unique. Or use a completely different strategy.
- ssize_t i = playbackThread->mLatchQ.mFramesReleased.indexOfKey(this);
- uint32_t framesWritten = i >= 0 ?
- playbackThread->mLatchQ.mFramesReleased[i] :
- mAudioTrackServerProxy->framesReleased();
- if (framesWritten >= unpresentedFrames) {
- timestamp.mPosition = framesWritten - unpresentedFrames;
- timestamp.mTime = playbackThread->mLatchQ.mTimestamp.mTime;
- result = NO_ERROR;
- }
- } else { // offloaded or direct
- result = playbackThread->getTimestamp_l(timestamp);
+ if (isOffloaded() || isDirect()) {
+ return playbackThread->getTimestamp_l(timestamp);
}
+ if (!mFrameMap.hasData()) {
+ // WOULD_BLOCK is consistent with AudioTrack::getTimestamp() in the
+ // FLUSHED and STOPPED state. We should only return INVALID_OPERATION
+ // when this method is not permitted due to configuration or device.
+ return WOULD_BLOCK;
+ }
+ status_t result = OK;
+ if (!mSinkTimestampValid) { // if no sink position, try to fetch again
+ result = playbackThread->getTimestamp_l(mSinkTimestamp);
+ }
+
+ if (result == OK) {
+ // Lookup the track frame corresponding to the sink frame position.
+ timestamp.mPosition = mFrameMap.findX(mSinkTimestamp.mPosition);
+ timestamp.mTime = mSinkTimestamp.mTime;
+ // ALOGD("track (server-side) timestamp: mPosition(%u) mTime(%llu)",
+ // timestamp.mPosition, TIME_TO_NANOS(timestamp.mTime));
+ }
+ // (Possible) FIXME: mSinkTimestamp is updated only when the track is on
+ // the Thread active list. If the track is no longer on the thread active
+    // list, should we use the current time?
return result;
}
@@ -1077,6 +1089,19 @@
mResumeToStopping = false;
}
}
+
+// To be called with the thread lock held
+void AudioFlinger::PlaybackThread::Track::updateTrackFrameInfo(
+ uint32_t trackFramesReleased, uint32_t sinkFramesWritten, AudioTimestamp *timeStamp) {
+ mFrameMap.push(trackFramesReleased, sinkFramesWritten);
+ if (timeStamp == NULL) {
+ mSinkTimestampValid = false;
+ } else {
+ mSinkTimestampValid = true;
+ mSinkTimestamp = *timeStamp;
+ }
+}
+
// ----------------------------------------------------------------------------
AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index d51f4e1..c952831 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -69,7 +69,8 @@
* @return NO_ERROR if an output was found for the given attribute (in this case, the
* descriptor output param is initialized), error code otherwise.
*/
- status_t getOutputForAttr(audio_attributes_t attributes, sp<SwAudioOutputDescriptor> &desc);
+ status_t getOutputForAttr(audio_attributes_t attributes, uid_t uid,
+ sp<SwAudioOutputDescriptor> &desc);
audio_devices_t getDeviceAndMixForInputSource(audio_source_t inputSource,
audio_devices_t availableDeviceTypes,
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 6f1998c..3735c05 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -98,30 +98,111 @@
}
}
-status_t AudioPolicyMixCollection::getOutputForAttr(audio_attributes_t attributes,
+status_t AudioPolicyMixCollection::getOutputForAttr(audio_attributes_t attributes, uid_t uid,
sp<SwAudioOutputDescriptor> &desc)
{
+ desc = 0;
for (size_t i = 0; i < size(); i++) {
sp<AudioPolicyMix> policyMix = valueAt(i);
AudioMix *mix = policyMix->getMix();
if (mix->mMixType == MIX_TYPE_PLAYERS) {
+ // TODO if adding more player rules (currently only 2), make rule handling "generic"
+ // as there is no difference in the treatment of usage- or uid-based rules
+ bool hasUsageMatchRules = false;
+ bool hasUsageExcludeRules = false;
+ bool usageMatchFound = false;
+ bool usageExclusionFound = false;
+
+ bool hasUidMatchRules = false;
+ bool hasUidExcludeRules = false;
+ bool uidMatchFound = false;
+ bool uidExclusionFound = false;
+
+ bool hasAddrMatch = false;
+
+ // iterate over all mix criteria to list what rules this mix contains
for (size_t j = 0; j < mix->mCriteria.size(); j++) {
- if ((RULE_MATCH_ATTRIBUTE_USAGE == mix->mCriteria[j].mRule &&
- mix->mCriteria[j].mAttr.mUsage == attributes.usage) ||
- (RULE_EXCLUDE_ATTRIBUTE_USAGE == mix->mCriteria[j].mRule &&
- mix->mCriteria[j].mAttr.mUsage != attributes.usage)) {
- desc = policyMix->getOutput();
- break;
- }
+ ALOGV("getOutputForAttr: inspecting mix %zu of %zu", i, mix->mCriteria.size());
+
+ // if there is an address match, prioritize that match
if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
strncmp(attributes.tags + strlen("addr="),
mix->mRegistrationId.string(),
AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
- desc = policyMix->getOutput();
+ hasAddrMatch = true;
break;
}
+
+ switch (mix->mCriteria[j].mRule) {
+ case RULE_MATCH_ATTRIBUTE_USAGE:
+ ALOGV("\tmix has RULE_MATCH_ATTRIBUTE_USAGE for usage %d",
+ mix->mCriteria[j].mValue.mUsage);
+ hasUsageMatchRules = true;
+ if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
+ // found one match against all allowed usages
+ usageMatchFound = true;
+ }
+ break;
+ case RULE_EXCLUDE_ATTRIBUTE_USAGE:
+ ALOGV("\tmix has RULE_EXCLUDE_ATTRIBUTE_USAGE for usage %d",
+ mix->mCriteria[j].mValue.mUsage);
+ hasUsageExcludeRules = true;
+ if (mix->mCriteria[j].mValue.mUsage == attributes.usage) {
+ // found this usage is to be excluded
+ usageExclusionFound = true;
+ }
+ break;
+ case RULE_MATCH_UID:
+ ALOGV("\tmix has RULE_MATCH_UID for uid %d", mix->mCriteria[j].mValue.mUid);
+ hasUidMatchRules = true;
+ if (mix->mCriteria[j].mValue.mUid == uid) {
+ // found one UID match against all allowed UIDs
+ uidMatchFound = true;
+ }
+ break;
+ case RULE_EXCLUDE_UID:
+ ALOGV("\tmix has RULE_EXCLUDE_UID for uid %d", mix->mCriteria[j].mValue.mUid);
+ hasUidExcludeRules = true;
+ if (mix->mCriteria[j].mValue.mUid == uid) {
+ // found this UID is to be excluded
+ uidExclusionFound = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ // consistency checks: for each "dimension" of rules (usage, uid...), we can
+ // only have MATCH rules, or EXCLUDE rules in each dimension, not a combination
+ if (hasUsageMatchRules && hasUsageExcludeRules) {
+ ALOGE("getOutputForAttr: invalid combination of RULE_MATCH_ATTRIBUTE_USAGE"
+ " and RULE_EXCLUDE_ATTRIBUTE_USAGE in mix %zu", i);
+ return BAD_VALUE;
+ }
+ if (hasUidMatchRules && hasUidExcludeRules) {
+ ALOGE("getOutputForAttr: invalid combination of RULE_MATCH_UID"
+ " and RULE_EXCLUDE_UID in mix %zu", i);
+ return BAD_VALUE;
+ }
+
+ if ((hasUsageExcludeRules && usageExclusionFound)
+ || (hasUidExcludeRules && uidExclusionFound)) {
+ break; // stop iterating on criteria because an exclusion was found (will fail)
+ }
+
+ }//iterate on mix criteria
+
+ // determine if exiting on success (or implicit failure as desc is 0)
+ if (hasAddrMatch ||
+ !((hasUsageExcludeRules && usageExclusionFound) ||
+ (hasUsageMatchRules && !usageMatchFound) ||
+ (hasUidExcludeRules && uidExclusionFound) ||
+ (hasUidMatchRules && !uidMatchFound))) {
+ ALOGV("\tgetOutputForAttr will use mix %zu", i);
+ desc = policyMix->getOutput();
}
+
} else if (mix->mMixType == MIX_TYPE_RECORDERS) {
if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE &&
strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
@@ -151,9 +232,9 @@
}
for (size_t j = 0; j < mix->mCriteria.size(); j++) {
if ((RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
- mix->mCriteria[j].mAttr.mSource == inputSource) ||
+ mix->mCriteria[j].mValue.mSource == inputSource) ||
(RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
- mix->mCriteria[j].mAttr.mSource != inputSource)) {
+ mix->mCriteria[j].mValue.mSource != inputSource)) {
if (availDevices & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
if (policyMix != NULL) {
*policyMix = mix;
@@ -174,6 +255,15 @@
}
String8 address(attr.tags + strlen("addr="));
+#if LOG_NDEBUG == 0
+ ALOGV("getInputMixForAttr looking for address %s\n mixes available:", address.string());
+ for (size_t i = 0; i < size(); i++) {
+ sp<AudioPolicyMix> policyMix = valueAt(i);
+ AudioMix *mix = policyMix->getMix();
+ ALOGV("\tmix %zu address=%s", i, mix->mRegistrationId.string());
+ }
+#endif
+
ssize_t index = indexOfKey(address);
if (index < 0) {
ALOGW("getInputMixForAttr() no policy for address %s", address.string());
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index a5b1e47..8bb49fa 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -709,7 +709,7 @@
stream_type_to_audio_attributes(*stream, &attributes);
}
sp<SwAudioOutputDescriptor> desc;
- if (mPolicyMixes.getOutputForAttr(attributes, desc) == NO_ERROR) {
+ if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
if (!audio_has_proportional_frames(format)) {
return BAD_VALUE;
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index eed545e..9a28137 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -374,7 +374,7 @@
// Audio Effect Config parser
// ----------------------------------------------------------------------------
-size_t AudioPolicyEffects::growParamSize(char *param,
+size_t AudioPolicyEffects::growParamSize(char **param,
size_t size,
size_t *curSize,
size_t *totSize)
@@ -386,55 +386,82 @@
while (pos + size > *totSize) {
*totSize += ((*totSize + 7) / 8) * 4;
}
- param = (char *)realloc(param, *totSize);
+ *param = (char *)realloc(*param, *totSize);
+ if (*param == NULL) {
+ ALOGE("%s realloc error for size %zu", __func__, *totSize);
+ return 0;
+ }
}
*curSize = pos + size;
return pos;
}
+
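+// growParamSize() and readParamValue() take char ** because realloc() may move
+// the buffer: the callee writes the (possibly new) address back through the
+// double pointer, so callers such as loadEffectParameter() never keep a stale
+// pointer across a resize.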
size_t AudioPolicyEffects::readParamValue(cnode *node,
- char *param,
+ char **param,
size_t *curSize,
size_t *totSize)
{
+ size_t len = 0;
+ size_t pos;
+
if (strncmp(node->name, SHORT_TAG, sizeof(SHORT_TAG) + 1) == 0) {
- size_t pos = growParamSize(param, sizeof(short), curSize, totSize);
- *(short *)((char *)param + pos) = (short)atoi(node->value);
- ALOGV("readParamValue() reading short %d", *(short *)((char *)param + pos));
- return sizeof(short);
- } else if (strncmp(node->name, INT_TAG, sizeof(INT_TAG) + 1) == 0) {
- size_t pos = growParamSize(param, sizeof(int), curSize, totSize);
- *(int *)((char *)param + pos) = atoi(node->value);
- ALOGV("readParamValue() reading int %d", *(int *)((char *)param + pos));
- return sizeof(int);
- } else if (strncmp(node->name, FLOAT_TAG, sizeof(FLOAT_TAG) + 1) == 0) {
- size_t pos = growParamSize(param, sizeof(float), curSize, totSize);
- *(float *)((char *)param + pos) = (float)atof(node->value);
- ALOGV("readParamValue() reading float %f",*(float *)((char *)param + pos));
- return sizeof(float);
- } else if (strncmp(node->name, BOOL_TAG, sizeof(BOOL_TAG) + 1) == 0) {
- size_t pos = growParamSize(param, sizeof(bool), curSize, totSize);
- if (strncmp(node->value, "false", strlen("false") + 1) == 0) {
- *(bool *)((char *)param + pos) = false;
- } else {
- *(bool *)((char *)param + pos) = true;
+ pos = growParamSize(param, sizeof(short), curSize, totSize);
+ if (pos == 0) {
+ goto exit;
}
- ALOGV("readParamValue() reading bool %s",*(bool *)((char *)param + pos) ? "true" : "false");
- return sizeof(bool);
+ *(short *)(*param + pos) = (short)atoi(node->value);
+ ALOGV("readParamValue() reading short %d", *(short *)(*param + pos));
+ len = sizeof(short);
+ } else if (strncmp(node->name, INT_TAG, sizeof(INT_TAG) + 1) == 0) {
+ pos = growParamSize(param, sizeof(int), curSize, totSize);
+ if (pos == 0) {
+ goto exit;
+ }
+ *(int *)(*param + pos) = atoi(node->value);
+ ALOGV("readParamValue() reading int %d", *(int *)(*param + pos));
+ len = sizeof(int);
+ } else if (strncmp(node->name, FLOAT_TAG, sizeof(FLOAT_TAG) + 1) == 0) {
+ pos = growParamSize(param, sizeof(float), curSize, totSize);
+ if (pos == 0) {
+ goto exit;
+ }
+ *(float *)(*param + pos) = (float)atof(node->value);
+ ALOGV("readParamValue() reading float %f",*(float *)(*param + pos));
+ len = sizeof(float);
+ } else if (strncmp(node->name, BOOL_TAG, sizeof(BOOL_TAG) + 1) == 0) {
+ pos = growParamSize(param, sizeof(bool), curSize, totSize);
+ if (pos == 0) {
+ goto exit;
+ }
+ if (strncmp(node->value, "true", strlen("true") + 1) == 0) {
+ *(bool *)(*param + pos) = true;
+ } else {
+ *(bool *)(*param + pos) = false;
+ }
+ ALOGV("readParamValue() reading bool %s",
+ *(bool *)(*param + pos) ? "true" : "false");
+ len = sizeof(bool);
} else if (strncmp(node->name, STRING_TAG, sizeof(STRING_TAG) + 1) == 0) {
- size_t len = strnlen(node->value, EFFECT_STRING_LEN_MAX);
+ len = strnlen(node->value, EFFECT_STRING_LEN_MAX);
if (*curSize + len + 1 > *totSize) {
*totSize = *curSize + len + 1;
- param = (char *)realloc(param, *totSize);
+ *param = (char *)realloc(*param, *totSize);
+ if (*param == NULL) {
+ len = 0;
+ ALOGE("%s realloc error for string len %zu", __func__, *totSize);
+ goto exit;
+ }
}
- strncpy(param + *curSize, node->value, len);
+ strncpy(*param + *curSize, node->value, len);
*curSize += len;
- param[*curSize] = '\0';
- ALOGV("readParamValue() reading string %s", param + *curSize - len);
- return len;
+ (*param)[*curSize] = '\0';
+ ALOGV("readParamValue() reading string %s", *param + *curSize - len);
+ } else {
+ ALOGW("readParamValue() unknown param type %s", node->name);
}
- ALOGW("readParamValue() unknown param type %s", node->name);
- return 0;
+exit:
+ return len;
}
effect_param_t *AudioPolicyEffects::loadEffectParameter(cnode *root)
@@ -445,6 +472,12 @@
size_t totSize = sizeof(effect_param_t) + 2 * sizeof(int);
effect_param_t *fx_param = (effect_param_t *)malloc(totSize);
+ if (fx_param == NULL) {
+ ALOGE("%s malloc error for effect structure of size %zu",
+ __func__, totSize);
+ return NULL;
+ }
+
param = config_find(root, PARAM_TAG);
value = config_find(root, VALUE_TAG);
if (param == NULL && value == NULL) {
@@ -453,8 +486,10 @@
if (param != NULL) {
// Note that a pair of random strings is read as 0 0
int *ptr = (int *)fx_param->data;
+#if LOG_NDEBUG == 0
int *ptr2 = (int *)((char *)param + sizeof(effect_param_t));
- ALOGW("loadEffectParameter() ptr %p ptr2 %p", ptr, ptr2);
+ ALOGV("loadEffectParameter() ptr %p ptr2 %p", ptr, ptr2);
+#endif
*ptr++ = atoi(param->name);
*ptr = atoi(param->value);
fx_param->psize = sizeof(int);
@@ -463,7 +498,8 @@
}
}
if (param == NULL || value == NULL) {
- ALOGW("loadEffectParameter() invalid parameter description %s", root->name);
+ ALOGW("loadEffectParameter() invalid parameter description %s",
+ root->name);
goto error;
}
@@ -471,7 +507,8 @@
param = param->first_child;
while (param) {
ALOGV("loadEffectParameter() reading param of type %s", param->name);
- size_t size = readParamValue(param, (char *)fx_param, &curSize, &totSize);
+ size_t size =
+ readParamValue(param, (char **)&fx_param, &curSize, &totSize);
if (size == 0) {
goto error;
}
@@ -486,7 +523,8 @@
value = value->first_child;
while (value) {
ALOGV("loadEffectParameter() reading value of type %s", value->name);
- size_t size = readParamValue(value, (char *)fx_param, &curSize, &totSize);
+ size_t size =
+ readParamValue(value, (char **)&fx_param, &curSize, &totSize);
if (size == 0) {
goto error;
}
@@ -497,7 +535,7 @@
return fx_param;
error:
- delete fx_param;
+ free(fx_param);
return NULL;
}
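For context, the buffer being assembled here is an effect_param_t, which per the audio effect HAL headers is a small header (status, psize, vsize) followed by the parameter and value blobs packed back to back in data[] (the value area starts at a 32-bit-aligned offset, which a 4-byte parameter already satisfies). A hedged sketch of the single-int fast path handled just above, assuming that layout; makeIntParam() is illustrative:

    #include <stdint.h>
    #include <stdlib.h>
    #include <hardware/audio_effect.h>   // effect_param_t: status, psize, vsize, data[]

    // Pack one int parameter with one int value, as in the PARAM_TAG fast path.
    effect_param_t *makeIntParam(int32_t key, int32_t value) {
        const size_t totSize = sizeof(effect_param_t) + 2 * sizeof(int32_t);
        effect_param_t *p = (effect_param_t *)calloc(1, totSize);
        if (p == NULL) {
            return NULL;
        }
        p->psize = sizeof(int32_t);        // parameter blob size
        p->vsize = sizeof(int32_t);        // value blob size
        int32_t *data = (int32_t *)p->data;
        data[0] = key;                     // parameter first...
        data[1] = value;                   // ...then the value, back to back
        return p;                          // allocated with calloc, so free(), not delete
    }

This is also why the error path now calls free() rather than delete: the structure comes from malloc(), and deleting it is undefined behavior.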
@@ -507,11 +545,9 @@
while (node) {
ALOGV("loadEffectParameters() loading param %s", node->name);
effect_param_t *param = loadEffectParameter(node);
- if (param == NULL) {
- node = node->next;
- continue;
+ if (param != NULL) {
+ params.add(param);
}
- params.add(param);
node = node->next;
}
}
@@ -529,6 +565,7 @@
EffectDescVector *desc = new EffectDescVector();
while (node) {
size_t i;
+
for (i = 0; i < effects.size(); i++) {
if (strncmp(effects[i]->mName, node->name, EFFECT_STRING_LEN_MAX) == 0) {
ALOGV("loadEffectConfig() found effect %s in list", node->name);
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index 58ea24c..f302167 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -171,10 +171,10 @@
void loadEffectParameters(cnode *root, Vector <effect_param_t *>& params);
effect_param_t *loadEffectParameter(cnode *root);
size_t readParamValue(cnode *node,
- char *param,
+ char **param,
size_t *curSize,
size_t *totSize);
- size_t growParamSize(char *param,
+ size_t growParamSize(char **param,
size_t size,
size_t *curSize,
size_t *totSize);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index cac82f3..b524f61 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -64,6 +64,7 @@
mStatusWaiters(0),
mUsePartialResult(false),
mNumPartialResults(1),
+ mTimestampOffset(0),
mNextResultFrameNumber(0),
mNextReprocessResultFrameNumber(0),
mNextShutterFrameNumber(0),
@@ -204,6 +205,14 @@
mNeedConfig = true;
mPauseStateNotify = false;
+ // Measure the clock domain offset between camera and video/hw_composer
+ camera_metadata_entry timestampSource =
+ mDeviceInfo.find(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE);
+ if (timestampSource.count > 0 && timestampSource.data.u8[0] ==
+ ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_REALTIME) {
+ mTimestampOffset = getMonoToBoottimeOffset();
+ }
+
// Will the HAL be sending in early partial result metadata?
if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
camera_metadata_entry partialResultsCount =
@@ -382,6 +391,24 @@
return Size(maxJpegWidth, maxJpegHeight);
}
+nsecs_t Camera3Device::getMonoToBoottimeOffset() {
+ // Try three times to get the clock offset and keep the measurement
+ // bracketed by the smallest MONOTONIC gap, which bounds the error.
+ const int tries = 3;
+ nsecs_t bestGap = 0, measured = 0;
+ for (int i = 0; i < tries; ++i) {
+ const nsecs_t tmono = systemTime(SYSTEM_TIME_MONOTONIC);
+ const nsecs_t tbase = systemTime(SYSTEM_TIME_BOOTTIME);
+ const nsecs_t tmono2 = systemTime(SYSTEM_TIME_MONOTONIC);
+ const nsecs_t gap = tmono2 - tmono;
+ if (i == 0 || gap < bestGap) {
+ bestGap = gap;
+ measured = tbase - ((tmono + tmono2) >> 1);
+ }
+ }
+ return measured;
+}
+
ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
// Get max jpeg size (area-wise).
Size maxJpegResolution = getMaxJpegResolution();
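The sampling trick in getMonoToBoottimeOffset() is worth spelling out: a BOOTTIME read is bracketed by two MONOTONIC reads, the gap between the MONOTONIC pair bounds the measurement error, so the attempt with the smallest gap wins, and the midpoint of the pair is used as the MONOTONIC instant. The same technique in plain C++ with clock_gettime() (a sketch, not the Android systemTime() wrapper; CLOCK_BOOTTIME is Linux-specific):

    #include <stdint.h>
    #include <time.h>

    static int64_t nowNs(clockid_t clk) {
        struct timespec ts;
        clock_gettime(clk, &ts);
        return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    // Offset such that boottime ~= monotonic + offset at the moment of the call.
    int64_t monoToBoottimeOffsetNs() {
        int64_t bestGap = 0, offset = 0;
        for (int i = 0; i < 3; ++i) {
            const int64_t t1 = nowNs(CLOCK_MONOTONIC);
            const int64_t tb = nowNs(CLOCK_BOOTTIME);
            const int64_t t2 = nowNs(CLOCK_MONOTONIC);
            const int64_t gap = t2 - t1;
            if (i == 0 || gap < bestGap) {
                bestGap = gap;
                offset = tb - ((t1 + t2) >> 1);   // midpoint estimate of MONOTONIC
            }
        }
        return offset;
    }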
@@ -992,7 +1019,8 @@
}
}
newStream = new Camera3OutputStream(mNextStreamId, consumer,
- width, height, blobBufferSize, format, dataSpace, rotation, streamSetId);
+ width, height, blobBufferSize, format, dataSpace, rotation,
+ mTimestampOffset, streamSetId);
} else if (format == HAL_PIXEL_FORMAT_RAW_OPAQUE) {
ssize_t rawOpaqueBufferSize = getRawOpaqueBufferSize(width, height);
if (rawOpaqueBufferSize <= 0) {
@@ -1000,10 +1028,12 @@
return BAD_VALUE;
}
newStream = new Camera3OutputStream(mNextStreamId, consumer,
- width, height, rawOpaqueBufferSize, format, dataSpace, rotation, streamSetId);
+ width, height, rawOpaqueBufferSize, format, dataSpace, rotation,
+ mTimestampOffset, streamSetId);
} else {
newStream = new Camera3OutputStream(mNextStreamId, consumer,
- width, height, format, dataSpace, rotation, streamSetId);
+ width, height, format, dataSpace, rotation,
+ mTimestampOffset, streamSetId);
}
newStream->setStatusTracker(mStatusTracker);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index f70a153..3848200 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -23,6 +23,7 @@
#include <utils/Mutex.h>
#include <utils/Thread.h>
#include <utils/KeyedVector.h>
+#include <utils/Timers.h>
#include <hardware/camera3.h>
#include <camera/CaptureResult.h>
#include <camera/camera2/ICameraDeviceUser.h>
@@ -251,6 +252,12 @@
/**** End scope for mLock ****/
+ // The offset for converting between the clock domain of other subsystems
+ // (video/hardware composer) and that of the camera. The assumption is that
+ // this offset won't change during the lifetime of the camera device; in
+ // other words, the camera device shouldn't stay open across a CPU suspend.
+ nsecs_t mTimestampOffset;
+
typedef struct AeTriggerCancelOverride {
bool applyAeLock;
uint8_t aeLock;
@@ -392,6 +399,12 @@
*/
Size getMaxJpegResolution() const;
+ /**
+ * Helper function to get the offset between MONOTONIC and BOOTTIME
+ * timestamps.
+ */
+ static nsecs_t getMonoToBoottimeOffset();
+
struct RequestTrigger {
// Metadata tag number, e.g. android.control.aePrecaptureTrigger
uint32_t metadataTag;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 1c7bd81..1e6452f 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -34,14 +34,15 @@
Camera3OutputStream::Camera3OutputStream(int id,
sp<Surface> consumer,
uint32_t width, uint32_t height, int format,
- android_dataspace dataSpace, camera3_stream_rotation_t rotation, int setId) :
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+ nsecs_t timestampOffset, int setId) :
Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height,
/*maxSize*/0, format, dataSpace, rotation, setId),
mConsumer(consumer),
mTransform(0),
mTraceFirstBuffer(true),
- mTimestampBuffer(true),
- mUseBufferManager(false) {
+ mUseBufferManager(false),
+ mTimestampOffset(timestampOffset) {
if (mConsumer == NULL) {
ALOGE("%s: Consumer is NULL!", __FUNCTION__);
@@ -56,14 +57,16 @@
Camera3OutputStream::Camera3OutputStream(int id,
sp<Surface> consumer,
uint32_t width, uint32_t height, size_t maxSize, int format,
- android_dataspace dataSpace, camera3_stream_rotation_t rotation, int setId) :
+ android_dataspace dataSpace, camera3_stream_rotation_t rotation,
+ nsecs_t timestampOffset, int setId) :
Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height, maxSize,
format, dataSpace, rotation, setId),
mConsumer(consumer),
mTransform(0),
mTraceFirstBuffer(true),
- mTimestampBuffer(true),
- mUseBufferManager(false) {
+ mUseMonoTimestamp(false),
+ mUseBufferManager(false),
+ mTimestampOffset(timestampOffset) {
if (format != HAL_PIXEL_FORMAT_BLOB && format != HAL_PIXEL_FORMAT_RAW_OPAQUE) {
ALOGE("%s: Bad format for size-only stream: %d", __FUNCTION__,
@@ -92,6 +95,7 @@
format, dataSpace, rotation, setId),
mTransform(0),
mTraceFirstBuffer(true),
+ mUseMonoTimestamp(false),
mUseBufferManager(false) {
if (setId > CAMERA3_STREAM_SET_ID_INVALID) {
@@ -237,15 +241,13 @@
/* Certain consumers (such as AudioSource or HardwareComposer) use
* MONOTONIC time, causing time misalignment if camera timestamp is
- * in BOOTTIME. Avoid setting timestamp, and let BufferQueue generate it
- * instead. */
- if (mTimestampBuffer) {
- res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp);
- if (res != OK) {
- ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
- __FUNCTION__, mId, strerror(-res), res);
- return res;
- }
+ * in BOOTTIME. Do the conversion if necessary. */
+ res = native_window_set_buffers_timestamp(mConsumer.get(),
+ mUseMonoTimestamp ? timestamp - mTimestampOffset : timestamp);
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
}
res = currentConsumer->queueBuffer(currentConsumer.get(),
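Net effect of this hunk: the buffer timestamp is now always set, and is shifted from BOOTTIME into the MONOTONIC domain only when the consumer (video encoder or hardware composer) compares against CLOCK_MONOTONIC. A one-line sketch of the decision (illustrative names, mirroring the patch):

    // Shift a HAL timestamp (BOOTTIME) into the consumer's domain if needed.
    static int64_t consumerTimestampNs(int64_t halTsBoottime,
                                       bool useMonoTimestamp,
                                       int64_t monoToBootOffset) {
        return useMonoTimestamp ? halTsBoottime - monoToBootOffset
                                : halTsBoottime;
    }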
@@ -398,7 +400,7 @@
mHandoutTotalBufferCount = 0;
mFrameCount = 0;
mLastTimestamp = 0;
- mTimestampBuffer = !(isConsumedByHWComposer() | isVideoStream());
+ mUseMonoTimestamp = (isConsumedByHWComposer() || isVideoStream());
res = native_window_set_buffer_count(mConsumer.get(),
mTotalBufferCount);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index c2c3452..a883448 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -81,7 +81,7 @@
Camera3OutputStream(int id, sp<Surface> consumer,
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
- int setId = CAMERA3_STREAM_SET_ID_INVALID);
+ nsecs_t timestampOffset, int setId = CAMERA3_STREAM_SET_ID_INVALID);
/**
* Set up a stream for formats that have a variable buffer size for the same
@@ -92,7 +92,7 @@
Camera3OutputStream(int id, sp<Surface> consumer,
uint32_t width, uint32_t height, size_t maxSize, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
- int setId = CAMERA3_STREAM_SET_ID_INVALID);
+ nsecs_t timestampOffset, int setId = CAMERA3_STREAM_SET_ID_INVALID);
virtual ~Camera3OutputStream();
@@ -167,8 +167,8 @@
// Name of Surface consumer
String8 mConsumerName;
- // Whether to timestamp the output buffer
- bool mTimestampBuffer;
+ // Whether consumer assumes MONOTONIC timestamp
+ bool mUseMonoTimestamp;
/**
* GraphicBuffer manager this stream is registered to. Used to replace the buffer
@@ -186,6 +186,12 @@
* Flag indicating if the buffer manager is used to allocate the stream buffers
*/
bool mUseBufferManager;
+
+ /**
+ * Timestamp offset for video and hardware composer consumed streams
+ */
+ nsecs_t mTimestampOffset;
+
/**
* Internal Camera3Stream interface
*/