Merge "Move WAV/FLAC/MIDI/AAC/MP3/AMR extractors to APEX"
diff --git a/include/media/MediaMetrics.h b/include/media/MediaMetrics.h
new file mode 120000
index 0000000..5f757e4
--- /dev/null
+++ b/include/media/MediaMetrics.h
@@ -0,0 +1 @@
+../../media/libmediametrics/include/MediaMetrics.h
\ No newline at end of file
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 9fb2e35..52213bd 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -5352,7 +5352,7 @@
}
}
- if ((!mIsAVC && !mIsHEVC && !mIsAC4)) {
+ if (!mIsAVC && !mIsHEVC && !mIsAC4) {
if (newBuffer) {
if (mIsPcm) {
// The 'twos' PCM block reader assumes that all samples have the same size.
@@ -5429,58 +5429,11 @@
}
}
- if (!mIsAVC && !mIsHEVC && !mIsAC4) {
- *out = mBuffer;
- mBuffer = NULL;
-
- return AMEDIA_OK;
- }
-
- if (mIsAC4) {
- mBuffer->release();
- mBuffer = NULL;
-
- return AMEDIA_ERROR_IO;
- }
-
- // Each NAL unit is split up into its constituent fragments and
- // each one of them returned in its own buffer.
-
- CHECK(mBuffer->range_length() >= mNALLengthSize);
-
- const uint8_t *src =
- (const uint8_t *)mBuffer->data() + mBuffer->range_offset();
-
- size_t nal_size = parseNALSize(src);
- if (mNALLengthSize > SIZE_MAX - nal_size) {
- ALOGE("b/24441553, b/24445122");
- }
- if (mBuffer->range_length() - mNALLengthSize < nal_size) {
- ALOGE("incomplete NAL unit.");
-
- mBuffer->release();
- mBuffer = NULL;
-
- return AMEDIA_ERROR_MALFORMED;
- }
-
- MediaBufferBase *clone = mBuffer->clone();
- CHECK(clone != NULL);
- clone->set_range(mBuffer->range_offset() + mNALLengthSize, nal_size);
-
- CHECK(mBuffer != NULL);
- mBuffer->set_range(
- mBuffer->range_offset() + mNALLengthSize + nal_size,
- mBuffer->range_length() - mNALLengthSize - nal_size);
-
- if (mBuffer->range_length() == 0) {
- mBuffer->release();
- mBuffer = NULL;
- }
-
- *out = clone;
+ *out = mBuffer;
+ mBuffer = NULL;
return AMEDIA_OK;
+
} else if (mIsAC4) {
CHECK(mBuffer != NULL);
// Make sure there is enough space to write the sync header and the raw frame
@@ -5773,7 +5726,7 @@
}
- if ((!mIsAVC && !mIsHEVC)) {
+ if (!mIsAVC && !mIsHEVC) {
if (newBuffer) {
if (!isInRange((size_t)0u, mBuffer->size(), size)) {
mBuffer->release();
@@ -5825,52 +5778,11 @@
++mCurrentSampleIndex;
}
- if (!mIsAVC && !mIsHEVC) {
- *out = mBuffer;
- mBuffer = NULL;
-
- return AMEDIA_OK;
- }
-
- // Each NAL unit is split up into its constituent fragments and
- // each one of them returned in its own buffer.
-
- CHECK(mBuffer->range_length() >= mNALLengthSize);
-
- const uint8_t *src =
- (const uint8_t *)mBuffer->data() + mBuffer->range_offset();
-
- size_t nal_size = parseNALSize(src);
- if (mNALLengthSize > SIZE_MAX - nal_size) {
- ALOGE("b/24441553, b/24445122");
- }
-
- if (mBuffer->range_length() - mNALLengthSize < nal_size) {
- ALOGE("incomplete NAL unit.");
-
- mBuffer->release();
- mBuffer = NULL;
-
- return AMEDIA_ERROR_MALFORMED;
- }
-
- MediaBufferBase *clone = mBuffer->clone();
- CHECK(clone != NULL);
- clone->set_range(mBuffer->range_offset() + mNALLengthSize, nal_size);
-
- CHECK(mBuffer != NULL);
- mBuffer->set_range(
- mBuffer->range_offset() + mNALLengthSize + nal_size,
- mBuffer->range_length() - mNALLengthSize - nal_size);
-
- if (mBuffer->range_length() == 0) {
- mBuffer->release();
- mBuffer = NULL;
- }
-
- *out = clone;
+ *out = mBuffer;
+ mBuffer = NULL;
return AMEDIA_OK;
+
} else {
ALOGV("whole NAL");
// Whole NAL units are returned but each fragment is prefixed by
diff --git a/media/libaudioclient/include/media/AudioMixer.h b/media/libaudioclient/include/media/AudioMixer.h
index aa036a8..3ae7104 100644
--- a/media/libaudioclient/include/media/AudioMixer.h
+++ b/media/libaudioclient/include/media/AudioMixer.h
@@ -78,6 +78,8 @@
DOWNMIX_TYPE = 0X4004,
MIXER_FORMAT = 0x4005, // AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
MIXER_CHANNEL_MASK = 0x4006, // Channel mask for mixer output
+ // for haptic
+ HAPTIC_ENABLED = 0x4007, // Set whether haptic data from this track should be played.
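+ // Illustrative call shape (placeholder names; mirrors the FastMixer usage later in this change):
+ //   mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::HAPTIC_ENABLED,
+ //                       (void *)(uintptr_t)enabled);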
// for target RESAMPLE
SAMPLE_RATE = 0x4100, // Configure sample rate conversion on this track name;
// parameter 'value' is the new sample rate in Hz.
@@ -137,6 +139,13 @@
void setBufferProvider(int name, AudioBufferProvider* bufferProvider);
void process() {
+ for (const auto &pair : mTracks) {
+ // Clear the contracted buffer before processing if contracted channels are kept
+ const std::shared_ptr<Track> &t = pair.second;
+ if (t->mKeepContractedChannels) {
+ t->clearContractedBuffer();
+ }
+ }
(this->*mHook)();
}
@@ -235,6 +244,8 @@
mPostDownmixReformatBufferProvider.reset(nullptr);
mDownmixerBufferProvider.reset(nullptr);
mReformatBufferProvider.reset(nullptr);
+ mAdjustChannelsNonDestructiveBufferProvider.reset(nullptr);
+ mAdjustChannelsBufferProvider.reset(nullptr);
}
bool needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
@@ -249,6 +260,11 @@
void unprepareForDownmix();
status_t prepareForReformat();
void unprepareForReformat();
+ status_t prepareForAdjustChannels();
+ void unprepareForAdjustChannels();
+ status_t prepareForAdjustChannelsNonDestructive(size_t frames);
+ void unprepareForAdjustChannelsNonDestructive();
+ void clearContractedBuffer();
bool setPlaybackRate(const AudioPlaybackRate &playbackRate);
void reconfigureBufferProviders();
@@ -302,17 +318,22 @@
* all pre-mixer track buffer conversions outside the AudioMixer class.
*
* 1) mInputBufferProvider: The AudioTrack buffer provider.
- * 2) mReformatBufferProvider: If not NULL, performs the audio reformat to
+ * 2) mAdjustChannelsBufferProvider: Expands or contracts sample data between channel layouts.
+ * 3) mAdjustChannelsNonDestructiveBufferProvider: Non-destructively adjusts sample data.
+ * 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
* match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
* requires reformat. For example, it may convert floating point input to
* PCM_16_bit if that's required by the downmixer.
- * 3) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
+ * 5) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
* the number of channels required by the mixer sink.
- * 4) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
+ * 6) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
* the downmixer requirements to the mixer engine input requirements.
- * 5) mTimestretchBufferProvider: Adds timestretching for playback rate
+ * 7) mTimestretchBufferProvider: Adds timestretching for playback rate
*/
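+ // Sketch of the resulting chain for a track carrying haptic channels (order as wired up in
+ // reconfigureBufferProviders(); providers that are not needed are simply skipped):
+ //   mInputBufferProvider -> mAdjustChannelsBufferProvider
+ //     -> mAdjustChannelsNonDestructiveBufferProvider -> mReformatBufferProvider
+ //     -> mDownmixerBufferProvider -> mPostDownmixReformatBufferProvider
+ //     -> mTimestretchBufferProvider -> bufferProvider (consumed by the mixer hooks)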
AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider.
+ // TODO: combine AdjustChannelsBufferProvider and AdjustChannelsNonDestructiveBufferProvider
+ std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
+ std::unique_ptr<PassthruBufferProvider> mAdjustChannelsNonDestructiveBufferProvider;
std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
@@ -341,6 +362,18 @@
AudioPlaybackRate mPlaybackRate;
+ // Haptic
+ bool mHapticPlaybackEnabled;
+ audio_channel_mask_t mHapticChannelMask;
+ uint32_t mHapticChannelCount;
+ audio_channel_mask_t mMixerHapticChannelMask;
+ uint32_t mMixerHapticChannelCount;
+ uint32_t mAdjustInChannelCount;
+ uint32_t mAdjustOutChannelCount;
+ uint32_t mAdjustNonDestructiveInChannelCount;
+ uint32_t mAdjustNonDestructiveOutChannelCount;
+ bool mKeepContractedChannels;
+
private:
// hooks
void track__genericResample(int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index b3ff757..8537608 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -31,33 +31,17 @@
#include "StreamHalHidl.h"
#include "VersionUtils.h"
-using ::android::hardware::audio::common::CPP_VERSION::AudioConfig;
-using ::android::hardware::audio::common::CPP_VERSION::AudioDevice;
-using ::android::hardware::audio::common::CPP_VERSION::AudioInputFlag;
-using ::android::hardware::audio::common::CPP_VERSION::AudioOutputFlag;
-using ::android::hardware::audio::common::CPP_VERSION::AudioPatchHandle;
-using ::android::hardware::audio::common::CPP_VERSION::AudioPort;
-using ::android::hardware::audio::common::CPP_VERSION::AudioPortConfig;
-using ::android::hardware::audio::common::CPP_VERSION::AudioMode;
-using ::android::hardware::audio::common::CPP_VERSION::AudioSource;
-using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
+using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
using ::android::hardware::audio::common::utils::EnumBitfield;
-using ::android::hardware::audio::CPP_VERSION::DeviceAddress;
-using ::android::hardware::audio::CPP_VERSION::IPrimaryDevice;
-using ::android::hardware::audio::CPP_VERSION::ParameterValue;
-using ::android::hardware::audio::CPP_VERSION::Result;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
-#if MAJOR_VERSION == 4
-using ::android::hardware::audio::CPP_VERSION::SinkMetadata;
-#elif MAJOR_VERSION == 5
-using ::android::hardware::audio::common::CPP_VERSION::SinkMetadata;
-#endif
-
namespace android {
namespace CPP_VERSION {
+using namespace ::android::hardware::audio::common::CPP_VERSION;
+using namespace ::android::hardware::audio::CPP_VERSION;
+
namespace {
status_t deviceAddressFromHal(
diff --git a/media/libaudiohal/impl/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
index 7b867b4..df79b95 100644
--- a/media/libaudiohal/impl/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -31,9 +31,7 @@
using ::android::hardware::audio::effect::CPP_VERSION::EffectConfigParameters;
using ::android::hardware::audio::effect::CPP_VERSION::MessageQueueFlagBits;
using ::android::hardware::audio::effect::CPP_VERSION::Result;
-using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
-using ::android::hardware::audio::common::CPP_VERSION::AudioChannelMask;
-using ::android::hardware::audio::common::CPP_VERSION::AudioFormat;
+using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
using ::android::hardware::audio::common::utils::EnumBitfield;
using ::android::hardware::hidl_vec;
using ::android::hardware::MQDescriptorSync;
@@ -42,6 +40,8 @@
namespace android {
namespace CPP_VERSION {
+using namespace ::android::hardware::audio::common::CPP_VERSION;
+
EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
: mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
}
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index b880433..7fea466 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -25,8 +25,7 @@
#include "EffectHalHidl.h"
#include "HidlUtils.h"
-using ::android::hardware::audio::common::CPP_VERSION::HidlUtils;
-using ::android::hardware::audio::common::CPP_VERSION::Uuid;
+using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
using ::android::hardware::audio::effect::CPP_VERSION::IEffect;
using ::android::hardware::audio::effect::CPP_VERSION::Result;
using ::android::hardware::Return;
@@ -34,6 +33,8 @@
namespace android {
namespace CPP_VERSION {
+using namespace ::android::hardware::audio::common::CPP_VERSION;
+
EffectsFactoryHalHidl::EffectsFactoryHalHidl() : ConversionHelperHidl("EffectsFactory") {
mEffectsFactory = IEffectsFactory::getService();
if (mEffectsFactory == 0) {
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 6b59f5c..9765f1e 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -28,44 +28,18 @@
#include "StreamHalHidl.h"
#include "VersionUtils.h"
-using ::android::hardware::audio::common::CPP_VERSION::AudioChannelMask;
-using ::android::hardware::audio::common::CPP_VERSION::AudioFormat;
-using ::android::hardware::audio::common::CPP_VERSION::ThreadInfo;
-using ::android::hardware::audio::CPP_VERSION::AudioDrain;
-using ::android::hardware::audio::CPP_VERSION::IStreamOutCallback;
-using ::android::hardware::audio::CPP_VERSION::MessageQueueFlagBits;
-using ::android::hardware::audio::CPP_VERSION::MmapBufferInfo;
-using ::android::hardware::audio::CPP_VERSION::MmapPosition;
-using ::android::hardware::audio::CPP_VERSION::ParameterValue;
-using ::android::hardware::audio::CPP_VERSION::Result;
-using ::android::hardware::audio::CPP_VERSION::TimeSpec;
using ::android::hardware::MQDescriptorSync;
using ::android::hardware::Return;
using ::android::hardware::Void;
-using ReadCommand = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadCommand;
-
-#if MAJOR_VERSION >= 4
-using ::android::hardware::audio::common::CPP_VERSION::AudioContentType;
-using ::android::hardware::audio::common::CPP_VERSION::AudioSource;
-using ::android::hardware::audio::common::CPP_VERSION::AudioUsage;
-using ::android::hardware::audio::CPP_VERSION::MicrophoneInfo;
-#endif
-
-#if MAJOR_VERSION == 4
-using ::android::hardware::audio::CPP_VERSION::PlaybackTrackMetadata;
-using ::android::hardware::audio::CPP_VERSION::RecordTrackMetadata;
-using HalSinkMetadata = ::android::hardware::audio::CPP_VERSION::SinkMetadata;
-using HalSourceMetadata = ::android::hardware::audio::CPP_VERSION::SourceMetadata;
-#elif MAJOR_VERSION == 5
-using ::android::hardware::audio::common::CPP_VERSION::PlaybackTrackMetadata;
-using ::android::hardware::audio::common::CPP_VERSION::RecordTrackMetadata;
-using HalSinkMetadata = ::android::hardware::audio::common::CPP_VERSION::SinkMetadata;
-using HalSourceMetadata = ::android::hardware::audio::common::CPP_VERSION::SourceMetadata;
-#endif
namespace android {
namespace CPP_VERSION {
+using ReadCommand = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadCommand;
+
+using namespace ::android::hardware::audio::common::CPP_VERSION;
+using namespace ::android::hardware::audio::CPP_VERSION;
+
StreamHalHidl::StreamHalHidl(IStream *stream)
: ConversionHelperHidl("Stream"),
mStream(stream),
@@ -609,7 +583,8 @@
}
#if MAJOR_VERSION == 2
-status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& /* sourceMetadata */) {
+status_t StreamOutHalHidl::updateSourceMetadata(
+ const StreamOutHalInterface::SourceMetadata& /* sourceMetadata */) {
// Audio HAL V2.0 does not support propagating source metadata
return INVALID_OPERATION;
}
@@ -623,8 +598,9 @@
return result;
}
-status_t StreamOutHalHidl::updateSourceMetadata(const SourceMetadata& sourceMetadata) {
- HalSourceMetadata halMetadata = {
+status_t StreamOutHalHidl::updateSourceMetadata(
+ const StreamOutHalInterface::SourceMetadata& sourceMetadata) {
+ CPP_VERSION::SourceMetadata halMetadata = {
.tracks = transformToHidlVec(sourceMetadata.tracks,
[](const playback_track_metadata& metadata) -> PlaybackTrackMetadata {
return {
@@ -838,7 +814,8 @@
return INVALID_OPERATION;
}
-status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& /* sinkMetadata */) {
+status_t StreamInHalHidl::updateSinkMetadata(
+ const StreamInHalInterface::SinkMetadata& /* sinkMetadata */) {
// Audio HAL V2.0 does not support propagating sink metadata
return INVALID_OPERATION;
}
@@ -862,8 +839,9 @@
return processReturn("getActiveMicrophones", ret, retval);
}
-status_t StreamInHalHidl::updateSinkMetadata(const SinkMetadata& sinkMetadata) {
- HalSinkMetadata halMetadata = {
+status_t StreamInHalHidl::updateSinkMetadata(
+        const StreamInHalInterface::SinkMetadata& sinkMetadata) {
+ CPP_VERSION::SinkMetadata halMetadata = {
.tracks = transformToHidlVec(sinkMetadata.tracks,
[](const record_track_metadata& metadata) -> RecordTrackMetadata {
return {
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index af54b21..2567b3b 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -136,6 +136,9 @@
// no initialization needed
// t->frameCount
+ t->mHapticChannelMask = channelMask & AUDIO_CHANNEL_HAPTIC_ALL;
+ t->mHapticChannelCount = audio_channel_count_from_out_mask(t->mHapticChannelMask);
+ channelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
t->channelCount = audio_channel_count_from_out_mask(channelMask);
t->enabled = false;
ALOGV_IF(audio_channel_mask_get_bits(channelMask) != AUDIO_CHANNEL_OUT_STEREO,
@@ -162,6 +165,15 @@
AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
+ // haptic
+ t->mHapticPlaybackEnabled = false;
+ t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
+ t->mMixerHapticChannelCount = 0;
+ t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
+ t->mAdjustOutChannelCount = t->channelCount + t->mMixerHapticChannelCount;
+ t->mAdjustNonDestructiveInChannelCount = t->mAdjustOutChannelCount;
+ t->mAdjustNonDestructiveOutChannelCount = t->channelCount;
+ t->mKeepContractedChannels = false;
// Check the downmixing (or upmixing) requirements.
status_t status = t->prepareForDownmix();
if (status != OK) {
@@ -171,6 +183,8 @@
// prepareForDownmix() may change mDownmixRequiresFormat
ALOGVV("mMixerFormat:%#x mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
t->prepareForReformat();
+ t->prepareForAdjustChannelsNonDestructive(mFrameCount);
+ t->prepareForAdjustChannels();
mTracks[name] = t;
return OK;
@@ -185,13 +199,20 @@
LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
const std::shared_ptr<Track> &track = mTracks[name];
- if (trackChannelMask == track->channelMask
- && mixerChannelMask == track->mMixerChannelMask) {
+ if (trackChannelMask == (track->channelMask | track->mHapticChannelMask)
+ && mixerChannelMask == (track->mMixerChannelMask | track->mMixerHapticChannelMask)) {
return false; // no need to change
}
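+ // Haptic channels arrive folded into the same channel masks; peel them off here so the
+ // mixer core only sees audio channels, and track the haptic portion separately.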
+ const audio_channel_mask_t hapticChannelMask = trackChannelMask & AUDIO_CHANNEL_HAPTIC_ALL;
+ trackChannelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
+ const audio_channel_mask_t mixerHapticChannelMask = mixerChannelMask & AUDIO_CHANNEL_HAPTIC_ALL;
+ mixerChannelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
// always recompute for both channel masks even if only one has changed.
const uint32_t trackChannelCount = audio_channel_count_from_out_mask(trackChannelMask);
const uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mixerChannelMask);
+ const uint32_t hapticChannelCount = audio_channel_count_from_out_mask(hapticChannelMask);
+ const uint32_t mixerHapticChannelCount =
+ audio_channel_count_from_out_mask(mixerHapticChannelMask);
ALOG_ASSERT((trackChannelCount <= MAX_NUM_CHANNELS_TO_DOWNMIX)
&& trackChannelCount
@@ -200,6 +221,24 @@
track->channelCount = trackChannelCount;
track->mMixerChannelMask = mixerChannelMask;
track->mMixerChannelCount = mixerChannelCount;
+ track->mHapticChannelMask = hapticChannelMask;
+ track->mHapticChannelCount = hapticChannelCount;
+ track->mMixerHapticChannelMask = mixerHapticChannelMask;
+ track->mMixerHapticChannelCount = mixerHapticChannelCount;
+
+ if (track->mHapticChannelCount > 0) {
+ track->mAdjustInChannelCount = track->channelCount + track->mHapticChannelCount;
+ track->mAdjustOutChannelCount = track->channelCount + track->mMixerHapticChannelCount;
+ track->mAdjustNonDestructiveInChannelCount = track->mAdjustOutChannelCount;
+ track->mAdjustNonDestructiveOutChannelCount = track->channelCount;
+ track->mKeepContractedChannels = track->mHapticPlaybackEnabled;
+ } else {
+ track->mAdjustInChannelCount = 0;
+ track->mAdjustOutChannelCount = 0;
+ track->mAdjustNonDestructiveInChannelCount = 0;
+ track->mAdjustNonDestructiveOutChannelCount = 0;
+ track->mKeepContractedChannels = false;
+ }
// channel masks have changed, does this track need a downmixer?
// update to try using our desired format (if we aren't already using it)
@@ -212,6 +251,9 @@
// do it after downmix since track format may change!
track->prepareForReformat();
+ track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+ track->prepareForAdjustChannels();
+
if (track->mResampler.get() != nullptr) {
// resampler channels may have changed.
const uint32_t resetToSampleRate = track->sampleRate;
@@ -335,10 +377,82 @@
return NO_ERROR;
}
+void AudioMixer::Track::unprepareForAdjustChannels()
+{
+ ALOGV("AUDIOMIXER::unprepareForAdjustChannels");
+ if (mAdjustChannelsBufferProvider.get() != nullptr) {
+ mAdjustChannelsBufferProvider.reset(nullptr);
+ reconfigureBufferProviders();
+ }
+}
+
+status_t AudioMixer::Track::prepareForAdjustChannels()
+{
+ ALOGV("AudioMixer::prepareForAdjustChannels(%p) with inChannelCount: %u, outChannelCount: %u",
+ this, mAdjustInChannelCount, mAdjustOutChannelCount);
+ unprepareForAdjustChannels();
+ if (mAdjustInChannelCount != mAdjustOutChannelCount) {
+ mAdjustChannelsBufferProvider.reset(new AdjustChannelsBufferProvider(
+ mFormat, mAdjustInChannelCount, mAdjustOutChannelCount, kCopyBufferFrameCount));
+ reconfigureBufferProviders();
+ }
+ return NO_ERROR;
+}
+
+void AudioMixer::Track::unprepareForAdjustChannelsNonDestructive()
+{
+ ALOGV("AUDIOMIXER::unprepareForAdjustChannelsNonDestructive");
+ if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ mAdjustChannelsNonDestructiveBufferProvider.reset(nullptr);
+ reconfigureBufferProviders();
+ }
+}
+
+status_t AudioMixer::Track::prepareForAdjustChannelsNonDestructive(size_t frames)
+{
+ ALOGV("AudioMixer::prepareForAdjustChannelsNonDestructive(%p) with inChannelCount: %u, "
+ "outChannelCount: %u, keepContractedChannels: %d",
+ this, mAdjustNonDestructiveInChannelCount, mAdjustNonDestructiveOutChannelCount,
+ mKeepContractedChannels);
+ unprepareForAdjustChannelsNonDestructive();
+ if (mAdjustNonDestructiveInChannelCount != mAdjustNonDestructiveOutChannelCount) {
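+ // When contracted (haptic) channels must be kept, they are written just past the audio
+ // portion of mainBuffer, i.e. after frames * mMixerChannelCount samples in the mixer
+ // output format; the data stays partially interleaved until it is fully interleaved
+ // before being written to the sink (see the FastMixer change below).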
+ uint8_t* buffer = mKeepContractedChannels
+ ? (uint8_t*)mainBuffer + frames * audio_bytes_per_frame(
+ mMixerChannelCount, mMixerFormat)
+ : NULL;
+ mAdjustChannelsNonDestructiveBufferProvider.reset(
+ new AdjustChannelsNonDestructiveBufferProvider(
+ mFormat,
+ mAdjustNonDestructiveInChannelCount,
+ mAdjustNonDestructiveOutChannelCount,
+ mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
+ frames,
+ buffer));
+ reconfigureBufferProviders();
+ }
+ return NO_ERROR;
+}
+
+void AudioMixer::Track::clearContractedBuffer()
+{
+ if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ static_cast<AdjustChannelsNonDestructiveBufferProvider*>(
+ mAdjustChannelsNonDestructiveBufferProvider.get())->clearContractedFrames();
+ }
+}
+
void AudioMixer::Track::reconfigureBufferProviders()
{
// configure from upstream to downstream buffer providers.
bufferProvider = mInputBufferProvider;
+ if (mAdjustChannelsBufferProvider.get() != nullptr) {
+ mAdjustChannelsBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = mAdjustChannelsBufferProvider.get();
+ }
+ if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ mAdjustChannelsNonDestructiveBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = mAdjustChannelsNonDestructiveBufferProvider.get();
+ }
if (mReformatBufferProvider.get() != nullptr) {
mReformatBufferProvider->setBufferProvider(bufferProvider);
bufferProvider = mReformatBufferProvider.get();
@@ -533,7 +647,8 @@
case CHANNEL_MASK: {
const audio_channel_mask_t trackChannelMask =
static_cast<audio_channel_mask_t>(valueInt);
- if (setChannelMasks(name, trackChannelMask, track->mMixerChannelMask)) {
+ if (setChannelMasks(name, trackChannelMask,
+ (track->mMixerChannelMask | track->mMixerHapticChannelMask))) {
ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask);
invalidate();
}
@@ -542,6 +657,9 @@
if (track->mainBuffer != valueBuf) {
track->mainBuffer = valueBuf;
ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
+ if (track->mKeepContractedChannels) {
+ track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+ }
invalidate();
}
break;
@@ -571,16 +689,29 @@
if (track->mMixerFormat != format) {
track->mMixerFormat = format;
ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
+ if (track->mKeepContractedChannels) {
+ track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+ }
}
} break;
case MIXER_CHANNEL_MASK: {
const audio_channel_mask_t mixerChannelMask =
static_cast<audio_channel_mask_t>(valueInt);
- if (setChannelMasks(name, track->channelMask, mixerChannelMask)) {
+ if (setChannelMasks(name, track->channelMask | track->mHapticChannelMask,
+ mixerChannelMask)) {
ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask);
invalidate();
}
} break;
+ case HAPTIC_ENABLED: {
+ const bool hapticPlaybackEnabled = static_cast<bool>(valueInt);
+ if (track->mHapticPlaybackEnabled != hapticPlaybackEnabled) {
+ track->mHapticPlaybackEnabled = hapticPlaybackEnabled;
+ track->mKeepContractedChannels = hapticPlaybackEnabled;
+ track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+ track->prepareForAdjustChannels();
+ }
+ } break;
default:
LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
}
@@ -823,6 +954,10 @@
track->mDownmixerBufferProvider->reset();
} else if (track->mReformatBufferProvider.get() != nullptr) {
track->mReformatBufferProvider->reset();
+ } else if (track->mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ track->mAdjustChannelsNonDestructiveBufferProvider->reset();
+ } else if (track->mAdjustChannelsBufferProvider.get() != nullptr) {
+ track->mAdjustChannelsBufferProvider->reset();
}
track->mInputBufferProvider = bufferProvider;
@@ -1266,8 +1401,8 @@
const std::shared_ptr<Track> &t = mTracks[group[0]];
memset(t->mainBuffer, 0,
- mFrameCount * t->mMixerChannelCount
- * audio_bytes_per_sample(t->mMixerFormat));
+ mFrameCount * audio_bytes_per_frame(
+ t->mMixerChannelCount + t->mMixerHapticChannelCount, t->mMixerFormat));
// now consume data
for (const int name : group) {
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
index e06a1aa..a1a1a0d 100644
--- a/media/libaudioprocessing/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -19,6 +19,7 @@
#include <audio_utils/primitives.h>
#include <audio_utils/format.h>
+#include <audio_utils/channels.h>
#include <external/sonic/sonic.h>
#include <media/audiohal/EffectBufferHalInterface.h>
#include <media/audiohal/EffectHalInterface.h>
@@ -630,5 +631,83 @@
}
}
}
+
+AdjustChannelsBufferProvider::AdjustChannelsBufferProvider(audio_format_t format,
+ size_t inChannelCount, size_t outChannelCount, size_t frameCount) :
+ CopyBufferProvider(
+ audio_bytes_per_frame(inChannelCount, format),
+ audio_bytes_per_frame(outChannelCount, format),
+ frameCount),
+ mFormat(format),
+ mInChannelCount(inChannelCount),
+ mOutChannelCount(outChannelCount),
+ mSampleSizeInBytes(audio_bytes_per_sample(format))
+{
+ ALOGV("AdjustBufferProvider(%p)(%#x, %zu, %zu, %zu)",
+ this, format, inChannelCount, outChannelCount, frameCount);
+}
+
+void AdjustChannelsBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+ adjust_channels(src, mInChannelCount, dst, mOutChannelCount, mSampleSizeInBytes,
+ frames * mInChannelCount * mSampleSizeInBytes);
+}
+
+AdjustChannelsNonDestructiveBufferProvider::AdjustChannelsNonDestructiveBufferProvider(
+ audio_format_t format, size_t inChannelCount, size_t outChannelCount,
+ audio_format_t contractedFormat, size_t contractedFrameCount, void* contractedBuffer) :
+ CopyBufferProvider(
+ audio_bytes_per_frame(inChannelCount, format),
+ audio_bytes_per_frame(outChannelCount, format),
+ 0 /*bufferFrameCount*/),
+ mFormat(format),
+ mInChannelCount(inChannelCount),
+ mOutChannelCount(outChannelCount),
+ mSampleSizeInBytes(audio_bytes_per_sample(format)),
+ mContractedChannelCount(inChannelCount - outChannelCount),
+ mContractedFormat(contractedFormat),
+ mContractedFrameCount(contractedFrameCount),
+ mContractedBuffer(contractedBuffer),
+ mContractedWrittenFrames(0)
+{
+ ALOGV("AdjustChannelsNonDestructiveBufferProvider(%p)(%#x, %zu, %zu, %#x, %p)",
+ this, format, inChannelCount, outChannelCount, contractedFormat, contractedBuffer);
+ if (mContractedFormat != AUDIO_FORMAT_INVALID && mInChannelCount > mOutChannelCount) {
+ mContractedFrameSize = audio_bytes_per_frame(mContractedChannelCount, mContractedFormat);
+ }
+}
+
+status_t AdjustChannelsNonDestructiveBufferProvider::getNextBuffer(
+ AudioBufferProvider::Buffer* pBuffer)
+{
+ const size_t outFramesLeft = mContractedFrameCount - mContractedWrittenFrames;
+ if (outFramesLeft < pBuffer->frameCount) {
+ // Restrict the frame count so that we don't write over the size of the output buffer.
+ pBuffer->frameCount = outFramesLeft;
+ }
+ return CopyBufferProvider::getNextBuffer(pBuffer);
+}
+
+void AdjustChannelsNonDestructiveBufferProvider::copyFrames(
+ void *dst, const void *src, size_t frames)
+{
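+ // Two steps: (1) contract/expand the frames via adjust_channels_non_destructive(), which
+ // keeps the contracted samples at the end of the output buffer; (2) if a contracted buffer
+ // was supplied, copy those samples out of that tail region, converting them to
+ // mContractedFormat, and account for the frames written.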
+ adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount, mSampleSizeInBytes,
+ frames * mInChannelCount * mSampleSizeInBytes);
+ if (mContractedFormat != AUDIO_FORMAT_INVALID && mContractedBuffer != NULL
+ && mInChannelCount > mOutChannelCount) {
+ const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
+ memcpy_by_audio_format(
+ (uint8_t*)mContractedBuffer + mContractedWrittenFrames * mContractedFrameSize,
+ mContractedFormat, (uint8_t*)dst + contractedIdx, mFormat,
+ mContractedChannelCount * frames);
+ mContractedWrittenFrames += frames;
+ }
+}
+
+void AdjustChannelsNonDestructiveBufferProvider::reset()
+{
+ mContractedWrittenFrames = 0;
+ CopyBufferProvider::reset();
+}
// ----------------------------------------------------------------------------
} // namespace android
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index 4984b18..cb8d375 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -56,6 +56,19 @@
setCaptureCallBack(NULL, NULL, 0, 0);
}
+void Visualizer::release()
+{
+ ALOGV("Visualizer::release()");
+ setEnabled(false);
+ Mutex::Autolock _l(mCaptureLock);
+
+ mCaptureThread.clear();
+ mCaptureCallBack = NULL;
+ mCaptureCbkUser = NULL;
+ mCaptureFlags = 0;
+ mCaptureRate = 0;
+}
+
status_t Visualizer::setEnabled(bool enabled)
{
Mutex::Autolock _l(mCaptureLock);
@@ -115,7 +128,7 @@
mCaptureRate = rate;
if (cbk != NULL) {
- mCaptureThread = new CaptureThread(*this, rate, ((flags & CAPTURE_CALL_JAVA) != 0));
+ mCaptureThread = new CaptureThread(this, rate, ((flags & CAPTURE_CALL_JAVA) != 0));
}
ALOGV("setCaptureCallBack() rate: %d thread %p flags 0x%08x",
rate, mCaptureThread.get(), mCaptureFlags);
@@ -402,7 +415,7 @@
//-------------------------------------------------------------------------
-Visualizer::CaptureThread::CaptureThread(Visualizer& receiver, uint32_t captureRate,
+Visualizer::CaptureThread::CaptureThread(Visualizer* receiver, uint32_t captureRate,
bool bCanCallJava)
: Thread(bCanCallJava), mReceiver(receiver)
{
@@ -413,10 +426,14 @@
bool Visualizer::CaptureThread::threadLoop()
{
ALOGV("CaptureThread %p enter", this);
+ sp<Visualizer> receiver = mReceiver.promote();
+ if (receiver == NULL) {
+ return false;
+ }
while (!exitPending())
{
usleep(mSleepTimeUs);
- mReceiver.periodicCapture();
+ receiver->periodicCapture();
}
ALOGV("CaptureThread %p exiting", this);
return false;
diff --git a/media/libmedia/include/media/BufferProviders.h b/media/libmedia/include/media/BufferProviders.h
index 2f1a91c..ea41527 100644
--- a/media/libmedia/include/media/BufferProviders.h
+++ b/media/libmedia/include/media/BufferProviders.h
@@ -218,6 +218,53 @@
bool mAudioPlaybackRateValid; // flag for current parameters validity
};
+// AdjustChannelsBufferProvider derives from CopyBufferProvider to adjust sample data.
+// Expands or contracts sample data from one interleaved channel format to another.
+// Expanded channels are filled with zeros and put at the end of each audio frame.
+// Contracted channels are omitted from the end of each audio frame.
+class AdjustChannelsBufferProvider : public CopyBufferProvider {
+public:
+ AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
+ size_t outChannelCount, size_t frameCount);
+ //Overrides
+ void copyFrames(void *dst, const void *src, size_t frames) override;
+
+protected:
+ const audio_format_t mFormat;
+ const size_t mInChannelCount;
+ const size_t mOutChannelCount;
+ const size_t mSampleSizeInBytes;
+};
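+// Example (sketch, with placeholder values): expanding 2-channel float frames to 3 channels,
+// zero-filling the extra channel:
+//   AdjustChannelsBufferProvider adjust(AUDIO_FORMAT_PCM_FLOAT, 2 /*in*/, 3 /*out*/, frameCount);
+//   adjust.setBufferProvider(upstream);  // chain behind the upstream AudioBufferProvider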
+
+// AdjustChannelsNonDestructiveBufferProvider derives from CopyBufferProvider to adjust sample data.
+// Expands or contracts sample data from one interleaved channel format to another.
+// Extra expanded channels are interleaved in from the end of the input buffer.
+// Contracted channels are kept at the end of the output buffer rather than discarded.
+// If a contracted buffer is provided, the contracted channels are also copied there,
+// converted to the contracted format.
+class AdjustChannelsNonDestructiveBufferProvider : public CopyBufferProvider {
+public:
+ AdjustChannelsNonDestructiveBufferProvider(audio_format_t format, size_t inChannelCount,
+ size_t outChannelCount, audio_format_t contractedFormat, size_t contractedFrameCount,
+ void* contractedBuffer);
+ //Overrides
+ status_t getNextBuffer(Buffer* pBuffer) override;
+ void copyFrames(void *dst, const void *src, size_t frames) override;
+ void reset() override;
+
+ void clearContractedFrames() { mContractedWrittenFrames = 0; }
+
+protected:
+ const audio_format_t mFormat;
+ const size_t mInChannelCount;
+ const size_t mOutChannelCount;
+ const size_t mSampleSizeInBytes;
+ const size_t mContractedChannelCount;
+ const audio_format_t mContractedFormat;
+ const size_t mContractedFrameCount;
+ void *mContractedBuffer;
+ size_t mContractedWrittenFrames;
+ size_t mContractedFrameSize;
+};
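+// In this change the mixer constructs one of these with contractedBuffer pointing just past the
+// audio portion of the track's main buffer, so contracted haptic samples land behind the mixed
+// audio (see AudioMixer::Track::prepareForAdjustChannelsNonDestructive()).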
// ----------------------------------------------------------------------------
} // namespace android
diff --git a/media/libmedia/include/media/Visualizer.h b/media/libmedia/include/media/Visualizer.h
index f8f4f50..8078e36 100644
--- a/media/libmedia/include/media/Visualizer.h
+++ b/media/libmedia/include/media/Visualizer.h
@@ -131,6 +131,7 @@
// getCaptureSize() but the length of the FFT is half of the size (both parts of the spectrum
// are returned)
status_t getFft(uint8_t *fft);
+ void release();
protected:
// from IEffectClient
@@ -146,12 +147,12 @@
class CaptureThread : public Thread
{
public:
- CaptureThread(Visualizer& receiver, uint32_t captureRate, bool bCanCallJava = false);
+ CaptureThread(Visualizer* visualizer, uint32_t captureRate, bool bCanCallJava = false);
private:
friend class Visualizer;
virtual bool threadLoop();
- Visualizer& mReceiver;
+ wp<Visualizer> mReceiver;
Mutex mLock;
uint32_t mSleepTimeUs;
};
diff --git a/media/libmediaextractor/MediaBuffer.cpp b/media/libmediaextractor/MediaBuffer.cpp
index d197b3f..26d0bd4 100644
--- a/media/libmediaextractor/MediaBuffer.cpp
+++ b/media/libmediaextractor/MediaBuffer.cpp
@@ -39,8 +39,7 @@
mRangeOffset(0),
mRangeLength(size),
mOwnsData(false),
- mMetaData(new MetaDataBase),
- mOriginal(NULL) {
+ mMetaData(new MetaDataBase) {
}
MediaBuffer::MediaBuffer(size_t size)
@@ -51,8 +50,7 @@
mRangeOffset(0),
mRangeLength(size),
mOwnsData(true),
- mMetaData(new MetaDataBase),
- mOriginal(NULL) {
+ mMetaData(new MetaDataBase) {
if (size < kSharedMemThreshold
|| std::atomic_load_explicit(&mUseSharedMemory, std::memory_order_seq_cst) == 0) {
mData = malloc(size);
@@ -84,8 +82,7 @@
mRangeLength(mSize),
mBuffer(buffer),
mOwnsData(false),
- mMetaData(new MetaDataBase),
- mOriginal(NULL) {
+ mMetaData(new MetaDataBase) {
}
void MediaBuffer::release() {
@@ -162,11 +159,6 @@
mData = NULL;
}
- if (mOriginal != NULL) {
- mOriginal->release();
- mOriginal = NULL;
- }
-
if (mMemory.get() != nullptr) {
getSharedControl()->setDeadObject();
}
@@ -178,15 +170,4 @@
mObserver = observer;
}
-MediaBufferBase *MediaBuffer::clone() {
- MediaBuffer *buffer = new MediaBuffer(mData, mSize);
- buffer->set_range(mRangeOffset, mRangeLength);
- buffer->mMetaData = new MetaDataBase(*mMetaData);
-
- add_ref();
- buffer->mOriginal = this;
-
- return buffer;
-}
-
} // namespace android
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
index 5a25965..5b362a4 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBuffer.h
@@ -80,11 +80,6 @@
virtual void setObserver(MediaBufferObserver *group);
- // Returns a clone of this MediaBuffer increasing its reference count.
- // The clone references the same data but has its own range and
- // MetaData.
- virtual MediaBufferBase *clone();
-
// sum of localRefcount() and remoteRefcount()
// Result should be treated as approximate unless the result precludes concurrent accesses.
virtual int refcount() const {
@@ -158,8 +153,6 @@
MetaDataBase* mMetaData;
- MediaBuffer *mOriginal;
-
static std::atomic_int_least32_t mUseSharedMemory;
MediaBuffer(const MediaBuffer &);
diff --git a/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h b/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
index d67ddbd..eb49f4c 100644
--- a/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
+++ b/media/libmediaextractor/include/media/stagefright/MediaBufferBase.h
@@ -70,11 +70,6 @@
virtual void setObserver(MediaBufferObserver *group) = 0;
- // Returns a clone of this MediaBufferBase increasing its reference
- // count. The clone references the same data but has its own range and
- // MetaData.
- virtual MediaBufferBase *clone() = 0;
-
virtual int refcount() const = 0;
virtual int localRefcount() const = 0;
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index 0a342b8..8f8c478 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -6,15 +6,16 @@
srcs: [
"IMediaAnalyticsService.cpp",
"MediaAnalyticsItem.cpp",
+ "MediaMetrics.cpp",
],
shared_libs: [
- "liblog",
- "libcutils",
- "libutils",
- "libbinder",
- "libstagefright_foundation",
"libbase",
+ "libbinder",
+ "libcutils",
+ "liblog",
+ "libstagefright_foundation",
+ "libutils",
],
export_include_dirs: ["include"],
@@ -32,4 +33,13 @@
],
cfi: true,
},
+
+ // enumerate the stable interface
+// This would mean nobody can use the C++ interface; some things have to be reworked first.
+// stubs: {
+// symbol_file: "libmediametrics.map.txt",
+// versions: [
+// "1" ,
+// ]
+// },
}
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index d3de01e..448e2d9 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -487,6 +487,18 @@
return true;
}
+bool MediaAnalyticsItem::getString(MediaAnalyticsItem::Attr name, std::string *value) {
+ Prop *prop = findProp(name);
+ if (prop == NULL || prop->mType != kTypeCString) {
+ return false;
+ }
+ if (value != NULL) {
+ // std::string makes a copy for us
+ *value = prop->u.CStringValue;
+ }
+ return true;
+}
+
// remove indicated keys and their values
// return value is # keys removed
int32_t MediaAnalyticsItem::filter(int n, MediaAnalyticsItem::Attr attrs[]) {
@@ -726,6 +738,15 @@
}
+const char *MediaAnalyticsItem::toCString() {
+ return toCString(PROTO_LAST);
+}
+
+const char * MediaAnalyticsItem::toCString(int version) {
+ std::string val = toString(version);
+ return strdup(val.c_str());
+}
+
std::string MediaAnalyticsItem::toString() {
return toString(PROTO_LAST);
}
diff --git a/media/libmediametrics/MediaMetrics.cpp b/media/libmediametrics/MediaMetrics.cpp
new file mode 100644
index 0000000..9b08aa7
--- /dev/null
+++ b/media/libmediametrics/MediaMetrics.cpp
@@ -0,0 +1,204 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "MediaMetrics"
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#include <media/MediaAnalyticsItem.h>
+#include <media/MediaMetrics.h>
+
+//
+// provide a C-ish interface that is easier to stabilize than the existing C++
+// interface
+//
+// ALL functions returning a char * give responsibility for the allocated buffer
+// to the caller. The caller is responsible for calling free() on that pointer.
+//
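+//
+// Example usage (sketch; the key and attribute names below are illustrative only):
+//
+//   mediametrics_handle_t h = mediametrics_create("samplekey");
+//   mediametrics_setInt32(h, "channels", 2);
+//   mediametrics_setCString(h, "encoding", "pcm-16");
+//   (void) mediametrics_selfRecord(h);
+//   char *value = NULL;
+//   if (mediametrics_getCString(h, "encoding", &value)) {
+//       // ... use value ...
+//       mediametrics_freeCString(value);
+//   }
+//   mediametrics_delete(h);
+//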
+
+// manage the overall record
+mediametrics_handle_t mediametrics_create(mediametricskey_t key) {
+ android::MediaAnalyticsItem *item = new android::MediaAnalyticsItem(key);
+ return (mediametrics_handle_t) item;
+}
+
+void mediametrics_delete(mediametrics_handle_t handle) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item == NULL) return;
+ delete item;
+}
+
+mediametricskey_t mediametrics_getKey(mediametrics_handle_t handle) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item == NULL) return NULL;
+ return strdup(item->getKey().c_str());
+}
+
+// NuPlayer et al. use this when acting as proxies
+void mediametrics_setUid(mediametrics_handle_t handle, uid_t uid) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item != NULL) item->setUid(uid);
+}
+
+// set attributes
+//
+
+void mediametrics_setInt32(mediametrics_handle_t handle, attr_t attr,
+ int32_t value) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item != NULL) item->setInt32(attr, value);
+}
+
+void mediametrics_setInt64(mediametrics_handle_t handle, attr_t attr,
+ int64_t value) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item != NULL) item->setInt64(attr, value);
+}
+
+void mediametrics_setDouble(mediametrics_handle_t handle, attr_t attr,
+ double value) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item != NULL) item->setDouble(attr, value);
+}
+
+void mediametrics_setRate(mediametrics_handle_t handle, attr_t attr,
+ int64_t count, int64_t duration) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item != NULL) item->setRate(attr, count, duration);
+}
+
+void mediametrics_setCString(mediametrics_handle_t handle, attr_t attr,
+ const char *value) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item != NULL) item->setCString(attr, value);
+}
+
+// fused get/add/set; if attr wasn't there, it's a simple set.
+//
+
+void mediametrics_addInt32(mediametrics_handle_t handle, attr_t attr,
+ int32_t value) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item != NULL) item->addInt32(attr, value);
+}
+
+void mediametrics_addInt64(mediametrics_handle_t handle, attr_t attr,
+ int64_t value) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item != NULL) item->addInt64(attr, value);
+}
+
+void mediametrics_addDouble(mediametrics_handle_t handle, attr_t attr,
+ double value) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item != NULL) item->addDouble(attr, value);
+}
+
+void mediametrics_addRate(mediametrics_handle_t handle, attr_t attr,
+ int64_t count, int64_t duration) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item != NULL) item->addRate(attr, count, duration);
+}
+
+// find & extract values
+// return indicates whether attr exists (and thus whether value filled in)
+// NULL parameter value suppresses storage of value.
+//
+
+bool mediametrics_getInt32(mediametrics_handle_t handle, attr_t attr,
+ int32_t * value) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item == NULL) return false;
+ return item->getInt32(attr, value);
+}
+
+bool mediametrics_getInt64(mediametrics_handle_t handle, attr_t attr,
+ int64_t * value) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item == NULL) return false;
+ return item->getInt64(attr, value);
+}
+
+bool mediametrics_getDouble(mediametrics_handle_t handle, attr_t attr,
+ double *value) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item == NULL) return false;
+ return item->getDouble(attr, value);
+}
+
+bool mediametrics_getRate(mediametrics_handle_t handle, attr_t attr,
+ int64_t * count, int64_t * duration, double *rate) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item == NULL) return false;
+ return item->getRate(attr, count, duration, rate);
+}
+
+// NB: the caller owns the string that comes back and is responsible for freeing it
+bool mediametrics_getCString(mediametrics_handle_t handle, attr_t attr,
+ char **value) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item == NULL) return false;
+
+ return item->getCString(attr, value);
+}
+
+// to release strings returned via getCString()
+void mediametrics_freeCString(char *value) {
+ free(value);
+}
+
+bool mediametrics_selfRecord(mediametrics_handle_t handle) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item == NULL) return false;
+ return item->selfrecord();
+}
+
+
+const char *mediametrics_readable(mediametrics_handle_t handle) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item == NULL) return "";
+ return item->toCString();
+}
+
+int32_t mediametrics_count(mediametrics_handle_t handle) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item == NULL) return 0;
+ return item->count();
+}
+
+bool mediametrics_isEnabled() {
+ // static, so doesn't need an instance
+ return android::MediaAnalyticsItem::isEnabled();
+}
+
+#if 0
+// do not expose this as is.
+// need to revisit (or redefine) how the android::Parcel parameter is handled
+// so that it meets the stable-API criteria for updateable components.
+//
+int32_t mediametrics_writeToParcel(mediametrics_handle_t handle, android::Parcel *parcel) {
+ android::MediaAnalyticsItem *item = (android::MediaAnalyticsItem *) handle;
+ if (item == NULL) {
+ return -1;
+ }
+ return item->writeToParcel(parcel);
+}
+#endif
+
+
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
index 263cde7..b99cd91 100644
--- a/media/libmediametrics/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -134,6 +134,7 @@
bool getRate(Attr, int64_t *count, int64_t *duration, double *rate);
// Caller owns the returned string
bool getCString(Attr, char **value);
+ bool getString(Attr, std::string *value);
// parameter indicates whether to close any existing open
// record with same key before establishing a new record
@@ -176,6 +177,8 @@
std::string toString();
std::string toString(int version);
+ const char *toCString();
+ const char *toCString(int version);
// are we collecting analytics data
static bool isEnabled();
diff --git a/media/libmediametrics/include/MediaMetrics.h b/media/libmediametrics/include/MediaMetrics.h
new file mode 100644
index 0000000..4d2f352
--- /dev/null
+++ b/media/libmediametrics/include/MediaMetrics.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_MEDIAMETRICS_H
+#define ANDROID_MEDIA_MEDIAMETRICS_H
+
+//
+// define a C interface to the media metrics functionality
+//
+// All functions that return a char * or const char * also give responsibility
+// for that string to the caller. The caller is responsible for calling free()
+// on that pointer when done using the value.
+
+__BEGIN_DECLS
+
+// internally re-cast to the behind-the-scenes C++ class instance
+typedef int64_t mediametrics_handle_t;
+typedef const char *mediametricskey_t;
+typedef const char *attr_t;
+
+mediametrics_handle_t mediametrics_create(mediametricskey_t key);
+void mediametrics_delete(mediametrics_handle_t handle);
+
+mediametricskey_t mediametrics_getKey(mediametrics_handle_t handle);
+
+
+// set
+void mediametrics_setInt32(mediametrics_handle_t handle, attr_t attr,
+ int32_t value);
+void mediametrics_setInt64(mediametrics_handle_t handle, attr_t attr,
+ int64_t value);
+void mediametrics_setDouble(mediametrics_handle_t handle, attr_t attr,
+ double value);
+void mediametrics_setRate(mediametrics_handle_t handle, attr_t attr,
+ int64_t count, int64_t duration);
+void mediametrics_setCString(mediametrics_handle_t handle, attr_t attr,
+ const char * value);
+
+// fused get/add/set; if attr wasn't there, it's a simple set.
+// these do not provide atomicity or mutual exclusion, only simpler code sequences.
+void mediametrics_addInt32(mediametrics_handle_t handle, attr_t attr,
+ int32_t value);
+void mediametrics_addInt64(mediametrics_handle_t handle, attr_t attr,
+ int64_t value);
+void mediametrics_addDouble(mediametrics_handle_t handle, attr_t attr,
+ double value);
+void mediametrics_addRate(mediametrics_handle_t handle, attr_t attr,
+ int64_t count, int64_t duration);
+
+// find & extract values
+// return indicates whether attr exists (and thus whether value filled in)
+// NULL parameter value suppresses storage of value.
+bool mediametrics_getInt32(mediametrics_handle_t handle, attr_t attr,
+ int32_t * value);
+bool mediametrics_getInt64(mediametrics_handle_t handle, attr_t attr,
+ int64_t * value);
+bool mediametrics_getDouble(mediametrics_handle_t handle, attr_t attr,
+ double *value);
+bool mediametrics_getRate(mediametrics_handle_t handle, attr_t attr,
+ int64_t * count, int64_t * duration, double *rate);
+bool mediametrics_getCString(mediametrics_handle_t handle, attr_t attr,
+ char **value);
+// to release strings returned via getCString()
+void mediametrics_freeCString(char *value);
+
+// # of attributes set within this record.
+int32_t mediametrics_count(mediametrics_handle_t handle);
+
+bool mediametrics_selfRecord(mediametrics_handle_t handle);
+
+const char *mediametrics_readable(mediametrics_handle_t handle);
+void mediametrics_setUid(mediametrics_handle_t handle, uid_t uid);
+bool mediametrics_isEnabled();
+
+#if 0
+// do not expose this as is.
+// need to revisit (or redefine) how the android::Parcel parameter is handled
+// so that it meets the stable-API criteria for updateable components.
+//
+int32_t mediametrics_writeToParcel(mediametrics_handle_t handle, android::Parcel *parcel);
+#endif
+
+__END_DECLS
+
+#endif
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index cf93fcf..590131e 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -121,6 +121,8 @@
ATSParser *mParser;
unsigned mProgramNumber;
unsigned mProgramMapPID;
+ uint32_t mPMTVersion;
+ uint32_t mPMT_CRC;
KeyedVector<unsigned, sp<Stream> > mStreams;
bool mFirstPTSValid;
uint64_t mFirstPTS;
@@ -143,6 +145,9 @@
unsigned typeExt() const { return mStreamTypeExt; }
unsigned pid() const { return mElementaryPID; }
void setPID(unsigned pid) { mElementaryPID = pid; }
+ void setAudioPresentations(AudioPresentationCollection audioPresentations) {
+ mAudioPresentations = audioPresentations;
+ }
void setCasInfo(
int32_t systemId,
@@ -293,6 +298,8 @@
: mParser(parser),
mProgramNumber(programNumber),
mProgramMapPID(programMapPID),
+ mPMTVersion(0xffffffff),
+ mPMT_CRC(0xffffffff),
mFirstPTSValid(false),
mFirstPTS(0),
mLastRecoveredPTS(lastRecoveredPTS) {
@@ -480,7 +487,13 @@
MY_LOGV(" program_number = %u", br->getBits(16));
MY_LOGV(" reserved = %u", br->getBits(2));
- MY_LOGV(" version_number = %u", br->getBits(5));
+ bool audioPresentationsChanged = false;
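+ // A changed version_number (or, further down, a changed section CRC) means the PMT was
+ // updated, so the audio presentation info is re-propagated to the affected audio streams.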
+ unsigned pmtVersion = br->getBits(5);
+ if (pmtVersion != mPMTVersion) {
+ audioPresentationsChanged = true;
+ mPMTVersion = pmtVersion;
+ }
+ MY_LOGV(" version_number = %u", pmtVersion);
MY_LOGV(" current_next_indicator = %u", br->getBits(1));
MY_LOGV(" section_number = %u", br->getBits(8));
MY_LOGV(" last_section_number = %u", br->getBits(8));
@@ -661,7 +674,12 @@
if (infoBytesRemaining != 0) {
ALOGW("Section data remains unconsumed");
}
- MY_LOGV(" CRC = 0x%08x", br->getBits(32));
+ unsigned crc = br->getBits(32);
+ if (crc != mPMT_CRC) {
+ audioPresentationsChanged = true;
+ mPMT_CRC = crc;
+ }
+ MY_LOGV(" CRC = 0x%08x", crc);
bool PIDsChanged = false;
for (size_t i = 0; i < infos.size(); ++i) {
@@ -722,6 +740,10 @@
isAddingScrambledStream |= info.mCADescriptor.mSystemID >= 0;
mStreams.add(info.mPID, stream);
}
+ else if (index >= 0 && mStreams.editValueAt(index)->isAudio()
+ && audioPresentationsChanged) {
+ mStreams.editValueAt(index)->setAudioPresentations(info.mAudioPresentations);
+ }
}
if (isAddingScrambledStream) {
@@ -1732,6 +1754,7 @@
mSource->setFormat(mQueue->getFormat());
}
mSource->queueAccessUnit(accessUnit);
+ mSource->convertAudioPresentationInfoToMetadata(mAudioPresentations);
}
// Every access unit has a pesStartOffset queued in |mPesStartOffsets|.
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index d15841f..f328577 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -37,8 +37,9 @@
#include <cpustats/ThreadCpuUsage.h>
#endif
#endif
-#include <audio_utils/mono_blend.h>
+#include <audio_utils/channels.h>
#include <audio_utils/format.h>
+#include <audio_utils/mono_blend.h>
#include <media/AudioMixer.h>
#include "FastMixer.h"
#include "TypedLogger.h"
@@ -159,20 +160,24 @@
if (current->mOutputSinkGen != mOutputSinkGen) {
mOutputSink = current->mOutputSink;
mOutputSinkGen = current->mOutputSinkGen;
+ mSinkChannelMask = current->mSinkChannelMask;
if (mOutputSink == NULL) {
mFormat = Format_Invalid;
mSampleRate = 0;
mSinkChannelCount = 0;
mSinkChannelMask = AUDIO_CHANNEL_NONE;
+ mAudioChannelCount = 0;
} else {
mFormat = mOutputSink->format();
mSampleRate = Format_sampleRate(mFormat);
mSinkChannelCount = Format_channelCount(mFormat);
LOG_ALWAYS_FATAL_IF(mSinkChannelCount > AudioMixer::MAX_NUM_CHANNELS);
- // TODO: Add channel mask to NBAIO_Format
- // We assume that the channel mask must be a valid positional channel mask.
- mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
+ if (mSinkChannelMask == AUDIO_CHANNEL_NONE) {
+ mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
+ }
+ mAudioChannelCount = mSinkChannelCount - audio_channel_count_from_out_mask(
+ mSinkChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
}
dumpState->mSampleRate = mSampleRate;
}
@@ -288,6 +293,8 @@
(void *)(uintptr_t)fastTrack->mChannelMask);
mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
(void *)(uintptr_t)mSinkChannelMask);
+ mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::HAPTIC_ENABLED,
+ (void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
mMixer->enable(name);
}
mGenerations[i] = fastTrack->mGeneration;
@@ -324,6 +331,8 @@
(void *)(uintptr_t)fastTrack->mChannelMask);
mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
(void *)(uintptr_t)mSinkChannelMask);
+ mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::HAPTIC_ENABLED,
+ (void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
// already enabled
}
mGenerations[i] = fastTrack->mGeneration;
@@ -468,6 +477,13 @@
memcpy_by_audio_format(buffer, mFormat.mFormat, mMixerBuffer, mMixerBufferFormat,
frameCount * Format_channelCount(mFormat));
}
+ if (mSinkChannelMask & AUDIO_CHANNEL_HAPTIC_ALL) {
+ // When there are haptic channels, the sample data is partially interleaved.
+ // Make the sample data fully interleaved here.
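+ // At this point the buffer holds frameCount * mAudioChannelCount interleaved audio samples
+ // with the haptic samples appended after them; adjust_channels_non_destructive() folds the
+ // haptic samples back into each frame for the sink.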
+ adjust_channels_non_destructive(buffer, mAudioChannelCount, buffer, mSinkChannelCount,
+ audio_bytes_per_sample(mFormat.mFormat),
+ frameCount * audio_bytes_per_frame(mAudioChannelCount, mFormat.mFormat));
+ }
// if non-NULL, then duplicate write() to this non-blocking sink
#ifdef TEE_SINK
mTee.write(buffer, frameCount);
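
adjust_channels_non_destructive() is used here to turn the mixer output, where the haptic samples are assumed to live in a contiguous region after the interleaved audio frames, into fully interleaved (audio + haptic) frames for the sink. A standalone illustration of that layout change, written with plain loops rather than the audio_utils implementation:

    #include <cstdio>
    #include <vector>

    int main() {
        const size_t frames = 4, audioCh = 2, hapticCh = 1, sinkCh = audioCh + hapticCh;
        // Partially interleaved: L0 R0 L1 R1 L2 R2 L3 R3 | H0 H1 H2 H3
        std::vector<float> in  = { 0, 1, 10, 11, 20, 21, 30, 31,  0.5f, 1.5f, 2.5f, 3.5f };
        std::vector<float> out(frames * sinkCh);
        const float* haptic = in.data() + frames * audioCh;
        for (size_t f = 0; f < frames; ++f) {
            for (size_t c = 0; c < audioCh; ++c) {
                out[f * sinkCh + c] = in[f * audioCh + c];   // copy the audio frame
            }
            out[f * sinkCh + audioCh] = haptic[f];           // append the haptic sample
        }
        for (float s : out) printf("%g ", s);                // L0 R0 H0 L1 R1 H1 ...
        printf("\n");
        return 0;
    }
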
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 1c86d9a..1d332e0 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -76,6 +76,8 @@
size_t mMixerBufferSize;
audio_format_t mMixerBufferFormat; // mixer output format: AUDIO_FORMAT_PCM_(16_BIT|FLOAT).
+ uint32_t mAudioChannelCount; // audio channel count, excludes haptic channels.
+
enum {UNDEFINED, MIXED, ZEROED} mMixerBufferState;
NBAIO_Format mFormat;
unsigned mSampleRate;
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index c7fcbd8..9d2a733 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -47,6 +47,7 @@
audio_channel_mask_t mChannelMask; // AUDIO_CHANNEL_OUT_MONO or AUDIO_CHANNEL_OUT_STEREO
audio_format_t mFormat; // track format
int mGeneration; // increment when any field is assigned
+ bool mHapticPlaybackEnabled = false; // whether haptic playback is enabled
};
// Represents a single state of the fast mixer
@@ -69,6 +70,9 @@
NBAIO_Sink* mOutputSink; // HAL output device, must already be negotiated
int mOutputSinkGen; // increment when mOutputSink is assigned
size_t mFrameCount; // number of frames per fast mix buffer
+ audio_channel_mask_t mSinkChannelMask; // If not AUDIO_CHANNEL_NONE, specifies sink channel
+ // mask when it cannot be directly calculated from
+ // channel count
// Extends FastThreadState::Command
static const Command
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 971f6a5..d9f570d 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -112,6 +112,14 @@
/** Copy the track metadata in the provided iterator. Thread safe. */
virtual void copyMetadataTo(MetadataInserter& backInserter) const;
+ /** Return whether haptic playback of the track is enabled. Used by the mixer. */
+ bool getHapticPlaybackEnabled() const { return mHapticPlaybackEnabled; }
+ /** Set whether haptic playback of the track is enabled. Should be set after
+ * querying, or receiving a callback from, the vibrator service. */
+ void setHapticPlaybackEnabled(bool hapticPlaybackEnabled) {
+ mHapticPlaybackEnabled = hapticPlaybackEnabled;
+ }
+
protected:
// for numerous
friend class PlaybackThread;
@@ -188,6 +196,8 @@
sp<media::VolumeHandler> mVolumeHandler; // handles multiple VolumeShaper configs and operations
+ bool mHapticPlaybackEnabled = false; // indicates whether haptic playback is enabled
+
private:
// The following fields are only for fast tracks, and should be in a subclass
int mFastIndex; // index within FastMixerState::mFastTracks[];
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 3dae1e9..c6941c0 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -38,6 +38,7 @@
#include <private/media/AudioTrackShared.h>
#include <private/android_filesystem_config.h>
+#include <audio_utils/channels.h>
#include <audio_utils/mono_blend.h>
#include <audio_utils/primitives.h>
#include <audio_utils/format.h>
@@ -751,6 +752,7 @@
audio_channel_mask_get_representation(mask);
switch (representation) {
+ // Walk all single-bit channels in the mask to convert the channel mask to a string.
case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
if (output) {
if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
@@ -773,6 +775,8 @@
if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT) s.append("top-side-left, " );
if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) s.append("top-side-right, " );
+ if (mask & AUDIO_CHANNEL_OUT_HAPTIC_B) s.append("haptic-B, " );
+ if (mask & AUDIO_CHANNEL_OUT_HAPTIC_A) s.append("haptic-A, " );
if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, ");
} else {
if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
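
channelMaskToString() walks every single-bit channel that is set and appends its name, which is why the two haptic bits get their own entries above. A minimal sketch of that pattern, with placeholder bit values rather than the real AUDIO_CHANNEL_OUT_* constants:

    #include <cstdint>
    #include <cstdio>
    #include <string>

    static std::string maskToString(uint32_t mask) {
        static const struct { uint32_t bit; const char* name; } kBits[] = {
            { 1u << 0,  "front-left"  },
            { 1u << 1,  "front-right" },
            { 1u << 29, "haptic-B"    },   // placeholder bit positions
            { 1u << 30, "haptic-A"    },
        };
        std::string s;
        for (const auto& b : kBits) {
            if (mask & b.bit) { s += b.name; s += ", "; }
        }
        if (s.size() >= 2) s.resize(s.size() - 2);  // drop the trailing ", "
        return s;
    }

    int main() {
        printf("%s\n", maskToString((1u << 0) | (1u << 1) | (1u << 30)).c_str());
        // prints: front-left, front-right, haptic-A
        return 0;
    }
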
@@ -1845,6 +1849,10 @@
dumpBase(fd, args);
dprintf(fd, " Master mute: %s\n", mMasterMute ? "on" : "off");
+ if (mHapticChannelMask != AUDIO_CHANNEL_NONE) {
+ dprintf(fd, " Haptic channel mask: %#x (%s)\n", mHapticChannelMask,
+ channelMaskToString(mHapticChannelMask, true /* output */).c_str());
+ }
dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount);
dprintf(fd, " Last write occurred (msecs): %llu\n",
(unsigned long long) ns2ms(systemTime() - mLastWriteTime));
@@ -1946,7 +1954,7 @@
audio_is_linear_pcm(format) &&
// TODO: extract as a data library function that checks that a computationally
// expensive downmixer is not required: isFastOutputChannelConversion()
- (channelMask == mChannelMask ||
+ (channelMask == (mChannelMask | mHapticChannelMask) ||
mChannelMask != AUDIO_CHANNEL_OUT_STEREO ||
(channelMask == AUDIO_CHANNEL_OUT_MONO
/* && mChannelMask == AUDIO_CHANNEL_OUT_STEREO */)) &&
@@ -2348,6 +2356,17 @@
track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
}
+ // When haptic playback is supported and the track contains haptic channels, disable
+ // haptic playback for all other active tracks and enable it for the current track.
+ // TODO: Request actual haptic playback status from vibrator service
+ if ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
+ && mHapticChannelMask != AUDIO_CHANNEL_NONE) {
+ for (auto &t : mActiveTracks) {
+ t->setHapticPlaybackEnabled(false);
+ }
+ track->setHapticPlaybackEnabled(true);
+ }
+
track->mResetDone = false;
track->mPresentationCompleteFrames = 0;
mActiveTracks.add(track);
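
The start path above enforces a single haptic owner: when a track with haptic channels starts on a thread that supports haptic playback, every other active track has haptic playback disabled and the new track takes over. A sketch of that policy with a hypothetical Track type (the real code operates on mActiveTracks and sp<Track>):

    #include <vector>

    struct Track {
        bool hasHapticChannels = false;
        bool hapticEnabled = false;
    };

    // Called when 'started' becomes active; 'active' holds the currently active tracks.
    static void onTrackStarted(std::vector<Track*>& active, Track* started,
                               bool threadSupportsHaptic) {
        if (threadSupportsHaptic && started->hasHapticChannels) {
            for (Track* t : active) {
                t->hapticEnabled = false;     // only one track may drive the vibrator
            }
            started->hapticEnabled = true;
        }
        active.push_back(started);
    }

    int main() {
        Track a; a.hasHapticChannels = true; a.hapticEnabled = true;
        Track b; b.hasHapticChannels = true;
        std::vector<Track*> active{&a};
        onTrackStarted(active, &b, /*threadSupportsHaptic=*/true);
        return (b.hapticEnabled && !a.hapticEnabled) ? 0 : 1;  // b now owns haptic playback
    }
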
@@ -2635,6 +2654,11 @@
(void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize);
}
+ mHapticChannelMask = mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL;
+ mChannelMask &= ~mHapticChannelMask;
+ mHapticChannelCount = audio_channel_count_from_out_mask(mHapticChannelMask);
+ mChannelCount -= mHapticChannelCount;
+
// force reconfiguration of effect chains and engines to take new buffer size and audio
// parameters into account
// Note that mLock is not held when readOutputParameters_l() is called from the constructor
@@ -3007,7 +3031,7 @@
// Only one effect chain can be present in direct output thread and it uses
// the sink buffer as input
if (mType != DIRECT) {
- size_t numSamples = mNormalFrameCount * mChannelCount;
+ size_t numSamples = mNormalFrameCount * (mChannelCount + mHapticChannelCount);
status_t result = mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
numSamples * sizeof(effect_buffer_t),
&halInBuffer);
@@ -3506,7 +3530,17 @@
}
memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
- mNormalFrameCount * mChannelCount);
+ mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+
+ // When writing directly to the sink and haptic channels are present, the sample
+ // data is only partially interleaved, so adjust the channels here.
+ if (!mEffectBufferValid && mHapticChannelCount > 0) {
+ adjust_channels_non_destructive(buffer, mChannelCount, buffer,
+ mChannelCount + mHapticChannelCount,
+ audio_bytes_per_sample(format),
+ audio_bytes_per_frame(mChannelCount, format) * mNormalFrameCount);
+ }
}
mBytesRemaining = mCurrentWriteLength;
@@ -3550,7 +3584,15 @@
}
memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat,
- mNormalFrameCount * mChannelCount);
+ mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+ // The sample data is partially interleaved when haptic channels exist,
+ // so adjust the channels here.
+ if (mHapticChannelCount > 0) {
+ adjust_channels_non_destructive(mSinkBuffer, mChannelCount, mSinkBuffer,
+ mChannelCount + mHapticChannelCount,
+ audio_bytes_per_sample(mFormat),
+ audio_bytes_per_frame(mChannelCount, mFormat) * mNormalFrameCount);
+ }
}
// enable changes in effect chain
@@ -3716,6 +3758,7 @@
// removeTracks_l() must be called with ThreadBase::mLock held
void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove)
{
+ bool enabledHapticTracksRemoved = false;
for (const auto& track : tracksToRemove) {
mActiveTracks.remove(track);
ALOGV("%s(%d): removing track on session %d", __func__, track->id(), track->sessionId());
@@ -3737,6 +3780,18 @@
// remove from our tracks vector
removeTrack_l(track);
}
+ enabledHapticTracksRemoved |= track->getHapticPlaybackEnabled();
+ }
+ // If the thread supports haptic playback and the track playing haptic data was removed,
+ // enable haptic playback on the first active track that contains haptic channels.
+ // TODO: Query vibrator service to know which track should enable haptic playback.
+ if (enabledHapticTracksRemoved && mHapticChannelMask != AUDIO_CHANNEL_NONE) {
+ for (auto &t : mActiveTracks) {
+ if (t->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) {
+ t->setHapticPlaybackEnabled(true);
+ break;
+ }
+ }
}
}
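
removeTracks_l() now remembers whether the removed tracks included the one driving haptics and, if so, hands haptic playback to the first remaining active track that has haptic channels (a provisional policy until the vibrator service can be queried, per the TODO). A compact sketch of that hand-off, again with a hypothetical Track type:

    #include <vector>

    struct Track {
        bool hasHapticChannels = false;
        bool hapticEnabled = false;
    };

    static void onTracksRemoved(std::vector<Track*>& active, bool removedEnabledHapticTrack,
                                bool threadSupportsHaptic) {
        if (!removedEnabledHapticTrack || !threadSupportsHaptic) return;
        for (Track* t : active) {
            if (t->hasHapticChannels) {       // first haptic-capable track takes over
                t->hapticEnabled = true;
                break;
            }
        }
    }

    int main() {
        Track a; a.hasHapticChannels = true;
        std::vector<Track*> active{&a};
        onTracksRemoved(active, /*removedEnabledHapticTrack=*/true, /*threadSupportsHaptic=*/true);
        return a.hapticEnabled ? 0 : 1;        // a inherits haptic playback
    }
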
@@ -3942,7 +3997,8 @@
// create an NBAIO sink for the HAL output stream, and negotiate
mOutputSink = new AudioStreamOutSink(output->stream);
size_t numCounterOffers = 0;
- const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)};
+ const NBAIO_Format offers[1] = {Format_from_SR_C(
+ mSampleRate, mChannelCount + mHapticChannelCount, mFormat)};
#if !LOG_NDEBUG
ssize_t index =
#else
@@ -3984,7 +4040,7 @@
// change our Sink format to accept our intermediate precision
mFormat = fastMixerFormat;
free(mSinkBuffer);
- mFrameSize = mChannelCount * audio_bytes_per_sample(mFormat);
+ mFrameSize = audio_bytes_per_frame(mChannelCount + mHapticChannelCount, mFormat);
const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
(void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize);
}
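
With haptic channels present, the sink frame size and buffer size are computed from the combined audio + haptic channel count. A short worked example of that arithmetic, assuming float samples, stereo audio, and one haptic channel (all values illustrative):

    #include <cassert>
    #include <cstddef>

    int main() {
        const size_t bytesPerSample   = sizeof(float); // e.g. AUDIO_FORMAT_PCM_FLOAT
        const size_t audioChannels    = 2;             // mChannelCount after the haptic split
        const size_t hapticChannels   = 1;             // mHapticChannelCount
        const size_t normalFrameCount = 960;           // e.g. 20 ms at 48 kHz

        // The sink frame covers audio and haptic channels together.
        const size_t frameSize      = (audioChannels + hapticChannels) * bytesPerSample; // 12
        const size_t sinkBufferSize = normalFrameCount * frameSize;                      // 11520
        assert(frameSize == 12 && sinkBufferSize == 11520);
        return 0;
    }
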
@@ -4026,8 +4082,10 @@
// wrap the source side of the MonoPipe to make it an AudioBufferProvider
fastTrack->mBufferProvider = new SourceAudioBufferProvider(new MonoPipeReader(monoPipe));
fastTrack->mVolumeProvider = NULL;
- fastTrack->mChannelMask = mChannelMask; // mPipeSink channel mask for audio to FastMixer
+ fastTrack->mChannelMask = mChannelMask | mHapticChannelMask; // mPipeSink channel mask for
+ // audio to FastMixer
fastTrack->mFormat = mFormat; // mPipeSink format for audio to FastMixer
+ fastTrack->mHapticPlaybackEnabled = mHapticChannelMask != AUDIO_CHANNEL_NONE;
fastTrack->mGeneration++;
state->mFastTracksGen++;
state->mTrackMask = 1;
@@ -4035,6 +4093,10 @@
state->mOutputSink = mOutputSink.get();
state->mOutputSinkGen++;
state->mFrameCount = mFrameCount;
+ // Specify the sink channel mask when a haptic channel mask is present, as it
+ // cannot be calculated directly from the channel count.
+ state->mSinkChannelMask = mHapticChannelMask == AUDIO_CHANNEL_NONE
+ ? AUDIO_CHANNEL_NONE : mChannelMask | mHapticChannelMask;
state->mCommand = FastMixerState::COLD_IDLE;
// already done in constructor initialization list
//mFastMixerFutex = 0;
@@ -4411,6 +4473,7 @@
std::vector<std::pair<sp<Track>, size_t>> mUnderrunFrames;
} deferredOperations(&mixerStatus); // implicit nested scope for variable capture
+ bool noFastHapticTrack = true;
for (size_t i=0 ; i<count ; i++) {
const sp<Track> t = mActiveTracks[i];
@@ -4419,6 +4482,9 @@
// process fast tracks
if (track->isFastTrack()) {
+ if (track->getHapticPlaybackEnabled()) {
+ noFastHapticTrack = false;
+ }
// It's theoretically possible (though unlikely) for a fast track to be created
// and then removed within the same normal mix cycle. This is not a problem, as
@@ -4544,6 +4610,7 @@
fastTrack->mVolumeProvider = vp;
fastTrack->mChannelMask = track->mChannelMask;
fastTrack->mFormat = track->mFormat;
+ fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
fastTrack->mGeneration++;
state->mTrackMask |= 1 << j;
didModify = true;
@@ -4589,6 +4656,10 @@
// Avoids a misleading display in dumpsys
track->mObservedUnderruns.mBitFields.mMostRecent = UNDERRUN_FULL;
}
+ if (fastTrack->mHapticPlaybackEnabled != track->getHapticPlaybackEnabled()) {
+ fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
+ didModify = true;
+ }
continue;
}
@@ -4796,7 +4867,8 @@
mAudioMixer->setParameter(
trackId,
AudioMixer::TRACK,
- AudioMixer::MIXER_CHANNEL_MASK, (void *)(uintptr_t)mChannelMask);
+ AudioMixer::MIXER_CHANNEL_MASK,
+ (void *)(uintptr_t)(mChannelMask | mHapticChannelMask));
// limit track sample rate to 2 x output sample rate, which changes at re-configuration
uint32_t maxSampleRate = mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX;
uint32_t reqSampleRate = track->mAudioTrackServerProxy->getSampleRate();
@@ -4857,6 +4929,10 @@
trackId,
AudioMixer::TRACK,
AudioMixer::AUX_BUFFER, (void *)track->auxBuffer());
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::HAPTIC_ENABLED, (void *)(uintptr_t)track->getHapticPlaybackEnabled());
// reset retry count
track->mRetryCount = kMaxTrackRetries;
@@ -4924,6 +5000,17 @@
}
+ if (mHapticChannelMask != AUDIO_CHANNEL_NONE && sq != NULL) {
+ // When no fast track is playing haptic data and the FastMixer exists, enable
+ // the first FastTrack, which provides mixed data from the normal tracks, to
+ // play the haptic data.
+ FastTrack *fastTrack = &state->mFastTracks[0];
+ if (fastTrack->mHapticPlaybackEnabled != noFastHapticTrack) {
+ fastTrack->mHapticPlaybackEnabled = noFastHapticTrack;
+ didModify = true;
+ }
+ }
+
// Push the new FastMixer state if necessary
bool pauseAudioWatchdog = false;
if (didModify) {
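
The final hunk above falls back to the first FastTrack, which carries the normal mixer's output, as the haptic owner whenever no individual fast track has haptic playback enabled, and it reuses the didModify flag so the FastMixer state is only pushed when something actually changed. A sketch of that update step (hypothetical FastTrackState, not the real FastMixerState layout):

    #include <array>

    struct FastTrackState { bool hapticEnabled = false; };

    // Returns true when the shared state changed and needs to be pushed to the FastMixer.
    static bool updateFastHapticOwner(std::array<FastTrackState, 8>& fastTracks,
                                      bool noFastHapticTrack, bool threadSupportsHaptic) {
        bool didModify = false;
        if (threadSupportsHaptic && fastTracks[0].hapticEnabled != noFastHapticTrack) {
            fastTracks[0].hapticEnabled = noFastHapticTrack;  // track 0 = mixed normal tracks
            didModify = true;
        }
        return didModify;
    }

    int main() {
        std::array<FastTrackState, 8> tracks{};
        return updateFastHapticOwner(tracks, /*noFastHapticTrack=*/true,
                                     /*threadSupportsHaptic=*/true) ? 0 : 1;
    }
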
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 7f3ea0f..e8b2158 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -907,6 +907,11 @@
int64_t mBytesWritten;
int64_t mFramesWritten; // not reset on standby
int64_t mSuspendedFrames; // not reset on standby
+
+ // mHapticChannelMask and mHapticChannelCount are only valid when the thread supports
+ // haptic playback.
+ audio_channel_mask_t mHapticChannelMask = AUDIO_CHANNEL_NONE;
+ uint32_t mHapticChannelCount = 0;
private:
// mMasterMute is in both PlaybackThread and in AudioFlinger. When a
// PlaybackThread needs to find out if master-muted, it checks it's local