Merge "use dedicated looper for GenericSource" into lmp-dev
diff --git a/include/media/MediaCodecInfo.h b/include/media/MediaCodecInfo.h
index ab7a4b8..cd56adb 100644
--- a/include/media/MediaCodecInfo.h
+++ b/include/media/MediaCodecInfo.h
@@ -107,6 +107,7 @@
status_t initializeCapabilities(const CodecCapabilities &caps);
void addDetail(const AString &key, const AString &value);
void addFeature(const AString &key, int32_t value);
+ void addFeature(const AString &key, const char *value);
void removeMime(const char *mime);
void complete();
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index 446c582..7b4c4e2 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -257,4 +257,10 @@
mCurrentCaps->mDetails->setInt32(tag.c_str(), value);
}
+void MediaCodecInfo::addFeature(const AString &key, const char *value) {
+ AString tag = "feature-";
+ tag.append(key);
+ mCurrentCaps->mDetails->setString(tag.c_str(), value);
+}
+
} // namespace android
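
Note on the change above: the new const char* overload reuses the existing "feature-" key prefix, so string-valued features land in the same details store as the boolean ones. A minimal standalone sketch of that keying scheme, using std::map in place of the AMessage-backed details (class and feature names here are illustrative, not the framework API):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Simplified stand-in for MediaCodecInfo's per-capability detail store.
// The real code writes into an AMessage; a std::map is used here for brevity.
class CapabilityDetails {
public:
    // Boolean features are stored as "feature-<name>" -> 0/1.
    void addFeature(const std::string &key, int32_t value) {
        details_["feature-" + key] = std::to_string(value);
    }
    // String-valued features reuse the same "feature-" prefix, mirroring
    // the new const char* overload added above.
    void addFeature(const std::string &key, const char *value) {
        details_["feature-" + key] = value;
    }
    void dump() const {
        for (const auto &kv : details_) {
            std::cout << kv.first << " = " << kv.second << "\n";
        }
    }
private:
    std::map<std::string, std::string> details_;
};

int main() {
    CapabilityDetails caps;
    caps.addFeature("adaptive-playback", 1);       // boolean form
    caps.addFeature("bitrate-modes", "VBR,CBR");   // string form (illustrative value)
    caps.dump();
    return 0;
}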
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index fea43f1..5e7ecfa 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -147,6 +147,7 @@
mSourceFlags(0),
mVideoIsAVC(false),
mOffloadAudio(false),
+ mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
mAudioEOS(false),
mVideoEOS(false),
mScanSourcesPending(false),
@@ -638,11 +639,18 @@
bool mHadAnySourcesBefore =
(mAudioDecoder != NULL) || (mVideoDecoder != NULL);
+ // Initialize video before audio because successful initialization of
+ // video may change the deep buffer mode of audio.
if (mNativeWindow != NULL) {
instantiateDecoder(false, &mVideoDecoder);
}
if (mAudioSink != NULL) {
+ if (mOffloadAudio) {
+ // Open the audio sink early in offload mode.
+ sp<AMessage> format = mSource->getFormat(true /*audio*/);
+ openAudioSink(format, true /*offloadOnly*/);
+ }
instantiateDecoder(true, &mAudioDecoder);
}
@@ -742,138 +750,7 @@
CHECK(msg->findMessage("format", &format));
if (audio) {
- int32_t numChannels;
- CHECK(format->findInt32(
- "channel-count", &numChannels));
-
- int32_t sampleRate;
- CHECK(format->findInt32("sample-rate", &sampleRate));
-
- ALOGV("Audio output format changed to %d Hz, %d channels",
- sampleRate, numChannels);
-
- mAudioSink->close();
-
- uint32_t flags;
- int64_t durationUs;
- // FIXME: we should handle the case where the video decoder
- // is created after we receive the format change indication.
- // Current code will just make that we select deep buffer
- // with video which should not be a problem as it should
- // not prevent from keeping A/V sync.
- if (mVideoDecoder == NULL &&
- mSource->getDuration(&durationUs) == OK &&
- durationUs
- > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) {
- flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
- } else {
- flags = AUDIO_OUTPUT_FLAG_NONE;
- }
-
- int32_t channelMask;
- if (!format->findInt32("channel-mask", &channelMask)) {
- channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
- }
-
- if (mOffloadAudio) {
- audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
- audio_offload_info_t offloadInfo =
- AUDIO_INFO_INITIALIZER;
-
- AString mime;
- CHECK(format->findString("mime", &mime));
-
- status_t err =
- mapMimeToAudioFormat(audioFormat, mime.c_str());
- if (err != OK) {
- ALOGE("Couldn't map mime \"%s\" to a valid "
- "audio_format", mime.c_str());
- mOffloadAudio = false;
- } else {
- ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
- mime.c_str(), audioFormat);
-
- int32_t aacProfile = -1;
- if (audioFormat == AUDIO_FORMAT_AAC
- && format->findInt32("aac-profile", &aacProfile)) {
- // Redefine AAC format as per aac profile
- mapAACProfileToAudioFormat(
- audioFormat,
- aacProfile);
- }
-
- flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
-
- offloadInfo.duration_us = -1;
- format->findInt64(
- "durationUs", &offloadInfo.duration_us);
-
- int avgBitRate = -1;
- format->findInt32("bit-rate", &avgBitRate);
-
- offloadInfo.sample_rate = sampleRate;
- offloadInfo.channel_mask = channelMask;
- offloadInfo.format = audioFormat;
- offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
- offloadInfo.bit_rate = avgBitRate;
- offloadInfo.has_video = (mVideoDecoder != NULL);
- offloadInfo.is_streaming = true;
-
- ALOGV("try to open AudioSink in offload mode");
- err = mAudioSink->open(
- sampleRate,
- numChannels,
- (audio_channel_mask_t)channelMask,
- audioFormat,
- 8 /* bufferCount */,
- &NuPlayer::Renderer::AudioSinkCallback,
- mRenderer.get(),
- (audio_output_flags_t)flags,
- &offloadInfo);
-
- if (err == OK) {
- // If the playback is offloaded to h/w, we pass
- // the HAL some metadata information.
- // We don't want to do this for PCM because it
- // will be going through the AudioFlinger mixer
- // before reaching the hardware.
- sp<MetaData> audioMeta =
- mSource->getFormatMeta(true /* audio */);
- sendMetaDataToHal(mAudioSink, audioMeta);
-
- err = mAudioSink->start();
- }
- }
-
- if (err != OK) {
- // Clean up, fall back to non offload mode.
- mAudioSink->close();
- mAudioDecoder.clear();
- mRenderer->signalDisableOffloadAudio();
- mOffloadAudio = false;
-
- instantiateDecoder(
- true /* audio */, &mAudioDecoder);
- }
- }
-
- if (!mOffloadAudio) {
- flags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
- ALOGV("open AudioSink in NON-offload mode");
- CHECK_EQ(mAudioSink->open(
- sampleRate,
- numChannels,
- (audio_channel_mask_t)channelMask,
- AUDIO_FORMAT_PCM_16_BIT,
- 8 /* bufferCount */,
- NULL,
- NULL,
- (audio_output_flags_t)flags),
- (status_t)OK);
- mAudioSink->start();
- }
-
- mRenderer->signalAudioSinkChanged();
+ openAudioSink(format, false /*offloadOnly*/);
} else {
// video
sp<AMessage> inputFormat =
@@ -980,7 +857,7 @@
ALOGV("Tear down audio offload, fall back to s/w path");
int64_t positionUs;
CHECK(msg->findInt64("positionUs", &positionUs));
- mAudioSink->close();
+ closeAudioSink();
mAudioDecoder.clear();
mRenderer->flush(true /* audio */);
if (mVideoDecoder != NULL) {
@@ -1107,6 +984,148 @@
mScanSourcesPending = true;
}
+void NuPlayer::openAudioSink(const sp<AMessage> &format, bool offloadOnly) {
+ ALOGV("openAudioSink: offloadOnly(%d) mOffloadAudio(%d)",
+ offloadOnly, mOffloadAudio);
+ bool audioSinkChanged = false;
+
+ int32_t numChannels;
+ CHECK(format->findInt32("channel-count", &numChannels));
+
+ int32_t channelMask;
+ if (!format->findInt32("channel-mask", &channelMask)) {
+ // Signal to the AudioSink to derive the mask from the channel count.
+ channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
+ }
+
+ int32_t sampleRate;
+ CHECK(format->findInt32("sample-rate", &sampleRate));
+
+ uint32_t flags;
+ int64_t durationUs;
+ // FIXME: we should handle the case where the video decoder
+ // is created after we receive the format change indication.
+ // The current code will simply select deep buffer with video,
+ // which should not be a problem as it should not prevent
+ // keeping A/V sync.
+ if (mVideoDecoder == NULL &&
+ mSource->getDuration(&durationUs) == OK &&
+ durationUs
+ > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) {
+ flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+ } else {
+ flags = AUDIO_OUTPUT_FLAG_NONE;
+ }
+
+ if (mOffloadAudio) {
+ audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
+ AString mime;
+ CHECK(format->findString("mime", &mime));
+ status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
+
+ if (err != OK) {
+ ALOGE("Couldn't map mime \"%s\" to a valid "
+ "audio_format", mime.c_str());
+ mOffloadAudio = false;
+ } else {
+ ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
+ mime.c_str(), audioFormat);
+
+ int avgBitRate = -1;
+ format->findInt32("bit-rate", &avgBitRate);
+
+ int32_t aacProfile = -1;
+ if (audioFormat == AUDIO_FORMAT_AAC
+ && format->findInt32("aac-profile", &aacProfile)) {
+ // Redefine the AAC format according to the AAC profile.
+ mapAACProfileToAudioFormat(
+ audioFormat,
+ aacProfile);
+ }
+
+ audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
+ offloadInfo.duration_us = -1;
+ format->findInt64(
+ "durationUs", &offloadInfo.duration_us);
+ offloadInfo.sample_rate = sampleRate;
+ offloadInfo.channel_mask = channelMask;
+ offloadInfo.format = audioFormat;
+ offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
+ offloadInfo.bit_rate = avgBitRate;
+ offloadInfo.has_video = (mVideoDecoder != NULL);
+ offloadInfo.is_streaming = true;
+
+ if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
+ ALOGV("openAudioSink: no change in offload mode");
+ return; // no change from previous configuration, everything ok.
+ }
+ ALOGV("openAudioSink: try to open AudioSink in offload mode");
+ flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+ audioSinkChanged = true;
+ mAudioSink->close();
+ err = mAudioSink->open(
+ sampleRate,
+ numChannels,
+ (audio_channel_mask_t)channelMask,
+ audioFormat,
+ 8 /* bufferCount */,
+ &NuPlayer::Renderer::AudioSinkCallback,
+ mRenderer.get(),
+ (audio_output_flags_t)flags,
+ &offloadInfo);
+
+ if (err == OK) {
+ // If the playback is offloaded to h/w, we pass
+ // the HAL some metadata information.
+ // We don't want to do this for PCM because it
+ // will be going through the AudioFlinger mixer
+ // before reaching the hardware.
+ sp<MetaData> audioMeta =
+ mSource->getFormatMeta(true /* audio */);
+ sendMetaDataToHal(mAudioSink, audioMeta);
+ mCurrentOffloadInfo = offloadInfo;
+ err = mAudioSink->start();
+ ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
+ }
+ if (err != OK) {
+ // Clean up, fall back to non offload mode.
+ mAudioSink->close();
+ mRenderer->signalDisableOffloadAudio();
+ mOffloadAudio = false;
+ mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
+ ALOGV("openAudioSink: offload failed");
+ }
+ }
+ }
+ if (!offloadOnly && !mOffloadAudio) {
+ flags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+ ALOGV("openAudioSink: open AudioSink in NON-offload mode");
+
+ audioSinkChanged = true;
+ mAudioSink->close();
+ mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
+ CHECK_EQ(mAudioSink->open(
+ sampleRate,
+ numChannels,
+ (audio_channel_mask_t)channelMask,
+ AUDIO_FORMAT_PCM_16_BIT,
+ 8 /* bufferCount */,
+ NULL,
+ NULL,
+ (audio_output_flags_t)flags),
+ (status_t)OK);
+ mAudioSink->start();
+ }
+ if (audioSinkChanged) {
+ mRenderer->signalAudioSinkChanged();
+ }
+}
+
+void NuPlayer::closeAudioSink() {
+ mAudioSink->close();
+ mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
+}
+
status_t NuPlayer::instantiateDecoder(bool audio, sp<Decoder> *decoder) {
if (*decoder != NULL) {
return OK;
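
Note on the openAudioSink()/closeAudioSink() refactor above: the new helper builds an audio_offload_info_t, compares it byte-for-byte against mCurrentOffloadInfo and returns early when nothing has changed, and every close/fallback path resets the cached value to AUDIO_INFO_INITIALIZER so the next open always proceeds. A standalone sketch of that pattern with a plain struct (type and field names are illustrative, not the audio HAL definitions); both sides start from a zeroed initializer, which is what makes the memcmp comparison meaningful:

#include <cstdint>
#include <cstring>
#include <iostream>

// Illustrative stand-in for audio_offload_info_t; the fields echo the ones
// set in openAudioSink(), not the real HAL layout.
struct OffloadConfig {
    int32_t sampleRate;
    int32_t channelMask;
    int32_t format;
    int64_t durationUs;
    int32_t bitRate;
    bool    hasVideo;
};

// Mirrors AUDIO_INFO_INITIALIZER: a zeroed "no sink open" sentinel.
static const OffloadConfig kInitializer = {};

class SinkController {
public:
    SinkController() : current_(kInitializer) {}

    // Returns true when the sink was actually (re)opened.
    bool open(const OffloadConfig &wanted) {
        // Byte-wise compare works because both sides are built from the
        // zeroed initializer, so padding bytes match; this mirrors the
        // memcmp() against mCurrentOffloadInfo in openAudioSink().
        if (std::memcmp(&current_, &wanted, sizeof(wanted)) == 0) {
            std::cout << "no change in offload config, keep sink open\n";
            return false;
        }
        std::cout << "reopening sink\n";
        current_ = wanted;   // remember the active configuration
        return true;
    }

    void close() {
        // Reset to the initializer so the next open() never short-circuits,
        // matching closeAudioSink() and the offload fallback path.
        current_ = kInitializer;
    }

private:
    OffloadConfig current_;
};

int main() {
    OffloadConfig cfg = {};   // zero first, then fill, like the real code
    cfg.sampleRate = 44100;
    cfg.channelMask = 3;
    cfg.format = 1;
    cfg.durationUs = 180000000;
    cfg.bitRate = 320000;
    cfg.hasVideo = false;

    SinkController sink;
    sink.open(cfg);   // opens
    sink.open(cfg);   // detected as unchanged, skipped
    sink.close();
    sink.open(cfg);   // opens again after close
    return 0;
}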
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index fc456a4..48882c5 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -124,6 +124,7 @@
sp<Decoder> mVideoDecoder;
bool mVideoIsAVC;
bool mOffloadAudio;
+ audio_offload_info_t mCurrentOffloadInfo;
sp<Decoder> mAudioDecoder;
sp<CCDecoder> mCCDecoder;
sp<Renderer> mRenderer;
@@ -167,6 +168,9 @@
bool mStarted;
+ void openAudioSink(const sp<AMessage> &format, bool offloadOnly);
+ void closeAudioSink();
+
status_t instantiateDecoder(bool audio, sp<Decoder> *decoder);
void updateVideoSize(
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index eec6960..c9be0dd 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -31,6 +31,7 @@
namespace android {
static const int kMaxPendingBuffers = 10;
+static const int kMaxCachedBytes = 200000;
NuPlayer::DecoderPassThrough::DecoderPassThrough(
const sp<AMessage> &notify)
@@ -39,6 +40,7 @@
mBufferGeneration(0),
mReachedEOS(true),
mPendingBuffers(0),
+ mCachedBytes(0),
mComponentName("pass through decoder") {
mDecoderLooper = new ALooper;
mDecoderLooper->setName("NuPlayerDecoderPassThrough");
@@ -78,6 +80,7 @@
void NuPlayer::DecoderPassThrough::onConfigure(const sp<AMessage> &format) {
ALOGV("[%s] onConfigure", mComponentName.c_str());
mPendingBuffers = 0;
+ mCachedBytes = 0;
mReachedEOS = false;
++mBufferGeneration;
@@ -96,7 +99,7 @@
}
void NuPlayer::DecoderPassThrough::requestABuffer() {
- if (mPendingBuffers >= kMaxPendingBuffers || mReachedEOS) {
+ if (mCachedBytes >= kMaxCachedBytes || mReachedEOS) {
ALOGV("[%s] mReachedEOS=%d, max pending buffers(%d:%d)",
mComponentName.c_str(), (mReachedEOS ? 1 : 0),
mPendingBuffers, kMaxPendingBuffers);
@@ -136,8 +139,11 @@
return;
}
+ mCachedBytes += buffer->size();
+
sp<AMessage> reply = new AMessage(kWhatBufferConsumed, id());
reply->setInt32("generation", mBufferGeneration);
+ reply->setInt32("size", buffer->size());
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kWhatDrainThisBuffer);
@@ -146,8 +152,9 @@
notify->post();
}
-void NuPlayer::DecoderPassThrough::onBufferConsumed() {
+void NuPlayer::DecoderPassThrough::onBufferConsumed(int32_t size) {
mPendingBuffers--;
+ mCachedBytes -= size;
sp<AMessage> message = new AMessage(kWhatRequestABuffer, id());
message->setInt32("generation", mBufferGeneration);
message->post();
@@ -160,6 +167,7 @@
notify->setInt32("what", kWhatFlushCompleted);
notify->post();
mPendingBuffers = 0;
+ mCachedBytes = 0;
mReachedEOS = false;
}
@@ -205,7 +213,9 @@
case kWhatBufferConsumed:
{
if (!isStaleReply(msg)) {
- onBufferConsumed();
+ int32_t size;
+ CHECK(msg->findInt32("size", &size));
+ onBufferConsumed(size);
}
break;
}
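
Note on the pass-through decoder change above: input throttling moves from a buffer count (kMaxPendingBuffers) to a byte budget — each filled buffer adds its size to mCachedBytes, the size is echoed back in the kWhatBufferConsumed reply, and onBufferConsumed() subtracts it again. A reduced sketch of that accounting with the message plumbing replaced by direct calls (class and member names are illustrative):

#include <cstdint>
#include <iostream>

// Byte-based input throttling, as in DecoderPassThrough after this change.
class PassThroughQueue {
public:
    // Same ballpark as kMaxCachedBytes above; the exact figure is a tuning choice.
    static constexpr int32_t kMaxCachedBytes = 200000;

    bool canRequestABuffer() const {
        // Stop pulling input once enough compressed data is queued,
        // regardless of how many buffers that took.
        return !reachedEOS_ && cachedBytes_ < kMaxCachedBytes;
    }

    void onInputBufferFilled(int32_t size) {
        ++pendingBuffers_;
        cachedBytes_ += size;   // counted when the buffer is queued
    }

    // In the real code the size travels in the kWhatBufferConsumed reply;
    // here it is simply passed back by the caller.
    void onBufferConsumed(int32_t size) {
        --pendingBuffers_;
        cachedBytes_ -= size;
    }

    void onFlush() {
        pendingBuffers_ = 0;
        cachedBytes_ = 0;
    }

private:
    int32_t pendingBuffers_ = 0;
    int32_t cachedBytes_ = 0;
    bool reachedEOS_ = false;
};

int main() {
    PassThroughQueue q;
    while (q.canRequestABuffer()) {
        q.onInputBufferFilled(60000);   // e.g. ~60 kB compressed frames
    }
    std::cout << "queue full, waiting for the renderer\n";
    q.onBufferConsumed(60000);
    std::cout << "can request again: " << q.canRequestABuffer() << "\n";
    return 0;
}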
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
index e9e5658..8590856 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
@@ -61,12 +61,13 @@
void onConfigure(const sp<AMessage> &format);
void onFlush();
void onInputBufferFilled(const sp<AMessage> &msg);
- void onBufferConsumed();
+ void onBufferConsumed(int32_t size);
void onShutdown();
int32_t mBufferGeneration;
bool mReachedEOS;
int32_t mPendingBuffers;
+ int32_t mCachedBytes;
AString mComponentName;
DISALLOW_EVIL_CONSTRUCTORS(DecoderPassThrough);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index b77e1cd..b44d5cc 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -1419,8 +1419,10 @@
} else {
if (encoder) {
if (!msg->findInt32(
+ "complexity", &compressionLevel) &&
+ !msg->findInt32(
"flac-compression-level", &compressionLevel)) {
- compressionLevel = 5;// default FLAC compression level
+ compressionLevel = 5; // default FLAC compression level
} else if (compressionLevel < 0) {
ALOGW("compression level %d outside [0..8] range, "
"using 0",
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 908cdca..008da5a 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -209,25 +209,29 @@
uri = tmp.string();
}
- if (httpSource->connect(uri, headers) != OK) {
+ String8 cacheConfig;
+ bool disconnectAtHighwatermark;
+ KeyedVector<String8, String8> nonCacheSpecificHeaders;
+ if (headers != NULL) {
+ nonCacheSpecificHeaders = *headers;
+ NuCachedSource2::RemoveCacheSpecificHeaders(
+ &nonCacheSpecificHeaders,
+ &cacheConfig,
+ &disconnectAtHighwatermark);
+ }
+
+ if (httpSource->connect(uri, &nonCacheSpecificHeaders) != OK) {
ALOGE("Failed to connect http source!");
return NULL;
}
if (!isWidevine) {
- String8 cacheConfig;
- bool disconnectAtHighwatermark;
- if (headers != NULL) {
- KeyedVector<String8, String8> copy = *headers;
- NuCachedSource2::RemoveCacheSpecificHeaders(
- &copy, &cacheConfig, &disconnectAtHighwatermark);
- }
+ String8 contentType = httpSource->getMIMEType();
sp<NuCachedSource2> cachedSource = new NuCachedSource2(
httpSource,
- cacheConfig.isEmpty() ? NULL : cacheConfig.string());
-
- String8 contentType = httpSource->getMIMEType();
+ cacheConfig.isEmpty() ? NULL : cacheConfig.string(),
+ disconnectAtHighwatermark);
if (strncasecmp(contentType.string(), "audio/", 6)) {
// We're not doing this for streams that appear to be audio-only
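
Note on the DataSource change above: cache-specific entries are now stripped from the caller's headers before connect(), so the HTTP layer sees only genuine HTTP headers, while the extracted cache config and disconnect-at-highwatermark flag feed the NuCachedSource2 constructed afterwards. A standalone sketch of that split using std::map (the header key names below are placeholders, not the ones NuCachedSource2::RemoveCacheSpecificHeaders actually recognizes):

#include <iostream>
#include <map>
#include <string>

using Headers = std::map<std::string, std::string>;

// Pull cache-control knobs out of the header map so only real HTTP headers
// are sent to the server. Key names are placeholders, not the AOSP ones.
static void removeCacheSpecificHeaders(Headers *headers,
                                       std::string *cacheConfig,
                                       bool *disconnectAtHighwatermark) {
    *disconnectAtHighwatermark = false;
    auto it = headers->find("x-example-cache-config");
    if (it != headers->end()) {
        *cacheConfig = it->second;
        headers->erase(it);
    }
    it = headers->find("x-example-disconnect-at-highwatermark");
    if (it != headers->end()) {
        *disconnectAtHighwatermark = true;
        headers->erase(it);
    }
}

int main() {
    Headers headers = {
        {"User-Agent", "example"},
        {"x-example-cache-config", "4096/20480/10240"},
        {"x-example-disconnect-at-highwatermark", "1"},
    };

    std::string cacheConfig;
    bool disconnectAtHighwatermark = false;
    Headers nonCacheSpecificHeaders = headers;  // keep the caller's copy intact
    removeCacheSpecificHeaders(&nonCacheSpecificHeaders,
                               &cacheConfig,
                               &disconnectAtHighwatermark);

    // Only the cleaned map would be handed to connect(); the extracted
    // values configure the caching data source afterwards.
    for (const auto &kv : nonCacheSpecificHeaders) {
        std::cout << kv.first << ": " << kv.second << "\n";
    }
    std::cout << "cacheConfig=" << cacheConfig
              << " disconnectAtHighwatermark=" << disconnectAtHighwatermark << "\n";
    return 0;
}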
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index 2f2a0b3..5b8be46 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -783,6 +783,7 @@
const char *name = NULL;
int32_t optional = -1;
int32_t required = -1;
+ const char *value = NULL;
while (attrs[i] != NULL) {
if (attrs[i + 1] == NULL) {
@@ -801,6 +802,9 @@
required = value;
}
++i;
+ } else if (!strcmp(attrs[i], "value")) {
+ value = attrs[i + 1];
+ ++i;
} else {
return -EINVAL;
}
@@ -816,7 +820,16 @@
return -EINVAL;
}
- mCurrentInfo->addFeature(name, (required == 1) || (optional == 0));
+ if ((optional != -1 || required != -1) && (value != NULL)) {
+ ALOGE("feature '%s' has both a value and optional/required attribute", name);
+ return -EINVAL;
+ }
+
+ if (value != NULL) {
+ mCurrentInfo->addFeature(name, value);
+ } else {
+ mCurrentInfo->addFeature(name, (required == 1) || (optional == 0));
+ }
return OK;
}
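
Note on the MediaCodecList change above: a <Feature> element may now carry either the boolean optional/required pair or a string value attribute, and specifying both is rejected. A condensed, standalone sketch of that validation over an expat-style attribute list (simple types stand in for MediaCodecInfo and the XML plumbing):

#include <cstring>
#include <iostream>
#include <string>

// Result of parsing one <Feature .../> element's attributes.
struct Feature {
    std::string name;
    bool isBoolean = true;   // true -> use 'required'; false -> use 'value'
    bool required = false;
    std::string value;
};

// attrs is a NULL-terminated list of name/value pairs, as handed to an
// expat start-element callback.
static int parseFeature(const char **attrs, Feature *out) {
    const char *name = nullptr;
    const char *value = nullptr;
    int optional = -1, required = -1;

    for (size_t i = 0; attrs[i] != nullptr; i += 2) {
        if (attrs[i + 1] == nullptr) return -1;          // dangling attribute
        if (!strcmp(attrs[i], "name")) {
            name = attrs[i + 1];
        } else if (!strcmp(attrs[i], "optional")) {
            optional = !strcmp(attrs[i + 1], "true");
        } else if (!strcmp(attrs[i], "required")) {
            required = !strcmp(attrs[i + 1], "true");
        } else if (!strcmp(attrs[i], "value")) {
            value = attrs[i + 1];
        } else {
            return -1;                                    // unknown attribute
        }
    }
    if (name == nullptr) return -1;
    // A feature is either boolean (optional/required) or string-valued,
    // never both -- mirroring the new check in the Feature parsing above.
    if ((optional != -1 || required != -1) && value != nullptr) return -1;

    out->name = name;
    if (value != nullptr) {
        out->isBoolean = false;
        out->value = value;
    } else {
        out->required = (required == 1) || (optional == 0);
    }
    return 0;
}

int main() {
    const char *attrs[] = { "name", "bitrate-modes", "value", "VBR,CBR", nullptr };
    Feature f;
    if (parseFeature(attrs, &f) == 0) {
        std::cout << f.name << " -> " << (f.isBoolean ? "bool" : f.value) << "\n";
    }
    return 0;
}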
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
index 9e59488..14fdec5 100644
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ b/services/audiopolicy/AudioPolicyManager.cpp
@@ -1845,6 +1845,11 @@
mEffects.valueAt(i)->dump(fd);
}
+ snprintf(buffer, SIZE, "\nAudio Patches:\n");
+ write(fd, buffer, strlen(buffer));
+ for (size_t i = 0; i < mAudioPatches.size(); i++) {
+ mAudioPatches[i]->dump(fd, 2, i);
+ }
return NO_ERROR;
}
@@ -5214,6 +5219,8 @@
char buffer[SIZE];
String8 result;
+ snprintf(buffer, SIZE, " ID: %d\n", mId);
+ result.append(buffer);
snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
result.append(buffer);
snprintf(buffer, SIZE, " Format: %08x\n", mFormat);
@@ -5297,6 +5304,8 @@
char buffer[SIZE];
String8 result;
+ snprintf(buffer, SIZE, " ID: %d\n", mId);
+ result.append(buffer);
snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
result.append(buffer);
snprintf(buffer, SIZE, " Format: %d\n", mFormat);
@@ -6719,6 +6728,53 @@
return NO_ERROR;
}
+status_t AudioPolicyManager::AudioPatch::dump(int fd, int spaces, int index) const
+{
+ const size_t SIZE = 256;
+ char buffer[SIZE];
+ String8 result;
+
+ snprintf(buffer, SIZE, "%*sAudio patch %d:\n", spaces, "", index+1);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "%*s- handle: %2d\n", spaces, "", mHandle);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "%*s- audio flinger handle: %2d\n", spaces, "", mAfPatchHandle);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "%*s- owner uid: %2d\n", spaces, "", mUid);
+ result.append(buffer);
+ snprintf(buffer, SIZE, "%*s- %d sources:\n", spaces, "", mPatch.num_sources);
+ result.append(buffer);
+ for (size_t i = 0; i < mPatch.num_sources; i++) {
+ if (mPatch.sources[i].type == AUDIO_PORT_TYPE_DEVICE) {
+ snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
+ mPatch.sources[i].id, enumToString(sDeviceNameToEnumTable,
+ ARRAY_SIZE(sDeviceNameToEnumTable),
+ mPatch.sources[i].ext.device.type));
+ } else {
+ snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
+ mPatch.sources[i].id, mPatch.sources[i].ext.mix.handle);
+ }
+ result.append(buffer);
+ }
+ snprintf(buffer, SIZE, "%*s- %d sinks:\n", spaces, "", mPatch.num_sinks);
+ result.append(buffer);
+ for (size_t i = 0; i < mPatch.num_sinks; i++) {
+ if (mPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE) {
+ snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
+ mPatch.sinks[i].id, enumToString(sDeviceNameToEnumTable,
+ ARRAY_SIZE(sDeviceNameToEnumTable),
+ mPatch.sinks[i].ext.device.type));
+ } else {
+ snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
+ mPatch.sinks[i].id, mPatch.sinks[i].ext.mix.handle);
+ }
+ result.append(buffer);
+ }
+
+ write(fd, result.string(), result.size());
+ return NO_ERROR;
+}
// --- audio_policy.conf file parsing
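
Note on the AudioPatch::dump() addition above: it follows the same idiom as the other dump helpers in this file — snprintf into a fixed-size buffer, "%*s" with an empty string to indent by a caller-supplied number of spaces, accumulate into a String8, and write once to the fd at the end. A small standalone illustration of that idiom using std::string and stdout (struct and labels are made up for the example):

#include <cstdio>
#include <string>
#include <vector>

// Minimal stand-in for the patch being dumped.
struct PatchEndpoint { int id; std::string label; };
struct Patch { int handle; std::vector<PatchEndpoint> sources, sinks; };

static std::string dumpPatch(const Patch &p, int spaces, int index) {
    char buffer[256];
    std::string result;

    // "%*s" prints an empty string padded to 'spaces' characters, which is
    // how the AudioPolicyManager dump helpers indent nested sections.
    snprintf(buffer, sizeof(buffer), "%*sAudio patch %d:\n", spaces, "", index + 1);
    result += buffer;
    snprintf(buffer, sizeof(buffer), "%*s- handle: %2d\n", spaces, "", p.handle);
    result += buffer;
    snprintf(buffer, sizeof(buffer), "%*s- %zu sources:\n", spaces, "", p.sources.size());
    result += buffer;
    for (const auto &s : p.sources) {
        snprintf(buffer, sizeof(buffer), "%*s- ID %d %s\n", spaces + 2, "",
                 s.id, s.label.c_str());
        result += buffer;
    }
    snprintf(buffer, sizeof(buffer), "%*s- %zu sinks:\n", spaces, "", p.sinks.size());
    result += buffer;
    for (const auto &s : p.sinks) {
        snprintf(buffer, sizeof(buffer), "%*s- ID %d %s\n", spaces + 2, "",
                 s.id, s.label.c_str());
        result += buffer;
    }
    return result;   // the real code write()s this to the dump fd
}

int main() {
    Patch p{42, {{7, "MIX"}}, {{12, "AUDIO_DEVICE_OUT_SPEAKER"}}};
    fputs(dumpPatch(p, 2, 0).c_str(), stdout);
    return 0;
}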
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
index f071675..e3e3172 100644
--- a/services/audiopolicy/AudioPolicyManager.h
+++ b/services/audiopolicy/AudioPolicyManager.h
@@ -312,6 +312,8 @@
const struct audio_patch *patch, uid_t uid) :
mHandle(handle), mPatch(*patch), mUid(uid), mAfPatchHandle(0) {}
+ status_t dump(int fd, int spaces, int index) const;
+
audio_patch_handle_t mHandle;
struct audio_patch mPatch;
uid_t mUid;